diff --git a/Makefile b/Makefile index fe3b5e71b4..bbcd6431ad 100644 --- a/Makefile +++ b/Makefile @@ -47,7 +47,7 @@ endif # allow users to pass additional flags via the conventional LDFLAGS variable LD_FLAGS += $(LDFLAGS) -# Process Docker environment varible TARGETPLATFORM +# Process Docker environment variable TARGETPLATFORM # in order to build binary with correspondent ARCH # by default will always build for linux/amd64 TARGETPLATFORM ?= @@ -322,7 +322,7 @@ endif # Run a nodejs tool to test endpoints against a localnet # The command takes care of starting and stopping the network -# prerequisits: build-contract-tests-hooks build-linux +# prerequisites: build-contract-tests-hooks build-linux # the two build commands were not added to let this command run from generic containers or machines. # The binaries should be built beforehand contract-tests: diff --git a/SECURITY.md b/SECURITY.md index 01b989c6b1..c8dfb979a6 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -147,7 +147,7 @@ also be interested in! * Conceptual flaws * Ambiguities, inconsistencies, or incorrect statements -* Mis-match between specification and implementation of any component +* Mismatch between specification and implementation of any component ### Consensus diff --git a/abci/types/application.go b/abci/types/application.go index c7ca96e8f0..e20e680918 100644 --- a/abci/types/application.go +++ b/abci/types/application.go @@ -31,7 +31,7 @@ type Application interface { ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots OfferSnapshot(RequestOfferSnapshot) ResponseOfferSnapshot // Offer a snapshot to the application LoadSnapshotChunk(RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk // Load a snapshot chunk - ApplySnapshotChunk(RequestApplySnapshotChunk) ResponseApplySnapshotChunk // Apply a shapshot chunk + ApplySnapshotChunk(RequestApplySnapshotChunk) ResponseApplySnapshotChunk // Apply a snapshot chunk } //------------------------------------------------------- diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index ad936d96d9..e210ab71d4 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -2124,7 +2124,7 @@ type ResponseCheckTx struct { Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` // mempool_error is set by CometBFT. - // ABCI applictions creating a ResponseCheckTX should not set mempool_error. + // ABCI applications creating a ResponseCheckTX should not set mempool_error. MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"` } diff --git a/behaviour/doc.go b/behaviour/doc.go index 7b00ae1eb3..0663448d4c 100644 --- a/behaviour/doc.go +++ b/behaviour/doc.go @@ -2,7 +2,7 @@ Package Behaviour provides a mechanism for reactors to report behaviour of peers. Instead of a reactor calling the switch directly it will call the behaviour module which will -handle the stoping and marking peer as good on behalf of the reactor. +handle the stopping and marking peer as good on behalf of the reactor. There are four different behaviours a reactor can report. 
diff --git a/consensus/common_test.go b/consensus/common_test.go index a19b863948..6c91378686 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -112,7 +112,7 @@ func (vs *validatorStub) signVote( return nil, fmt.Errorf("sign vote failed: %w", err) } - // ref: signVote in FilePV, the vote should use the privious vote info when the sign data is the same. + // ref: signVote in FilePV, the vote should use the previous vote info when the sign data is the same. if signDataIsEqual(vs.lastVote, v) { v.Signature = vs.lastVote.Signature v.Timestamp = vs.lastVote.Timestamp @@ -829,7 +829,7 @@ func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int { return i } } - panic("didnt find peer in switches") + panic("didn't find peer in switches") } //------------------------------------------------------------------------------- diff --git a/consensus/reactor.go b/consensus/reactor.go index 098e6fbbe4..12b08c6c0c 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -607,7 +607,7 @@ OUTER_LOOP: if blockStoreBase > 0 && 0 < prs.Height && prs.Height < rs.Height && prs.Height >= blockStoreBase { heightLogger := logger.With("height", prs.Height) - // if we never received the commit message from the peer, the block parts wont be initialized + // if we never received the commit message from the peer, the block parts won't be initialized if prs.ProposalBlockParts == nil { blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) if blockMeta == nil { diff --git a/consensus/replay_file.go b/consensus/replay_file.go index fb883542df..b0221becfa 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -144,7 +144,7 @@ func (pb *playback) replayReset(count int, newStepSub types.Subscription) error pb.fp = fp pb.dec = NewWALDecoder(fp) count = pb.count - count - fmt.Printf("Reseting from %d to %d\n", pb.count, count) + fmt.Printf("Resetting from %d to %d\n", pb.count, count) pb.count = 0 pb.cs = newCS var msg *TimedWALMessage diff --git a/consensus/replay_test.go b/consensus/replay_test.go index c80a7d9369..5ba19fd2dc 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -52,7 +52,7 @@ func TestMain(m *testing.M) { // These tests ensure we can always recover from failure at any part of the consensus process. // There are two general failure scenarios: failure during consensus, and failure while applying the block. // Only the latter interacts with the app and store, -// but the former has to deal with restrictions on re-use of priv_validator keys. +// but the former has to deal with restrictions on reuse of priv_validator keys. // The `WAL Tests` are for failures during the consensus; // the `Handshake Tests` are for failures in applying the block. // With the help of the WAL, we can recover from it all! @@ -1102,7 +1102,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { } commitHeight := thisBlockCommit.Height if commitHeight != height+1 { - panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1)) + panic(fmt.Sprintf("commit doesn't match. got height %d, expected %d", commitHeight, height+1)) } blocks = append(blocks, block) commits = append(commits, thisBlockCommit) @@ -1141,7 +1141,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { } commitHeight := thisBlockCommit.Height if commitHeight != height+1 { - panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1)) + panic(fmt.Sprintf("commit doesn't match. 
got height %d, expected %d", commitHeight, height+1)) } blocks = append(blocks, block) commits = append(commits, thisBlockCommit) diff --git a/consensus/state.go b/consensus/state.go index de445e2a18..861319773b 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1678,7 +1678,7 @@ func (cs *State) finalizeCommit(height int64) { stateCopy := cs.state.Copy() // Execute and commit the block, update and save the state, and update the mempool. - // NOTE The block.AppHash wont reflect these txs until the next block. + // NOTE The block.AppHash won't reflect these txs until the next block. var ( err error retainHeight int64 @@ -2305,7 +2305,7 @@ func (cs *State) signAddVote(msgType cmtproto.SignedMsgType, hash []byte, header return nil } -// updatePrivValidatorPubKey get's the private validator public key and +// updatePrivValidatorPubKey gets the private validator public key and // memoizes it. This func returns an error if the private validator is not // responding or responds with an error. func (cs *State) updatePrivValidatorPubKey() error { diff --git a/consensus/state_test.go b/consensus/state_test.go index a8c8a343cc..722d06d1fa 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -645,7 +645,7 @@ func TestStateLockPOLRelock(t *testing.T) { ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) round++ // moving to the next round - //XXX: this isnt guaranteed to get there before the timeoutPropose ... + //XXX: this isn't guaranteed to get there before the timeoutPropose ... if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } @@ -745,7 +745,7 @@ func TestStateLockPOLUnlock(t *testing.T) { Round2 (vs2, C) // B nil nil nil // nil nil nil _ cs1 unlocks! */ - //XXX: this isnt guaranteed to get there before the timeoutPropose ... + //XXX: this isn't guaranteed to get there before the timeoutPropose ... if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } @@ -950,7 +950,7 @@ func TestStateLockPOLSafety1(t *testing.T) { round++ // moving to the next round ensureNewRound(newRoundCh, height, round) - //XXX: this isnt guaranteed to get there before the timeoutPropose ... + //XXX: this isn't guaranteed to get there before the timeoutPropose ... if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } @@ -1101,7 +1101,7 @@ func TestStateLockPOLSafety2(t *testing.T) { ensureNewRound(newRoundCh, height, round) t.Log("### ONTO Round 2") /*Round2 - // now we see the polka from round 1, but we shouldnt unlock + // now we see the polka from round 1, but we shouldn't unlock */ ensureNewProposal(proposalCh, height, round) @@ -1767,7 +1767,7 @@ func TestStateHalt1(t *testing.T) { validatePrecommit(t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash()) // add precommits from the rest - signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, vs2) // didnt receive proposal + signAddVotes(cs1, cmtproto.PrecommitType, nil, types.PartSetHeader{}, vs2) // didn't receive proposal signAddVotes(cs1, cmtproto.PrecommitType, propBlock.Hash(), propBlockParts.Header(), vs3) // we receive this later, but vs3 might receive it earlier and with ours will go to commit! 
precommit4 := signVote(vs4, cmtproto.PrecommitType, propBlock.Hash(), propBlockParts.Header()) diff --git a/consensus/ticker.go b/consensus/ticker.go index ae5fab794a..c0bd8d23ce 100644 --- a/consensus/ticker.go +++ b/consensus/ticker.go @@ -89,7 +89,7 @@ func (t *timeoutTicker) stopTimer() { } // send on tickChan to start a new timer. -// timers are interupted and replaced by new ticks from later steps +// timers are interrupted and replaced by new ticks from later steps // timeouts of 0 on the tickChan will be immediately relayed to the tockChan func (t *timeoutTicker) timeoutRoutine() { t.Logger.Debug("Starting timeout routine") diff --git a/consensus/wal.go b/consensus/wal.go index 02f23eced5..fd8a610c40 100644 --- a/consensus/wal.go +++ b/consensus/wal.go @@ -334,7 +334,7 @@ func IsDataCorruptionError(err error) bool { return ok } -// DataCorruptionError is an error that occures if data on disk was corrupted. +// DataCorruptionError is an error that occurs if data on disk was corrupted. type DataCorruptionError struct { cause error } diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go index 85b2db1e91..08dc2c2336 100644 --- a/crypto/merkle/proof.go +++ b/crypto/merkle/proof.go @@ -12,7 +12,7 @@ import ( const ( // MaxAunts is the maximum number of aunts that can be included in a Proof. // This corresponds to a tree of size 2^100, which should be sufficient for all conceivable purposes. - // This maximum helps prevent Denial-of-Service attacks by limitting the size of the proofs. + // This maximum helps prevent Denial-of-Service attacks by limiting the size of the proofs. MaxAunts = 100 ) diff --git a/crypto/merkle/proof_op.go b/crypto/merkle/proof_op.go index 17c107adf3..36fdc064ae 100644 --- a/crypto/merkle/proof_op.go +++ b/crypto/merkle/proof_op.go @@ -149,7 +149,7 @@ func (prt *ProofRuntime) VerifyValueFromKeys(proof *cmtcrypto.ProofOps, root []b return prt.VerifyFromKeys(proof, root, keys, [][]byte{value}) } -// TODO In the long run we'll need a method of classifcation of ops, +// TODO In the long run we'll need a method of classification of ops, // whether existence or absence or perhaps a third? 
func (prt *ProofRuntime) VerifyAbsence(proof *cmtcrypto.ProofOps, root []byte, keypath string) (err error) { return prt.Verify(proof, root, keypath, nil) diff --git a/crypto/merkle/tree_test.go b/crypto/merkle/tree_test.go index 72f1402d65..4b901862e1 100644 --- a/crypto/merkle/tree_test.go +++ b/crypto/merkle/tree_test.go @@ -68,7 +68,7 @@ func TestProof(t *testing.T) { proof := proofs[i] // Check total/index - require.EqualValues(t, proof.Index, i, "Unmatched indicies: %d vs %d", proof.Index, i) + require.EqualValues(t, proof.Index, i, "Unmatched indices: %d vs %d", proof.Index, i) require.EqualValues(t, proof.Total, total, "Unmatched totals: %d vs %d", proof.Total, total) diff --git a/crypto/secp256k1/secp256k1_test.go b/crypto/secp256k1/secp256k1_test.go index 195d9dde70..a9fc1edc6b 100644 --- a/crypto/secp256k1/secp256k1_test.go +++ b/crypto/secp256k1/secp256k1_test.go @@ -86,7 +86,7 @@ func TestSecp256k1LoadPrivkeyAndSerializeIsIdentity(t *testing.T) { } func TestGenPrivKeySecp256k1(t *testing.T) { - // curve oder N + // curve order N N := underlyingSecp256k1.S256().N tests := []struct { name string diff --git a/docs/celestia-architecture/adr-002-ipld-da-sampling.md b/docs/celestia-architecture/adr-002-ipld-da-sampling.md index b95975375a..340ed8a2aa 100644 --- a/docs/celestia-architecture/adr-002-ipld-da-sampling.md +++ b/docs/celestia-architecture/adr-002-ipld-da-sampling.md @@ -231,7 +231,7 @@ Proposed ### Positive - simplicity & ease of implementation -- can re-use an existing networking and p2p stack (go-ipfs) +- can reuse an existing networking and p2p stack (go-ipfs) - potential support of large, cool, and helpful community - high-level API definitions independent of the used stack diff --git a/docs/celestia-architecture/adr-004-mvp-light-client.md b/docs/celestia-architecture/adr-004-mvp-light-client.md index ef2425e287..a41449765b 100644 --- a/docs/celestia-architecture/adr-004-mvp-light-client.md +++ b/docs/celestia-architecture/adr-004-mvp-light-client.md @@ -97,7 +97,7 @@ The light client stores data in its own [badgerdb instance](https://github.com/c db, err := badgerdb.NewDB("light-client-db", dir) ``` -While it is not critical for this feature, we should at least try to re-use that same DB instance for the local ipld store. +While it is not critical for this feature, we should at least try to reuse that same DB instance for the local ipld store. Otherwise, we introduce yet another DB instance; something we want to avoid, especially on the long run (see [#283](https://github.com/celestiaorg/celestia-core/issues/283)). For the first implementation, it might still be simpler to create a separate DB instance and tackle cleaning this up in a separate pull request, e.g. together with other [instances]([#283](https://github.com/celestiaorg/celestia-core/issues/283)). 
diff --git a/docs/celestia-architecture/adr-007-minimal-changes-to-tendermint.md b/docs/celestia-architecture/adr-007-minimal-changes-to-tendermint.md index 68f25a0446..1117aeb10f 100644 --- a/docs/celestia-architecture/adr-007-minimal-changes-to-tendermint.md +++ b/docs/celestia-architecture/adr-007-minimal-changes-to-tendermint.md @@ -140,7 +140,7 @@ describe how to hash the block data here: #### Optionally remove some unused code -- Removing misc unsued code (https://github.com/celestiaorg/celestia-core/pull/208) +- Removing misc unused code (https://github.com/celestiaorg/celestia-core/pull/208) - Remove docs deployment (https://github.com/celestiaorg/celestia-core/pull/134) - Start deleting docs (https://github.com/celestiaorg/celestia-core/pull/209) - Remove tendermint-db in favor of badgerdb (https://github.com/celestiaorg/celestia-core/pull/241) @@ -174,7 +174,7 @@ minimum desired changes specified above. - Introduction (https://github.com/celestiaorg/celestia-core/pull/144) - Initial integration (https://github.com/celestiaorg/celestia-core/pull/152) - Custom Multihash (https://github.com/celestiaorg/celestia-core/pull/155) -- Puting data during proposal (https://github.com/celestiaorg/celestia-core/pull/178) +- Putting data during proposal (https://github.com/celestiaorg/celestia-core/pull/178) - Module name (https://github.com/celestiaorg/celestia-core/pull/151) - Update rsmt2d (https://github.com/celestiaorg/celestia-core/pull/290) - Make plugin a package (https://github.com/celestiaorg/celestia-core/pull/294) diff --git a/docs/celestia-architecture/adr-009-cat-pool.md b/docs/celestia-architecture/adr-009-cat-pool.md index a98366c0fc..44772e152f 100644 --- a/docs/celestia-architecture/adr-009-cat-pool.md +++ b/docs/celestia-architecture/adr-009-cat-pool.md @@ -6,7 +6,7 @@ ## Context -One of the criterias of success for Celestia as a reliable data availability layer is the ability to handle large transactional throughput. A component that plays a significant role in this is the mempool. It's purpose is to receive transactions from clients and broadcast them to all other nodes, eventually reaching the next block proposer who includes it in their block. Given Celestia's aggregator-like role whereby larger transactions, i.e. blobs, are expected to dominate network traffic, a content-addressable algorithm, common in many other [peer-to-peer file sharing protocols](https://en.wikipedia.org/wiki/InterPlanetary_File_System), could be far more beneficial than the current transaction-flooding protocol that Tendermint currently uses. +One of the criteria of success for Celestia as a reliable data availability layer is the ability to handle large transactional throughput. A component that plays a significant role in this is the mempool. Its purpose is to receive transactions from clients and broadcast them to all other nodes, eventually reaching the next block proposer who includes it in their block. Given Celestia's aggregator-like role whereby larger transactions, i.e. blobs, are expected to dominate network traffic, a content-addressable algorithm, common in many other [peer-to-peer file sharing protocols](https://en.wikipedia.org/wiki/InterPlanetary_File_System), could be far more beneficial than the current transaction-flooding protocol that Tendermint currently uses. This ADR describes the content addressable transaction protocol and through a comparative analysis with the existing gossip protocol, presents the case for it's adoption in Celestia.
@@ -32,7 +32,7 @@ A series of new metrics have been added to monitor effectiveness: - SuccessfulTxs: number of transactions committed in a block (to be used as a baseline) - AlreadySeenTxs: transactions that are received more than once - RequestedTxs: the number of initial requests for a transaction -- RerequestedTxs: the numer of follow up requests for a transaction. If this is high, it may indicate that the request timeout is too short. +- RerequestedTxs: the number of follow up requests for a transaction. If this is high, it may indicate that the request timeout is too short. The CAT pool has had numerous unit tests added. It has been tested in the local e2e networks and put under strain in large, geographically dispersed 100 node networks. diff --git a/docs/core/running-in-production.md b/docs/core/running-in-production.md index a907ec594a..d3e6717762 100644 --- a/docs/core/running-in-production.md +++ b/docs/core/running-in-production.md @@ -358,7 +358,7 @@ applications, setting it to true is not a problem. - `consensus.peer_gossip_sleep_duration` You can try to reduce the time your node sleeps before checking if -theres something to send its peers. +there's something to send its peers. - `consensus.timeout_commit` diff --git a/docs/qa/CometBFT-QA-34.md b/docs/qa/CometBFT-QA-34.md index 10101e0360..b400dec20d 100644 --- a/docs/qa/CometBFT-QA-34.md +++ b/docs/qa/CometBFT-QA-34.md @@ -114,7 +114,7 @@ This section reports on the key Prometheus metrics extracted from the following * Mixed network, 1/3 Tendermint Core `v0.34.26` and 2/3 running CometBFT: experiment with UUID starting with `fc5e`. * Mixed network, 2/3 Tendermint Core `v0.34.26` and 1/3 running CometBFT: experiment with UUID starting with `4759`. -We make explicit comparisons between the baseline and the homogenous setups, but refrain from +We make explicit comparisons between the baseline and the homogeneous setups, but refrain from commenting on the mixed network experiment unless they show some exceptional results. ### Mempool Size @@ -191,7 +191,7 @@ on the corresponding plot, shown above. ### Peers -The following plots show how many peers a node had throughtout the experiment. +The following plots show how many peers a node had throughout the experiment. The thick red dashed line represents the moving average over a sliding window of 20 seconds. @@ -236,7 +236,7 @@ The thick red dashed line show the rates' moving averages. #### Baseline -The average number of blocks/minute oscilate between 10 and 40. +The average number of blocks/minute oscillate between 10 and 40. ![heights](img34/baseline/block_rate_regular.png) @@ -327,7 +327,7 @@ command, and their average value. #### CometBFT Homogeneous network -The load in the homogenous network is, similarly to the baseline case, below 5 and, therefore, normal. +The load in the homogeneous network is, similarly to the baseline case, below 5 and, therefore, normal. ![load1-homogeneous](img34/homogeneous/cpu.png) @@ -358,7 +358,7 @@ As expected, the average plot also looks similar. The comparison of the baseline results and the homogeneous case show that both scenarios had similar numbers and are therefore equivalent. The mixed nodes cases show that networks operate normally with a mix of compatible Tendermint Core and CometBFT versions. 
-Although not the main goal, a comparison of metric numbers with the homogenous case and the baseline scenarios show similar results and therefore we can conclude that mixing compatible Tendermint Core and CometBFT introduces not performance degradation. +Although not the main goal, a comparison of metric numbers with the homogeneous case and the baseline scenarios show similar results and therefore we can conclude that mixing compatible Tendermint Core and CometBFT introduces no performance degradation. A conclusion of these tests is shown in the following table, along with the commit versions used in the experiments. diff --git a/docs/qa/v034/README.md b/docs/qa/v034/README.md index 7a66371b52..435a653154 100644 --- a/docs/qa/v034/README.md +++ b/docs/qa/v034/README.md @@ -112,7 +112,7 @@ This section reports on the key Prometheus metrics extracted from the following * Mixed network, 1/3 Tendermint Core `v0.34.26` and 2/3 running CometBFT: experiment with UUID starting with `fc5e`. * Mixed network, 2/3 Tendermint Core `v0.34.26` and 1/3 running CometBFT: experiment with UUID starting with `4759`. -We make explicit comparisons between the baseline and the homogenous setups, but refrain from +We make explicit comparisons between the baseline and the homogeneous setups, but refrain from commenting on the mixed network experiment unless they show some exceptional results. ### Mempool Size @@ -189,7 +189,7 @@ on the corresponding plot, shown above. ### Peers -The following plots show how many peers a node had throughtout the experiment. +The following plots show how many peers a node had throughout the experiment. The thick red dashed line represents the moving average over a sliding window of 20 seconds. @@ -234,7 +234,7 @@ The thick red dashed line show the rates' moving averages. #### Baseline -The average number of blocks/minute oscilate between 10 and 40. +The average number of blocks/minute oscillate between 10 and 40. ![heights](./img/baseline/block_rate_regular.png) @@ -325,7 +325,7 @@ command, and their average value. #### CometBFT Homogeneous network -The load in the homogenous network is, similarly to the baseline case, below 5 and, therefore, normal. +The load in the homogeneous network is, similarly to the baseline case, below 5 and, therefore, normal. ![load1-homogeneous](./img/homogeneous/cpu.png) @@ -356,7 +356,7 @@ As expected, the average plot also looks similar. The comparison of the baseline results and the homogeneous case show that both scenarios had similar numbers and are therefore equivalent. The mixed nodes cases show that networks operate normally with a mix of compatible Tendermint Core and CometBFT versions. -Although not the main goal, a comparison of metric numbers with the homogenous case and the baseline scenarios show similar results and therefore we can conclude that mixing compatible Tendermint Core and CometBFT introduces not performance degradation. +Although not the main goal, a comparison of metric numbers with the homogeneous case and the baseline scenarios show similar results and therefore we can conclude that mixing compatible Tendermint Core and CometBFT introduces no performance degradation. A conclusion of these tests is shown in the following table, along with the commit versions used in the experiments.
diff --git a/libs/clist/clist.go b/libs/clist/clist.go index b18306490f..d35b030a10 100644 --- a/libs/clist/clist.go +++ b/libs/clist/clist.go @@ -166,7 +166,7 @@ func (e *CElement) SetNext(newNext *CElement) { // If a WaitGroup is reused to wait for several independent sets of // events, new Add calls must happen after all previous Wait calls have // returned. - e.nextWg = waitGroup1() // WaitGroups are difficult to re-use. + e.nextWg = waitGroup1() // WaitGroups are difficult to reuse. e.nextWaitCh = make(chan struct{}) } if oldNext == nil && newNext != nil { @@ -184,7 +184,7 @@ func (e *CElement) SetPrev(newPrev *CElement) { oldPrev := e.prev e.prev = newPrev if oldPrev != nil && newPrev == nil { - e.prevWg = waitGroup1() // WaitGroups are difficult to re-use. + e.prevWg = waitGroup1() // WaitGroups are difficult to reuse. e.prevWaitCh = make(chan struct{}) } if oldPrev == nil && newPrev != nil { @@ -374,7 +374,7 @@ func (l *CList) Remove(e *CElement) interface{} { // If we're removing the only item, make CList FrontWait/BackWait wait. if l.len == 1 { - l.wg = waitGroup1() // WaitGroups are difficult to re-use. + l.wg = waitGroup1() // WaitGroups are difficult to reuse. l.waitCh = make(chan struct{}) } diff --git a/libs/math/fraction.go b/libs/math/fraction.go index a8d2855924..ca4f946873 100644 --- a/libs/math/fraction.go +++ b/libs/math/fraction.go @@ -27,7 +27,7 @@ func (fr Fraction) String() string { func ParseFraction(f string) (Fraction, error) { o := strings.Split(f, "/") if len(o) != 2 { - return Fraction{}, errors.New("incorrect formating: should have a single slash i.e. \"1/3\"") + return Fraction{}, errors.New("incorrect formatting: should have a single slash i.e. \"1/3\"") } numerator, err := strconv.ParseUint(o[0], 10, 64) if err != nil { diff --git a/libs/service/service_test.go b/libs/service/service_test.go index 7abc6f4fba..f32525cfaa 100644 --- a/libs/service/service_test.go +++ b/libs/service/service_test.go @@ -44,7 +44,7 @@ func TestBaseServiceReset(t *testing.T) { require.NoError(t, err) err = ts.Reset() - require.Error(t, err, "expected cant reset service error") + require.Error(t, err, "expected can't reset service error") err = ts.Stop() require.NoError(t, err) diff --git a/libs/tempfile/tempfile.go b/libs/tempfile/tempfile.go index f79cd0e163..8c621f5996 100644 --- a/libs/tempfile/tempfile.go +++ b/libs/tempfile/tempfile.go @@ -65,7 +65,7 @@ func randWriteFileSuffix() string { suffix := strconv.Itoa(int(r)) if string(suffix[0]) == "-" { // Replace first "-" with "0". This is purely for UI clarity, - // as otherwhise there would be two `-` in a row. + // as otherwise there would be two `-` in a row. suffix = strings.Replace(suffix, "-", "0", 1) } return suffix diff --git a/light/detector.go b/light/detector.go index 024326ff9c..5812259a71 100644 --- a/light/detector.go +++ b/light/detector.go @@ -238,7 +238,7 @@ func (c *Client) handleConflictingHeaders( // and generate evidence against the primary that we can send to the witness commonBlock, trustedBlock := witnessTrace[0], witnessTrace[len(witnessTrace)-1] evidenceAgainstPrimary := newLightClientAttackEvidence(primaryBlock, trustedBlock, commonBlock) - c.logger.Error("ATTEMPTED ATTACK DETECTED. Sending evidence againt primary by witness", "ev", evidenceAgainstPrimary, + c.logger.Error("ATTEMPTED ATTACK DETECTED. 
Sending evidence against primary by witness", "ev", evidenceAgainstPrimary, "primary", c.primary, "witness", supportingWitness) c.sendEvidence(ctx, evidenceAgainstPrimary, supportingWitness) @@ -274,7 +274,7 @@ func (c *Client) handleConflictingHeaders( } // examineConflictingHeaderAgainstTrace takes a trace from one provider and a divergent header that -// it has received from another and preforms verifySkipping at the heights of each of the intermediate +// it has received from another and performs verifySkipping at the heights of each of the intermediate // headers in the trace until it reaches the divergentHeader. 1 of 2 things can happen. // // 1. The light client verifies a header that is different to the intermediate header in the trace. This @@ -394,7 +394,7 @@ func (c *Client) getTargetBlockOrLatest( if lightBlock.Height > height { // the witness has caught up. We recursively call the function again. However in order - // to avoud a wild goose chase where the witness sends us one header below and one header + // to avoid a wild goose chase where the witness sends us one header below and one header // above the height we set a timeout to the context lightBlock, err := witness.LightBlock(ctx, height) return true, lightBlock, err diff --git a/light/errors.go b/light/errors.go index bc6357def6..a93c09b810 100644 --- a/light/errors.go +++ b/light/errors.go @@ -27,7 +27,7 @@ type ErrNewValSetCantBeTrusted struct { } func (e ErrNewValSetCantBeTrusted) Error() string { - return fmt.Sprintf("cant trust new val set: %v", e.Reason) + return fmt.Sprintf("can't trust new val set: %v", e.Reason) } // ErrInvalidHeader means the header either failed the basic validation or diff --git a/mempool/cat/cache.go b/mempool/cat/cache.go index d73755bbba..75b4db452b 100644 --- a/mempool/cat/cache.go +++ b/mempool/cat/cache.go @@ -10,7 +10,7 @@ import ( // LRUTxCache maintains a thread-safe LRU cache of raw transactions. The cache // only stores the hash of the raw transaction. -// NOTE: This has been copied from mempool/cache with the main diffence of using +// NOTE: This has been copied from mempool/cache with the main difference of using // tx keys instead of raw transactions. type LRUTxCache struct { staticSize int diff --git a/mempool/cat/pool.go b/mempool/cat/pool.go index d462f5a6b6..efe644f809 100644 --- a/mempool/cat/pool.go +++ b/mempool/cat/pool.go @@ -29,7 +29,7 @@ var ( // TxPoolOption sets an optional parameter on the TxPool. type TxPoolOption func(*TxPool) -// TxPool implemements the Mempool interface and allows the application to +// TxPool implements the Mempool interface and allows the application to // set priority values on transactions in the CheckTx response. When selecting // transactions to include in a block, higher-priority transactions are chosen // first. When evicting transactions from the mempool for size constraints, diff --git a/mempool/cat/reactor.go b/mempool/cat/reactor.go index 2e537c4af6..1452db8268 100644 --- a/mempool/cat/reactor.go +++ b/mempool/cat/reactor.go @@ -249,7 +249,7 @@ func (memR *Reactor) ReceiveEnvelope(e p2p.Envelope) { // If we didn't request the transaction we simply mark the peer as having the // tx (we'd have already done it if we were requesting the tx). 
memR.mempool.PeerHasTx(peerID, key) - memR.Logger.Debug("received new trasaction", "peerID", peerID, "txKey", key) + memR.Logger.Debug("received new transaction", "peerID", peerID, "txKey", key) } _, err = memR.mempool.TryAddNewTx(ntx, key, txInfo) if err != nil && err != ErrTxInMempool { diff --git a/mempool/cat/requests.go b/mempool/cat/requests.go index 148a968015..5fdb344a87 100644 --- a/mempool/cat/requests.go +++ b/mempool/cat/requests.go @@ -24,7 +24,7 @@ type requestScheduler struct { globalTimeout time.Duration // requestsByPeer is a lookup table of requests by peer. - // Multiple tranasctions can be requested by a single peer at one + // Multiple transactions can be requested by a single peer at one requestsByPeer map[uint16]requestSet // requestsByTx is a lookup table for requested txs. diff --git a/mempool/cat/requests_test.go b/mempool/cat/requests_test.go index 72bc165eb1..608883fb65 100644 --- a/mempool/cat/requests_test.go +++ b/mempool/cat/requests_test.go @@ -48,7 +48,7 @@ func TestRequestSchedulerRerequest(t *testing.T) { // wait for the scheduler to invoke the timeout <-closeCh - // check that the request stil exists + // check that the request still exists require.True(t, requests.Has(peerA, key)) // check that peerB was requested require.True(t, requests.Has(peerB, key)) diff --git a/mempool/cat/spec.md b/mempool/cat/spec.md index 2cd35bd87a..6fa8a9acd5 100644 --- a/mempool/cat/spec.md +++ b/mempool/cat/spec.md @@ -36,9 +36,9 @@ message WantTx { } ``` -Both `SeenTx` and `WantTx` contain the sha256 hash of the raw transaction bytes. `SeenTx` also contains an optional `p2p.ID` that corresponds to the peer that the node recieved the tx from. The only validation for both is that the byte slice of the `tx_key` MUST have a length of 32. +Both `SeenTx` and `WantTx` contain the sha256 hash of the raw transaction bytes. `SeenTx` also contains an optional `p2p.ID` that corresponds to the peer that the node received the tx from. The only validation for both is that the byte slice of the `tx_key` MUST have a length of 32. -Both messages are sent across a new channel with the ID: `byte(0x31)`. This enables cross compatability as discussed in greater detail below. +Both messages are sent across a new channel with the ID: `byte(0x31)`. This enables cross compatibility as discussed in greater detail below. > **Note:** > The term `SeenTx` is used over the more common `HasTx` because the transaction pool contains sophisticated eviction logic. TTL's, higher priority transactions and reCheckTx may mean that a transaction pool *had* a transaction but does not have it any more. Semantically it's more appropriate to use `SeenTx` to imply not the presence of a transaction but that the node has seen it and dealt with it accordingly. @@ -75,7 +75,7 @@ Transaction pools are solely run in-memory; thus when a node stops, all transact Upon receiving a `Txs` message: -- Check whether it is in reponse to a request or simply an unsolicited broadcast +- Check whether it is in response to a request or simply an unsolicited broadcast - Validate the tx against current resources and the applications `CheckTx` - If rejected or evicted, mark accordingly - If successful, send a `SeenTx` message to all connected peers excluding the original sender. If it was from an initial broadcast, the `SeenTx` should populate the `From` field with the `p2p.ID` of the recipient else if it is in response to a request `From` should remain empty. 
diff --git a/mempool/v0/clist_mempool.go b/mempool/v0/clist_mempool.go index e2112a615a..17dda10487 100644 --- a/mempool/v0/clist_mempool.go +++ b/mempool/v0/clist_mempool.go @@ -492,7 +492,7 @@ func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) { // Done! mem.logger.Debug("done rechecking txs") - // incase the recheck removed all txs + // in case the recheck removed all txs if mem.Size() > 0 { mem.notifyTxsAvailable() } diff --git a/mempool/v0/clist_mempool_test.go b/mempool/v0/clist_mempool_test.go index 8555b6577f..0c303714c9 100644 --- a/mempool/v0/clist_mempool_test.go +++ b/mempool/v0/clist_mempool_test.go @@ -358,7 +358,7 @@ func TestTxsAvailable(t *testing.T) { timeoutMS := 500 - // with no txs, it shouldnt fire + // with no txs, it shouldn't fire ensureNoFire(t, mp.TxsAvailable(), timeoutMS) // send a bunch of txs, it should only fire once @@ -376,7 +376,7 @@ func TestTxsAvailable(t *testing.T) { ensureFire(t, mp.TxsAvailable(), timeoutMS) ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - // send a bunch more txs. we already fired for this height so it shouldnt fire again + // send a bunch more txs. we already fired for this height so it shouldn't fire again moreTxs := checkTxs(t, mp, 50, mempool.UnknownPeerID) ensureNoFire(t, mp.TxsAvailable(), timeoutMS) @@ -489,7 +489,7 @@ func TestSerialReap(t *testing.T) { // Reap again. We should get the same amount reapCheck(1000) - // Commit from the conensus AppConn + // Commit from the consensus AppConn commitRange(0, 500) updateRange(0, 500) diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index 60681de8f8..9f3f3a615d 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -588,7 +588,7 @@ FOR_LOOP: c.recvMonitor.Update(_n) if err != nil { // stopServices was invoked and we are shutting down - // receiving is excpected to fail since we will close the connection + // receiving is expected to fail since we will close the connection select { case <-c.quitRecvRoutine: break FOR_LOOP diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go index f628b8aec9..1cf1f7a0f4 100644 --- a/p2p/conn/connection_test.go +++ b/p2p/conn/connection_test.go @@ -640,7 +640,7 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { client := mconnClient.conn protoWriter := protoio.NewDelimitedWriter(client) - // send msg thats just right + // send msg that's just right var packet = tmp2p.PacketMsg{ ChannelID: 0x01, EOF: true, @@ -651,7 +651,7 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { require.NoError(t, err) assert.True(t, expectSend(chOnRcv), "msg just right") - // send msg thats too long + // send msg that's too long packet = tmp2p.PacketMsg{ ChannelID: 0x01, EOF: true, diff --git a/p2p/conn/secret_connection_test.go b/p2p/conn/secret_connection_test.go index 516302c35b..2f2504cb06 100644 --- a/p2p/conn/secret_connection_test.go +++ b/p2p/conn/secret_connection_test.go @@ -121,7 +121,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { // A helper that will run with (fooConn, fooWrites, fooReads) and vice versa genNodeRunner := func(id string, nodeConn kvstoreConn, nodeWrites []string, nodeReads *[]string) async.Task { return func(_ int) (interface{}, bool, error) { - // Initiate cryptographic private key and secret connection trhough nodeConn. + // Initiate cryptographic private key and secret connection through nodeConn. 
nodePrvKey := ed25519.GenPrivKey() nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey) if err != nil { diff --git a/p2p/node_info.go b/p2p/node_info.go index 59cf885cb1..05472d77f6 100644 --- a/p2p/node_info.go +++ b/p2p/node_info.go @@ -95,7 +95,7 @@ type DefaultNodeInfo struct { Other DefaultNodeInfoOther `json:"other"` // other application specific data } -// DefaultNodeInfoOther is the misc. applcation specific data +// DefaultNodeInfoOther is the misc. application specific data type DefaultNodeInfoOther struct { TxIndex string `json:"tx_index"` RPCAddress string `json:"rpc_address"` @@ -173,7 +173,7 @@ func (info DefaultNodeInfo) Validate() error { return nil } -// CompatibleWith checks if two DefaultNodeInfo are compatible with eachother. +// CompatibleWith checks if two DefaultNodeInfo are compatible with each other. // CONTRACT: two nodes are compatible if the Block version and network match // and they have at least one channel in common. func (info DefaultNodeInfo) CompatibleWith(otherInfo NodeInfo) error { diff --git a/p2p/peer.go b/p2p/peer.go index dc7b47ad6e..65129b19b5 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -285,7 +285,7 @@ func (p *peer) IsOutbound() bool { return p.peerConn.outbound } -// IsPersistent returns true if the peer is persitent, false otherwise. +// IsPersistent returns true if the peer is persistent, false otherwise. func (p *peer) IsPersistent() bool { return p.peerConn.persistent } diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go index 857a6a063f..b5a05f39e5 100644 --- a/p2p/pex/addrbook.go +++ b/p2p/pex/addrbook.go @@ -545,7 +545,7 @@ func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) error { // Add to bucket. bucket[addrStr] = ka - // increment nNew if the peer doesnt already exist in a bucket + // increment nNew if the peer doesn't already exist in a bucket if ka.addBucketRef(bucketIdx) == 1 { a.nNew++ } diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index 409bee8917..322f171845 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -366,7 +366,7 @@ func (r *Reactor) RequestAddrs(p Peer) { }, r.Logger) } -// ReceiveAddrs adds the given addrs to the addrbook if theres an open +// ReceiveAddrs adds the given addrs to the addrbook if there's an open // request for this peer and deletes the open request. // If there's no open request for the src peer, it returns an error. func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { diff --git a/p2p/transport.go b/p2p/transport.go index bab31f38e5..7203374f43 100644 --- a/p2p/transport.go +++ b/p2p/transport.go @@ -80,7 +80,7 @@ type transportLifecycle interface { } // ConnFilterFunc to be implemented by filter hooks after a new connection has -// been established. The set of exisiting connections is passed along together +// been established. The set of existing connections is passed along together // with all resolved IPs for the new connection. type ConnFilterFunc func(ConnSet, net.Conn, []net.IP) error diff --git a/pkg/trace/client.go b/pkg/trace/client.go index 85c54913d5..61b0995530 100644 --- a/pkg/trace/client.go +++ b/pkg/trace/client.go @@ -162,7 +162,7 @@ func stringToMap(tables string) map[string]struct{} { // UTF-8 sequence. First part is equivalent to strings.SplitN with a count of // -1. also filter out empty strings, only return non-empty strings. // -// NOTE: this is copy pasted from the config pacakage to avoid a circular +// NOTE: this is copy pasted from the config package to avoid a circular // dependency. 
See the function of the same name for tests. func splitAndTrimEmpty(s, sep, cutset string) []string { if s == "" { diff --git a/pkg/trace/schema/consensus.go b/pkg/trace/schema/consensus.go index 9089d2e9fe..6609a8f363 100644 --- a/pkg/trace/schema/consensus.go +++ b/pkg/trace/schema/consensus.go @@ -101,7 +101,7 @@ const ( // SquareSizeFieldKey is the name of the field that stores the square size // of the block. SquareSize is the number of shares in a single row or - // column of the origianl data square. + // column of the original data square. SquareSizeFieldKey = "square_size" // BlockSizeFieldKey is the name of the field that stores the size of diff --git a/privval/msgs_test.go b/privval/msgs_test.go index 154151eed3..c2f50821f1 100644 --- a/privval/msgs_test.go +++ b/privval/msgs_test.go @@ -71,7 +71,7 @@ func TestPrivvalVectors(t *testing.T) { proposal := exampleProposal() proposalpb := proposal.ToProto() - // Create a Reuseable remote error + // Create a reusable remote error remoteError := &privproto.RemoteSignerError{Code: 1, Description: "it's a error"} testCases := []struct { diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 5aeea175f7..83658ad82b 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -234,7 +234,7 @@ message ResponseCheckTx { int64 priority = 10; // mempool_error is set by CometBFT. - // ABCI applictions creating a ResponseCheckTX should not set mempool_error. + // ABCI applications creating a ResponseCheckTX should not set mempool_error. string mempool_error = 11; } diff --git a/proto/tendermint/crypto/proof.pb.go b/proto/tendermint/crypto/proof.pb.go index 3b6d0036d7..21c8d5154e 100644 --- a/proto/tendermint/crypto/proof.pb.go +++ b/proto/tendermint/crypto/proof.pb.go @@ -206,7 +206,7 @@ func (m *DominoOp) GetOutput() string { } // ProofOp defines an operation used for calculating Merkle root -// The data could be arbitrary format, providing nessecary data +// The data could be arbitrary format, providing necessary data // for example neighbouring node hash type ProofOp struct { Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` diff --git a/proto/tendermint/crypto/proof.proto b/proto/tendermint/crypto/proof.proto index ae72195e86..7f22a0052e 100644 --- a/proto/tendermint/crypto/proof.proto +++ b/proto/tendermint/crypto/proof.proto @@ -27,7 +27,7 @@ message DominoOp { } // ProofOp defines an operation used for calculating Merkle root -// The data could be arbitrary format, providing nessecary data +// The data could be arbitrary format, providing necessary data // for example neighbouring node hash message ProofOp { string type = 1; diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index deeda665b4..53fe914e24 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -19,7 +19,7 @@ import ( var waitForEventTimeout = 8 * time.Second -// MakeTxKV returns a text transaction, allong with expected key, value pair +// MakeTxKV returns a text transaction, along with expected key, value pair func MakeTxKV() ([]byte, []byte, []byte) { k := []byte(cmtrand.Str(8)) v := []byte(cmtrand.Str(8)) diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index a564ec42d4..d29d17c5d4 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -54,7 +54,7 @@ type Call struct { Error error } -// GetResponse will generate the apporiate response for us, when +// GetResponse will generate the appropriate response for us, when // 
using the Call struct to configure a Mock handler. // // When configuring a response, if only one of Response or Error is diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index a8005f5a4d..9779bad112 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -171,7 +171,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler // to avoid any further panics. However, we try to return a 500, since it otherwise // defaults to 200 and there is no other way to terminate the connection. If that // should panic for whatever reason then the Go HTTP server will handle it and - // terminate the connection - panicing is the de-facto and only way to get the Go HTTP + // terminate the connection - panicking is the de-facto and only way to get the Go HTTP // server to terminate the request and close the connection/stream: // https://github.com/golang/go/issues/17790#issuecomment-258481416 if e := recover(); e != nil { diff --git a/rpc/jsonrpc/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go index 134eff20f0..51dfd47a8d 100644 --- a/rpc/jsonrpc/server/http_uri_handler.go +++ b/rpc/jsonrpc/server/http_uri_handler.go @@ -77,7 +77,7 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit } } -// Covert an http query to a list of properly typed values. +// Convert an http query to a list of properly typed values. // To be properly decoded the arg must be a concrete type from CometBFT (if its an interface). func httpParamsToArgs(rpcFunc *RPCFunc, r *http.Request) ([]reflect.Value, error) { // skip types.Context diff --git a/rpc/jsonrpc/server/parse_test.go b/rpc/jsonrpc/server/parse_test.go index c29224ea2e..7c6afc9862 100644 --- a/rpc/jsonrpc/server/parse_test.go +++ b/rpc/jsonrpc/server/parse_test.go @@ -187,7 +187,7 @@ func TestParseURI(t *testing.T) { // can parse numbers quoted, too {[]string{`"7"`, `"flew"`}, 7, "flew", false}, {[]string{`"-10"`, `"bob"`}, -10, "bob", false}, - // cant parse strings uquoted + // can't parse strings unquoted {[]string{`"-10"`, `bob`}, -10, "bob", true}, } for idx, tc := range cases { diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index 19c732d634..2c18ef4825 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -73,7 +73,7 @@ servers: description: Interact with CometBFT RPC node running locally tags: - name: Info - description: Informations about the node APIs + description: Information about the node APIs - name: Tx description: Transactions broadcast APIs - name: ABCI @@ -457,7 +457,7 @@ paths: Get Header. responses: "200": - description: Header informations. + description: Header information. content: application/json: schema: @@ -486,7 +486,7 @@ paths: Get Header By Hash. responses: "200": - description: Header informations. + description: Header information. content: application/json: schema: @@ -518,7 +518,7 @@ paths: `Cache-Control` header will be set with the default maximum age. responses: "200": - description: Block informations. + description: Block information. content: application/json: schema: @@ -550,7 +550,7 @@ paths: maximum age. responses: "200": - description: Block informations. + description: Block information. content: application/json: schema: @@ -601,7 +601,7 @@ paths: parameters: - in: query name: height - description: height to return. If no height is provided, it will fetch commit informations regarding the latest block. + description: height to return.
If no height is provided, it will fetch commit information regarding the latest block. schema: type: integer default: 0 @@ -792,7 +792,7 @@ paths: parameters: - in: query name: height - description: height to return. If no height is provided, it will fetch commit informations regarding the latest block. + description: height to return. If no height is provided, it will fetch commit information regarding the latest block. schema: type: integer default: 0 @@ -2863,9 +2863,9 @@ components: type: string example: "Dialing seeds in progress. See /net_info for details" - ###### Reuseable types ###### + ###### Reusable types ###### - # Validator type with proposer prioirty + # Validator type with proposer priority ValidatorPriority: type: object properties: diff --git a/spec/abci/abci.md b/spec/abci/abci.md index 2d5cc4eaa4..695118b5db 100644 --- a/spec/abci/abci.md +++ b/spec/abci/abci.md @@ -72,7 +72,7 @@ it is already included. ### DeliverTx The `DeliverTx` ABCI method delivers transactions from CometBFT to the application. -When CometBFT recieves a `ResponseDeliverTx` with a non-zero `Code`, the response code is logged. +When CometBFT receives a `ResponseDeliverTx` with a non-zero `Code`, the response code is logged. The transaction was already included in a block, so the `Code` does not influence CometBFT consensus. @@ -562,7 +562,7 @@ the blockchain's `AppHash` which is verified via [light client verification](../ | Name | Type | Description | Field Number | |-------|-------|-------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| - | chunk | bytes | The binary chunk contents, in an arbitray format. Chunk messages cannot be larger than 16 MB _including metadata_, so 10 MB is a good starting point. | 1 | + | chunk | bytes | The binary chunk contents, in an arbitrary format. Chunk messages cannot be larger than 16 MB _including metadata_, so 10 MB is a good starting point. | 1 | * **Usage**: * Used during state sync to retrieve snapshot chunks from peers. diff --git a/spec/abci/apps.md b/spec/abci/apps.md index de488a877e..cb56e06ecd 100644 --- a/spec/abci/apps.md +++ b/spec/abci/apps.md @@ -466,9 +466,9 @@ implementation of On startup, CometBFT calls the `Info` method on the Info Connection to get the latest committed state of the app. The app MUST return information consistent with the -last block it succesfully completed Commit for. +last block it successfully completed Commit for. -If the app succesfully committed block H, then `last_block_height = H` and `last_block_app_hash = `. If the app +If the app successfully committed block H, then `last_block_height = H` and `last_block_app_hash = `. If the app failed during the Commit of block H, then `last_block_height = H-1` and `last_block_app_hash = `. @@ -479,7 +479,7 @@ the app. storeBlockHeight = height of the last block CometBFT saw a commit for stateBlockHeight = height of the last block for which CometBFT completed all block processing and saved all ABCI results to disk -appBlockHeight = height of the last block for which ABCI app succesfully +appBlockHeight = height of the last block for which ABCI app successfully completed Commit ``` diff --git a/spec/consensus/evidence.md b/spec/consensus/evidence.md index 85df159b26..93ad573e6e 100644 --- a/spec/consensus/evidence.md +++ b/spec/consensus/evidence.md @@ -4,7 +4,7 @@ # Evidence Evidence is an important component of CometBFT's security model. 
Whilst the core -consensus protocol provides correctness gaurantees for state machine replication +consensus protocol provides correctness guarantees for state machine replication that can tolerate less than 1/3 failures, the evidence system looks to detect and gossip byzantine faults whose combined power is greater than or equal to 1/3. It is worth noting that the evidence system is designed purely to detect possible attacks, gossip them, @@ -77,7 +77,7 @@ should be committed within a certain period from the point that it occurred (timely). Timelines is defined by the `EvidenceParams`: `MaxAgeNumBlocks` and `MaxAgeDuration`. In Proof of Stake chains where validators are bonded, evidence age should be less than the unbonding period so validators still can be -punished. Given these two propoerties the following initial checks are made. +punished. Given these two properties the following initial checks are made. 1. Has the evidence expired? This is done by taking the height of the `Vote` within `DuplicateVoteEvidence` or `CommonHeight` within @@ -126,11 +126,11 @@ Valid Light Client Attack Evidence must adhere to the following rules: ## Gossiping -If a node verifies evidence it then broadcasts it to all peers, continously sending +If a node verifies evidence it then broadcasts it to all peers, continuously sending the same evidence once every 10 seconds until the evidence is seen on chain or expires. -## Commiting on Chain +## Committing on Chain Evidence takes strict priority over regular transactions, thus a block is filled with evidence first and transactions take up the remainder of the space. To diff --git a/spec/consensus/proposer-based-timestamp/pbts_001_draft.md b/spec/consensus/proposer-based-timestamp/pbts_001_draft.md index bcb01d7364..84509f8dd6 100644 --- a/spec/consensus/proposer-based-timestamp/pbts_001_draft.md +++ b/spec/consensus/proposer-based-timestamp/pbts_001_draft.md @@ -21,7 +21,7 @@ In CometBFT, the first version of how time is computed and stored in a block wor 1. **Liveness.** The liveness of the protocol: 1. does not depend on clock synchronization, 1. depends on bounded message delays. -1. **Relation to real time.** There is no clock synchronizaton, which implies that there is **no relation** between the computed block `time` and real time. +1. **Relation to real time.** There is no clock synchronization, which implies that there is **no relation** between the computed block `time` and real time. 1. **Aggregate signatures.** As the `precommit` messages contain the local times, all these `precommit` messages typically differ in the time field, which **prevents** the use of aggregate signatures. ## Suggested Proposer-Based Time diff --git a/spec/consensus/proposer-selection.md b/spec/consensus/proposer-selection.md index 98442e28ab..5d3babb01c 100644 --- a/spec/consensus/proposer-selection.md +++ b/spec/consensus/proposer-selection.md @@ -179,7 +179,7 @@ In order to prevent this, when a new validator is added, its initial priority is where P is the total voting power of the set including V. -Curent implementation uses the penalty factor of 1.125 because it provides a small punishment that is efficient to calculate. See [here](https://github.com/cometbft/cometbft/pull/2785#discussion_r235038971) for more details. +Current implementation uses the penalty factor of 1.125 because it provides a small punishment that is efficient to calculate. See [here](https://github.com/cometbft/cometbft/pull/2785#discussion_r235038971) for more details. 
If we consider the validator set where p3 has just been added: diff --git a/spec/core/data_structures.md b/spec/core/data_structures.md index 2dcb852f3c..c37c865e88 100644 --- a/spec/core/data_structures.md +++ b/spec/core/data_structures.md @@ -72,7 +72,7 @@ set (TODO). Execute is defined as: ```go func Execute(state State, app ABCIApp, block Block) State { - // Fuction ApplyBlock executes block of transactions against the app and returns the new root hash of the app state, + // Function ApplyBlock executes block of transactions against the app and returns the new root hash of the app state, // modifications to the validator set and the changes of the consensus parameters. AppHash, ValidatorChanges, ConsensusParamChanges := app.ApplyBlock(block) @@ -131,7 +131,7 @@ the data in the current block, the previous block, and the results returned by t | ValidatorHash | slice of bytes (`[]byte`) | MerkleRoot of the current validator set. The validators are first sorted by voting power (descending), then by address (ascending) prior to computing the MerkleRoot. | Must be of length 32 | | NextValidatorHash | slice of bytes (`[]byte`) | MerkleRoot of the next validator set. The validators are first sorted by voting power (descending), then by address (ascending) prior to computing the MerkleRoot. | Must be of length 32 | | ConsensusHash | slice of bytes (`[]byte`) | Hash of the protobuf encoded consensus parameters. | Must be of length 32 | -| AppHash | slice of bytes (`[]byte`) | Arbitrary byte array returned by the application after executing and commiting the previous block. It serves as the basis for validating any merkle proofs that comes from the ABCI application and represents the state of the actual application rather than the state of the blockchain itself. The first block's `block.Header.AppHash` is given by `ResponseInitChain.app_hash`. | This hash is determined by the application, CometBFT can not perform validation on it. | +| AppHash | slice of bytes (`[]byte`) | Arbitrary byte array returned by the application after executing and committing the previous block. It serves as the basis for validating any merkle proofs that comes from the ABCI application and represents the state of the actual application rather than the state of the blockchain itself. The first block's `block.Header.AppHash` is given by `ResponseInitChain.app_hash`. | This hash is determined by the application, CometBFT can not perform validation on it. | | LastResultHash | slice of bytes (`[]byte`) | `LastResultsHash` is the root hash of a Merkle tree built from `ResponseDeliverTx` responses (`Log`,`Info`, `Codespace` and `Events` fields are ignored). | Must be of length 32. The first block has `block.Header.ResultsHash == MerkleRoot(nil)`, i.e. the hash of an empty input, for RFC-6962 conformance. | | EvidenceHash | slice of bytes (`[]byte`) | MerkleRoot of the evidence of Byzantine behavior included in this block. | Must be of length 32 | | ProposerAddress | slice of bytes (`[]byte`) | Address of the original proposer of the block. Validator must be in the current validatorSet. | Must be of length 20 | diff --git a/spec/core/encoding.md b/spec/core/encoding.md index f8f921ac6d..05f2ed95d2 100644 --- a/spec/core/encoding.md +++ b/spec/core/encoding.md @@ -19,7 +19,7 @@ For details on varints, see the [protobuf spec](https://developers.google.com/protocol-buffers/docs/encoding#varints). 
For example, the byte-array `[0xA, 0xB]` would be encoded as `0x020A0B`, -while a byte-array containing 300 entires beginning with `[0xA, 0xB, ...]` would +while a byte-array containing 300 entries beginning with `[0xA, 0xB, ...]` would be encoded as `0xAC020A0B...` where `0xAC02` is the UVarint encoding of 300. ## Hashing diff --git a/spec/ivy-proofs/tendermint_test.ivy b/spec/ivy-proofs/tendermint_test.ivy index 1299fc086d..35ac6300f2 100644 --- a/spec/ivy-proofs/tendermint_test.ivy +++ b/spec/ivy-proofs/tendermint_test.ivy @@ -26,7 +26,7 @@ isolate protocol = { # Finally, call the action: #``` scenarios.scenario_1 # Note that Ivy will check at runtime that all action preconditions are -# satisfied. For example, runing the scenario twice will cause a violation of +# satisfied. For example, running the scenario twice will cause a violation of # the precondition of the `start` action, because a node cannot start twice # (see `require ~_has_started` in action `start`). diff --git a/spec/light-client/attacks/isolate-attackers_001_draft.md b/spec/light-client/attacks/isolate-attackers_001_draft.md index 5408527921..fc9468f727 100644 --- a/spec/light-client/attacks/isolate-attackers_001_draft.md +++ b/spec/light-client/attacks/isolate-attackers_001_draft.md @@ -135,7 +135,7 @@ func violatesTMValidity(ref Header, ev Header) boolean ``` - Implementation remarks - - checks whether the evidence header `ev` violates the validity property of Tendermint consensus algorithm, by checking agains a reference header + - checks whether the evidence header `ev` violates the validity property of Tendermint consensus algorithm, by checking against a reference header - Expected precondition - `ref.Height == ev.Height` - Expected postcondition diff --git a/spec/light-client/detection/detection_001_reviewed.md b/spec/light-client/detection/detection_001_reviewed.md index b204677d69..543bd1a718 100644 --- a/spec/light-client/detection/detection_001_reviewed.md +++ b/spec/light-client/detection/detection_001_reviewed.md @@ -621,7 +621,7 @@ func AttackDetector(root LightBlock, primary_trace []LightBlock) for each secondary in Secondaries { // we replay the primary trace with the secondary, in // order to generate evidence that we can submit to the - // secodary. We return the evidence + the trace the + // secondary. We return the evidence + the trace the // secondary told us that spans the evidence at its local store EvidenceForSecondary, newroot, secondary_trace, result := diff --git a/spec/light-client/detection/draft-functions.md b/spec/light-client/detection/draft-functions.md index f983fded12..53f65c0a18 100644 --- a/spec/light-client/detection/draft-functions.md +++ b/spec/light-client/detection/draft-functions.md @@ -120,7 +120,7 @@ func SubmitIBCProofOfFork( else { // the ibc component does not have the TrustedBlock and might // even be on yet a different branch. We have to compute a PoF - // that the ibc component can verifiy based on its current + // that the ibc component can verify based on its current // knowledge ibcLightBlock, lblock, _, result := commonRoot(lightStore, ibc, PoF.TrustedBlock) @@ -194,7 +194,7 @@ LightBlock) (LightBlock, LightBlock, LightStore, Result) { - a lightBlock b1 from the IBC component, and - a lightBlock b2 from the local lightStore with height less than - lblock.Header.Hight, s.t. b1 supports b2, and + lblock.Header.Height, s.t. 
b1 supports b2, and - a lightstore with the blocks downloaded from the ibc component @@ -281,7 +281,7 @@ func DetectIBCFork(ibc IBCComponent, lightStore LightStore) (LightNodeProofOfFor **TODO:** finish conditions - Implementation remark - - we ask the handler for the lastest check. Cross-check with the + - we ask the handler for the latest check. Cross-check with the chain. In case they deviate we generate PoF. - we assume IBC component is correct. It has verified the consensus state diff --git a/spec/light-client/detection/req-ibc-detection.md b/spec/light-client/detection/req-ibc-detection.md index fe4f1a55c8..177e291ea8 100644 --- a/spec/light-client/detection/req-ibc-detection.md +++ b/spec/light-client/detection/req-ibc-detection.md @@ -17,7 +17,7 @@ In the following, I distilled what I considered relevant from | IBC Term | Cosmos Spec Term | Comment | |----------|-------------------------| --------| | `CommitmentRoot` | AppState | app hash | -| `ConsensusState` | Lightblock | not all fields are there. NextValidator is definitly needed | +| `ConsensusState` | Lightblock | not all fields are there. NextValidator is definitely needed | | `ClientState` | latest light block + configuration parameters (e.g., trusting period + `frozenHeight` | NextValidators missing; what is `proofSpecs`?| | `frozenHeight` | height of fork | set when a fork is detected | | "would-have-been-fooled" | light node fork detection | light node may submit proof of fork to IBC component to halt it | diff --git a/spec/light-client/supervisor/supervisor_001_draft.md b/spec/light-client/supervisor/supervisor_001_draft.md index c8bbe8916c..93cfc2a3a1 100644 --- a/spec/light-client/supervisor/supervisor_001_draft.md +++ b/spec/light-client/supervisor/supervisor_001_draft.md @@ -439,7 +439,7 @@ func Sequential-Supervisor (initdata LCInitData) (Error) { - Implementation remark - infinite loop unless a light client attack is detected - In typical implementations (e.g., the one in Rust), - there are mutliple input actions: + there are multiple input actions: `VerifytoLatest`, `LatestTrusted`, and `GetStatus`. The information can be easily obtained from the lightstore, so that we do not treat these requests explicitly here but just consider @@ -580,7 +580,7 @@ func VerifyAndDetect (lightStore LightStore, targetHeight Height) } // get the lightblock with maximum height smaller than targetHeight - // would typically be the heighest, if we always move forward + // would typically be the highest, if we always move forward root_of_trust, r2 = lightStore.LatestPrevious(targetHeight); if r2 = false { diff --git a/spec/p2p/messages/state-sync.md b/spec/p2p/messages/state-sync.md index cfc958e08d..8e739fce8d 100644 --- a/spec/p2p/messages/state-sync.md +++ b/spec/p2p/messages/state-sync.md @@ -108,7 +108,7 @@ In order to build the state, the state provider will request the params at the h ### ParamsResponse -A reciever to the request will use the state store to fetch the consensus params at that height and return it to the sender. +A receiver to the request will use the state store to fetch the consensus params at that height and return it to the sender. 
| Name | Type | Description | Field Number | |----------|--------|---------------------------------|--------------| diff --git a/spec/p2p/v0.34/peer_manager.md b/spec/p2p/v0.34/peer_manager.md index 42df89de7a..8c4e1752b2 100644 --- a/spec/p2p/v0.34/peer_manager.md +++ b/spec/p2p/v0.34/peer_manager.md @@ -128,7 +128,7 @@ The picture below is a first attempt of illustrating the life cycle of an outbou A peer can be in the following states: -- Candidate peers: peer addresses stored in the address boook, that can be +- Candidate peers: peer addresses stored in the address book, that can be retrieved via the [`PickAddress`](./addressbook.md#pick-address) method - [Dialing](switch.md#dialing-peers): peer addresses that are currently being dialed. This state exists to ensure that a single dialing routine exist per peer. diff --git a/spec/p2p/v0.34/pex.md b/spec/p2p/v0.34/pex.md index 8243eaa559..8f49e84af7 100644 --- a/spec/p2p/v0.34/pex.md +++ b/spec/p2p/v0.34/pex.md @@ -106,6 +106,6 @@ A node receives two type of messages as part of the PEX protocol: - `PexRequest`: a request for addresses received from a peer, handled as described [here](./pex-protocol.md#providing-addresses) -- `PexAddrs`: a list of addresses received from a peer, as a reponse to a PEX +- `PexAddrs`: a list of addresses received from a peer, as a response to a PEX request sent by the node, as described [here](./pex-protocol.md#responses) diff --git a/spec/p2p/v0.34/types.md b/spec/p2p/v0.34/types.md index 6d71da03fb..c93908fe6d 100644 --- a/spec/p2p/v0.34/types.md +++ b/spec/p2p/v0.34/types.md @@ -116,7 +116,7 @@ Interface `IPeerSet` offers methods to access a table of [`Peer`](#peergo) insta Type `PeerSet` implements a thread-safe table of [`Peer`](#peergo) instances, used by the [switch](#switchgo). -The switch provides limited access to this table by returing a `IPeerSet` +The switch provides limited access to this table by returning a `IPeerSet` instance, used by the [PEX reactor](#pex_reactorgo). ### `switch.go` diff --git a/spec/reactors/consensus.md b/spec/reactors/consensus.md index e1771bd515..3f4f769ad8 100644 --- a/spec/reactors/consensus.md +++ b/spec/reactors/consensus.md @@ -150,7 +150,7 @@ The node sends the `Proposal` to the peer and updates the peer's round state wit - The current round and height of the receiving peer match the proposal's, and the peer's state hasn't been updated yet. - If the peer's state for that proposal remains uninitialized since the proposal's transmission, the node initializes it by assigning the `ProposalBlockPartSetHeader` and an empty bit array with a size equal to the number of parts in the header for the `ProposalBlockParts`. - + ### Receiving Procedure @@ -214,7 +214,7 @@ type NewRoundStepMessage struct { } ``` - + ### New Valid Block Message @@ -237,7 +237,7 @@ type NewValidBlockMessage struct { Upon receiving this message, the node will only modify the peer's round state under these conditions: - The `Height` specified in the message aligns with the peer's current `Height`. -- The `Round` matches the most recent round known for the peer OR the message indicates the block's commitment i.e., `IsCommit` is `ture`. +- The `Round` matches the most recent round known for the peer OR the message indicates the block's commitment i.e., `IsCommit` is `true`. Following these verifications, the node will then update its peer state's `ProposaBlockPartSetHeader` and `ProposaBlockParts` based on the `BlockPartSetHeader` and `BlockParts` values from the received message. 
@@ -245,7 +245,7 @@ Following these verifications, the node will then update its peer state's `Propo +Answer: After further investigation, it looks like that this is purely based on votes and does not signify the proposal completion on the sender side. --> ## Network Traffic Analysis diff --git a/spec/rpc/README.md b/spec/rpc/README.md index ff9ce2ce9e..3382df6782 100644 --- a/spec/rpc/README.md +++ b/spec/rpc/README.md @@ -802,7 +802,7 @@ curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\ ### GenesisChunked -Get the genesis document in a chunks to support easily transfering larger documents. +Get the genesis document in a chunks to support easily transferring larger documents. #### Parameters diff --git a/state/execution.go b/state/execution.go index cdc9fce83d..edd9dc014c 100644 --- a/state/execution.go +++ b/state/execution.go @@ -90,7 +90,7 @@ func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) // CreateProposalBlock calls state.MakeBlock with evidence from the evpool // and txs from the mempool. The max bytes must be big enough to fit the commit. -// Up to 1/10th of the block space is allcoated for maximum sized evidence. +// Up to 1/10th of the block space is allocated for maximum sized evidence. // The rest is given to txs, up to the max gas. // // Contract: application will not return more bytes than are sent over the wire. @@ -283,7 +283,7 @@ func (blockExec *BlockExecutor) ApplyBlock( fail.Fail() // XXX // Events are fired after everything else. - // NOTE: if we crash between Commit and Save, events wont be fired during replay + // NOTE: if we crash between Commit and Save, events won't be fired during replay fireEvents(blockExec.logger, blockExec.eventBus, block, abciResponses, validatorUpdates, state.LastValidators, commit) return state, retainHeight, nil diff --git a/state/store_test.go b/state/store_test.go index 62ff0ea547..36fbbd290c 100644 --- a/state/store_test.go +++ b/state/store_test.go @@ -266,10 +266,10 @@ func TestLastABCIResponses(t *testing.T) { require.NoError(t, err) // check to see if the saved response height is the same as the loaded height. assert.Equal(t, lastResponse, response1) - // use an incorret height to make sure the state store errors. + // use an incorrect height to make sure the state store errors. _, err = stateStore.LoadLastABCIResponse(height + 1) assert.Error(t, err) - // check if the abci response didnt save in the abciresponses. + // check if the abci response didn't save in the abciresponses. responses, err = stateStore.LoadABCIResponses(height) require.NoError(t, err, responses) require.Equal(t, response1, responses) diff --git a/statesync/syncer.go b/statesync/syncer.go index aeb58677d7..8c551eb070 100644 --- a/statesync/syncer.go +++ b/statesync/syncer.go @@ -491,7 +491,7 @@ func (s *syncer) verifyApp(snapshot *snapshot, appVersion uint64) error { // sanity check that the app version in the block matches the application's own record // of its version if resp.AppVersion != appVersion { - // An error here most likely means that the app hasn't inplemented state sync + // An error here most likely means that the app hasn't implemented state sync // or the Info call correctly return fmt.Errorf("app version mismatch. 
Expected: %d, got: %d", appVersion, resp.AppVersion) diff --git a/store/store.go b/store/store.go index 72655359ec..1c5e4404c1 100644 --- a/store/store.go +++ b/store/store.go @@ -285,7 +285,7 @@ func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) { batch := bs.db.NewBatch() defer batch.Close() flush := func(batch dbm.Batch, base int64) error { - // We can't trust batches to be atomic, so update base first to make sure noone + // We can't trust batches to be atomic, so update base first to make sure no one // tries to access missing blocks. bs.mtx.Lock() bs.base = base diff --git a/test/app/counter_test.sh b/test/app/counter_test.sh index 3af2b885f1..b301e1b6aa 100755 --- a/test/app/counter_test.sh +++ b/test/app/counter_test.sh @@ -24,7 +24,7 @@ function getCode() { fi if [[ $(echo $R | jq 'has("code")') == "true" ]]; then - # this wont actually work if theres an error ... + # this won't actually work if there's an error ... echo "$R" | jq ".code" else # protobuf auto adds `omitempty` to everything so code OK and empty data/log diff --git a/test/maverick/consensus/misbehavior.go b/test/maverick/consensus/misbehavior.go index 32089cb9d0..ddc29cadd6 100644 --- a/test/maverick/consensus/misbehavior.go +++ b/test/maverick/consensus/misbehavior.go @@ -137,7 +137,7 @@ func defaultEnterPropose(cs *State, height int64, round int32) { if err != nil { // If this node is a validator & proposer in the currentx round, it will // miss the opportunity to create a block. - logger.Error("Error on retrival of pubkey", "err", err) + logger.Error("Error on retrieval of pubkey", "err", err) return } address := pubKey.Address() diff --git a/test/maverick/consensus/reactor.go b/test/maverick/consensus/reactor.go index 7c602e6e48..536d9e4ff5 100644 --- a/test/maverick/consensus/reactor.go +++ b/test/maverick/consensus/reactor.go @@ -572,7 +572,7 @@ OUTER_LOOP: if (0 < prs.Height) && (prs.Height < rs.Height) && (prs.Height >= conR.conS.blockStore.Base()) { heightLogger := logger.With("height", prs.Height) - // if we never received the commit message from the peer, the block parts wont be initialized + // if we never received the commit message from the peer, the block parts won't be initialized if prs.ProposalBlockParts == nil { blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) if blockMeta == nil { diff --git a/test/maverick/consensus/replay_file.go b/test/maverick/consensus/replay_file.go index 1b903153cf..f14182b395 100644 --- a/test/maverick/consensus/replay_file.go +++ b/test/maverick/consensus/replay_file.go @@ -145,7 +145,7 @@ func (pb *playback) replayReset(count int, newStepSub types.Subscription) error pb.fp = fp pb.dec = NewWALDecoder(fp) count = pb.count - count - fmt.Printf("Reseting from %d to %d\n", pb.count, count) + fmt.Printf("Resetting from %d to %d\n", pb.count, count) pb.count = 0 pb.cs = newCS var msg *cmtcon.TimedWALMessage diff --git a/test/maverick/consensus/state.go b/test/maverick/consensus/state.go index 726f3da835..525ed982ee 100644 --- a/test/maverick/consensus/state.go +++ b/test/maverick/consensus/state.go @@ -1527,7 +1527,7 @@ func (cs *State) finalizeCommit(height int64) { stateCopy := cs.state.Copy() // Execute and commit the block, update and save the state, and update the mempool. - // NOTE The block.AppHash wont reflect these txs until the next block. + // NOTE The block.AppHash won't reflect these txs until the next block. 
var err error var retainHeight int64 stateCopy, retainHeight, err = cs.blockExec.ApplyBlock( @@ -1897,7 +1897,7 @@ func (cs *State) signAddVote(msgType cmtproto.SignedMsgType, hash []byte, header return nil } -// updatePrivValidatorPubKey get's the private validator public key and +// updatePrivValidatorPubKey gets the private validator public key and // memoizes it. This func returns an error if the private validator is not // responding or responds with an error. func (cs *State) updatePrivValidatorPubKey() error { diff --git a/test/maverick/consensus/ticker.go b/test/maverick/consensus/ticker.go index ae5fab794a..c0bd8d23ce 100644 --- a/test/maverick/consensus/ticker.go +++ b/test/maverick/consensus/ticker.go @@ -89,7 +89,7 @@ func (t *timeoutTicker) stopTimer() { } // send on tickChan to start a new timer. -// timers are interupted and replaced by new ticks from later steps +// timers are interrupted and replaced by new ticks from later steps // timeouts of 0 on the tickChan will be immediately relayed to the tockChan func (t *timeoutTicker) timeoutRoutine() { t.Logger.Debug("Starting timeout routine") diff --git a/test/maverick/consensus/wal.go b/test/maverick/consensus/wal.go index 3f7532ea63..948f3f712a 100644 --- a/test/maverick/consensus/wal.go +++ b/test/maverick/consensus/wal.go @@ -307,7 +307,7 @@ func IsDataCorruptionError(err error) bool { return ok } -// DataCorruptionError is an error that occures if data on disk was corrupted. +// DataCorruptionError is an error that occurs if data on disk was corrupted. type DataCorruptionError struct { cause error } diff --git a/types/block_test.go b/types/block_test.go index 59143f08fd..70a587d982 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -75,7 +75,7 @@ func TestBlockValidateBasic(t *testing.T) { {"Negative Height", func(blk *Block) { blk.Height = -1 }, true}, {"Remove 1/2 the commits", func(blk *Block) { blk.LastCommit.Signatures = commit.Signatures[:commit.Size()/2] - blk.LastCommit.hash = nil // clear hash or change wont be noticed + blk.LastCommit.hash = nil // clear hash or change won't be noticed }, true}, {"Remove LastCommitHash", func(blk *Block) { blk.LastCommitHash = []byte("something else") }, true}, {"Tampered EvidenceHash", func(blk *Block) { diff --git a/types/evidence.go b/types/evidence.go index 175dbf9519..533818b8a0 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -278,7 +278,7 @@ func (l *LightClientAttackEvidence) GetByzantineValidators(commonVals *Validator return validators } -// ConflictingHeaderIsInvalid takes a trusted header and matches it againt a conflicting header +// ConflictingHeaderIsInvalid takes a trusted header and matches it against a conflicting header // to determine whether the conflicting header was the product of a valid state transition // or not. If it is then all the deterministic fields of the header should be the same. // If not, it is an invalid header and constitutes a lunatic attack. 
diff --git a/types/validator.go b/types/validator.go index 886b32756d..6e161848c1 100644 --- a/types/validator.go +++ b/types/validator.go @@ -132,7 +132,7 @@ func (v *Validator) Bytes() []byte { return bz } -// ToProto converts Valiator to protobuf +// ToProto converts Validator to protobuf func (v *Validator) ToProto() (*cmtproto.Validator, error) { if v == nil { return nil, errors.New("nil validator") diff --git a/types/validator_set.go b/types/validator_set.go index 019834ac1d..bf009e7245 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -687,7 +687,7 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, continue // OK, some signatures can be absent. } - // The vals and commit have a 1-to-1 correspondance. + // The vals and commit have a 1-to-1 correspondence. // This means we don't need the validator address or to do any lookup. val := vals.Validators[idx] @@ -743,7 +743,7 @@ func (vals *ValidatorSet) VerifyCommitLight(chainID string, blockID BlockID, continue } - // The vals and commit have a 1-to-1 correspondance. + // The vals and commit have a 1-to-1 correspondence. // This means we don't need the validator address or to do any lookup. val := vals.Validators[idx]