diff --git a/gossip/src/cluster_info.rs b/gossip/src/cluster_info.rs
index 9408b0fdcc8760..1ef4a85711894f 100644
--- a/gossip/src/cluster_info.rs
+++ b/gossip/src/cluster_info.rs
@@ -1021,7 +1021,7 @@ impl ClusterInfo {
         &'a self,
         label: &'static str,
         counter: &'a Counter,
-    ) -> TimedGuard<'a, RwLockReadGuard<Crds>> {
+    ) -> TimedGuard<'a, RwLockReadGuard<'a, Crds>> {
         TimedGuard::new(self.gossip.crds.read().unwrap(), label, counter)
     }
 
diff --git a/gossip/src/push_active_set.rs b/gossip/src/push_active_set.rs
index 83f84b8a0624ed..ee33e0fa32f6b1 100644
--- a/gossip/src/push_active_set.rs
+++ b/gossip/src/push_active_set.rs
@@ -34,7 +34,7 @@ impl PushActiveSet {
         // If true forces gossip push even if the node has pruned the origin.
         should_force_push: impl FnMut(&Pubkey) -> bool + 'a,
         stakes: &HashMap<Pubkey, u64>,
-    ) -> impl Iterator<Item = &Pubkey> + 'a {
+    ) -> impl Iterator<Item = &'a Pubkey> + 'a {
         let stake = stakes.get(pubkey).min(stakes.get(origin));
         self.get_entry(stake)
             .get_nodes(pubkey, origin, should_force_push)
@@ -115,7 +115,7 @@ impl PushActiveSetEntry {
         origin: &'a Pubkey, // CRDS value owner.
         // If true forces gossip push even if the node has pruned the origin.
         mut should_force_push: impl FnMut(&Pubkey) -> bool + 'a,
-    ) -> impl Iterator<Item = &Pubkey> + 'a {
+    ) -> impl Iterator<Item = &'a Pubkey> + 'a {
         let pubkey_eq_origin = pubkey == origin;
         self.0
             .iter()
diff --git a/ledger/ledger.lock b/ledger/ledger.lock
new file mode 100644
index 00000000000000..e69de29bb2d1d6
diff --git a/ledger/src/blockstore.rs b/ledger/src/blockstore.rs
index 07d9a6d0cb5404..a3b6588ae680c2 100644
--- a/ledger/src/blockstore.rs
+++ b/ledger/src/blockstore.rs
@@ -541,7 +541,7 @@ impl Blockstore {
         &'a self,
         erasure_set: ErasureSetId,
         erasure_metas: &'a BTreeMap<ErasureSetId, WorkingEntry<ErasureMeta>>,
-    ) -> Result<Option<(ErasureSetId, Cow<ErasureMeta>)>> {
+    ) -> Result<Option<(ErasureSetId, Cow<'a, ErasureMeta>)>> {
         let (slot, fec_set_index) = erasure_set.store_key();
 
         // Check the previous entry from the in memory map to see if it is the consecutive
@@ -1534,7 +1534,7 @@ impl Blockstore {
         slot: Slot,
         erasure_meta: &ErasureMeta,
         just_received_shreds: &'a HashMap<ShredId, Shred>,
-    ) -> Option<Cow<Vec<u8>>> {
+    ) -> Option<Cow<'a, Vec<u8>>> {
         // Search for the shred which set the initial erasure config, either inserted,
         // or in the current batch in just_received_shreds.
         let index = erasure_meta.first_received_coding_shred_index()?;
diff --git a/runtime/src/bank.rs b/runtime/src/bank.rs
index a72b2ec66284d7..3ffc2b1445c7ca 100644
--- a/runtime/src/bank.rs
+++ b/runtime/src/bank.rs
@@ -3499,7 +3499,7 @@ impl Bank {
     pub fn prepare_unlocked_batch_from_single_tx<'a>(
         &'a self,
         transaction: &'a SanitizedTransaction,
-    ) -> TransactionBatch<'_, '_, SanitizedTransaction> {
+    ) -> TransactionBatch<'a, 'a, SanitizedTransaction> {
         let tx_account_lock_limit = self.get_transaction_account_lock_limit();
         let lock_result =
             validate_account_locks(transaction.message().account_keys(), tx_account_lock_limit);
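
The sketch below is not part of the diff; it is a minimal, self-contained illustration of the pattern the changes above apply: writing out a lifetime that elision would otherwise hide, so the return type states in the signature that the returned guard borrows from &'a self. The Node, Crds, and read_crds names are hypothetical stand-ins, not the actual agave types.

// Minimal sketch with assumed types; illustrates the explicit-lifetime
// pattern from the diff, not the real cluster_info.rs implementation.
use std::sync::{RwLock, RwLockReadGuard};

struct Crds {
    num_values: usize,
}

struct Node {
    crds: RwLock<Crds>,
}

impl Node {
    // Before: a return type written `RwLockReadGuard<Crds>` compiles, but the
    // borrow of `self` stays hidden behind lifetime elision.
    // After: `RwLockReadGuard<'a, Crds>` ties the guard to `&'a self` in the
    // signature itself, which is what the diff does for TimedGuard, Cow, and
    // TransactionBatch return types.
    fn read_crds<'a>(&'a self) -> RwLockReadGuard<'a, Crds> {
        self.crds.read().unwrap()
    }
}

fn main() {
    let node = Node {
        crds: RwLock::new(Crds { num_values: 42 }),
    };
    let guard = node.read_crds();
    println!("crds holds {} values", guard.num_values);
}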