diff --git a/.cargo/config.toml b/.cargo/config.toml
new file mode 100644
index 000000000000..66b28b3485d8
--- /dev/null
+++ b/.cargo/config.toml
@@ -0,0 +1,32 @@
+#
+# An auto-defined `clippy` feature was introduced,
+# but it was found to clash with user-defined features,
+# so was renamed to `cargo-clippy`.
+#
+# If you want standard clippy run:
+# RUSTFLAGS= cargo clippy
+[target.'cfg(feature = "cargo-clippy")']
+rustflags = [
+    "-Aclippy::all",
+    "-Dclippy::correctness",
+    "-Aclippy::if-same-then-else",
+    "-Aclippy::clone-double-ref",
+    "-Dclippy::complexity",
+    "-Aclippy::zero-prefixed-literal",     # 00_1000_000
+    "-Aclippy::type_complexity",           # raison d'etre
+    "-Aclippy::nonminimal-bool",           # maybe
+    "-Aclippy::borrowed-box",              # Reasonable to fix this one
+    "-Aclippy::too-many-arguments",        # (Turning this on would lead to)
+    "-Aclippy::unnecessary_cast",          # Types may change
+    "-Aclippy::identity-op",               # One case where we do 0 +
+    "-Aclippy::useless_conversion",        # Types may change
+    "-Aclippy::unit_arg",                  # stylistic.
+    "-Aclippy::option-map-unit-fn",        # stylistic
+    "-Aclippy::bind_instead_of_map",       # stylistic
+    "-Aclippy::erasing_op",                # E.g. 0 * DOLLARS
+    "-Aclippy::eq_op",                     # In tests we test equality.
+    "-Aclippy::while_immutable_condition", # false positives
+    "-Aclippy::needless_option_as_deref",  # false positives
+    "-Aclippy::derivable_impls",           # false positives
+    "-Aclippy::stable_sort_primitive",     # prefer stable sort
+]
diff --git a/.gitignore b/.gitignore
index 5ea0458ddfc8..f9ab33eb63f3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,5 +10,4 @@ polkadot.*
 !polkadot.service
 !.rpm/*
 .DS_Store
-.cargo
 .env
diff --git a/cli/src/command.rs b/cli/src/command.rs
index 5ce7c05162c1..0995e1d265d4 100644
--- a/cli/src/command.rs
+++ b/cli/src/command.rs
@@ -591,27 +591,27 @@ pub fn run() -> Result<()> {

 			#[cfg(feature = "kusama-native")]
 			if chain_spec.is_kusama() {
-				return Ok(runner.sync_run(|config| {
+				return runner.sync_run(|config| {
 					cmd.run::(config)
 						.map_err(|e| Error::SubstrateCli(e))
-				})?)
+				})
 			}

 			#[cfg(feature = "westend-native")]
 			if chain_spec.is_westend() {
-				return Ok(runner.sync_run(|config| {
+				return runner.sync_run(|config| {
 					cmd.run::(config)
 						.map_err(|e| Error::SubstrateCli(e))
-				})?)
+				})
 			}

 			// else we assume it is polkadot.
 			#[cfg(feature = "polkadot-native")]
 			{
-				return Ok(runner.sync_run(|config| {
+				return runner.sync_run(|config| {
 					cmd.run::(config)
 						.map_err(|e| Error::SubstrateCli(e))
-				})?)
+				})
 			}

 			#[cfg(not(feature = "polkadot-native"))]
diff --git a/erasure-coding/src/lib.rs b/erasure-coding/src/lib.rs
index 5e85809f4117..6abd7dce4dd3 100644
--- a/erasure-coding/src/lib.rs
+++ b/erasure-coding/src/lib.rs
@@ -216,7 +216,7 @@ pub struct Branches<'a, I> {

 impl<'a, I: AsRef<[u8]>> Branches<'a, I> {
 	/// Get the trie root.
pub fn root(&self) -> H256 { - self.root.clone() + self.root } } diff --git a/node/client/src/benchmarking.rs b/node/client/src/benchmarking.rs index 7990bc88d218..aaa60a168b4d 100644 --- a/node/client/src/benchmarking.rs +++ b/node/client/src/benchmarking.rs @@ -165,7 +165,7 @@ impl BenchmarkCallSigner (), runtime::VERSION.spec_version, runtime::VERSION.transaction_version, - genesis.clone(), + genesis, genesis, (), (), @@ -220,7 +220,7 @@ impl BenchmarkCallSigner (), runtime::VERSION.spec_version, runtime::VERSION.transaction_version, - genesis.clone(), + genesis, genesis, (), (), @@ -274,7 +274,7 @@ impl BenchmarkCallSigner (), runtime::VERSION.spec_version, runtime::VERSION.transaction_version, - genesis.clone(), + genesis, genesis, (), (), @@ -328,7 +328,7 @@ impl BenchmarkCallSigner (), runtime::VERSION.spec_version, runtime::VERSION.transaction_version, - genesis.clone(), + genesis, genesis, (), (), diff --git a/node/core/approval-voting/src/approval_checking.rs b/node/core/approval-voting/src/approval_checking.rs index b513c18895b3..82a9a8c89bf5 100644 --- a/node/core/approval-voting/src/approval_checking.rs +++ b/node/core/approval-voting/src/approval_checking.rs @@ -282,8 +282,8 @@ impl State { /// Constructs an infinite iterator from an array of `TrancheEntry` values. Any missing tranches /// are filled with empty assignments, as they are needed to compute the approved tranches. -fn filled_tranche_iterator<'a>( - tranches: &'a [TrancheEntry], +fn filled_tranche_iterator( + tranches: &[TrancheEntry], ) -> impl Iterator { let mut gap_end = None; diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index fea71d79c098..520a1a745056 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -155,10 +155,10 @@ impl<'a> From<&'a SessionInfo> for Config { Config { assignment_keys: s.assignment_keys.clone(), validator_groups: s.validator_groups.clone(), - n_cores: s.n_cores.clone(), - zeroth_delay_tranche_width: s.zeroth_delay_tranche_width.clone(), - relay_vrf_modulo_samples: s.relay_vrf_modulo_samples.clone(), - n_delay_tranches: s.n_delay_tranches.clone(), + n_cores: s.n_cores, + zeroth_delay_tranche_width: s.zeroth_delay_tranche_width, + relay_vrf_modulo_samples: s.relay_vrf_modulo_samples, + n_delay_tranches: s.n_delay_tranches, } } } diff --git a/node/core/approval-voting/src/import.rs b/node/core/approval-voting/src/import.rs index 20629dd022d4..2331b50b6bb1 100644 --- a/node/core/approval-voting/src/import.rs +++ b/node/core/approval-voting/src/import.rs @@ -415,11 +415,8 @@ pub(crate) async fn handle_new_head( Err(error) => { // It's possible that we've lost a race with finality. 
let (tx, rx) = oneshot::channel(); - ctx.send_message(ChainApiMessage::FinalizedBlockHash( - block_header.number.clone(), - tx, - )) - .await; + ctx.send_message(ChainApiMessage::FinalizedBlockHash(block_header.number, tx)) + .await; let lost_to_finality = match rx.await { Ok(Ok(Some(h))) if h != block_hash => true, diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index bc63549795c2..06a4f0b24bb0 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -621,10 +621,7 @@ impl CurrentlyCheckingSet { .candidate_hash_map .remove(&approval_state.candidate_hash) .unwrap_or_default(); - approvals_cache.put( - approval_state.candidate_hash.clone(), - approval_state.approval_outcome.clone(), - ); + approvals_cache.put(approval_state.candidate_hash, approval_state.approval_outcome); return (out, approval_state) } } @@ -768,7 +765,7 @@ async fn run( where B: Backend, { - if let Err(err) = db_sanity_check(subsystem.db.clone(), subsystem.db_config.clone()) { + if let Err(err) = db_sanity_check(subsystem.db.clone(), subsystem.db_config) { gum::warn!(target: LOG_TARGET, ?err, "Could not run approval vote DB sanity check"); } @@ -1278,7 +1275,7 @@ async fn get_approval_signatures_for_candidate( Some(e) => e, }; - let relay_hashes = entry.block_assignments.iter().map(|(relay_hash, _)| relay_hash); + let relay_hashes = entry.block_assignments.keys(); let mut candidate_indices = HashSet::new(); // Retrieve `CoreIndices`/`CandidateIndices` as required by approval-distribution: @@ -2502,7 +2499,7 @@ async fn issue_approval( }; let candidate_hash = match block_entry.candidate(candidate_index as usize) { - Some((_, h)) => h.clone(), + Some((_, h)) => *h, None => { gum::warn!( target: LOG_TARGET, diff --git a/node/core/av-store/src/lib.rs b/node/core/av-store/src/lib.rs index 4fbbf3740ab0..cbbbf2bbd7dc 100644 --- a/node/core/av-store/src/lib.rs +++ b/node/core/av-store/src/lib.rs @@ -61,7 +61,7 @@ const PRUNE_BY_TIME_PREFIX: &[u8; 13] = b"prune_by_time"; // We have some keys we want to map to empty values because existence of the key is enough. We use this because // rocksdb doesn't support empty values. -const TOMBSTONE_VALUE: &[u8] = &*b" "; +const TOMBSTONE_VALUE: &[u8] = b" "; /// Unavailable blocks are kept for 1 hour. 
const KEEP_UNAVAILABLE_FOR: Duration = Duration::from_secs(60 * 60); diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index a9ae518e3103..2f8aa4490f27 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -482,9 +482,7 @@ impl TableContextTrait for TableContext { } fn is_member_of(&self, authority: &ValidatorIndex, group: &ParaId) -> bool { - self.groups - .get(group) - .map_or(false, |g| g.iter().position(|a| a == authority).is_some()) + self.groups.get(group).map_or(false, |g| g.iter().any(|a| a == authority)) } fn requisite_votes(&self, group: &ParaId) -> usize { @@ -499,7 +497,7 @@ struct InvalidErasureRoot; fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement { let statement = match s.payload() { Statement::Seconded(c) => TableStatement::Seconded(c.clone()), - Statement::Valid(h) => TableStatement::Valid(h.clone()), + Statement::Valid(h) => TableStatement::Valid(*h), }; TableSignedStatement { diff --git a/node/core/candidate-validation/src/lib.rs b/node/core/candidate-validation/src/lib.rs index 7d9db4f3d794..f21f1be2f1bf 100644 --- a/node/core/candidate-validation/src/lib.rs +++ b/node/core/candidate-validation/src/lib.rs @@ -502,7 +502,7 @@ async fn validate_candidate_exhaustive( let _timer = metrics.time_validate_candidate_exhaustive(); let validation_code_hash = validation_code.hash(); - let para_id = candidate_receipt.descriptor.para_id.clone(); + let para_id = candidate_receipt.descriptor.para_id; gum::debug!( target: LOG_TARGET, ?validation_code_hash, @@ -513,7 +513,7 @@ async fn validate_candidate_exhaustive( if let Err(e) = perform_basic_checks( &candidate_receipt.descriptor, persisted_validation_data.max_pov_size, - &*pov, + &pov, &validation_code_hash, ) { gum::info!(target: LOG_TARGET, ?para_id, "Invalid candidate (basic checks)"); diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs index e5ffe6811d6e..786454fb9891 100644 --- a/node/core/chain-selection/src/lib.rs +++ b/node/core/chain-selection/src/lib.rs @@ -381,6 +381,7 @@ async fn run( ) where B: Backend, { + #![allow(clippy::all)] loop { let res = run_until_error( &mut ctx, diff --git a/node/core/dispute-coordinator/src/import.rs b/node/core/dispute-coordinator/src/import.rs index c0f0d3d9e009..28eacffab861 100644 --- a/node/core/dispute-coordinator/src/import.rs +++ b/node/core/dispute-coordinator/src/import.rs @@ -169,7 +169,7 @@ impl CandidateVoteState { } /// Create a new `CandidateVoteState` from already existing votes. - pub fn new<'a>(votes: CandidateVotes, env: &CandidateEnvironment<'a>, now: Timestamp) -> Self { + pub fn new(votes: CandidateVotes, env: &CandidateEnvironment, now: Timestamp) -> Self { let own_vote = OwnVoteState::new(&votes, env); let n_validators = env.validators().len(); diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs index ab9faca39868..f8d7791b01b9 100644 --- a/node/core/dispute-coordinator/src/initialized.rs +++ b/node/core/dispute-coordinator/src/initialized.rs @@ -713,20 +713,22 @@ impl Initialized { return Ok(ImportStatementsResult::InvalidImport) } - let env = - match CandidateEnvironment::new(&*self.keystore, &self.rolling_session_window, session) - { - None => { - gum::warn!( - target: LOG_TARGET, - session, - "We are lacking a `SessionInfo` for handling import of statements." 
- ); + let env = match CandidateEnvironment::new( + &self.keystore, + &self.rolling_session_window, + session, + ) { + None => { + gum::warn!( + target: LOG_TARGET, + session, + "We are lacking a `SessionInfo` for handling import of statements." + ); - return Ok(ImportStatementsResult::InvalidImport) - }, - Some(env) => env, - }; + return Ok(ImportStatementsResult::InvalidImport) + }, + Some(env) => env, + }; let candidate_hash = candidate_receipt.hash(); @@ -1075,20 +1077,22 @@ impl Initialized { "Issuing local statement for candidate!" ); // Load environment: - let env = - match CandidateEnvironment::new(&*self.keystore, &self.rolling_session_window, session) - { - None => { - gum::warn!( - target: LOG_TARGET, - session, - "Missing info for session which has an active dispute", - ); + let env = match CandidateEnvironment::new( + &self.keystore, + &self.rolling_session_window, + session, + ) { + None => { + gum::warn!( + target: LOG_TARGET, + session, + "Missing info for session which has an active dispute", + ); - return Ok(()) - }, - Some(env) => env, - }; + return Ok(()) + }, + Some(env) => env, + }; let votes = overlay_db .load_candidate_votes(session, &candidate_hash)? @@ -1257,7 +1261,7 @@ fn make_dispute_message( votes.invalid.iter().next().ok_or(DisputeMessageCreationError::NoOppositeVote)?; let other_vote = SignedDisputeStatement::new_checked( DisputeStatement::Invalid(*statement_kind), - our_vote.candidate_hash().clone(), + *our_vote.candidate_hash(), our_vote.session_index(), validators .get(*validator_index) @@ -1272,7 +1276,7 @@ fn make_dispute_message( votes.valid.iter().next().ok_or(DisputeMessageCreationError::NoOppositeVote)?; let other_vote = SignedDisputeStatement::new_checked( DisputeStatement::Valid(*statement_kind), - our_vote.candidate_hash().clone(), + *our_vote.candidate_hash(), our_vote.session_index(), validators .get(*validator_index) diff --git a/node/core/dispute-coordinator/src/participation/mod.rs b/node/core/dispute-coordinator/src/participation/mod.rs index 874f37e63213..e923e13e8142 100644 --- a/node/core/dispute-coordinator/src/participation/mod.rs +++ b/node/core/dispute-coordinator/src/participation/mod.rs @@ -235,7 +235,7 @@ impl Participation { req: ParticipationRequest, recent_head: Hash, ) -> FatalResult<()> { - if self.running_participations.insert(req.candidate_hash().clone()) { + if self.running_participations.insert(*req.candidate_hash()) { let sender = ctx.sender().clone(); ctx.spawn( "participation-worker", diff --git a/node/core/dispute-coordinator/src/participation/queues/mod.rs b/node/core/dispute-coordinator/src/participation/queues/mod.rs index d2fcab1ba258..29380bd77df1 100644 --- a/node/core/dispute-coordinator/src/participation/queues/mod.rs +++ b/node/core/dispute-coordinator/src/participation/queues/mod.rs @@ -204,7 +204,7 @@ impl Queues { // Once https://github.com/rust-lang/rust/issues/62924 is there, we can use a simple: // target.pop_first(). 
if let Some((comparator, _)) = target.iter().next() { - let comparator = comparator.clone(); + let comparator = *comparator; target .remove(&comparator) .map(|participation_request| (comparator, participation_request)) diff --git a/node/core/provisioner/src/disputes/prioritized_selection/mod.rs b/node/core/provisioner/src/disputes/prioritized_selection/mod.rs index dcfcd0d1c2f0..4231dc840c2c 100644 --- a/node/core/provisioner/src/disputes/prioritized_selection/mod.rs +++ b/node/core/provisioner/src/disputes/prioritized_selection/mod.rs @@ -99,7 +99,7 @@ where ); // Fetch the onchain disputes. We'll do a prioritization based on them. - let onchain = match get_onchain_disputes(sender, leaf.hash.clone()).await { + let onchain = match get_onchain_disputes(sender, leaf.hash).await { Ok(r) => r, Err(GetOnchainDisputesError::NotSupported(runtime_api_err, relay_parent)) => { // Runtime version is checked before calling this method, so the error below should never happen! diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index 0530d48aabda..fcb65d66f286 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -373,7 +373,7 @@ async fn send_inherent_data( let disputes = match has_required_runtime( from_job, - leaf.hash.clone(), + leaf.hash, PRIORITIZED_SELECTION_RUNTIME_VERSION_REQUIREMENT, ) .await @@ -506,7 +506,7 @@ fn select_availability_bitfields( bitfields.len() ); - selected.into_iter().map(|(_, b)| b).collect() + selected.into_values().collect() } /// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. diff --git a/node/core/pvf/src/execute/queue.rs b/node/core/pvf/src/execute/queue.rs index b4c6a66b7719..17fb5765f7d3 100644 --- a/node/core/pvf/src/execute/queue.rs +++ b/node/core/pvf/src/execute/queue.rs @@ -225,10 +225,8 @@ fn handle_job_finish( result_tx: ResultSender, ) { let (idle_worker, result) = match outcome { - Outcome::Ok { result_descriptor, duration_ms, idle_worker } => { + Outcome::Ok { result_descriptor, duration_ms: _, idle_worker } => { // TODO: propagate the soft timeout - drop(duration_ms); - (Some(idle_worker), Ok(result_descriptor)) }, Outcome::InvalidCandidate { err, idle_worker } => ( diff --git a/node/core/pvf/src/executor_intf.rs b/node/core/pvf/src/executor_intf.rs index bbeb6195e1dc..c5578f5f81ad 100644 --- a/node/core/pvf/src/executor_intf.rs +++ b/node/core/pvf/src/executor_intf.rs @@ -424,7 +424,7 @@ impl sp_core::traits::ReadRuntimeVersion for ReadRuntimeVersion { use parity_scale_codec::Encode; Ok(version.encode()) }, - None => Err(format!("runtime version section is not found")), + None => Err("runtime version section is not found".to_string()), } } } diff --git a/node/core/pvf/src/prepare/worker.rs b/node/core/pvf/src/prepare/worker.rs index 1cf512894740..a16b9b94176e 100644 --- a/node/core/pvf/src/prepare/worker.rs +++ b/node/core/pvf/src/prepare/worker.rs @@ -219,7 +219,7 @@ async fn send_request( code: Arc>, tmp_file: &Path, ) -> io::Result<()> { - framed_send(stream, &*code).await?; + framed_send(stream, &code).await?; framed_send(stream, path_to_bytes(tmp_file)).await?; Ok(()) } diff --git a/node/core/pvf/src/testing.rs b/node/core/pvf/src/testing.rs index 3b64d130fc6a..cbd37b06d403 100644 --- a/node/core/pvf/src/testing.rs +++ b/node/core/pvf/src/testing.rs @@ -34,7 +34,7 @@ pub fn validate_candidate( let code = sp_maybe_compressed_blob::decompress(code, 10 * 1024 * 1024) .expect("Decompressing code failed"); - let blob = 
prevalidate(&*code)?; + let blob = prevalidate(&code)?; let artifact = prepare(blob)?; let tmpdir = tempfile::tempdir()?; let artifact_path = tmpdir.path().join("blob"); diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index 36355b5759e6..de42ace3af0c 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -268,7 +268,7 @@ where let (sender, receiver) = oneshot::channel(); // TODO: make the cache great again https://github.com/paritytech/polkadot/issues/5546 - let request = match self.query_cache(relay_parent.clone(), request) { + let request = match self.query_cache(relay_parent, request) { Some(request) => request, None => return, }; diff --git a/node/metrics/src/metronome.rs b/node/metrics/src/metronome.rs index 9184f7ac20ad..ac47e20319d8 100644 --- a/node/metrics/src/metronome.rs +++ b/node/metrics/src/metronome.rs @@ -49,7 +49,7 @@ impl futures::Stream for Metronome { loop { match self.state { MetronomeState::SetAlarm => { - let val = self.period.clone(); + let val = self.period; self.delay.reset(val); self.state = MetronomeState::Snooze; }, diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 5afae66ae818..017538cae5f3 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -309,7 +309,7 @@ enum MessageSource { impl MessageSource { fn peer_id(&self) -> Option { match self { - Self::Peer(id) => Some(id.clone()), + Self::Peer(id) => Some(*id), Self::Local => None, } } @@ -389,7 +389,7 @@ impl State { ) { let mut new_hashes = HashSet::new(); for meta in &metas { - match self.blocks.entry(meta.hash.clone()) { + match self.blocks.entry(meta.hash) { hash_map::Entry::Vacant(entry) => { let candidates_count = meta.candidates.len(); let mut candidates = Vec::with_capacity(candidates_count); @@ -398,7 +398,7 @@ impl State { entry.insert(BlockEntry { known_by: HashMap::new(), number: meta.number, - parent_hash: meta.parent_hash.clone(), + parent_hash: meta.parent_hash, knowledge: Knowledge::default(), candidates, session: meta.session, @@ -406,7 +406,7 @@ impl State { self.topologies.inc_session_refs(meta.session); - new_hashes.insert(meta.hash.clone()); + new_hashes.insert(meta.hash); // In case there are duplicates, we should only set this if the entry // was vacant. 
@@ -433,7 +433,7 @@ impl State { &mut self.blocks, &self.topologies, self.peer_views.len(), - peer_id.clone(), + *peer_id, view_intersection, rng, ) @@ -563,10 +563,8 @@ impl State { "Pending assignment", ); - pending.push(( - peer_id.clone(), - PendingMessage::Assignment(assignment, claimed_index), - )); + pending + .push((peer_id, PendingMessage::Assignment(assignment, claimed_index))); continue } @@ -574,7 +572,7 @@ impl State { self.import_and_circulate_assignment( ctx, metrics, - MessageSource::Peer(peer_id.clone()), + MessageSource::Peer(peer_id), assignment, claimed_index, rng, @@ -604,7 +602,7 @@ impl State { "Pending approval", ); - pending.push((peer_id.clone(), PendingMessage::Approval(approval_vote))); + pending.push((peer_id, PendingMessage::Approval(approval_vote))); continue } @@ -612,7 +610,7 @@ impl State { self.import_and_circulate_approval( ctx, metrics, - MessageSource::Peer(peer_id.clone()), + MessageSource::Peer(peer_id), approval_vote, ) .await; @@ -663,7 +661,7 @@ impl State { &mut self.blocks, &self.topologies, self.peer_views.len(), - peer_id.clone(), + peer_id, view, rng, ) @@ -709,7 +707,7 @@ impl State { ) where R: CryptoRng + Rng, { - let block_hash = assignment.block_hash.clone(); + let block_hash = assignment.block_hash; let validator_index = assignment.validator; let entry = match self.blocks.get_mut(&block_hash) { @@ -737,7 +735,7 @@ impl State { if let Some(peer_id) = source.peer_id() { // check if our knowledge of the peer already contains this assignment - match entry.known_by.entry(peer_id.clone()) { + match entry.known_by.entry(peer_id) { hash_map::Entry::Occupied(mut peer_knowledge) => { let peer_knowledge = peer_knowledge.get_mut(); if peer_knowledge.contains(&message_subject, message_kind) { @@ -761,13 +759,13 @@ impl State { ?message_subject, "Assignment from a peer is out of view", ); - modify_reputation(ctx.sender(), peer_id.clone(), COST_UNEXPECTED_MESSAGE).await; + modify_reputation(ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await; }, } // if the assignment is known to be valid, reward the peer if entry.knowledge.contains(&message_subject, message_kind) { - modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE).await; + modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE).await; if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) { gum::trace!(target: LOG_TARGET, ?peer_id, ?message_subject, "Known assignment"); peer_knowledge.received.insert(message_subject, message_kind); @@ -803,8 +801,7 @@ impl State { ); match result { AssignmentCheckResult::Accepted => { - modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE_FIRST) - .await; + modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE_FIRST).await; entry.knowledge.known_messages.insert(message_subject.clone(), message_kind); if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) { peer_knowledge.received.insert(message_subject.clone(), message_kind); @@ -970,7 +967,7 @@ impl State { source: MessageSource, vote: IndirectSignedApprovalVote, ) { - let block_hash = vote.block_hash.clone(); + let block_hash = vote.block_hash; let validator_index = vote.validator; let candidate_index = vote.candidate_index; @@ -1003,7 +1000,7 @@ impl State { } // check if our knowledge of the peer already contains this approval - match entry.known_by.entry(peer_id.clone()) { + match entry.known_by.entry(peer_id) { hash_map::Entry::Occupied(mut knowledge) => { let peer_knowledge = knowledge.get_mut(); if 
peer_knowledge.contains(&message_subject, message_kind) { @@ -1027,14 +1024,14 @@ impl State { ?message_subject, "Approval from a peer is out of view", ); - modify_reputation(ctx.sender(), peer_id.clone(), COST_UNEXPECTED_MESSAGE).await; + modify_reputation(ctx.sender(), peer_id, COST_UNEXPECTED_MESSAGE).await; }, } // if the approval is known to be valid, reward the peer if entry.knowledge.contains(&message_subject, message_kind) { gum::trace!(target: LOG_TARGET, ?peer_id, ?message_subject, "Known approval"); - modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE).await; + modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE).await; if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) { peer_knowledge.received.insert(message_subject.clone(), message_kind); } @@ -1065,8 +1062,7 @@ impl State { ); match result { ApprovalCheckResult::Accepted => { - modify_reputation(ctx.sender(), peer_id.clone(), BENEFIT_VALID_MESSAGE_FIRST) - .await; + modify_reputation(ctx.sender(), peer_id, BENEFIT_VALID_MESSAGE_FIRST).await; entry.knowledge.insert(message_subject.clone(), message_kind); if let Some(peer_knowledge) = entry.known_by.get_mut(&peer_id) { @@ -1301,7 +1297,7 @@ impl State { break } - let peer_knowledge = entry.known_by.entry(peer_id.clone()).or_default(); + let peer_knowledge = entry.known_by.entry(peer_id).or_default(); let topology = topologies.get_topology(entry.session); @@ -1335,13 +1331,12 @@ impl State { } } - let message_subject = - MessageSubject(block.clone(), candidate_index, validator.clone()); + let message_subject = MessageSubject(block, candidate_index, *validator); let assignment_message = ( IndirectAssignmentCert { - block_hash: block.clone(), - validator: validator.clone(), + block_hash: block, + validator: *validator, cert: message_state.approval_state.assignment_cert().clone(), }, candidate_index, @@ -1350,8 +1345,8 @@ impl State { let approval_message = message_state.approval_state.approval_signature().map(|signature| { IndirectSignedApprovalVote { - block_hash: block.clone(), - validator: validator.clone(), + block_hash: block, + validator: *validator, candidate_index, signature, } @@ -1374,7 +1369,7 @@ impl State { } } - block = entry.parent_hash.clone(); + block = entry.parent_hash; } } @@ -1388,7 +1383,7 @@ impl State { sender .send_message(NetworkBridgeTxMessage::SendValidationMessage( - vec![peer_id.clone()], + vec![peer_id], Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( protocol_v1::ApprovalDistributionMessage::Assignments(assignments_to_send), )), @@ -1558,13 +1553,12 @@ async fn adjust_required_routing_and_propagate( ctx.send_message(AvailabilityStoreMessage::QueryAvailableData(candidate_hash, tx)) .await; - Ok(rx.await.map_err(error::Error::CanceledQueryFullData)?) 
+ rx.await.map_err(error::Error::CanceledQueryFullData) } #[overseer::contextbounds(AvailabilityRecovery, prefix = self::overseer)] diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index 2a1e3b8d9ef3..1bd9230a3787 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -319,7 +319,7 @@ async fn handle_bitfield_distribution( } let validator_index = signed_availability.validator_index(); - let validator = if let Some(validator) = validator_set.get(*&validator_index.0 as usize) { + let validator = if let Some(validator) = validator_set.get(validator_index.0 as usize) { validator.clone() } else { gum::debug!(target: LOG_TARGET, validator_index = ?validator_index.0, "Could not find a validator for index"); @@ -395,7 +395,7 @@ async fn relay_message( }; if need_routing { - Some(peer.clone()) + Some(*peer) } else { None } @@ -412,7 +412,7 @@ async fn relay_message( // track the message as sent for this peer job_data .message_sent_to_peer - .entry(peer.clone()) + .entry(*peer) .or_default() .insert(validator.clone()); }); @@ -497,7 +497,7 @@ async fn process_incoming_peer_message( // Check if the peer already sent us a message for the validator denoted in the message earlier. // Must be done after validator index verification, in order to avoid storing an unbounded // number of set entries. - let received_set = job_data.message_received_from_peer.entry(origin.clone()).or_default(); + let received_set = job_data.message_received_from_peer.entry(origin).or_default(); if !received_set.contains(&validator) { received_set.insert(validator.clone()); @@ -656,7 +656,7 @@ async fn handle_peer_view_change( ) { let added = state .peer_views - .entry(origin.clone()) + .entry(origin) .or_default() .replace_difference(view) .cloned() @@ -681,11 +681,10 @@ async fn handle_peer_view_change( let delta_set: Vec<(ValidatorId, BitfieldGossipMessage)> = added .into_iter() .filter_map(|new_relay_parent_interest| { - if let Some(job_data) = (&*state).per_relay_parent.get(&new_relay_parent_interest) { + if let Some(job_data) = state.per_relay_parent.get(&new_relay_parent_interest) { // Send all jointly known messages for a validator (given the current relay parent) // to the peer `origin`... let one_per_validator = job_data.one_per_validator.clone(); - let origin = origin.clone(); Some(one_per_validator.into_iter().filter(move |(validator, _message)| { // ..except for the ones the peer already has. 
job_data.message_from_validator_needed_by_peer(&origin, validator) @@ -699,7 +698,7 @@ async fn handle_peer_view_change( .collect(); for (validator, message) in delta_set.into_iter() { - send_tracked_gossip_message(ctx, state, origin.clone(), validator, message).await; + send_tracked_gossip_message(ctx, state, origin, validator, message).await; } } @@ -727,11 +726,7 @@ async fn send_tracked_gossip_message( "Sending gossip message" ); - job_data - .message_sent_to_peer - .entry(dest.clone()) - .or_default() - .insert(validator.clone()); + job_data.message_sent_to_peer.entry(dest).or_default().insert(validator.clone()); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( vec![dest], @@ -760,14 +755,14 @@ async fn query_basics( // query validators ctx.send_message(RuntimeApiMessage::Request( - relay_parent.clone(), + relay_parent, RuntimeApiRequest::Validators(validators_tx), )) .await; // query signing context ctx.send_message(RuntimeApiMessage::Request( - relay_parent.clone(), + relay_parent, RuntimeApiRequest::SessionIndexForChild(session_tx), )) .await; diff --git a/node/network/bridge/src/network.rs b/node/network/bridge/src/network.rs index 9b326cbbfb38..32dc79d25814 100644 --- a/node/network/bridge/src/network.rs +++ b/node/network/bridge/src/network.rs @@ -174,7 +174,7 @@ impl Network for Arc> { Ok(v) => v, Err(_) => continue, }; - NetworkService::add_known_address(&*self, peer_id.clone(), addr); + NetworkService::add_known_address(self, peer_id, addr); found_peer_id = Some(peer_id); } found_peer_id @@ -197,7 +197,7 @@ impl Network for Arc> { }; NetworkService::start_request( - &*self, + self, peer_id, req_protocol_names.get_name(protocol), payload, diff --git a/node/network/bridge/src/rx/mod.rs b/node/network/bridge/src/rx/mod.rs index 8adbcf857811..1d3052d3a218 100644 --- a/node/network/bridge/src/rx/mod.rs +++ b/node/network/bridge/src/rx/mod.rs @@ -213,7 +213,7 @@ where PeerSet::Collation => &mut shared.collation_peers, }; - match peer_map.entry(peer.clone()) { + match peer_map.entry(peer) { hash_map::Entry::Occupied(_) => continue, hash_map::Entry::Vacant(vacant) => { vacant.insert(PeerData { view: View::default(), version }); @@ -234,12 +234,12 @@ where dispatch_validation_events_to_all( vec![ NetworkBridgeEvent::PeerConnected( - peer.clone(), + peer, role, version, maybe_authority, ), - NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()), + NetworkBridgeEvent::PeerViewChange(peer, View::default()), ], &mut sender, ) @@ -259,12 +259,12 @@ where dispatch_collation_events_to_all( vec![ NetworkBridgeEvent::PeerConnected( - peer.clone(), + peer, role, version, maybe_authority, ), - NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()), + NetworkBridgeEvent::PeerViewChange(peer, View::default()), ], &mut sender, ) @@ -421,7 +421,7 @@ where Some(ValidationVersion::V1.into()) { handle_v1_peer_messages::( - remote.clone(), + remote, PeerSet::Validation, &mut shared.0.lock().validation_peers, v_messages, @@ -442,7 +442,7 @@ where }; for report in reports { - network_service.report_peer(remote.clone(), report); + network_service.report_peer(remote, report); } dispatch_validation_events_to_all(events, &mut sender).await; @@ -454,7 +454,7 @@ where Some(CollationVersion::V1.into()) { handle_v1_peer_messages::( - remote.clone(), + remote, PeerSet::Collation, &mut shared.0.lock().collation_peers, c_messages, @@ -475,7 +475,7 @@ where }; for report in reports { - network_service.report_peer(remote.clone(), report); + network_service.report_peer(remote, 
report); } dispatch_collation_events_to_all(events, &mut sender).await; @@ -795,11 +795,11 @@ fn handle_v1_peer_messages>( } else { peer_data.view = new_view; - NetworkBridgeEvent::PeerViewChange(peer.clone(), peer_data.view.clone()) + NetworkBridgeEvent::PeerViewChange(peer, peer_data.view.clone()) } }, WireMessage::ProtocolMessage(message) => - NetworkBridgeEvent::PeerMessage(peer.clone(), message.into()), + NetworkBridgeEvent::PeerMessage(peer, message.into()), }) } diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs index 7a603a8a404a..f7b27583a6dd 100644 --- a/node/network/collator-protocol/src/collator_side/mod.rs +++ b/node/network/collator-protocol/src/collator_side/mod.rs @@ -561,7 +561,7 @@ async fn advertise_collation( let wire_message = protocol_v1::CollatorProtocolMessage::AdvertiseCollation(relay_parent); ctx.send_message(NetworkBridgeTxMessage::SendCollationMessage( - vec![peer.clone()], + vec![peer], Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)), )) .await; @@ -707,11 +707,8 @@ async fn handle_incoming_peer_message( "AdvertiseCollation message is not expected on the collator side of the protocol", ); - ctx.send_message(NetworkBridgeTxMessage::ReportPeer( - origin.clone(), - COST_UNEXPECTED_MESSAGE, - )) - .await; + ctx.send_message(NetworkBridgeTxMessage::ReportPeer(origin, COST_UNEXPECTED_MESSAGE)) + .await; // If we are advertised to, this is another collator, and we should disconnect. ctx.send_message(NetworkBridgeTxMessage::DisconnectPeer(origin, PeerSet::Collation)) @@ -838,14 +835,14 @@ async fn handle_peer_view_change( peer_id: PeerId, view: View, ) { - let current = state.peer_views.entry(peer_id.clone()).or_default(); + let current = state.peer_views.entry(peer_id).or_default(); let added: Vec = view.difference(&*current).cloned().collect(); *current = view; for added in added.into_iter() { - advertise_collation(ctx, state, added, peer_id.clone()).await; + advertise_collation(ctx, state, added, peer_id).await; } } diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index b2b3dc4824b5..1442fbcc2bcb 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -287,7 +287,7 @@ impl PeerData { PeerState::Collating(ref mut state) => if state.advertisements.insert(on_relay_parent) { state.last_active = Instant::now(); - Ok((state.collator_id.clone(), state.para_id.clone())) + Ok((state.collator_id.clone(), state.para_id)) } else { Err(AdvertisementError::Duplicate) }, @@ -375,22 +375,19 @@ impl ActiveParas { .await .await .ok() - .map(|x| x.ok()) - .flatten(); + .and_then(|x| x.ok()); let mg = polkadot_node_subsystem_util::request_validator_groups(relay_parent, sender) .await .await .ok() - .map(|x| x.ok()) - .flatten(); + .and_then(|x| x.ok()); let mc = polkadot_node_subsystem_util::request_availability_cores(relay_parent, sender) .await .await .ok() - .map(|x| x.ok()) - .flatten(); + .and_then(|x| x.ok()); let (validators, groups, rotation_info, cores) = match (mv, mg, mc) { (Some(v), Some((g, r)), Some(c)) => (v, g, r, c), @@ -486,12 +483,7 @@ struct PendingCollation { impl PendingCollation { fn new(relay_parent: Hash, para_id: &ParaId, peer_id: &PeerId) -> Self { - Self { - relay_parent, - para_id: para_id.clone(), - peer_id: peer_id.clone(), - commitments_hash: None, - } + Self { relay_parent, para_id: *para_id, 
peer_id: *peer_id, commitments_hash: None } } } @@ -629,9 +621,9 @@ fn collator_peer_id( peer_data: &HashMap, collator_id: &CollatorId, ) -> Option { - peer_data.iter().find_map(|(peer, data)| { - data.collator_id().filter(|c| c == &collator_id).map(|_| peer.clone()) - }) + peer_data + .iter() + .find_map(|(peer, data)| data.collator_id().filter(|c| c == &collator_id).map(|_| *peer)) } async fn disconnect_peer(sender: &mut impl overseer::CollatorProtocolSenderTrait, peer_id: PeerId) { @@ -655,9 +647,7 @@ async fn fetch_collation( Delay::new(MAX_UNSHARED_DOWNLOAD_TIME).await; (collator_id, relay_parent) }; - state - .collation_fetch_timeouts - .push(timeout(id.clone(), relay_parent.clone()).boxed()); + state.collation_fetch_timeouts.push(timeout(id.clone(), relay_parent).boxed()); if let Some(peer_data) = state.peer_data.get(&peer_id) { if peer_data.has_advertised(&relay_parent) { @@ -729,7 +719,7 @@ async fn notify_collation_seconded( /// - Ongoing collation requests have to be canceled. /// - Advertisements by this peer that are no longer relevant have to be removed. async fn handle_peer_view_change(state: &mut State, peer_id: PeerId, view: View) -> Result<()> { - let peer_data = state.peer_data.entry(peer_id.clone()).or_default(); + let peer_data = state.peer_data.entry(peer_id).or_default(); peer_data.update_view(view); state @@ -883,7 +873,7 @@ async fn process_incoming_peer_message( "Declared as collator for unneeded para", ); - modify_reputation(ctx.sender(), origin.clone(), COST_UNNEEDED_COLLATOR).await; + modify_reputation(ctx.sender(), origin, COST_UNNEEDED_COLLATOR).await; gum::trace!(target: LOG_TARGET, "Disconnecting unneeded collator"); disconnect_peer(ctx.sender(), origin).await; } @@ -1013,7 +1003,7 @@ async fn handle_our_view_change( .span_per_head() .iter() .filter(|v| !old_view.contains(&v.0)) - .map(|v| (v.0.clone(), v.1.clone())) + .map(|v| (*v.0, v.1.clone())) .collect(); added.into_iter().for_each(|(h, s)| { @@ -1046,7 +1036,7 @@ async fn handle_our_view_change( ?para_id, "Disconnecting peer on view change (not current parachain id)" ); - disconnect_peer(ctx.sender(), peer_id.clone()).await; + disconnect_peer(ctx.sender(), *peer_id).await; } } } @@ -1254,7 +1244,7 @@ async fn poll_requests( retained_requested.insert(pending_collation.clone()); } if let CollationFetchResult::Error(Some(rep)) = result { - reputation_changes.push((pending_collation.peer_id.clone(), rep)); + reputation_changes.push((pending_collation.peer_id, rep)); } } requested_collations.retain(|k, _| retained_requested.contains(k)); @@ -1337,11 +1327,7 @@ async fn handle_collation_fetched_result( if let Entry::Vacant(entry) = state.pending_candidates.entry(relay_parent) { collation_event.1.commitments_hash = Some(candidate_receipt.commitments_hash); ctx.sender() - .send_message(CandidateBackingMessage::Second( - relay_parent.clone(), - candidate_receipt, - pov, - )) + .send_message(CandidateBackingMessage::Second(relay_parent, candidate_receipt, pov)) .await; entry.insert(collation_event); @@ -1366,7 +1352,7 @@ async fn disconnect_inactive_peers( for (peer, peer_data) in peers { if peer_data.is_inactive(&eviction_policy) { gum::trace!(target: LOG_TARGET, "Disconnecting inactive peer"); - disconnect_peer(sender, peer.clone()).await; + disconnect_peer(sender, *peer).await; } } } diff --git a/node/network/dispute-distribution/src/receiver/mod.rs b/node/network/dispute-distribution/src/receiver/mod.rs index 9030fc0b3f96..b84be7b2dfde 100644 --- a/node/network/dispute-distribution/src/receiver/mod.rs +++ 
b/node/network/dispute-distribution/src/receiver/mod.rs @@ -430,7 +430,7 @@ where ); return }, - Some(vote) => (vote.0.session_index(), vote.0.candidate_hash().clone()), + Some(vote) => (vote.0.session_index(), *vote.0.candidate_hash()), }; let (pending_confirmation, confirmation_rx) = oneshot::channel(); diff --git a/node/network/dispute-distribution/src/sender/mod.rs b/node/network/dispute-distribution/src/sender/mod.rs index b25561df5652..8cecc96c8dc7 100644 --- a/node/network/dispute-distribution/src/sender/mod.rs +++ b/node/network/dispute-distribution/src/sender/mod.rs @@ -304,7 +304,7 @@ impl DisputeSender { .get(*valid_index) .ok_or(JfyiError::InvalidStatementFromCoordinator)?; let valid_signed = SignedDisputeStatement::new_checked( - DisputeStatement::Valid(kind.clone()), + DisputeStatement::Valid(*kind), candidate_hash, session_index, valid_public.clone(), @@ -319,7 +319,7 @@ impl DisputeSender { .get(*invalid_index) .ok_or(JfyiError::InvalidValidatorIndexFromCoordinator)?; let invalid_signed = SignedDisputeStatement::new_checked( - DisputeStatement::Invalid(kind.clone()), + DisputeStatement::Invalid(*kind), candidate_hash, session_index, invalid_public.clone(), diff --git a/node/network/protocol/src/grid_topology.rs b/node/network/protocol/src/grid_topology.rs index 100ef66957bd..2ae43c07c355 100644 --- a/node/network/protocol/src/grid_topology.rs +++ b/node/network/protocol/src/grid_topology.rs @@ -94,7 +94,7 @@ impl SessionGridTopology { let n = &self.canonical_shuffling[r_n]; grid_subset.validator_indices_x.insert(n.validator_index); for p in &n.peer_ids { - grid_subset.peers_x.insert(p.clone()); + grid_subset.peers_x.insert(*p); } } @@ -102,7 +102,7 @@ impl SessionGridTopology { let n = &self.canonical_shuffling[c_n]; grid_subset.validator_indices_y.insert(n.validator_index); for p in &n.peer_ids { - grid_subset.peers_y.insert(p.clone()); + grid_subset.peers_y.insert(*p); } } diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 169d916ce6f9..744217133eed 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -207,7 +207,7 @@ impl View { } /// Obtain an iterator over all heads. 
- pub fn iter<'a>(&'a self) -> impl Iterator { + pub fn iter(&self) -> impl Iterator { self.heads.iter() } diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 055fd4123f9a..271072ab1031 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -278,10 +278,10 @@ impl PeerRelayParentKnowledge { let new_known = match fingerprint.0 { CompactStatement::Seconded(ref h) => { - self.seconded_counts.entry(fingerprint.1).or_default().note_local(h.clone()); + self.seconded_counts.entry(fingerprint.1).or_default().note_local(*h); let was_known = self.is_known_candidate(h); - self.sent_candidates.insert(h.clone()); + self.sent_candidates.insert(*h); !was_known }, CompactStatement::Valid(_) => false, @@ -345,7 +345,7 @@ impl PeerRelayParentKnowledge { .seconded_counts .entry(fingerprint.1) .or_insert_with(Default::default) - .note_remote(h.clone()); + .note_remote(*h); if !allowed_remote { return Err(COST_UNEXPECTED_STATEMENT_REMOTE) @@ -374,7 +374,7 @@ impl PeerRelayParentKnowledge { } self.received_statements.insert(fingerprint.clone()); - self.received_candidates.insert(candidate_hash.clone()); + self.received_candidates.insert(*candidate_hash); Ok(fresh) } @@ -1025,13 +1025,15 @@ async fn circulate_statement<'a, Context>( let mut peers_to_send: Vec = peers .iter() - .filter_map(|(peer, data)| { - if data.can_send(&relay_parent, &fingerprint) { - Some(peer.clone()) - } else { - None - } - }) + .filter_map( + |(peer, data)| { + if data.can_send(&relay_parent, &fingerprint) { + Some(*peer) + } else { + None + } + }, + ) .collect(); let good_peers: HashSet<&PeerId> = peers_to_send.iter().collect(); @@ -1087,7 +1089,7 @@ async fn circulate_statement<'a, Context>( "Sending statement", ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - peers_to_send.iter().map(|(p, _)| p.clone()).collect(), + peers_to_send.iter().map(|(p, _)| *p).collect(), payload, )) .await; @@ -1126,11 +1128,8 @@ async fn send_statements_about( statement = ?statement.statement, "Sending statement", ); - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vec![peer.clone()], - payload, - )) - .await; + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(vec![peer], payload)) + .await; metrics.on_statement_distributed(); } @@ -1161,11 +1160,8 @@ async fn send_statements( statement = ?statement.statement, "Sending statement" ); - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vec![peer.clone()], - payload, - )) - .await; + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(vec![peer], payload)) + .await; metrics.on_statement_distributed(); } @@ -1431,7 +1427,7 @@ async fn handle_incoming_message<'a, Context>( } let fingerprint = message.get_fingerprint(); - let candidate_hash = fingerprint.0.candidate_hash().clone(); + let candidate_hash = *fingerprint.0.candidate_hash(); let handle_incoming_span = active_head .span .child("handle-incoming") @@ -1551,7 +1547,7 @@ async fn handle_incoming_message<'a, Context>( // Send the peer all statements concerning the candidate that we have, // since it appears to have just learned about the candidate. 
send_statements_about( - peer.clone(), + peer, peer_data, ctx, relay_parent, @@ -1627,7 +1623,7 @@ async fn update_peer_view_and_maybe_send_unlocked( continue } if let Some(active_head) = active_heads.get(&new) { - send_statements(peer.clone(), peer_data, ctx, new, active_head, metrics).await; + send_statements(peer, peer_data, ctx, new, active_head, metrics).await; } } } @@ -1710,7 +1706,7 @@ async fn handle_network_update( topology_storage, peers, active_heads, - &*recent_outdated_heads, + recent_outdated_heads, ctx, message, req_sender, diff --git a/node/overseer/src/dummy.rs b/node/overseer/src/dummy.rs index 84ecdd1e8a89..0706244356aa 100644 --- a/node/overseer/src/dummy.rs +++ b/node/overseer/src/dummy.rs @@ -56,10 +56,10 @@ where /// Create an overseer with all subsystem being `Sub`. /// /// Preferred way of initializing a dummy overseer for subsystem tests. -pub fn dummy_overseer_builder<'a, Spawner, SupportsParachains>( +pub fn dummy_overseer_builder( spawner: Spawner, supports_parachains: SupportsParachains, - registry: Option<&'a Registry>, + registry: Option<&Registry>, ) -> Result< InitializedOverseerBuilder< SpawnGlue, @@ -97,11 +97,11 @@ where } /// Create an overseer with all subsystem being `Sub`. -pub fn one_for_all_overseer_builder<'a, Spawner, SupportsParachains, Sub>( +pub fn one_for_all_overseer_builder( spawner: Spawner, supports_parachains: SupportsParachains, subsystem: Sub, - registry: Option<&'a Registry>, + registry: Option<&Registry>, ) -> Result< InitializedOverseerBuilder< SpawnGlue, diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 70dbe92b2432..92baa12be79c 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -686,7 +686,7 @@ where subsystem_meters .iter() .cloned() - .filter_map(|x| x) + .flatten() .map(|(name, ref meters)| (name, meters.read())), ); @@ -861,7 +861,7 @@ where let mut span = jaeger::Span::new(*hash, "leaf-activated"); if let Some(parent_span) = parent_hash.and_then(|h| self.span_per_active_leaf.get(&h)) { - span.add_follows_from(&*parent_span); + span.add_follows_from(parent_span); } let span = Arc::new(span); diff --git a/node/primitives/src/disputes/message.rs b/node/primitives/src/disputes/message.rs index 1a943f8dcee6..c31ff1ecb283 100644 --- a/node/primitives/src/disputes/message.rs +++ b/node/primitives/src/disputes/message.rs @@ -170,13 +170,13 @@ impl DisputeMessage { let valid_vote = ValidDisputeVote { validator_index: valid_index, signature: valid_statement.validator_signature().clone(), - kind: valid_kind.clone(), + kind: *valid_kind, }; let invalid_vote = InvalidDisputeVote { validator_index: invalid_index, signature: invalid_statement.validator_signature().clone(), - kind: invalid_kind.clone(), + kind: *invalid_kind, }; Ok(DisputeMessage(UncheckedDisputeMessage { diff --git a/node/service/src/overseer.rs b/node/service/src/overseer.rs index a8ce3e5eaaf0..7dff86693827 100644 --- a/node/service/src/overseer.rs +++ b/node/service/src/overseer.rs @@ -129,7 +129,7 @@ where /// Obtain a prepared `OverseerBuilder`, that is initialized /// with all default values. 
-pub fn prepared_overseer_builder<'a, Spawner, RuntimeClient>( +pub fn prepared_overseer_builder( OverseerGenArgs { leaves, keystore, @@ -155,7 +155,7 @@ pub fn prepared_overseer_builder<'a, Spawner, RuntimeClient>( overseer_message_channel_capacity_override, req_protocol_names, peerset_protocol_names, - }: OverseerGenArgs<'a, Spawner, RuntimeClient>, + }: OverseerGenArgs, ) -> Result< InitializedOverseerBuilder< SpawnGlue, @@ -257,7 +257,7 @@ where .collator_protocol({ let side = match is_collator { IsCollator::Yes(collator_pair) => ProtocolSide::Collator( - network_service.local_peer_id().clone(), + network_service.local_peer_id(), collator_pair, collation_req_receiver, Metrics::register(registry)?, @@ -334,10 +334,10 @@ where /// would do. pub trait OverseerGen { /// Overwrite the full generation of the overseer, including the subsystems. - fn generate<'a, Spawner, RuntimeClient>( + fn generate( &self, connector: OverseerConnector, - args: OverseerGenArgs<'a, Spawner, RuntimeClient>, + args: OverseerGenArgs, ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, @@ -358,10 +358,10 @@ use polkadot_overseer::KNOWN_LEAVES_CACHE_SIZE; pub struct RealOverseerGen; impl OverseerGen for RealOverseerGen { - fn generate<'a, Spawner, RuntimeClient>( + fn generate( &self, connector: OverseerConnector, - args: OverseerGenArgs<'a, Spawner, RuntimeClient>, + args: OverseerGenArgs, ) -> Result<(Overseer, Arc>, OverseerHandle), Error> where RuntimeClient: 'static + ProvideRuntimeApi + HeaderBackend + AuxStore, diff --git a/node/service/src/relay_chain_selection.rs b/node/service/src/relay_chain_selection.rs index df3e68cc7b1a..890e4c16ec8f 100644 --- a/node/service/src/relay_chain_selection.rs +++ b/node/service/src/relay_chain_selection.rs @@ -343,12 +343,11 @@ where // The Chain Selection subsystem is supposed to treat the finalized // block as the best leaf in the case that there are no viable // leaves, so this should not happen in practice. - let best_leaf = self + let best_leaf = *self .leaves() .await? .first() - .ok_or_else(|| ConsensusError::Other(Box::new(Error::EmptyLeaves)))? - .clone(); + .ok_or_else(|| ConsensusError::Other(Box::new(Error::EmptyLeaves)))?; gum::trace!(target: LOG_TARGET, ?best_leaf, "Best chain"); diff --git a/node/subsystem-types/src/errors.rs b/node/subsystem-types/src/errors.rs index 27c4fcdf8d37..48829e7fc779 100644 --- a/node/subsystem-types/src/errors.rs +++ b/node/subsystem-types/src/errors.rs @@ -79,7 +79,12 @@ pub enum RecoveryError { impl std::fmt::Display for RecoveryError { fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { - write!(f, "{}", self) + let msg = match self { + RecoveryError::Invalid => "Invalid", + RecoveryError::Unavailable => "Unavailable", + }; + + write!(f, "{}", msg) } } diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index cb7caebcaa23..94562ae6baef 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -541,9 +541,7 @@ pub enum AvailabilityStoreMessage { impl AvailabilityStoreMessage { /// In fact, none of the `AvailabilityStore` messages assume a particular relay parent. 
pub fn relay_parent(&self) -> Option { - match self { - _ => None, - } + None } } diff --git a/node/subsystem-types/src/messages/network_bridge_event.rs b/node/subsystem-types/src/messages/network_bridge_event.rs index 5abad8a3c22c..06654153357a 100644 --- a/node/subsystem-types/src/messages/network_bridge_event.rs +++ b/node/subsystem-types/src/messages/network_bridge_event.rs @@ -86,24 +86,19 @@ impl NetworkBridgeEvent { { Ok(match *self { NetworkBridgeEvent::PeerMessage(ref peer, ref msg) => - NetworkBridgeEvent::PeerMessage(peer.clone(), T::try_from(msg)?), + NetworkBridgeEvent::PeerMessage(*peer, T::try_from(msg)?), NetworkBridgeEvent::PeerConnected( ref peer, ref role, ref version, ref authority_id, - ) => NetworkBridgeEvent::PeerConnected( - peer.clone(), - role.clone(), - *version, - authority_id.clone(), - ), + ) => NetworkBridgeEvent::PeerConnected(*peer, *role, *version, authority_id.clone()), NetworkBridgeEvent::PeerDisconnected(ref peer) => - NetworkBridgeEvent::PeerDisconnected(peer.clone()), + NetworkBridgeEvent::PeerDisconnected(*peer), NetworkBridgeEvent::NewGossipTopology(ref topology) => NetworkBridgeEvent::NewGossipTopology(topology.clone()), NetworkBridgeEvent::PeerViewChange(ref peer, ref view) => - NetworkBridgeEvent::PeerViewChange(peer.clone(), view.clone()), + NetworkBridgeEvent::PeerViewChange(*peer, view.clone()), NetworkBridgeEvent::OurViewChange(ref view) => NetworkBridgeEvent::OurViewChange(view.clone()), }) diff --git a/primitives/src/v2/mod.rs b/primitives/src/v2/mod.rs index ea1f4fcca0b9..646c657e8adf 100644 --- a/primitives/src/v2/mod.rs +++ b/primitives/src/v2/mod.rs @@ -766,7 +766,7 @@ pub fn check_candidate_backing + Clone + Encode>( .zip(backed.validity_votes.iter()) { let validator_id = validator_lookup(val_in_group_idx).ok_or(())?; - let payload = attestation.signed_payload(hash.clone(), signing_context); + let payload = attestation.signed_payload(hash, signing_context); let sig = attestation.signature(); if sig.verify(&payload[..], &validator_id) { diff --git a/runtime/common/src/claims.rs b/runtime/common/src/claims.rs index bb0663ec34f7..1bb5b0cdc8d3 100644 --- a/runtime/common/src/claims.rs +++ b/runtime/common/src/claims.rs @@ -247,12 +247,9 @@ pub mod pallet { impl GenesisBuild for GenesisConfig { fn build(&self) { // build `Claims` - self.claims - .iter() - .map(|(a, b, _, _)| (a.clone(), b.clone())) - .for_each(|(a, b)| { - Claims::::insert(a, b); - }); + self.claims.iter().map(|(a, b, _, _)| (*a, *b)).for_each(|(a, b)| { + Claims::::insert(a, b); + }); // build `Total` Total::::put( self.claims @@ -266,17 +263,16 @@ pub mod pallet { // build `Signing` self.claims .iter() - .filter_map(|(a, _, _, s)| Some((a.clone(), s.clone()?))) + .filter_map(|(a, _, _, s)| Some((*a, (*s)?))) .for_each(|(a, s)| { Signing::::insert(a, s); }); // build `Preclaims` - self.claims - .iter() - .filter_map(|(a, _, i, _)| Some((i.clone()?, a.clone()))) - .for_each(|(i, a)| { + self.claims.iter().filter_map(|(a, _, i, _)| Some((i.clone()?, *a))).for_each( + |(i, a)| { Preclaims::::insert(i, a); - }); + }, + ); } } @@ -538,7 +534,7 @@ impl Pallet { } let mut v = b"\x19Ethereum Signed Message:\n".to_vec(); v.extend(rev.into_iter().rev()); - v.extend_from_slice(&prefix[..]); + v.extend_from_slice(prefix); v.extend_from_slice(what); v.extend_from_slice(extra); v @@ -645,7 +641,7 @@ where info: &DispatchInfoOf, len: usize, ) -> Result { - Ok(self.validate(who, call, info, len).map(|_| ())?) 
+ self.validate(who, call, info, len).map(|_| ()) } // diff --git a/runtime/common/src/crowdloan/migration.rs b/runtime/common/src/crowdloan/migration.rs index 775d70f92458..1ba1f20e8060 100644 --- a/runtime/common/src/crowdloan/migration.rs +++ b/runtime/common/src/crowdloan/migration.rs @@ -67,12 +67,10 @@ pub mod crowdloan_index_migration { let leases = Leases::::get(para_id).unwrap_or_default(); let mut found_lease_deposit = false; - for maybe_deposit in leases.iter() { - if let Some((who, _amount)) = maybe_deposit { - if *who == old_fund_account { - found_lease_deposit = true; - break - } + for (who, _amount) in leases.iter().flatten() { + if *who == old_fund_account { + found_lease_deposit = true; + break } } if found_lease_deposit { @@ -112,11 +110,9 @@ pub mod crowdloan_index_migration { weight = weight.saturating_add(T::DbWeight::get().reads_writes(1, 2)); let mut leases = Leases::::get(para_id).unwrap_or_default(); - for maybe_deposit in leases.iter_mut() { - if let Some((who, _amount)) = maybe_deposit { - if *who == old_fund_account { - *who = new_fund_account.clone(); - } + for (who, _amount) in leases.iter_mut().flatten() { + if *who == old_fund_account { + *who = new_fund_account.clone(); } } @@ -162,13 +158,11 @@ pub mod crowdloan_index_migration { let leases = Leases::::get(para_id).unwrap_or_default(); let mut new_account_found = false; - for maybe_deposit in leases.iter() { - if let Some((who, _amount)) = maybe_deposit { - if *who == old_fund_account { - panic!("Old fund account found after migration!"); - } else if *who == new_fund_account { - new_account_found = true; - } + for (who, _amount) in leases.iter().flatten() { + if *who == old_fund_account { + panic!("Old fund account found after migration!"); + } else if *who == new_fund_account { + new_account_found = true; } } if new_account_found { diff --git a/runtime/common/src/slots/migration.rs b/runtime/common/src/slots/migration.rs index 33d221b209d5..a87f1cd7a074 100644 --- a/runtime/common/src/slots/migration.rs +++ b/runtime/common/src/slots/migration.rs @@ -31,18 +31,16 @@ pub mod slots_crowdloan_index_migration { for (para_id, leases) in Leases::::iter() { let old_fund_account = old_fund_account_id::(para_id); - for maybe_deposit in leases.iter() { - if let Some((who, _amount)) = maybe_deposit { - if *who == old_fund_account { - let crowdloan = - crowdloan::Funds::::get(para_id).ok_or("no crowdloan found")?; - log::info!( - target: "runtime", - "para_id={:?}, old_fund_account={:?}, fund_id={:?}, leases={:?}", - para_id, old_fund_account, crowdloan.fund_index, leases, - ); - break - } + for (who, _amount) in leases.iter().flatten() { + if *who == old_fund_account { + let crowdloan = + crowdloan::Funds::::get(para_id).ok_or("no crowdloan found")?; + log::info!( + target: "runtime", + "para_id={:?}, old_fund_account={:?}, fund_id={:?}, leases={:?}", + para_id, old_fund_account, crowdloan.fund_index, leases, + ); + break } } } @@ -61,11 +59,9 @@ pub mod slots_crowdloan_index_migration { let new_fund_account = crowdloan::Pallet::::fund_account_id(fund.fund_index); // look for places the old account is used, and replace with the new account. 
- for maybe_deposit in leases.iter_mut() { - if let Some((who, _amount)) = maybe_deposit { - if *who == old_fund_account { - *who = new_fund_account.clone(); - } + for (who, _amount) in leases.iter_mut().flatten() { + if *who == old_fund_account { + *who = new_fund_account.clone(); } } @@ -83,11 +79,9 @@ pub mod slots_crowdloan_index_migration { let old_fund_account = old_fund_account_id::(para_id); log::info!(target: "runtime", "checking para_id: {:?}", para_id); // check the old fund account doesn't exist anywhere. - for maybe_deposit in leases.iter() { - if let Some((who, _amount)) = maybe_deposit { - if *who == old_fund_account { - panic!("old fund account found after migration!"); - } + for (who, _amount) in leases.iter().flatten() { + if *who == old_fund_account { + panic!("old fund account found after migration!"); } } } diff --git a/runtime/parachains/src/disputes.rs b/runtime/parachains/src/disputes.rs index 9f458421e2ed..b5e9d2540045 100644 --- a/runtime/parachains/src/disputes.rs +++ b/runtime/parachains/src/disputes.rs @@ -1112,7 +1112,7 @@ impl Pallet { // it's sufficient to count the votes in the statement set after they set.statements.iter().for_each(|(statement, v_i, _signature)| { if Some(true) == - summary.new_participants.get(v_i.0 as usize).map(|b| b.as_ref().clone()) + summary.new_participants.get(v_i.0 as usize).map(|b| *b.as_ref()) { match statement { // `summary.new_flags` contains the spam free votes. diff --git a/runtime/parachains/src/hrmp.rs b/runtime/parachains/src/hrmp.rs index 53ad6781048f..c0624cdcacfd 100644 --- a/runtime/parachains/src/hrmp.rs +++ b/runtime/parachains/src/hrmp.rs @@ -751,10 +751,10 @@ impl Pallet { let ingress = ::HrmpIngressChannelsIndex::take(outgoing_para) .into_iter() - .map(|sender| HrmpChannelId { sender, recipient: outgoing_para.clone() }); + .map(|sender| HrmpChannelId { sender, recipient: *outgoing_para }); let egress = ::HrmpEgressChannelsIndex::take(outgoing_para) .into_iter() - .map(|recipient| HrmpChannelId { sender: outgoing_para.clone(), recipient }); + .map(|recipient| HrmpChannelId { sender: *outgoing_para, recipient }); let mut to_close = ingress.chain(egress).collect::>(); to_close.sort(); to_close.dedup(); @@ -1075,7 +1075,7 @@ impl Pallet { channel.total_size += inbound.data.len() as u32; // compute the new MQC head of the channel - let prev_head = channel.mqc_head.clone().unwrap_or(Default::default()); + let prev_head = channel.mqc_head.unwrap_or(Default::default()); let new_head = BlakeTwo256::hash_of(&( prev_head, inbound.sent_at, diff --git a/runtime/parachains/src/inclusion/mod.rs b/runtime/parachains/src/inclusion/mod.rs index f74a8cfd3f8d..1df6c141e9df 100644 --- a/runtime/parachains/src/inclusion/mod.rs +++ b/runtime/parachains/src/inclusion/mod.rs @@ -102,7 +102,7 @@ impl CandidatePendingAvailability { /// Get the core index. pub(crate) fn core_occupied(&self) -> CoreIndex { - self.core.clone() + self.core } /// Get the candidate hash. @@ -383,7 +383,7 @@ impl Pallet { let mut freed_cores = Vec::with_capacity(expected_bits); for (para_id, pending_availability) in assigned_paras_record .into_iter() - .filter_map(|x| x) + .flatten() .filter_map(|(id, p)| p.map(|p| (id, p))) { if pending_availability.availability_votes.count_ones() >= threshold { @@ -644,8 +644,7 @@ impl Pallet { }; // one more sweep for actually writing to storage. 
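// NOTE (editorial sketch, not part of this diff): several hunks above and below simply drop
// `.clone()` on `Copy` types (e.g. returning `self.core`, dereferencing `*outgoing_para`).
// A minimal standalone example with a hypothetical index newtype showing the pattern that
// `clippy::clone_on_copy` flags:
#[derive(Clone, Copy)]
struct CoreIndex(u32);

struct PendingRecord {
	core: CoreIndex,
}

impl PendingRecord {
	// For a `Copy` type a plain read is enough; writing `self.core.clone()` only obscures
	// that the copy is trivial.
	fn core_occupied(&self) -> CoreIndex {
		self.core
	}
}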
- let core_indices = - core_indices_and_backers.iter().map(|&(ref c, _, _)| c.clone()).collect(); + let core_indices = core_indices_and_backers.iter().map(|&(ref c, _, _)| *c).collect(); for (candidate, (core, backers, group)) in candidates.into_iter().zip(core_indices_and_backers) { diff --git a/runtime/parachains/src/initializer.rs b/runtime/parachains/src/initializer.rs index eaa4510fafcf..ef00e5b884cc 100644 --- a/runtime/parachains/src/initializer.rs +++ b/runtime/parachains/src/initializer.rs @@ -247,7 +247,7 @@ impl Pallet { let validators = shared::Pallet::::initializer_on_new_session( session_index, - random_seed.clone(), + random_seed, &new_config, all_validators, ); diff --git a/runtime/parachains/src/paras_inherent/mod.rs b/runtime/parachains/src/paras_inherent/mod.rs index 188a8f677979..a053e3dbfaf9 100644 --- a/runtime/parachains/src/paras_inherent/mod.rs +++ b/runtime/parachains/src/paras_inherent/mod.rs @@ -513,7 +513,7 @@ impl Pallet { METRICS.on_candidates_sanitized(backed_candidates.len() as u64); // Process backed candidates according to scheduled cores. - let parent_storage_root = parent_header.state_root().clone(); + let parent_storage_root = *parent_header.state_root(); let inclusion::ProcessedCandidates::<::Hash> { core_indices: occupied, candidate_receipt_with_backing_validator_indices, @@ -711,7 +711,7 @@ impl Pallet { let scheduled = >::scheduled(); let relay_parent_number = now - One::one(); - let parent_storage_root = parent_header.state_root().clone(); + let parent_storage_root = *parent_header.state_root(); let check_ctx = CandidateCheckContext::::new(now, relay_parent_number); let backed_candidates = sanitize_backed_candidates::( @@ -1201,7 +1201,7 @@ fn compute_entropy(parent_hash: T::Hash) -> [u8; 32] { // known 2 epochs ago. it is marginally better than using the parent block // hash since it's harder to influence the VRF output than the block hash. 
let vrf_random = ParentBlockRandomness::::random(&CANDIDATE_SEED_SUBJECT[..]).0; - let mut entropy: [u8; 32] = CANDIDATE_SEED_SUBJECT.clone(); + let mut entropy: [u8; 32] = CANDIDATE_SEED_SUBJECT; if let Some(vrf_random) = vrf_random { entropy.as_mut().copy_from_slice(vrf_random.as_ref()); } else { diff --git a/runtime/parachains/src/runtime_api_impl/v2.rs b/runtime/parachains/src/runtime_api_impl/v2.rs index 77ea96742b54..57345a819de0 100644 --- a/runtime/parachains/src/runtime_api_impl/v2.rs +++ b/runtime/parachains/src/runtime_api_impl/v2.rs @@ -107,7 +107,7 @@ pub fn availability_cores() -> Vec>::pending_availability(para_id) .expect("Occupied core always has pending availability; qed"); - let backed_in_number = pending_availability.backed_in_number().clone(); + let backed_in_number = *pending_availability.backed_in_number(); OccupiedCore { next_up_on_available: >::next_up_on_available( CoreIndex(i as u32), @@ -135,7 +135,7 @@ pub fn availability_cores() -> Vec>::pending_availability(para_id) .expect("Occupied core always has pending availability; qed"); - let backed_in_number = pending_availability.backed_in_number().clone(); + let backed_in_number = *pending_availability.backed_in_number(); OccupiedCore { next_up_on_available: >::next_up_on_available( CoreIndex(i as u32), diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index 0185817b2aa1..6eb1b732705f 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -483,7 +483,7 @@ impl Pallet { Some(CoreAssignment { kind: AssignmentKind::Parachain, para_id: parachains[core_index], - core: core.clone(), + core, group_idx: Self::group_assigned_to_core(core, now).expect( "core is not out of bounds and we are guaranteed \ to be after the most recent session start; qed", @@ -496,7 +496,7 @@ impl Pallet { parathread_queue.take_next_on_core(core_offset).map(|entry| CoreAssignment { kind: AssignmentKind::Parathread(entry.claim.1, entry.retries), para_id: entry.claim.0, - core: core.clone(), + core, group_idx: Self::group_assigned_to_core(core, now).expect( "core is not out of bounds and we are guaranteed \ to be after the most recent session start; qed", @@ -610,11 +610,9 @@ impl Pallet { (at - session_start_block) / config.group_rotation_frequency.into(); let rotations_since_session_start = - match >::try_into(rotations_since_session_start) { - Ok(i) => i, - Err(_) => 0, // can only happen if rotations occur only once every u32::max(), - // so functionally no difference in behavior. - }; + >::try_into(rotations_since_session_start).unwrap_or(0); + // Error case can only happen if rotations occur only once every u32::max(), + // so functionally no difference in behavior. 
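// NOTE (editorial sketch, not part of this diff): the scheduler hunk above folds a `match` over
// `TryInto::try_into` into `.unwrap_or(0)`. The two standalone functions below are equivalent;
// `u64`/`u32` stand in for the runtime's block-number types:
fn rotations_with_match(rotations: u64) -> u32 {
	match u32::try_from(rotations) {
		Ok(i) => i,
		Err(_) => 0,
	}
}

fn rotations_with_unwrap_or(rotations: u64) -> u32 {
	// `try_from` already returns a `Result`, so the all-or-default fallback collapses into
	// a single `unwrap_or` call.
	u32::try_from(rotations).unwrap_or(0)
}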
let group_idx = (core.0 as usize + rotations_since_session_start as usize) % validator_groups.len(); diff --git a/runtime/parachains/src/ump.rs b/runtime/parachains/src/ump.rs index 5aa7b17d923c..8d734acb3464 100644 --- a/runtime/parachains/src/ump.rs +++ b/runtime/parachains/src/ump.rs @@ -107,7 +107,7 @@ impl, C: Config> UmpSink VersionedXcm, }; - let id = upward_message_id(&data[..]); + let id = upward_message_id(data); let maybe_msg_and_weight = VersionedXcm::::decode_all_with_depth_limit( xcm::MAX_XCM_DECODE_DEPTH, &mut data, diff --git a/scripts/ci/gitlab/pipeline/test.yml b/scripts/ci/gitlab/pipeline/test.yml index 9a3dd0270fbb..3a21a77d90f2 100644 --- a/scripts/ci/gitlab/pipeline/test.yml +++ b/scripts/ci/gitlab/pipeline/test.yml @@ -72,3 +72,18 @@ test-deterministic-wasm: - .compiler-info script: - ./scripts/ci/gitlab/test_deterministic_wasm.sh + +cargo-clippy: + stage: test + # this is an artificial job dependency, for pipeline optimization using GitLab's DAGs + # the job can be found in check.yml + needs: + - job: job-starter + artifacts: false + variables: + RUSTY_CACHIER_TOOLCHAIN: nightly + extends: + - .docker-env + - .test-refs + script: + - SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo +nightly clippy --all-targets diff --git a/tests/benchmark_block.rs b/tests/benchmark_block.rs index ee68d43b2aa5..dc3b174599a9 100644 --- a/tests/benchmark_block.rs +++ b/tests/benchmark_block.rs @@ -32,7 +32,7 @@ use tempfile::tempdir; pub mod common; -static RUNTIMES: [&'static str; 4] = ["polkadot", "kusama", "westend", "rococo"]; +static RUNTIMES: [&str; 4] = ["polkadot", "kusama", "westend", "rococo"]; /// `benchmark block` works for all dev runtimes using the wasm executor. #[tokio::test] @@ -54,7 +54,7 @@ async fn build_chain(runtime: &str, base_path: &Path) -> Result<(), String> { let mut cmd = Command::new(cargo_bin("polkadot")) .stdout(process::Stdio::piped()) .stderr(process::Stdio::piped()) - .args(["--chain", &runtime, "--force-authoring", "--alice"]) + .args(["--chain", runtime, "--force-authoring", "--alice"]) .arg("-d") .arg(base_path) .arg("--no-hardware-benchmarks") @@ -77,7 +77,7 @@ async fn build_chain(runtime: &str, base_path: &Path) -> Result<(), String> { fn benchmark_block(runtime: &str, base_path: &Path, block: u32) -> Result<(), String> { // Invoke `benchmark block` with all options to make sure that they are valid. let status = Command::new(cargo_bin("polkadot")) - .args(["benchmark", "block", "--chain", &runtime]) + .args(["benchmark", "block", "--chain", runtime]) .arg("-d") .arg(base_path) .args(["--from", &block.to_string(), "--to", &block.to_string()]) diff --git a/tests/benchmark_extrinsic.rs b/tests/benchmark_extrinsic.rs index c112a8c023f8..79a7d4c45322 100644 --- a/tests/benchmark_extrinsic.rs +++ b/tests/benchmark_extrinsic.rs @@ -17,10 +17,9 @@ use assert_cmd::cargo::cargo_bin; use std::{process::Command, result::Result}; -static RUNTIMES: [&'static str; 4] = ["polkadot", "kusama", "westend", "rococo"]; +static RUNTIMES: [&str; 4] = ["polkadot", "kusama", "westend", "rococo"]; -static EXTRINSICS: [(&'static str, &'static str); 2] = - [("system", "remark"), ("balances", "transfer_keep_alive")]; +static EXTRINSICS: [(&str, &str); 2] = [("system", "remark"), ("balances", "transfer_keep_alive")]; /// `benchmark extrinsic` works for all dev runtimes and some extrinsics. 
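// NOTE (editorial sketch, not part of this diff): the test hunks above drop the redundant
// `'static` from array statics and the needless `&` on array literals passed to `Command::args`.
// A minimal standalone equivalent (the binary name and arguments here are arbitrary):
use std::process::{Command, ExitStatus};

// Inside a `static`, references are already `'static`, so spelling the lifetime out triggers
// `clippy::redundant_static_lifetimes`.
static RUNTIME_NAMES: [&str; 2] = ["polkadot", "kusama"];

fn run_with_chain(runtime: &str) -> std::io::Result<ExitStatus> {
	// `args` accepts any `IntoIterator` whose items implement `AsRef<OsStr>`; arrays iterate
	// by value, so the extra borrow in `.args(&[..])` is flagged by `clippy::needless_borrow`.
	Command::new("echo").args(["--chain", runtime]).status()
}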
#[test] @@ -43,8 +42,8 @@ fn benchmark_extrinsic_rejects_non_dev_runtimes() { fn benchmark_extrinsic(runtime: &str, pallet: &str, extrinsic: &str) -> Result<(), String> { let status = Command::new(cargo_bin("polkadot")) - .args(["benchmark", "extrinsic", "--chain", &runtime]) - .args(&["--pallet", pallet, "--extrinsic", extrinsic]) + .args(["benchmark", "extrinsic", "--chain", runtime]) + .args(["--pallet", pallet, "--extrinsic", extrinsic]) // Run with low repeats for faster execution. .args(["--repeat=1", "--warmup=1", "--max-ext-per-block=1"]) .status() diff --git a/tests/benchmark_overhead.rs b/tests/benchmark_overhead.rs index a3b4ed1160ea..10582870168e 100644 --- a/tests/benchmark_overhead.rs +++ b/tests/benchmark_overhead.rs @@ -18,7 +18,7 @@ use assert_cmd::cargo::cargo_bin; use std::{process::Command, result::Result}; use tempfile::tempdir; -static RUNTIMES: [&'static str; 4] = ["polkadot", "kusama", "westend", "rococo"]; +static RUNTIMES: [&str; 4] = ["polkadot", "kusama", "westend", "rococo"]; /// `benchmark overhead` works for all dev runtimes. #[test] diff --git a/tests/benchmark_storage_works.rs b/tests/benchmark_storage_works.rs index f5e2851f250f..8d9694aa0a0e 100644 --- a/tests/benchmark_storage_works.rs +++ b/tests/benchmark_storage_works.rs @@ -38,7 +38,7 @@ fn benchmark_storage_works() { /// Invoke the `benchmark storage` sub-command. fn benchmark_storage(db: &str, base_path: &Path) -> ExitStatus { Command::new(cargo_bin("polkadot")) - .args(&["benchmark", "storage", "--dev"]) + .args(["benchmark", "storage", "--dev"]) .arg("--db") .arg(db) .arg("--weight-path") diff --git a/tests/common.rs b/tests/common.rs index 3f040208972c..6a41975f87fa 100644 --- a/tests/common.rs +++ b/tests/common.rs @@ -91,11 +91,13 @@ pub fn find_ws_url_from_output(read: impl Read + Send) -> (String, String) { // does the line contain our port (we expect this specific output from substrate). 
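// NOTE (editorial sketch, not part of this diff): the `tests/common.rs` hunk below replaces
// `.expect(&format!(..))` with `.unwrap_or_else(|| panic!(..))` and `split_once(",")` with the
// single-character pattern `split_once(',')`. With `expect`, the `format!` string is built even
// when the value is present; `unwrap_or_else` defers that work to the failure path, which is
// what `clippy::expect_fun_call` asks for. A simplified, hypothetical helper in the preferred form:
fn port_from_line(line: &str) -> u16 {
	line.rsplit(':')
		.next()
		.and_then(|part| part.trim().parse().ok())
		// The closure, and therefore the `format!` inside `panic!`, only runs on failure.
		.unwrap_or_else(|| panic!("could not find a port in line: {}", line))
}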
let sock_addr = match line.split_once("Running JSON-RPC WS server: addr=") { None => return None, - Some((_, after)) => after.split_once(",").unwrap().0, + Some((_, after)) => after.split_once(',').unwrap().0, }; Some(format!("ws://{}", sock_addr)) }) - .expect(&format!("Could not find WebSocket address in process output:\n{}", &data)); + .unwrap_or_else(|| { + panic!("Could not find WebSocket address in process output:\n{}", &data) + }); (ws_url, data) } diff --git a/tests/invalid_order_arguments.rs b/tests/invalid_order_arguments.rs index f205e935ab95..f8dc32a82a26 100644 --- a/tests/invalid_order_arguments.rs +++ b/tests/invalid_order_arguments.rs @@ -24,7 +24,7 @@ fn invalid_order_arguments() { let tmpdir = tempdir().expect("could not create temp dir"); let status = Command::new(cargo_bin("polkadot")) - .args(&["--dev", "invalid_order_arguments", "-d"]) + .args(["--dev", "invalid_order_arguments", "-d"]) .arg(tmpdir.path()) .arg("-y") .status() diff --git a/tests/purge_chain_works.rs b/tests/purge_chain_works.rs index c69d8cc4a81a..ab3ee506b60a 100644 --- a/tests/purge_chain_works.rs +++ b/tests/purge_chain_works.rs @@ -36,7 +36,7 @@ async fn purge_chain_rocksdb_works() { let mut cmd = Command::new(cargo_bin("polkadot")) .stdout(process::Stdio::piped()) .stderr(process::Stdio::piped()) - .args(&["--dev", "-d"]) + .args(["--dev", "-d"]) .arg(tmpdir.path()) .arg("--port") .arg("33034") @@ -61,7 +61,7 @@ async fn purge_chain_rocksdb_works() { // Purge chain let status = Command::new(cargo_bin("polkadot")) - .args(&["purge-chain", "--dev", "-d"]) + .args(["purge-chain", "--dev", "-d"]) .arg(tmpdir.path()) .arg("-y") .status() @@ -86,7 +86,7 @@ async fn purge_chain_paritydb_works() { let mut cmd = Command::new(cargo_bin("polkadot")) .stdout(process::Stdio::piped()) .stderr(process::Stdio::piped()) - .args(&["--dev", "-d"]) + .args(["--dev", "-d"]) .arg(tmpdir.path()) .arg("--database") .arg("paritydb-experimental") @@ -111,7 +111,7 @@ async fn purge_chain_paritydb_works() { // Purge chain let status = Command::new(cargo_bin("polkadot")) - .args(&["purge-chain", "--dev", "-d"]) + .args(["purge-chain", "--dev", "-d"]) .arg(tmpdir.path()) .arg("--database") .arg("paritydb-experimental") diff --git a/tests/running_the_node_and_interrupt.rs b/tests/running_the_node_and_interrupt.rs index 895db534bc5c..5b0e6ec8b013 100644 --- a/tests/running_the_node_and_interrupt.rs +++ b/tests/running_the_node_and_interrupt.rs @@ -40,7 +40,7 @@ async fn running_the_node_works_and_can_be_interrupted() { let mut cmd = Command::new(cargo_bin("polkadot")) .stdout(process::Stdio::piped()) .stderr(process::Stdio::piped()) - .args(&["--dev", "-d"]) + .args(["--dev", "-d"]) .arg(tmpdir.path()) .arg("--no-hardware-benchmarks") .spawn() diff --git a/xcm/xcm-executor/src/assets.rs b/xcm/xcm-executor/src/assets.rs index 324e92dce9ff..6ecbf0e0cf44 100644 --- a/xcm/xcm-executor/src/assets.rs +++ b/xcm/xcm-executor/src/assets.rs @@ -100,14 +100,14 @@ impl Assets { } /// A borrowing iterator over the fungible assets. - pub fn fungible_assets_iter<'a>(&'a self) -> impl Iterator + 'a { + pub fn fungible_assets_iter(&self) -> impl Iterator + '_ { self.fungible .iter() .map(|(id, &amount)| MultiAsset { fun: Fungible(amount), id: id.clone() }) } /// A borrowing iterator over the non-fungible assets. 
-	pub fn non_fungible_assets_iter<'a>(&'a self) -> impl Iterator<Item = MultiAsset> + 'a {
+	pub fn non_fungible_assets_iter(&self) -> impl Iterator<Item = MultiAsset> + '_ {
 		self.non_fungible
 			.iter()
 			.map(|(id, instance)| MultiAsset { fun: NonFungible(instance.clone()), id: id.clone() })
@@ -126,7 +126,7 @@ impl Assets {
 	}
 
 	/// A borrowing iterator over all assets.
-	pub fn assets_iter<'a>(&'a self) -> impl Iterator<Item = MultiAsset> + 'a {
+	pub fn assets_iter(&self) -> impl Iterator<Item = MultiAsset> + '_ {
 		self.fungible_assets_iter().chain(self.non_fungible_assets_iter())
 	}
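// NOTE (editorial sketch, not part of this diff): the `Assets` hunks above elide the explicit
// `'a` lifetime on `impl Iterator` return types in favour of the anonymous `'_`
// (`clippy::needless_lifetimes`). A minimal standalone version of the same pattern:
struct Bag {
	items: Vec<u32>,
}

impl Bag {
	// Before: `fn iter<'a>(&'a self) -> impl Iterator<Item = u32> + 'a`.
	// The `'_` lifetime ties the returned iterator to `&self` without naming a lifetime.
	fn iter(&self) -> impl Iterator<Item = u32> + '_ {
		self.items.iter().copied()
	}
}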