diff --git a/Cargo.lock b/Cargo.lock
index fcaa07ff59c0..4ca3144ef1f9 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6495,6 +6495,7 @@ dependencies = [
  "polkadot-primitives",
  "polkadot-primitives-test-helpers",
  "sc-keystore",
+ "sp-application-crypto",
  "sp-core",
  "sp-keyring",
  "sp-keystore",
diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs
index 5f3a71cb9db9..342fc0341b99 100644
--- a/node/core/approval-voting/src/lib.rs
+++ b/node/core/approval-voting/src/lib.rs
@@ -26,7 +26,7 @@ use polkadot_node_primitives::{
 	approval::{
 		BlockApprovalMeta, DelayTranche, IndirectAssignmentCert, IndirectSignedApprovalVote,
 	},
-	SignedDisputeStatement, ValidationResult, APPROVAL_EXECUTION_TIMEOUT,
+	ValidationResult, APPROVAL_EXECUTION_TIMEOUT,
 };
 use polkadot_node_subsystem::{
 	errors::RecoveryError,
@@ -99,6 +99,11 @@ mod tests;
 pub const APPROVAL_SESSIONS: SessionWindowSize = new_session_window_size!(6);
 
 const APPROVAL_CHECKING_TIMEOUT: Duration = Duration::from_secs(120);
+/// How long are we willing to wait for approval signatures?
+///
+/// Value chosen rather arbitrarily: it should not be hit in practice, it mainly exists to make
+/// dead-lock issues easier to diagnose.
+const WAIT_FOR_SIGS_TIMEOUT: Duration = Duration::from_millis(500);
 const APPROVAL_CACHE_SIZE: usize = 1024;
 const TICK_TOO_FAR_IN_FUTURE: Tick = 20; // 10 seconds.
 const APPROVAL_DELAY: Tick = 2;
@@ -152,6 +157,7 @@ struct MetricsInner {
 	block_approval_time_ticks: prometheus::Histogram,
 	time_db_transaction: prometheus::Histogram,
 	time_recover_and_approve: prometheus::Histogram,
+	candidate_signatures_requests_total: prometheus::Counter<prometheus::U64>,
 }
 
 /// Approval Voting metrics.
@@ -225,6 +231,12 @@ impl Metrics {
 		}
 	}
 
+	fn on_candidate_signatures_request(&self) {
+		if let Some(metrics) = &self.0 {
+			metrics.candidate_signatures_requests_total.inc();
+		}
+	}
+
 	fn time_db_transaction(&self) -> Option<prometheus::prometheus::HistogramTimer> {
 		self.0.as_ref().map(|metrics| metrics.time_db_transaction.start_timer())
 	}
@@ -315,6 +327,13 @@ impl metrics::Metrics for Metrics {
 				)?,
 				registry,
 			)?,
+			candidate_signatures_requests_total: prometheus::register(
+				prometheus::Counter::new(
+					"polkadot_parachain_approval_candidate_signatures_requests_total",
+					"Number of times signatures got requested by other subsystems",
+				)?,
+				registry,
+			)?,
 		};
 
 		Ok(Metrics(Some(metrics)))
@@ -691,13 +710,6 @@ enum Action {
 		candidate: CandidateReceipt,
 		backing_group: GroupIndex,
 	},
-	InformDisputeCoordinator {
-		candidate_hash: CandidateHash,
-		candidate_receipt: CandidateReceipt,
-		session: SessionIndex,
-		dispute_statement: SignedDisputeStatement,
-		validator_index: ValidatorIndex,
-	},
 	NoteApprovedInChainSelection(Hash),
 	IssueApproval(CandidateHash, ApprovalVoteRequest),
 	BecomeActive,
@@ -957,22 +969,6 @@ async fn handle_actions(
 				Some(_) => {},
 			}
 		},
-		Action::InformDisputeCoordinator {
-			candidate_hash,
-			candidate_receipt,
-			session,
-			dispute_statement,
-			validator_index,
-		} => {
-			ctx.send_message(DisputeCoordinatorMessage::ImportStatements {
-				candidate_hash,
-				candidate_receipt,
-				session,
-				statements: vec![(dispute_statement, validator_index)],
-				pending_confirmation: None,
-			})
-			.await;
-		},
 		Action::NoteApprovedInChainSelection(block_hash) => {
 			ctx.send_message(ChainSelectionMessage::Approved(block_hash)).await;
 		},
@@ -1192,12 +1188,94 @@ async fn handle_from_overseer(
 				Vec::new()
 			},
+			ApprovalVotingMessage::GetApprovalSignaturesForCandidate(candidate_hash, tx) => {
+				metrics.on_candidate_signatures_request();
+				get_approval_signatures_for_candidate(ctx, db, candidate_hash, tx).await?;
+				Vec::new()
+			},
 		},
 	};
 
 	Ok(actions)
 }
 
+/// Retrieve approval signatures.
+///
+/// This involves an unbounded message send to approval-distribution; the caller has to ensure
+/// that calls to this function are infrequent and bounded.
+#[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
+async fn get_approval_signatures_for_candidate(
+	ctx: &mut Context,
+	db: &OverlayedBackend<'_, impl Backend>,
+	candidate_hash: CandidateHash,
+	tx: oneshot::Sender<HashMap<ValidatorIndex, ValidatorSignature>>,
+) -> SubsystemResult<()> {
+	let entry = match db.load_candidate_entry(&candidate_hash)? {
+		None => return Ok(()),
+		Some(e) => e,
+	};
+
+	let relay_hashes = entry.block_assignments.iter().map(|(relay_hash, _)| relay_hash);
+
+	let mut candidate_indices = HashSet::new();
+	// Retrieve `CoreIndices`/`CandidateIndices` as required by approval-distribution:
+	for hash in relay_hashes {
+		let entry = match db.load_block_entry(hash)? {
+			None => {
+				gum::debug!(
+					target: LOG_TARGET,
+					?candidate_hash,
+					?hash,
+					"Block entry for assignment missing."
+				);
+				continue
+			},
+			Some(e) => e,
+		};
+		for (candidate_index, (_core_index, c_hash)) in entry.candidates().iter().enumerate() {
+			if c_hash == &candidate_hash {
+				candidate_indices.insert((*hash, candidate_index as u32));
+				break
+			}
+		}
+	}
+
+	let mut sender = ctx.sender().clone();
+	let get_approvals = async move {
+		let (tx_distribution, rx_distribution) = oneshot::channel();
+		sender.send_unbounded_message(ApprovalDistributionMessage::GetApprovalSignatures(
+			candidate_indices,
+			tx_distribution,
+		));
+
+		// Because of the unbounded sending and the nature of the call (just fetching data from state),
+		// this should not block long:
+		match rx_distribution.timeout(WAIT_FOR_SIGS_TIMEOUT).await {
+			None => {
+				gum::warn!(
+					target: LOG_TARGET,
+					"Waiting for approval signatures timed out - dead lock?"
+				);
+			},
+			Some(Err(_)) => gum::debug!(
+				target: LOG_TARGET,
+				"Request for approval signatures got cancelled by `approval-distribution`."
+			),
+			Some(Ok(votes)) =>
+				if let Err(_) = tx.send(votes) {
+					gum::debug!(
+						target: LOG_TARGET,
+						"Sending approval signatures back failed, as receiver got closed"
+					);
+				},
+		}
+	};
+
+	// No need to block the subsystem on this (also required to break the cycle).
+	// We should not be sending this message frequently; the caller must make sure this is bounded.
+	ctx.spawn("get-approval-signatures", Box::pin(get_approvals))
+}
+
 #[overseer::contextbounds(ApprovalVoting, prefix = self::overseer)]
 async fn handle_approved_ancestor(
 	ctx: &mut Context,
@@ -1717,19 +1795,17 @@ fn check_and_import_approval(
 		)),
 	};
 
-	// Transform the approval vote into the wrapper used to import statements into disputes.
-	// This also does signature checking.
-	let signed_dispute_statement = match SignedDisputeStatement::new_checked(
-		DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking),
+	// Signature check:
+	match DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking).check_signature(
+		&pubkey,
 		approved_candidate_hash,
 		block_entry.session(),
-		pubkey.clone(),
-		approval.signature.clone(),
+		&approval.signature,
 	) {
 		Err(_) => respond_early!(ApprovalCheckResult::Bad(ApprovalCheckError::InvalidSignature(
 			approval.validator
 		),)),
-		Ok(s) => s,
+		Ok(()) => {},
 	};
 
 	let candidate_entry = match db.load_candidate_entry(&approved_candidate_hash)?
{ @@ -1770,23 +1846,7 @@ fn check_and_import_approval( "Importing approval vote", ); - let inform_disputes_action = if !candidate_entry.has_approved(approval.validator) { - // The approval voting system requires a separate approval for each assignment - // to the candidate. It's possible that there are semi-duplicate approvals, - // but we only need to inform the dispute coordinator about the first expressed - // opinion by the validator about the candidate. - Some(Action::InformDisputeCoordinator { - candidate_hash: approved_candidate_hash, - candidate_receipt: candidate_entry.candidate_receipt().clone(), - session: block_entry.session(), - dispute_statement: signed_dispute_statement, - validator_index: approval.validator, - }) - } else { - None - }; - - let mut actions = advance_approval_state( + let actions = advance_approval_state( state, db, &metrics, @@ -1796,8 +1856,6 @@ fn check_and_import_approval( ApprovalStateTransition::RemoteApproval(approval.validator), ); - actions.extend(inform_disputes_action); - Ok((actions, t)) } @@ -2242,15 +2300,12 @@ async fn launch_approval( "Data recovery invalid for candidate {:?}", (candidate_hash, candidate.descriptor.para_id), ); - - sender - .send_message(DisputeCoordinatorMessage::IssueLocalStatement( - session_index, - candidate_hash, - candidate.clone(), - false, - )) - .await; + issue_local_invalid_statement( + &mut sender, + session_index, + candidate_hash, + candidate.clone(), + ); metrics_guard.take().on_approval_invalid(); }, } @@ -2304,14 +2359,12 @@ async fn launch_approval( return ApprovalState::approved(validator_index, candidate_hash) } else { // Commitments mismatch - issue a dispute. - sender - .send_message(DisputeCoordinatorMessage::IssueLocalStatement( - session_index, - candidate_hash, - candidate.clone(), - false, - )) - .await; + issue_local_invalid_statement( + &mut sender, + session_index, + candidate_hash, + candidate.clone(), + ); metrics_guard.take().on_approval_invalid(); return ApprovalState::failed(validator_index, candidate_hash) @@ -2326,14 +2379,12 @@ async fn launch_approval( "Detected invalid candidate as an approval checker.", ); - sender - .send_message(DisputeCoordinatorMessage::IssueLocalStatement( - session_index, - candidate_hash, - candidate.clone(), - false, - )) - .await; + issue_local_invalid_statement( + &mut sender, + session_index, + candidate_hash, + candidate.clone(), + ); metrics_guard.take().on_approval_invalid(); return ApprovalState::failed(validator_index, candidate_hash) @@ -2468,17 +2519,6 @@ async fn issue_approval( }, }; - // Record our statement in the dispute coordinator for later - // participation in disputes on the same candidate. - let signed_dispute_statement = SignedDisputeStatement::new_checked( - DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking), - candidate_hash, - session, - validator_pubkey.clone(), - sig.clone(), - ) - .expect("Statement just signed; should pass checks; qed"); - gum::trace!( target: LOG_TARGET, ?candidate_hash, @@ -2487,25 +2527,7 @@ async fn issue_approval( "Issuing approval vote", ); - let candidate_receipt = candidate_entry.candidate_receipt().clone(); - - let inform_disputes_action = if candidate_entry.has_approved(validator_index) { - // The approval voting system requires a separate approval for each assignment - // to the candidate. It's possible that there are semi-duplicate approvals, - // but we only need to inform the dispute coordinator about the first expressed - // opinion by the validator about the candidate. 
-		Some(Action::InformDisputeCoordinator {
-			candidate_hash,
-			candidate_receipt,
-			session,
-			dispute_statement: signed_dispute_statement,
-			validator_index,
-		})
-	} else {
-		None
-	};
-
-	let mut actions = advance_approval_state(
+	let actions = advance_approval_state(
 		state,
 		db,
 		metrics,
@@ -2527,9 +2549,6 @@
 		},
 	));
-
-	// dispatch to dispute coordinator.
-	actions.extend(inform_disputes_action);
-
 	Ok(actions)
 }
@@ -2546,3 +2565,29 @@ fn sign_approval(
 	Some(key.sign(&payload[..]))
 }
+
+/// Send `IssueLocalStatement` to dispute-coordinator.
+fn issue_local_invalid_statement<Sender>(
+	sender: &mut Sender,
+	session_index: SessionIndex,
+	candidate_hash: CandidateHash,
+	candidate: CandidateReceipt,
+) where
+	Sender: overseer::ApprovalVotingSenderTrait,
+{
+	// We need to send an unbounded message here to break a cycle:
+	// DisputeCoordinatorMessage::IssueLocalStatement ->
+	// ApprovalVotingMessage::GetApprovalSignaturesForCandidate.
+	//
+	// Use of unbounded _should_ be fine here, as raising a dispute should be an
+	// exceptional event. Even in case of bugs: there can be no more such requests
+	// than there are slots per block. Also, for sending this message a full
+	// recovery and validation procedure took place, which takes longer than
+	// issuing a local statement + import.
+	sender.send_unbounded_message(DisputeCoordinatorMessage::IssueLocalStatement(
+		session_index,
+		candidate_hash,
+		candidate.clone(),
+		false,
+	));
+}
diff --git a/node/core/approval-voting/src/tests.rs b/node/core/approval-voting/src/tests.rs
index 6182b4a99e5c..66d1402ed6dc 100644
--- a/node/core/approval-voting/src/tests.rs
+++ b/node/core/approval-voting/src/tests.rs
@@ -577,7 +577,6 @@ async fn check_and_import_approval(
 	candidate_hash: CandidateHash,
 	session_index: SessionIndex,
 	expect_chain_approved: bool,
-	expect_coordinator: bool,
 	signature_opt: Option<ValidatorSignature>,
 ) -> oneshot::Receiver<ApprovalCheckResult> {
 	let signature = signature_opt.unwrap_or(sign_approval(
@@ -604,18 +603,6 @@
 			}
 		);
 	}
-	if expect_coordinator {
-		assert_matches!(
-			overseer_recv(overseer).await,
-			AllMessages::DisputeCoordinator(DisputeCoordinatorMessage::ImportStatements {
-				candidate_hash: c_hash,
-				pending_confirmation: None,
-				..
- }) => { - assert_eq!(c_hash, candidate_hash); - } - ); - } rx } @@ -1160,7 +1147,6 @@ fn subsystem_rejects_approval_if_no_candidate_entry() { candidate_hash, session_index, false, - false, None, ) .await; @@ -1202,7 +1188,6 @@ fn subsystem_rejects_approval_if_no_block_entry() { candidate_hash, session_index, false, - false, None, ) .await; @@ -1263,7 +1248,6 @@ fn subsystem_rejects_approval_before_assignment() { candidate_hash, session_index, false, - false, None, ) .await; @@ -1488,7 +1472,6 @@ fn subsystem_accepts_and_imports_approval_after_assignment() { candidate_hash, session_index, true, - true, None, ) .await; @@ -1582,7 +1565,6 @@ fn subsystem_second_approval_import_only_schedules_wakeups() { candidate_hash, session_index, false, - true, None, ) .await; @@ -1600,7 +1582,6 @@ fn subsystem_second_approval_import_only_schedules_wakeups() { candidate_hash, session_index, false, - false, None, ) .await; @@ -1936,7 +1917,6 @@ fn import_checked_approval_updates_entries_and_schedules() { candidate_hash, session_index, false, - true, Some(sig_a), ) .await; @@ -1964,7 +1944,6 @@ fn import_checked_approval_updates_entries_and_schedules() { candidate_hash, session_index, true, - true, Some(sig_b), ) .await; @@ -2104,7 +2083,6 @@ fn subsystem_import_checked_approval_sets_one_block_bit_at_a_time() { *candidate_hash, session_index, expect_block_approved, - true, Some(signature), ) .await; @@ -2208,7 +2186,6 @@ fn approved_ancestor_test( candidate_hash, i as u32 + 1, true, - true, None, ) .await; @@ -2586,7 +2563,6 @@ where candidate_hash, 1, expect_chain_approved, - true, Some(sign_approval( validators[validator_index as usize].clone(), candidate_hash, @@ -2930,7 +2906,6 @@ fn pre_covers_dont_stall_approval() { candidate_hash, session_index, false, - true, Some(sig_b), ) .await; @@ -2946,7 +2921,6 @@ fn pre_covers_dont_stall_approval() { candidate_hash, session_index, false, - true, Some(sig_c), ) .await; @@ -3101,7 +3075,6 @@ fn waits_until_approving_assignments_are_old_enough() { candidate_hash, session_index, false, - true, Some(sig_a), ) .await; @@ -3118,7 +3091,6 @@ fn waits_until_approving_assignments_are_old_enough() { candidate_hash, session_index, false, - true, Some(sig_b), ) .await; diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index a189b5955c89..01c2a614855f 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -31,15 +31,15 @@ use futures::{ use error::{Error, FatalResult}; use polkadot_node_primitives::{ - AvailableData, InvalidCandidate, PoV, SignedDisputeStatement, SignedFullStatement, Statement, - ValidationResult, BACKING_EXECUTION_TIMEOUT, + AvailableData, InvalidCandidate, PoV, SignedFullStatement, Statement, ValidationResult, + BACKING_EXECUTION_TIMEOUT, }; use polkadot_node_subsystem::{ jaeger, messages::{ AvailabilityDistributionMessage, AvailabilityStoreMessage, CandidateBackingMessage, - CandidateValidationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage, - ProvisionableData, ProvisionerMessage, RuntimeApiRequest, StatementDistributionMessage, + CandidateValidationMessage, CollatorProtocolMessage, ProvisionableData, ProvisionerMessage, + RuntimeApiRequest, StatementDistributionMessage, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, SpawnedSubsystem, Stage, SubsystemError, @@ -50,8 +50,8 @@ use polkadot_node_subsystem_util::{ }; use polkadot_primitives::v2::{ BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CollatorId, - CommittedCandidateReceipt, 
CoreIndex, CoreState, Hash, Id as ParaId, SessionIndex, - SigningContext, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, + CommittedCandidateReceipt, CoreIndex, CoreState, Hash, Id as ParaId, SigningContext, + ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, }; use sp_keystore::SyncCryptoStorePtr; use statement_table::{ @@ -380,7 +380,6 @@ async fn handle_active_leaves_update( let job = CandidateBackingJob { parent, - session_index, assignment, required_collator, issued_statements: HashSet::new(), @@ -411,8 +410,6 @@ struct JobAndSpan { struct CandidateBackingJob { /// The hash of the relay parent on top of which this job is doing it's work. parent: Hash, - /// The session index this corresponds to. - session_index: SessionIndex, /// The `ParaId` assigned to this validator assignment: Option, /// The collator required to author the candidate, if any. @@ -783,8 +780,6 @@ async fn validate_and_make_available( tx_command.send((relay_parent, make_command(res))).await.map_err(Into::into) } -struct ValidatorIndexOutOfBounds; - #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] impl CandidateBackingJob { async fn handle_validated_candidate_command( @@ -1014,21 +1009,6 @@ impl CandidateBackingJob { ) }; - if let Err(ValidatorIndexOutOfBounds) = self - .dispatch_new_statement_to_dispute_coordinator(ctx.sender(), candidate_hash, &statement) - .await - { - gum::warn!( - target: LOG_TARGET, - session_index = ?self.session_index, - relay_parent = ?self.parent, - validator_index = statement.validator_index().0, - "Supposedly 'Signed' statement has validator index out of bounds." - ); - - return Ok(None) - } - let stmt = primitive_statement_to_table(statement); let summary = self.table.import_statement(&self.table_context, stmt); @@ -1083,67 +1063,6 @@ impl CandidateBackingJob { Ok(summary) } - /// The dispute coordinator keeps track of all statements by validators about every recent - /// candidate. - /// - /// When importing a statement, this should be called access the candidate receipt either - /// from the statement itself or from the underlying statement table in order to craft - /// and dispatch the notification to the dispute coordinator. - /// - /// This also does bounds-checking on the validator index and will return an error if the - /// validator index is out of bounds for the current validator set. It's expected that - /// this should never happen due to the interface of the candidate backing subsystem - - /// the networking component responsible for feeding statements to the backing subsystem - /// is meant to check the signature and provenance of all statements before submission. - async fn dispatch_new_statement_to_dispute_coordinator( - &self, - sender: &mut impl overseer::CandidateBackingSenderTrait, - candidate_hash: CandidateHash, - statement: &SignedFullStatement, - ) -> Result<(), ValidatorIndexOutOfBounds> { - // Dispatch the statement to the dispute coordinator. 
- let validator_index = statement.validator_index(); - let signing_context = - SigningContext { parent_hash: self.parent, session_index: self.session_index }; - - let validator_public = match self.table_context.validators.get(validator_index.0 as usize) { - None => return Err(ValidatorIndexOutOfBounds), - Some(v) => v, - }; - - let maybe_candidate_receipt = match statement.payload() { - Statement::Seconded(receipt) => Some(receipt.to_plain()), - Statement::Valid(candidate_hash) => { - // Valid statements are only supposed to be imported - // once we've seen at least one `Seconded` statement. - self.table.get_candidate(&candidate_hash).map(|c| c.to_plain()) - }, - }; - - let maybe_signed_dispute_statement = SignedDisputeStatement::from_backing_statement( - statement.as_unchecked(), - signing_context, - validator_public.clone(), - ) - .ok(); - - if let (Some(candidate_receipt), Some(dispute_statement)) = - (maybe_candidate_receipt, maybe_signed_dispute_statement) - { - sender - .send_message(DisputeCoordinatorMessage::ImportStatements { - candidate_hash, - candidate_receipt, - session: self.session_index, - statements: vec![(dispute_statement, validator_index)], - pending_confirmation: None, - }) - .await; - } - - Ok(()) - } - async fn handle_second_msg( &mut self, root_span: &jaeger::Span, diff --git a/node/core/backing/src/tests.rs b/node/core/backing/src/tests.rs index 0243c68c7c4c..24f671ec5371 100644 --- a/node/core/backing/src/tests.rs +++ b/node/core/backing/src/tests.rs @@ -66,12 +66,6 @@ struct TestState { relay_parent: Hash, } -impl TestState { - fn session(&self) -> SessionIndex { - self.signing_context.session_index - } -} - impl Default for TestState { fn default() -> Self { let chain_a = ParaId::from(1); @@ -273,34 +267,6 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS ); } -async fn test_dispute_coordinator_notifications( - virtual_overseer: &mut VirtualOverseer, - candidate_hash: CandidateHash, - session: SessionIndex, - validator_indices: Vec, -) { - for validator_index in validator_indices { - assert_matches!( - virtual_overseer.recv().await, - AllMessages::DisputeCoordinator( - DisputeCoordinatorMessage::ImportStatements { - candidate_hash: c_hash, - candidate_receipt: c_receipt, - session: s, - statements, - pending_confirmation: None, - } - ) => { - assert_eq!(c_hash, candidate_hash); - assert_eq!(c_receipt.hash(), c_hash); - assert_eq!(s, session); - assert_eq!(statements.len(), 1); - assert_eq!(statements[0].1, validator_index); - } - ) - } -} - // Test that a `CandidateBackingMessage::Second` issues validation work // and in case validation is successful issues a `StatementDistributionMessage`. #[test] @@ -364,14 +330,6 @@ fn backing_second_works() { } ); - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate.hash(), - test_state.session(), - vec![ValidatorIndex(0)], - ) - .await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -469,14 +427,6 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate_a_hash, - test_state.session(), - vec![ValidatorIndex(2)], - ) - .await; - // Sending a `Statement::Seconded` for our assignment will start // validation process. The first thing requested is the PoV. 
assert_matches!( @@ -526,14 +476,6 @@ fn backing_works() { } ); - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate_a_hash, - test_state.session(), - vec![ValidatorIndex(0)], - ) - .await; - assert_matches!( virtual_overseer.recv().await, AllMessages::Provisioner( @@ -560,14 +502,6 @@ fn backing_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate_a_hash, - test_state.session(), - vec![ValidatorIndex(5)], - ) - .await; - virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( ActiveLeavesUpdate::stop_work(test_state.relay_parent), @@ -664,14 +598,6 @@ fn backing_works_while_validation_ongoing() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate_a.hash(), - test_state.session(), - vec![ValidatorIndex(2)], - ) - .await; - // Sending a `Statement::Seconded` for our assignment will start // validation process. The first thing requested is PoV from the // `PoVDistribution`. @@ -711,14 +637,6 @@ fn backing_works_while_validation_ongoing() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate_a.hash(), - test_state.session(), - vec![ValidatorIndex(5)], - ) - .await; - // Candidate gets backed entirely by other votes. assert_matches!( virtual_overseer.recv().await, @@ -738,14 +656,6 @@ fn backing_works_while_validation_ongoing() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate_a.hash(), - test_state.session(), - vec![ValidatorIndex(3)], - ) - .await; - let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( test_state.relay_parent, @@ -845,14 +755,6 @@ fn backing_misbehavior_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate_a_hash, - test_state.session(), - vec![ValidatorIndex(2)], - ) - .await; - assert_matches!( virtual_overseer.recv().await, AllMessages::AvailabilityDistribution( @@ -898,14 +800,6 @@ fn backing_misbehavior_works() { } ); - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate_a_hash, - test_state.session(), - vec![ValidatorIndex(0)], - ) - .await; - assert_matches!( virtual_overseer.recv().await, AllMessages::Provisioner( @@ -937,14 +831,6 @@ fn backing_misbehavior_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate_a_hash, - test_state.session(), - vec![ValidatorIndex(2)], - ) - .await; - assert_matches!( virtual_overseer.recv().await, AllMessages::Provisioner( @@ -1087,14 +973,6 @@ fn backing_dont_second_invalid() { } ); - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate_b.hash(), - test_state.session(), - vec![ValidatorIndex(0)], - ) - .await; - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1163,14 +1041,6 @@ fn backing_second_after_first_fails_works() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - 
test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate.hash(), - test_state.session(), - vec![ValidatorIndex(2)], - ) - .await; - // Subsystem requests PoV and requests validation. assert_matches!( virtual_overseer.recv().await, @@ -1297,14 +1167,6 @@ fn backing_works_after_failed_validation() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate.hash(), - test_state.session(), - vec![ValidatorIndex(2)], - ) - .await; - // Subsystem requests PoV and requests validation. assert_matches!( virtual_overseer.recv().await, @@ -1615,14 +1477,6 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_a.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate.hash(), - test_state.session(), - vec![ValidatorIndex(2)], - ) - .await; - // Subsystem requests PoV and requests validation. // We cancel - should mean retry on next backing statement. assert_matches!( @@ -1642,14 +1496,6 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_b.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate.hash(), - test_state.session(), - vec![ValidatorIndex(3)], - ) - .await; - // Not deterministic which message comes first: for _ in 0u32..2 { match virtual_overseer.recv().await { @@ -1674,14 +1520,6 @@ fn retry_works() { CandidateBackingMessage::Statement(test_state.relay_parent, signed_c.clone()); virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate.hash(), - test_state.session(), - vec![ValidatorIndex(5)], - ) - .await; - assert_matches!( virtual_overseer.recv().await, AllMessages::AvailabilityDistribution( @@ -1806,14 +1644,6 @@ fn observes_backing_even_if_not_validator() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate_a_hash, - test_state.session(), - vec![ValidatorIndex(0), ValidatorIndex(5)], - ) - .await; - assert_matches!( virtual_overseer.recv().await, AllMessages::Provisioner( @@ -1831,14 +1661,6 @@ fn observes_backing_even_if_not_validator() { virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; - test_dispute_coordinator_notifications( - &mut virtual_overseer, - candidate_a_hash, - test_state.session(), - vec![ValidatorIndex(2)], - ) - .await; - virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( ActiveLeavesUpdate::stop_work(test_state.relay_parent), diff --git a/node/core/dispute-coordinator/Cargo.toml b/node/core/dispute-coordinator/Cargo.toml index 96174033ab7d..2f60ba065fe3 100644 --- a/node/core/dispute-coordinator/Cargo.toml +++ b/node/core/dispute-coordinator/Cargo.toml @@ -30,6 +30,7 @@ sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "maste assert_matches = "1.4.0" test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" } futures-timer = "3.0.2" +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } [features] # If not enabled, the dispute coordinator will do nothing. 
diff --git a/node/core/dispute-coordinator/src/db/v1.rs b/node/core/dispute-coordinator/src/db/v1.rs
index 0f0883649b5b..4d33949db644 100644
--- a/node/core/dispute-coordinator/src/db/v1.rs
+++ b/node/core/dispute-coordinator/src/db/v1.rs
@@ -213,8 +213,8 @@ impl From<CandidateVotes> for polkadot_node_primitives::CandidateVotes {
 	fn from(db_votes: CandidateVotes) -> polkadot_node_primitives::CandidateVotes {
 		polkadot_node_primitives::CandidateVotes {
 			candidate_receipt: db_votes.candidate_receipt,
-			valid: db_votes.valid,
-			invalid: db_votes.invalid,
+			valid: db_votes.valid.into_iter().map(|(kind, i, sig)| (i, (kind, sig))).collect(),
+			invalid: db_votes.invalid.into_iter().map(|(kind, i, sig)| (i, (kind, sig))).collect(),
 		}
 	}
 }
@@ -223,8 +223,12 @@ impl From<polkadot_node_primitives::CandidateVotes> for CandidateVotes {
 	fn from(primitive_votes: polkadot_node_primitives::CandidateVotes) -> CandidateVotes {
 		CandidateVotes {
 			candidate_receipt: primitive_votes.candidate_receipt,
-			valid: primitive_votes.valid,
-			invalid: primitive_votes.invalid,
+			valid: primitive_votes
+				.valid
+				.into_iter()
+				.map(|(i, (kind, sig))| (kind, i, sig))
+				.collect(),
+			invalid: primitive_votes.invalid.into_iter().map(|(i, (k, sig))| (k, i, sig)).collect(),
 		}
 	}
 }
diff --git a/node/core/dispute-coordinator/src/error.rs b/node/core/dispute-coordinator/src/error.rs
index 4306413a6ab9..7a059b8861d8 100644
--- a/node/core/dispute-coordinator/src/error.rs
+++ b/node/core/dispute-coordinator/src/error.rs
@@ -122,7 +122,7 @@ impl JfyiError {
 	pub fn log(self) {
 		match self {
 			// don't spam the log with spurious errors
-			Self::Runtime(_) | Self::Oneshot(_) => {
+			Self::Runtime(runtime::Error::RuntimeRequestCanceled(_)) | Self::Oneshot(_) => {
 				gum::debug!(target: LOG_TARGET, error = ?self)
 			},
 			// it's worth reporting otherwise
diff --git a/node/core/dispute-coordinator/src/import.rs b/node/core/dispute-coordinator/src/import.rs
new file mode 100644
index 000000000000..020d04792191
--- /dev/null
+++ b/node/core/dispute-coordinator/src/import.rs
@@ -0,0 +1,564 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Vote import logic.
+//!
+//! This module encapsulates the actual logic for importing new votes and provides easy access to
+//! the current state of votes for a particular candidate.
+//!
+//! In particular there is `CandidateVoteState`, which tells what can be concluded for a particular
+//! set of votes: e.g. whether a dispute is ongoing, whether it is confirmed, concluded, ..
+//!
+//! Then there is `ImportResult`, which reveals "dynamic" information about what changed once
+//! additional votes got imported on top of an existing `CandidateVoteState`: e.g. whether a
+//! dispute was raised or got confirmed due to the import.
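As a reader's aid before the code itself: a minimal sketch of how the import API introduced in this file is meant to be driven. It mirrors what the `initialized.rs` changes further below do. The `drive_import` function and its arguments are hypothetical; the types and methods are the ones defined in this file:

// Hypothetical driver for the import API (sketch, not subsystem code).
// Assumes the same imports as `import.rs` itself.
fn drive_import(
	keystore: &LocalKeystore,
	session_window: &RollingSessionWindow,
	session: SessionIndex,
	stored_votes: Option<CandidateVotes>,
	receipt: CandidateReceipt,
	statements: Vec<(SignedDisputeStatement, ValidatorIndex)>,
) -> Option<CandidateVotes> {
	// `None` here means the session fell outside the rolling window.
	let env = CandidateEnvironment::new(keystore, session_window, session)?;
	// Start from previously stored votes, or from a bare receipt.
	let old_state = match stored_votes {
		Some(votes) => CandidateVoteState::new(votes, &env),
		None => CandidateVoteState::new_from_receipt(receipt),
	};
	// Layer the new statements on top and inspect what changed.
	let import_result = old_state.import_statements(&env, statements);
	if import_result.is_freshly_disputed() || import_result.is_freshly_concluded() {
		// This is the point where the dispute coordinator additionally
		// requests approval signatures and folds them in via
		// `import_result.import_approval_votes(&env, signatures)`.
	}
	// `None` if the import was redundant and nothing needs persisting.
	import_result.into_updated_votes()
}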
+
+use std::collections::{BTreeMap, HashMap, HashSet};
+
+use polkadot_node_primitives::{CandidateVotes, SignedDisputeStatement};
+use polkadot_node_subsystem_util::rolling_session_window::RollingSessionWindow;
+use polkadot_primitives::v2::{
+	CandidateReceipt, DisputeStatement, SessionIndex, SessionInfo, ValidDisputeStatementKind,
+	ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature,
+};
+use sc_keystore::LocalKeystore;
+
+use crate::LOG_TARGET;
+
+/// (Session) environment of a candidate.
+pub struct CandidateEnvironment<'a> {
+	/// The session the candidate appeared in.
+	session_index: SessionIndex,
+	/// Session for above index.
+	session: &'a SessionInfo,
+	/// Validator indices controlled by this node.
+	controlled_indices: HashSet<ValidatorIndex>,
+}
+
+impl<'a> CandidateEnvironment<'a> {
+	/// Create `CandidateEnvironment`.
+	///
+	/// Returns `None` in case the session is outside of the session window.
+	pub fn new(
+		keystore: &LocalKeystore,
+		session_window: &'a RollingSessionWindow,
+		session_index: SessionIndex,
+	) -> Option<Self> {
+		let session = session_window.session_info(session_index)?;
+		let controlled_indices = find_controlled_validator_indices(keystore, &session.validators);
+		Some(Self { session_index, session, controlled_indices })
+	}
+
+	/// Validators in the candidate's session.
+	pub fn validators(&self) -> &Vec<ValidatorId> {
+		&self.session.validators
+	}
+
+	/// `SessionInfo` for the candidate's session.
+	pub fn session_info(&self) -> &SessionInfo {
+		&self.session
+	}
+
+	/// Retrieve `SessionIndex` for this environment.
+	pub fn session_index(&self) -> SessionIndex {
+		self.session_index
+	}
+
+	/// Indices controlled by this node.
+	pub fn controlled_indices(&'a self) -> &'a HashSet<ValidatorIndex> {
+		&self.controlled_indices
+	}
+}
+
+/// Whether or not we already issued some statement about a candidate.
+pub enum OwnVoteState {
+	/// We already voted/issued a statement for the candidate.
+	Voted,
+	/// We already voted/issued a statement for the candidate and it was an approval vote.
+	///
+	/// Needs special treatment as we have to make sure to propagate it to peers, to guarantee the
+	/// dispute can conclude.
+	VotedApproval(Vec<(ValidatorIndex, ValidatorSignature)>),
+	/// We have not yet voted on the dispute.
+	NoVote,
+}
+
+impl OwnVoteState {
+	fn new<'a>(votes: &CandidateVotes, env: &CandidateEnvironment<'a>) -> Self {
+		let mut our_valid_votes = env
+			.controlled_indices()
+			.iter()
+			.filter_map(|i| votes.valid.get_key_value(i))
+			.peekable();
+		let mut our_invalid_votes =
+			env.controlled_indices.iter().filter_map(|i| votes.invalid.get_key_value(i));
+		let has_valid_votes = our_valid_votes.peek().is_some();
+		let has_invalid_votes = our_invalid_votes.next().is_some();
+		let our_approval_votes: Vec<_> = our_valid_votes
+			.filter_map(|(index, (k, sig))| {
+				if let ValidDisputeStatementKind::ApprovalChecking = k {
+					Some((*index, sig.clone()))
+				} else {
+					None
+				}
+			})
+			.collect();
+
+		if !our_approval_votes.is_empty() {
+			return Self::VotedApproval(our_approval_votes)
+		}
+		if has_valid_votes || has_invalid_votes {
+			return Self::Voted
+		}
+		Self::NoVote
+	}
+
+	/// Whether or not we issued a statement for the candidate already.
+	fn voted(&self) -> bool {
+		match self {
+			Self::Voted | Self::VotedApproval(_) => true,
+			Self::NoVote => false,
+		}
+	}
+
+	/// Get own approval votes, if any.
+	fn approval_votes(&self) -> Option<&Vec<(ValidatorIndex, ValidatorSignature)>> {
+		match self {
+			Self::VotedApproval(votes) => Some(&votes),
+			_ => None,
+		}
+	}
+}
+
+/// Complete state of votes for a candidate.
+///
+/// All votes + information whether a dispute is ongoing, confirmed, concluded, whether we already
+/// voted, ...
+pub struct CandidateVoteState<Votes> {
+	/// Votes already existing for the candidate + receipt.
+	votes: Votes,
+
+	/// Information about own votes:
+	own_vote: OwnVoteState,
+
+	/// Whether or not the dispute concluded invalid.
+	concluded_invalid: bool,
+
+	/// Whether or not the dispute concluded valid.
+	///
+	/// Note: Due to equivocations it is technically possible for a dispute to conclude both valid
+	/// and invalid. In that case the invalid result takes precedence.
+	concluded_valid: bool,
+
+	/// There is an ongoing dispute and we reached f+1 votes -> the dispute is confirmed
+	///
+	/// as at least one honest validator cast a vote for the candidate.
+	is_confirmed: bool,
+
+	/// Whether or not we have an ongoing dispute.
+	is_disputed: bool,
+}
+
+impl CandidateVoteState<CandidateVotes> {
+	/// Create an empty `CandidateVoteState`
+	///
+	/// in case there have not been any previous votes.
+	pub fn new_from_receipt(candidate_receipt: CandidateReceipt) -> Self {
+		let votes =
+			CandidateVotes { candidate_receipt, valid: BTreeMap::new(), invalid: BTreeMap::new() };
+		Self {
+			votes,
+			own_vote: OwnVoteState::NoVote,
+			concluded_invalid: false,
+			concluded_valid: false,
+			is_confirmed: false,
+			is_disputed: false,
+		}
+	}
+
+	/// Create a new `CandidateVoteState` from already existing votes.
+	pub fn new<'a>(votes: CandidateVotes, env: &CandidateEnvironment<'a>) -> Self {
+		let own_vote = OwnVoteState::new(&votes, env);
+
+		let n_validators = env.validators().len();
+
+		let supermajority_threshold =
+			polkadot_primitives::v2::supermajority_threshold(n_validators);
+
+		let concluded_invalid = votes.invalid.len() >= supermajority_threshold;
+		let concluded_valid = votes.valid.len() >= supermajority_threshold;
+
+		// We have a dispute, if we have votes on both sides:
+		let is_disputed = !votes.invalid.is_empty() && !votes.valid.is_empty();
+
+		let byzantine_threshold = polkadot_primitives::v2::byzantine_threshold(n_validators);
+		let is_confirmed = votes.voted_indices().len() > byzantine_threshold && is_disputed;
+
+		Self { votes, own_vote, concluded_invalid, concluded_valid, is_confirmed, is_disputed }
+	}
+
+	/// Import fresh statements.
+	///
+	/// Result will be a new state plus information about things that changed due to the import.
+	pub fn import_statements(
+		self,
+		env: &CandidateEnvironment,
+		statements: Vec<(SignedDisputeStatement, ValidatorIndex)>,
+	) -> ImportResult {
+		let (mut votes, old_state) = self.into_old_state();
+
+		let mut new_invalid_voters = Vec::new();
+		let mut imported_invalid_votes = 0;
+		let mut imported_valid_votes = 0;
+
+		let expected_candidate_hash = votes.candidate_receipt.hash();
+
+		for (statement, val_index) in statements {
+			if env
+				.validators()
+				.get(val_index.0 as usize)
+				.map_or(true, |v| v != statement.validator_public())
+			{
+				gum::error!(
+					target: LOG_TARGET,
+					?val_index,
+					session = ?env.session_index,
+					claimed_key = ?statement.validator_public(),
+					"Validator index doesn't match claimed key",
+				);
+
+				continue
+			}
+			if statement.candidate_hash() != &expected_candidate_hash {
+				gum::error!(
+					target: LOG_TARGET,
+					?val_index,
+					session = ?env.session_index,
+					given_candidate_hash = ?statement.candidate_hash(),
+					?expected_candidate_hash,
+					"Vote is for unexpected candidate!",
+				);
+				continue
+			}
+			if statement.session_index() != env.session_index() {
+				gum::error!(
+					target: LOG_TARGET,
+					?val_index,
+					session = ?env.session_index,
+					given_candidate_hash = ?statement.candidate_hash(),
+					?expected_candidate_hash,
+					"Vote is for unexpected session!",
+				);
+				continue
+			}
+
+			match statement.statement() {
+				DisputeStatement::Valid(valid_kind) => {
+					let fresh = insert_into_statements(
+						&mut votes.valid,
+						*valid_kind,
+						val_index,
+						statement.into_validator_signature(),
+					);
+
+					if fresh {
+						imported_valid_votes += 1;
+					}
+				},
+				DisputeStatement::Invalid(invalid_kind) => {
+					let fresh = insert_into_statements(
+						&mut votes.invalid,
+						*invalid_kind,
+						val_index,
+						statement.into_validator_signature(),
+					);
+
+					if fresh {
+						new_invalid_voters.push(val_index);
+						imported_invalid_votes += 1;
+					}
+				},
+			}
+		}
+
+		let new_state = Self::new(votes, env);
+
+		ImportResult {
+			old_state,
+			new_state,
+			imported_invalid_votes,
+			imported_valid_votes,
+			imported_approval_votes: 0,
+			new_invalid_voters,
+		}
+	}
+
+	/// Retrieve `CandidateReceipt` in `CandidateVotes`.
+	pub fn candidate_receipt(&self) -> &CandidateReceipt {
+		&self.votes.candidate_receipt
+	}
+
+	/// Extract `CandidateVotes` for handling import of new statements.
+	fn into_old_state(self) -> (CandidateVotes, CandidateVoteState<()>) {
+		let CandidateVoteState {
+			votes,
+			own_vote,
+			concluded_invalid,
+			concluded_valid,
+			is_confirmed,
+			is_disputed,
+		} = self;
+		(
+			votes,
+			CandidateVoteState {
+				votes: (),
+				own_vote,
+				concluded_invalid,
+				concluded_valid,
+				is_confirmed,
+				is_disputed,
+			},
+		)
+	}
+}
+
+impl<V> CandidateVoteState<V> {
+	/// Whether or not we have an ongoing dispute.
+	pub fn is_disputed(&self) -> bool {
+		self.is_disputed
+	}
+
+	/// Whether there is an ongoing confirmed dispute.
+	///
+	/// This checks whether there is a dispute ongoing and we have more than byzantine threshold
+	/// votes.
+	pub fn is_confirmed(&self) -> bool {
+		self.is_confirmed
+	}
+
+	/// This machine already cast some vote in that dispute/for that candidate.
+	pub fn has_own_vote(&self) -> bool {
+		self.own_vote.voted()
+	}
+
+	/// Own approval votes if any:
+	pub fn own_approval_votes(&self) -> Option<&Vec<(ValidatorIndex, ValidatorSignature)>> {
+		self.own_vote.approval_votes()
+	}
+
+	/// Whether or not this dispute has already enough valid votes to conclude.
+	pub fn is_concluded_valid(&self) -> bool {
+		self.concluded_valid
+	}
+
+	/// Whether or not this dispute has already enough invalid votes to conclude.
+	pub fn is_concluded_invalid(&self) -> bool {
+		self.concluded_invalid
+	}
+
+	/// Access to underlying votes.
+	pub fn votes(&self) -> &V {
+		&self.votes
+	}
+}
+
+/// An ongoing statement/vote import.
+pub struct ImportResult {
+	/// The state we had before importing new statements.
+	old_state: CandidateVoteState<()>,
+	/// The new state after importing the new statements.
+	new_state: CandidateVoteState<CandidateVotes>,
+	/// New invalid voters as of this import.
+	new_invalid_voters: Vec<ValidatorIndex>,
+	/// Number of successfully imported invalid votes.
+	imported_invalid_votes: u32,
+	/// Number of successfully imported valid votes.
+	imported_valid_votes: u32,
+	/// Number of approval votes imported via `import_approval_votes()`.
+	///
+	/// And only those: If normal import included approval votes, those are not counted here.
+	///
+	/// In other words, without a call to `import_approval_votes()` this will always be 0.
+	imported_approval_votes: u32,
+}
+
+impl ImportResult {
+	/// Whether or not anything has changed due to the import.
+	pub fn votes_changed(&self) -> bool {
+		self.imported_valid_votes != 0 || self.imported_invalid_votes != 0
+	}
+
+	/// The dispute state has changed in some way.
+	///
+	/// - freshly disputed
+	/// - freshly confirmed
+	/// - freshly concluded (valid or invalid)
+	pub fn dispute_state_changed(&self) -> bool {
+		self.is_freshly_disputed() || self.is_freshly_confirmed() || self.is_freshly_concluded()
+	}
+
+	/// State as it was before import.
+	pub fn old_state(&self) -> &CandidateVoteState<()> {
+		&self.old_state
+	}
+
+	/// State after import.
+	pub fn new_state(&self) -> &CandidateVoteState<CandidateVotes> {
+		&self.new_state
+	}
+
+	/// New "invalid" voters encountered during import.
+	pub fn new_invalid_voters(&self) -> &Vec<ValidatorIndex> {
+		&self.new_invalid_voters
+	}
+
+	/// Number of imported valid votes.
+	pub fn imported_valid_votes(&self) -> u32 {
+		self.imported_valid_votes
+	}
+
+	/// Number of imported invalid votes.
+	pub fn imported_invalid_votes(&self) -> u32 {
+		self.imported_invalid_votes
+	}
+
+	/// Number of imported approval votes.
+	pub fn imported_approval_votes(&self) -> u32 {
+		self.imported_approval_votes
+	}
+
+	/// Whether we now have a dispute and did not prior to the import.
+	pub fn is_freshly_disputed(&self) -> bool {
+		!self.old_state().is_disputed() && self.new_state().is_disputed()
+	}
+
+	/// Whether we just surpassed the byzantine threshold.
+	pub fn is_freshly_confirmed(&self) -> bool {
+		!self.old_state().is_confirmed() && self.new_state().is_confirmed()
+	}
+
+	/// Whether or not any dispute just concluded valid due to the import.
+	pub fn is_freshly_concluded_valid(&self) -> bool {
+		!self.old_state().is_concluded_valid() && self.new_state().is_concluded_valid()
+	}
+
+	/// Whether or not any dispute just concluded invalid due to the import.
+	pub fn is_freshly_concluded_invalid(&self) -> bool {
+		!self.old_state().is_concluded_invalid() && self.new_state().is_concluded_invalid()
+	}
+
+	/// Whether or not any dispute just concluded either invalid or valid due to the import.
+	pub fn is_freshly_concluded(&self) -> bool {
+		self.is_freshly_concluded_invalid() || self.is_freshly_concluded_valid()
+	}
+
+	/// Modify this `ImportResult` by importing additional approval votes.
+	///
+	/// Both results and `new_state` will be changed as if those approval votes had been in the
+	/// original import.
+	pub fn import_approval_votes(
+		self,
+		env: &CandidateEnvironment,
+		approval_votes: HashMap<ValidatorIndex, ValidatorSignature>,
+	) -> Self {
+		let Self {
+			old_state,
+			new_state,
+			new_invalid_voters,
+			mut imported_valid_votes,
+			imported_invalid_votes,
+			mut imported_approval_votes,
+		} = self;
+
+		let (mut votes, _) = new_state.into_old_state();
+
+		for (index, sig) in approval_votes.into_iter() {
+			debug_assert!(
+				{
+					let pub_key = &env.session_info().validators[index.0 as usize];
+					let candidate_hash = votes.candidate_receipt.hash();
+					let session_index = env.session_index();
+					DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking)
+						.check_signature(pub_key, candidate_hash, session_index, &sig)
+						.is_ok()
+				},
+				"Signature check for imported approval votes failed! This is a serious bug. Session: {:?}, candidate hash: {:?}, validator index: {:?}", env.session_index(), votes.candidate_receipt.hash(), index
+			);
+			if insert_into_statements(
+				&mut votes.valid,
+				ValidDisputeStatementKind::ApprovalChecking,
+				index,
+				sig,
+			) {
+				imported_valid_votes += 1;
+				imported_approval_votes += 1;
+			}
+		}
+
+		let new_state = CandidateVoteState::new(votes, env);
+
+		Self {
+			old_state,
+			new_state,
+			new_invalid_voters,
+			imported_valid_votes,
+			imported_invalid_votes,
+			imported_approval_votes,
+		}
+	}
+
+	/// All done, give me those votes.
+	///
+	/// Returns `None` in case nothing has changed (import was redundant).
+	pub fn into_updated_votes(self) -> Option<CandidateVotes> {
+		if self.votes_changed() {
+			let CandidateVoteState { votes, .. } = self.new_state;
+			Some(votes)
+		} else {
+			None
+		}
+	}
+}
+
+/// Find indices controlled by this validator.
+///
+/// That is, all `ValidatorIndex`es we have private keys for. Usually this will only be one.
+fn find_controlled_validator_indices(
+	keystore: &LocalKeystore,
+	validators: &[ValidatorId],
+) -> HashSet<ValidatorIndex> {
+	let mut controlled = HashSet::new();
+	for (index, validator) in validators.iter().enumerate() {
+		if keystore.key_pair::<ValidatorPair>(validator).ok().flatten().is_none() {
+			continue
+		}
+
+		controlled.insert(ValidatorIndex(index as _));
+	}
+
+	controlled
+}
+
+// Returns 'true' if no other vote by that validator was already
+// present and 'false' otherwise. Same semantics as `HashSet`.
+fn insert_into_statements<T>(
+	m: &mut BTreeMap<ValidatorIndex, (T, ValidatorSignature)>,
+	tag: T,
+	val_index: ValidatorIndex,
+	val_signature: ValidatorSignature,
+) -> bool {
+	m.insert(val_index, (tag, val_signature)).is_none()
+}
diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs
index 2dc67608337b..e37459dc5142 100644
--- a/node/core/dispute-coordinator/src/initialized.rs
+++ b/node/core/dispute-coordinator/src/initialized.rs
@@ -16,12 +16,12 @@
 //! Dispute coordinator subsystem in initialized state (after first active leaf is received).
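A brief aside on `insert_into_statements`, which closes `import.rs` above: the `BTreeMap` keeps at most one counted vote per validator index and side, but a later insert still replaces the stored `(kind, signature)` pair. A self-contained illustration of that semantics, using stand-in types `u32`/`&str` instead of the real `ValidatorIndex`/`ValidatorSignature`:

use std::collections::BTreeMap;

fn main() {
	let mut valid: BTreeMap<u32, (&str, &str)> = BTreeMap::new();
	// First vote by validator 7 is fresh -> `is_none()` is true,
	// so the real helper would count it.
	assert!(valid.insert(7, ("ApprovalChecking", "sig-1")).is_none());
	// A second vote by the same validator is not counted again,
	// mirroring `insert_into_statements` returning false...
	assert!(valid.insert(7, ("Explicit", "sig-2")).is_some());
	// ...but note it *replaced* the stored (kind, signature) pair,
	// while the vote count stays at one.
	assert_eq!(valid.len(), 1);
}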
-use std::{ - collections::{BTreeMap, HashSet}, - sync::Arc, -}; +use std::{collections::BTreeMap, sync::Arc}; -use futures::{channel::mpsc, FutureExt, StreamExt}; +use futures::{ + channel::{mpsc, oneshot}, + FutureExt, StreamExt, +}; use sc_keystore::LocalKeystore; @@ -31,8 +31,8 @@ use polkadot_node_primitives::{ }; use polkadot_node_subsystem::{ messages::{ - BlockDescription, DisputeCoordinatorMessage, DisputeDistributionMessage, - ImportStatementsResult, + ApprovalVotingMessage, BlockDescription, DisputeCoordinatorMessage, + DisputeDistributionMessage, ImportStatementsResult, }, overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, }; @@ -40,13 +40,14 @@ use polkadot_node_subsystem_util::rolling_session_window::{ RollingSessionWindow, SessionWindowUpdate, SessionsUnavailable, }; use polkadot_primitives::v2::{ - byzantine_threshold, BlockNumber, CandidateHash, CandidateReceipt, CompactStatement, - DisputeStatement, DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex, SessionInfo, - ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorPair, ValidatorSignature, + BlockNumber, CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement, + DisputeStatementSet, Hash, ScrapedOnChainVotes, SessionIndex, SessionInfo, + ValidDisputeStatementKind, ValidatorId, ValidatorIndex, }; use crate::{ error::{log_error, Error, FatalError, FatalResult, JfyiError, JfyiResult, Result}, + import::{CandidateEnvironment, CandidateVoteState}, metrics::Metrics, status::{get_active_with_status, Clock, DisputeStatus, Timestamp}, DisputeCoordinatorSubsystem, LOG_TARGET, @@ -194,11 +195,13 @@ impl Initialized { } loop { + gum::trace!(target: LOG_TARGET, "Waiting for message"); let mut overlay_db = OverlayedBackend::new(backend); let default_confirm = Box::new(|| Ok(())); let confirm_write = match MuxedMessage::receive(ctx, &mut self.participation_receiver).await? { MuxedMessage::Participation(msg) => { + gum::trace!(target: LOG_TARGET, "MuxedMessage::Participation"); let ParticipationStatement { session, candidate_hash, @@ -206,6 +209,13 @@ impl Initialized { outcome, } = self.participation.get_participation_result(ctx, msg).await?; if let Some(valid) = outcome.validity() { + gum::trace!( + target: LOG_TARGET, + ?session, + ?candidate_hash, + ?valid, + "Issuing local statement based on participation outcome." 
+ ); self.issue_local_statement( ctx, &mut overlay_db, @@ -222,6 +232,7 @@ impl Initialized { MuxedMessage::Subsystem(msg) => match msg { FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { + gum::trace!(target: LOG_TARGET, "OverseerSignal::ActiveLeaves"); self.process_active_leaves_update( ctx, &mut overlay_db, @@ -232,6 +243,7 @@ impl Initialized { default_confirm }, FromOrchestra::Signal(OverseerSignal::BlockFinalized(_, n)) => { + gum::trace!(target: LOG_TARGET, "OverseerSignal::BlockFinalized"); self.scraper.process_finalized_block(&n); default_confirm }, @@ -349,6 +361,12 @@ impl Initialized { for (candidate_receipt, backers) in backing_validators_per_candidate { let relay_parent = candidate_receipt.descriptor.relay_parent; let candidate_hash = candidate_receipt.hash(); + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?relay_parent, + "Importing backing votes from chain for candidate" + ); let statements = backers .into_iter() .filter_map(|(validator_index, attestation)| { @@ -373,6 +391,19 @@ impl Initialized { CompactStatement::Valid(_) => ValidDisputeStatementKind::BackingValid(relay_parent), }; + debug_assert!( + SignedDisputeStatement::new_checked( + DisputeStatement::Valid(valid_statement_kind), + candidate_hash, + session, + validator_public.clone(), + validator_signature.clone(), + ).is_ok(), + "Scraped backing votes had invalid signature! candidate: {:?}, session: {:?}, validator_public: {:?}", + candidate_hash, + session, + validator_public, + ); let signed_dispute_statement = SignedDisputeStatement::new_unchecked_from_trusted_source( DisputeStatement::Valid(valid_statement_kind), @@ -389,7 +420,6 @@ impl Initialized { .handle_import_statements( ctx, overlay_db, - candidate_hash, MaybeCandidateReceipt::Provides(candidate_receipt), session, statements, @@ -412,13 +442,19 @@ impl Initialized { } } - // Import concluded disputes from on-chain, this already went through a vote so it's assumed + // Import disputes from on-chain, this already went through a vote so it's assumed // as verified. This will only be stored, gossiping it is not necessary. // First try to obtain all the backings which ultimately contain the candidate // receipt which we need. for DisputeStatementSet { candidate_hash, session, statements } in disputes { + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + "Importing dispute votes from chain for candidate" + ); let statements = statements .into_iter() .filter_map(|(dispute_statement, validator_index, validator_signature)| { @@ -449,6 +485,21 @@ impl Initialized { }) .cloned()?; + debug_assert!( + SignedDisputeStatement::new_checked( + dispute_statement.clone(), + candidate_hash, + session, + validator_public.clone(), + validator_signature.clone(), + ).is_ok(), + "Scraped dispute votes had invalid signature! 
candidate: {:?}, session: {:?}, dispute_statement: {:?}, validator_public: {:?}", + candidate_hash, + session, + dispute_statement, + validator_public, + ); + Some(( SignedDisputeStatement::new_unchecked_from_trusted_source( dispute_statement, @@ -465,9 +516,8 @@ impl Initialized { .handle_import_statements( ctx, overlay_db, - candidate_hash, // TODO - MaybeCandidateReceipt::AssumeBackingVotePresent, + MaybeCandidateReceipt::AssumeBackingVotePresent(candidate_hash), session, statements, now, @@ -478,13 +528,13 @@ impl Initialized { target: LOG_TARGET, ?candidate_hash, ?session, - "Imported statement of concluded dispute from on-chain" + "Imported statement of dispute from on-chain" ), ImportStatementsResult::InvalidImport => gum::warn!( target: LOG_TARGET, ?candidate_hash, ?session, - "Attempted import of on-chain statement of concluded dispute failed" + "Attempted import of on-chain statement of dispute failed" ), } } @@ -501,17 +551,21 @@ impl Initialized { ) -> Result JfyiResult<()>>> { match message { DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt, session, statements, pending_confirmation, } => { + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?candidate_receipt.hash(), + ?session, + "DisputeCoordinatorMessage::ImportStatements" + ); let outcome = self .handle_import_statements( ctx, overlay_db, - candidate_hash, MaybeCandidateReceipt::Provides(candidate_receipt), session, statements, @@ -537,11 +591,13 @@ impl Initialized { // Return error if session information is missing. self.ensure_available_session_info()?; + gum::trace!(target: LOG_TARGET, "Loading recent disputes from db"); let recent_disputes = if let Some(disputes) = overlay_db.load_recent_disputes()? { disputes } else { BTreeMap::new() }; + gum::trace!(target: LOG_TARGET, "Loaded recent disputes from db"); let _ = tx.send(recent_disputes.keys().cloned().collect()); }, @@ -549,6 +605,8 @@ impl Initialized { // Return error if session information is missing. self.ensure_available_session_info()?; + gum::trace!(target: LOG_TARGET, "DisputeCoordinatorMessage::ActiveDisputes"); + let recent_disputes = if let Some(disputes) = overlay_db.load_recent_disputes()? { disputes } else { @@ -565,6 +623,8 @@ impl Initialized { // Return error if session information is missing. self.ensure_available_session_info()?; + gum::trace!(target: LOG_TARGET, "DisputeCoordinatorMessage::QueryCandidateVotes"); + let mut query_output = Vec::new(); for (session_index, candidate_hash) in query { if let Some(v) = @@ -587,6 +647,7 @@ impl Initialized { candidate_receipt, valid, ) => { + gum::trace!(target: LOG_TARGET, "DisputeCoordinatorMessage::IssueLocalStatement"); self.issue_local_statement( ctx, overlay_db, @@ -605,6 +666,10 @@ impl Initialized { } => { // Return error if session information is missing. self.ensure_available_session_info()?; + gum::trace!( + target: LOG_TARGET, + "DisputeCoordinatorMessage::DetermineUndisputedChain" + ); let undisputed_chain = determine_undisputed_chain( overlay_db, @@ -633,200 +698,185 @@ impl Initialized { &mut self, ctx: &mut Context, overlay_db: &mut OverlayedBackend<'_, impl Backend>, - candidate_hash: CandidateHash, candidate_receipt: MaybeCandidateReceipt, session: SessionIndex, statements: Vec<(SignedDisputeStatement, ValidatorIndex)>, now: Timestamp, ) -> Result { + gum::trace!(target: LOG_TARGET, ?statements, "In handle import statements"); if session + DISPUTE_WINDOW.get() < self.highest_session { // It is not valid to participate in an ancient dispute (spam?). 
return Ok(ImportStatementsResult::InvalidImport) } - let session_info = match self.rolling_session_window.session_info(session) { - None => { - gum::warn!( - target: LOG_TARGET, - session, - "Importing statement lacks info for session which has an active dispute", - ); + let env = + match CandidateEnvironment::new(&*self.keystore, &self.rolling_session_window, session) + { + None => { + gum::warn!( + target: LOG_TARGET, + session, + "We are lacking a `SessionInfo` for handling import of statements." + ); - return Ok(ImportStatementsResult::InvalidImport) - }, - Some(info) => info, - }; - let validators = session_info.validators.clone(); + return Ok(ImportStatementsResult::InvalidImport) + }, + Some(env) => env, + }; - let n_validators = validators.len(); + let candidate_hash = candidate_receipt.hash(); - let supermajority_threshold = - polkadot_primitives::v2::supermajority_threshold(n_validators); + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + num_validators = ?env.session_info().validators.len(), + "Number of validators" + ); // In case we are not provided with a candidate receipt // we operate under the assumption, that a previous vote // which included a `CandidateReceipt` was seen. - // This holds since every block is preceeded by the `Backing`-phase. + // This holds since every block is preceded by the `Backing`-phase. // // There is one exception: A sufficiently sophisticated attacker could prevent - // us from seeing the backing votes by witholding arbitrary blocks, and hence we do + // us from seeing the backing votes by withholding arbitrary blocks, and hence we do // not have a `CandidateReceipt` available. - let (mut votes, mut votes_changed) = match overlay_db + let old_state = match overlay_db .load_candidate_votes(session, &candidate_hash)? .map(CandidateVotes::from) { - Some(votes) => (votes, false), + Some(votes) => CandidateVoteState::new(votes, &env), None => if let MaybeCandidateReceipt::Provides(candidate_receipt) = candidate_receipt { - ( - CandidateVotes { - candidate_receipt, - valid: Vec::new(), - invalid: Vec::new(), - }, - true, - ) + CandidateVoteState::new_from_receipt(candidate_receipt) } else { gum::warn!( target: LOG_TARGET, session, - "Not seen backing vote for candidate which has an active dispute", + ?candidate_hash, + "Cannot import votes, without `CandidateReceipt` available!" 
); return Ok(ImportStatementsResult::InvalidImport) }, }; - let candidate_receipt = votes.candidate_receipt.clone(); - let was_concluded_valid = votes.valid.len() >= supermajority_threshold; - let was_concluded_invalid = votes.invalid.len() >= supermajority_threshold; - - let mut recent_disputes = overlay_db.load_recent_disputes()?.unwrap_or_default(); - let controlled_indices = find_controlled_validator_indices(&self.keystore, &validators); - - // Whether we already cast a vote in that dispute: - let voted_already = { - let mut our_votes = votes.voted_indices(); - our_votes.retain(|index| controlled_indices.contains(index)); - !our_votes.is_empty() - }; - - let was_confirmed = recent_disputes - .get(&(session, candidate_hash)) - .map_or(false, |s| s.is_confirmed_concluded()); - let is_included = self.scraper.is_candidate_included(&candidate_receipt.hash()); + gum::trace!(target: LOG_TARGET, ?candidate_hash, ?session, "Loaded votes"); - let is_local = statements - .iter() - .find(|(_, index)| controlled_indices.contains(index)) - .is_some(); + let import_result = { + let intermediate_result = old_state.import_statements(&env, statements); - // Indexes of the validators issued 'invalid' statements. Will be used to populate spam slots. - let mut fresh_invalid_statement_issuers = Vec::new(); - - // Update candidate votes. - for (statement, val_index) in &statements { - if validators - .get(val_index.0 as usize) - .map_or(true, |v| v != statement.validator_public()) + // Handle approval vote import: + // + // See guide: We import on fresh disputes to maximize likelihood of fetching votes for + // dead forks and once concluded to maximize time for approval votes to trickle in. + if intermediate_result.is_freshly_disputed() || + intermediate_result.is_freshly_concluded() { - gum::debug!( - target: LOG_TARGET, - ?val_index, - session, - claimed_key = ?statement.validator_public(), - "Validator index doesn't match claimed key", + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + "Requesting approval signatures" ); - - continue - } - - match statement.statement() { - DisputeStatement::Valid(valid_kind) => { - let fresh = insert_into_statement_vec( - &mut votes.valid, - *valid_kind, - *val_index, - statement.validator_signature().clone(), - ); - - if !fresh { - continue - } - - votes_changed = true; - self.metrics.on_valid_vote(); - }, - DisputeStatement::Invalid(invalid_kind) => { - let fresh = insert_into_statement_vec( - &mut votes.invalid, - *invalid_kind, - *val_index, - statement.validator_signature().clone(), - ); - - if !fresh { - continue - } - - fresh_invalid_statement_issuers.push(*val_index); - votes_changed = true; - self.metrics.on_invalid_vote(); - }, + let (tx, rx) = oneshot::channel(); + // Use of unbounded channels justified because: + // 1. Only triggered twice per dispute. + // 2. Raising a dispute is costly (requires validation + recovery) by honest nodes, + // dishonest nodes are limited by spam slots. + // 3. Concluding a dispute is even more costly. + // Therefore it is reasonable to expect a simple vote request to succeed way faster + // than disputes are raised. + // 4. We are waiting (and blocking the whole subsystem) on a response right after - + // therefore even with all else failing we will never have more than + // one message in flight at any given time. 
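// The pattern below is a plain oneshot round trip; a minimal, self-contained
// sketch using only `futures` (names here are illustrative, not from this
// codebase):
//
//     let (tx, rx) = futures::channel::oneshot::channel::<u32>();
//     tx.send(42).expect("receiver still alive");
//     // The subsystem awaits the receiver right after sending, so at most
//     // one such request is ever in flight:
//     assert_eq!(futures::executor::block_on(rx), Ok(42));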
+ ctx.send_unbounded_message( + ApprovalVotingMessage::GetApprovalSignaturesForCandidate(candidate_hash, tx), + ); + match rx.await { + Err(_) => { + gum::warn!( + target: LOG_TARGET, + "Fetch for approval votes got cancelled, only expected during shutdown!" + ); + intermediate_result + }, + Ok(votes) => intermediate_result.import_approval_votes(&env, votes), + } + } else { + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + "Not requesting approval signatures" + ); + intermediate_result } - } + }; - // Whether or not we know already that this is a good dispute: - // - // Note we can only know for sure whether we reached the `byzantine_threshold` after - // updating candidate votes above, therefore the spam checking is afterwards: - let is_confirmed = is_included || - was_confirmed || - is_local || votes.voted_indices().len() > - byzantine_threshold(n_validators); + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + num_validators = ?env.session_info().validators.len(), + "Import result ready" + ); + let new_state = import_result.new_state(); + + let is_included = self.scraper.is_candidate_included(&candidate_hash); + + let potential_spam = !is_included && !new_state.is_confirmed() && !new_state.has_own_vote(); + + gum::trace!( + target: LOG_TARGET, + has_own_vote = ?new_state.has_own_vote(), + ?potential_spam, + ?is_included, + ?candidate_hash, + confirmed = ?new_state.is_confirmed(), + has_invalid_voters = ?!import_result.new_invalid_voters().is_empty(), + "Is spam?" + ); + + if !potential_spam { + // Former spammers have not been spammers after all: + self.spam_slots.clear(&(session, candidate_hash)); // Potential spam: - if !is_confirmed && !fresh_invalid_statement_issuers.is_empty() { - let mut free_spam_slots_available = true; - // Only allow import if all validators voting invalid, have not exceeded - // their spam slots: - for index in fresh_invalid_statement_issuers { + } else if !import_result.new_invalid_voters().is_empty() { + let mut free_spam_slots_available = false; + // Only allow import if at least one validator voting invalid has not exceeded + // its spam slots: + for index in import_result.new_invalid_voters() { // Disputes can only be triggered via an invalidity stating vote, thus we only // need to increase spam slots on invalid votes. (If we did not, we would also // increase spam slots for backing validators for example - as validators have to // provide some opposing vote for dispute-distribution). - free_spam_slots_available &= - self.spam_slots.add_unconfirmed(session, candidate_hash, index); + free_spam_slots_available |= + self.spam_slots.add_unconfirmed(session, candidate_hash, *index); } - // Only validity stating votes or validator had free spam slot? if !free_spam_slots_available { gum::debug!( target: LOG_TARGET, ?candidate_hash, ?session, - ?statements, + invalid_voters = ?import_result.new_invalid_voters(), "Rejecting import because of full spam slots." ); return Ok(ImportStatementsResult::InvalidImport) } } - if is_confirmed && !was_confirmed { - // Former spammers have not been spammers after all: - self.spam_slots.clear(&(session, candidate_hash)); - } - - // Check if newly disputed.
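// The `&=` to `|=` flip in the spam-slot loop above is the behavioural
// change here: previously every new invalid voter needed a free spam slot,
// now a single voter with a free slot carries the import. Sketch with
// plain bools:
//
//     let had_free_slot = [false, true, false]; // per new invalid voter
//     let all = had_free_slot.iter().fold(true, |acc, b| acc && *b);  // old `&=`
//     let any = had_free_slot.iter().fold(false, |acc, b| acc || *b); // new `|=`
//     assert!(!all && any); // the old rule would reject this import, the new one accepts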
- let is_disputed = !votes.valid.is_empty() && !votes.invalid.is_empty(); - let concluded_valid = votes.valid.len() >= supermajority_threshold; - let concluded_invalid = votes.invalid.len() >= supermajority_threshold; - - // Participate in dispute if the imported vote was not local, we did not vote before either - // and we actually have keys to issue a local vote. - if !is_local && !voted_already && is_disputed && !controlled_indices.is_empty() { + // Participate in dispute if we did not cast a vote before and actually have keys to cast a + // local vote: + if !new_state.has_own_vote() && + new_state.is_disputed() && + !env.controlled_indices().is_empty() + { let priority = ParticipationPriority::with_priority_if(is_included); gum::trace!( target: LOG_TARGET, - candidate_hash = ?candidate_receipt.hash(), + ?candidate_hash, ?priority, "Queuing participation for candidate" ); @@ -835,22 +885,77 @@ impl Initialized { } else { self.metrics.on_queued_best_effort_participation(); } - // Participate whenever the imported vote was local & we did not had no cast - // previously: let r = self .participation .queue_participation( ctx, priority, - ParticipationRequest::new(candidate_receipt, session, n_validators), + ParticipationRequest::new( + new_state.candidate_receipt().clone(), + session, + env.validators().len(), + ), ) .await; log_error(r)?; } - let prev_status = recent_disputes.get(&(session, candidate_hash)).map(|x| x.clone()); + // Also send any already existing approval vote on new disputes: + if import_result.is_freshly_disputed() { + let no_votes = Vec::new(); + let our_approval_votes = new_state.own_approval_votes().unwrap_or(&no_votes); + for (validator_index, sig) in our_approval_votes { + let pub_key = match env.validators().get(validator_index.0 as usize) { + None => { + gum::error!( + target: LOG_TARGET, + ?validator_index, + ?session, + "Could not find pub key in `SessionInfo` for our own approval vote!" + ); + continue + }, + Some(k) => k, + }; + let statement = SignedDisputeStatement::new_unchecked_from_trusted_source( + DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking), + candidate_hash, + session, + pub_key.clone(), + sig.clone(), + ); + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + ?validator_index, + "Sending out own approval vote" + ); + match make_dispute_message( + env.session_info(), + &new_state.votes(), + statement, + *validator_index, + ) { + Err(err) => { + gum::error!( + target: LOG_TARGET, + ?err, + "No ongoing dispute, but we checked there is one!" + ); + }, + Ok(dispute_message) => { + ctx.send_message(DisputeDistributionMessage::SendDispute(dispute_message)) + .await; + }, + }; + } + } + + // All good, update recent disputes if state has changed: + if import_result.dispute_state_changed() { + let mut recent_disputes = overlay_db.load_recent_disputes()?.unwrap_or_default(); - let status = if is_disputed { let status = recent_disputes.entry((session, candidate_hash)).or_insert_with(|| { gum::info!( target: LOG_TARGET, @@ -861,57 +966,73 @@ impl Initialized { DisputeStatus::active() }); - if is_confirmed { + if new_state.is_confirmed() { *status = status.confirm(); } // Note: concluded-invalid overwrites concluded-valid, // so we do this check first. Dispute state machine is // non-commutative. 
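// Sketch of that non-commutativity (hypothetical, simplified `DisputeStatus`
// without the timestamps the real type carries): "concluded against" must
// win even when both supermajorities are present, which the check order
// below guarantees:
//
//     #[derive(Clone, Copy, Debug, PartialEq)]
//     enum Status { Active, ConcludedFor, ConcludedAgainst }
//     fn concluded_for(s: Status) -> Status {
//         if s == Status::ConcludedAgainst { s } else { Status::ConcludedFor }
//     }
//     fn concluded_against(_: Status) -> Status { Status::ConcludedAgainst }
//     assert_eq!(concluded_against(concluded_for(Status::Active)), Status::ConcludedAgainst);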
- if concluded_valid { + if new_state.is_concluded_valid() { *status = status.concluded_for(now); } - if concluded_invalid { + if new_state.is_concluded_invalid() { *status = status.concluded_against(now); } - Some(*status) - } else { - None - }; - - if status != prev_status { - if prev_status.is_none() { - self.metrics.on_open(); - } - - if !was_concluded_valid && concluded_valid { - gum::info!( - target: LOG_TARGET, - ?candidate_hash, - session, - "Dispute on candidate concluded with 'valid' result", - ); - self.metrics.on_concluded_valid(); - } - - if !was_concluded_invalid && concluded_invalid { - gum::info!( - target: LOG_TARGET, - ?candidate_hash, - session, - "Dispute on candidate concluded with 'invalid' result", - ); - self.metrics.on_concluded_invalid(); - } - - // Only write when updated: + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?status, + is_concluded_valid = ?new_state.is_concluded_valid(), + is_concluded_invalid = ?new_state.is_concluded_invalid(), + "Writing recent disputes with updates for candidate" + ); overlay_db.write_recent_disputes(recent_disputes); } + // Update metrics: + if import_result.is_freshly_disputed() { + self.metrics.on_open(); + } + self.metrics.on_valid_votes(import_result.imported_valid_votes()); + self.metrics.on_invalid_votes(import_result.imported_invalid_votes()); + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + imported_approval_votes = ?import_result.imported_approval_votes(), + imported_valid_votes = ?import_result.imported_valid_votes(), + imported_invalid_votes = ?import_result.imported_invalid_votes(), + total_valid_votes = ?import_result.new_state().votes().valid.len(), + total_invalid_votes = ?import_result.new_state().votes().invalid.len(), + confirmed = ?import_result.new_state().is_confirmed(), + "Import summary" + ); + + self.metrics.on_approval_votes(import_result.imported_approval_votes()); + if import_result.is_freshly_concluded_valid() { + gum::info!( + target: LOG_TARGET, + ?candidate_hash, + session, + "Dispute on candidate concluded with 'valid' result", + ); + self.metrics.on_concluded_valid(); + } + if import_result.is_freshly_concluded_invalid() { + gum::info!( + target: LOG_TARGET, + ?candidate_hash, + session, + "Dispute on candidate concluded with 'invalid' result", + ); + self.metrics.on_concluded_invalid(); + } + // Only write when votes have changed. - if votes_changed { + if let Some(votes) = import_result.into_updated_votes() { overlay_db.write_candidate_votes(session, candidate_hash, votes.into()); } @@ -928,29 +1049,37 @@ impl Initialized { valid: bool, now: Timestamp, ) -> Result<()> { - // Load session info. - let info = match self.rolling_session_window.session_info(session) { - None => { - gum::warn!( - target: LOG_TARGET, - session, - "Missing info for session which has an active dispute", - ); - - return Ok(()) - }, - Some(info) => info, - }; + gum::trace!( + target: LOG_TARGET, + ?candidate_hash, + ?session, + ?valid, + ?now, + "Issuing local statement for candidate!" + ); + // Load environment: + let env = + match CandidateEnvironment::new(&*self.keystore, &self.rolling_session_window, session) + { + None => { + gum::warn!( + target: LOG_TARGET, + session, + "Missing info for session which has an active dispute", + ); - let validators = info.validators.clone(); + return Ok(()) + }, + Some(env) => env, + }; let votes = overlay_db .load_candidate_votes(session, &candidate_hash)? 
.map(CandidateVotes::from) .unwrap_or_else(|| CandidateVotes { candidate_receipt: candidate_receipt.clone(), - valid: Vec::new(), - invalid: Vec::new(), + valid: BTreeMap::new(), + invalid: BTreeMap::new(), }); // Sign a statement for each validator index we control which has @@ -958,8 +1087,7 @@ impl Initialized { let voted_indices = votes.voted_indices(); let mut statements = Vec::new(); - let voted_indices: HashSet<_> = voted_indices.into_iter().collect(); - let controlled_indices = find_controlled_validator_indices(&self.keystore, &validators[..]); + let controlled_indices = env.controlled_indices(); for index in controlled_indices { if voted_indices.contains(&index) { continue @@ -971,13 +1099,13 @@ impl Initialized { valid, candidate_hash, session, - validators[index.0 as usize].clone(), + env.validators()[index.0 as usize].clone(), ) .await; match res { Ok(Some(signed_dispute_statement)) => { - statements.push((signed_dispute_statement, index)); + statements.push((signed_dispute_statement, *index)); }, Ok(None) => {}, Err(e) => { @@ -993,7 +1121,7 @@ impl Initialized { // Get our message out: for (statement, index) in &statements { let dispute_message = - match make_dispute_message(info, &votes, statement.clone(), *index) { + match make_dispute_message(env.session_info(), &votes, statement.clone(), *index) { Err(err) => { gum::debug!(target: LOG_TARGET, ?err, "Creating dispute message failed."); continue @@ -1010,7 +1138,6 @@ impl Initialized { .handle_import_statements( ctx, overlay_db, - candidate_hash, MaybeCandidateReceipt::Provides(candidate_receipt), session, statements, @@ -1066,29 +1193,22 @@ impl MuxedMessage { } } -// Returns 'true' if no other vote by that validator was already -// present and 'false' otherwise. Same semantics as `HashSet`. -fn insert_into_statement_vec<T>( - vec: &mut Vec<(T, ValidatorIndex, ValidatorSignature)>, - tag: T, - val_index: ValidatorIndex, - val_signature: ValidatorSignature, -) -> bool { - let pos = match vec.binary_search_by_key(&val_index, |x| x.1) { - Ok(_) => return false, // no duplicates needed. - Err(p) => p, - }; - - vec.insert(pos, (tag, val_index, val_signature)); - true -} - #[derive(Debug, Clone)] enum MaybeCandidateReceipt { - /// Directly provides the candiate receipt. + /// Directly provides the candidate receipt. Provides(CandidateReceipt), /// Assumes it was seen before by means of seconded message. - AssumeBackingVotePresent, + AssumeBackingVotePresent(CandidateHash), +} + +impl MaybeCandidateReceipt { + /// Retrieve `CandidateHash` for the corresponding candidate. + pub fn hash(&self) -> CandidateHash { + match self { + Self::Provides(receipt) => receipt.hash(), + Self::AssumeBackingVotePresent(hash) => *hash, + } + } } #[derive(Debug, thiserror::Error)] @@ -1113,35 +1233,35 @@ fn make_dispute_message( let (valid_statement, valid_index, invalid_statement, invalid_index) = if let DisputeStatement::Valid(_) = our_vote.statement() { - let (statement_kind, validator_index, validator_signature) = - votes.invalid.get(0).ok_or(DisputeMessageCreationError::NoOppositeVote)?.clone(); + let (validator_index, (statement_kind, validator_signature)) = + votes.invalid.iter().next().ok_or(DisputeMessageCreationError::NoOppositeVote)?; let other_vote = SignedDisputeStatement::new_checked( - DisputeStatement::Invalid(statement_kind), + DisputeStatement::Invalid(*statement_kind), our_vote.candidate_hash().clone(), our_vote.session_index(), validators .get(validator_index.0 as usize) .ok_or(DisputeMessageCreationError::InvalidValidatorIndex)?
.clone(), - validator_signature, + validator_signature.clone(), ) .map_err(|()| DisputeMessageCreationError::InvalidStoredStatement)?; - (our_vote, our_index, other_vote, validator_index) + (our_vote, our_index, other_vote, *validator_index) } else { - let (statement_kind, validator_index, validator_signature) = - votes.valid.get(0).ok_or(DisputeMessageCreationError::NoOppositeVote)?.clone(); + let (validator_index, (statement_kind, validator_signature)) = + votes.valid.iter().next().ok_or(DisputeMessageCreationError::NoOppositeVote)?; let other_vote = SignedDisputeStatement::new_checked( - DisputeStatement::Valid(statement_kind), + DisputeStatement::Valid(*statement_kind), our_vote.candidate_hash().clone(), our_vote.session_index(), validators .get(validator_index.0 as usize) .ok_or(DisputeMessageCreationError::InvalidValidatorIndex)? .clone(), - validator_signature, + validator_signature.clone(), ) .map_err(|()| DisputeMessageCreationError::InvalidStoredStatement)?; - (other_vote, validator_index, our_vote, our_index) + (other_vote, *validator_index, our_vote, our_index) }; DisputeMessage::from_signed_statements( @@ -1155,7 +1275,7 @@ .map_err(DisputeMessageCreationError::InvalidStatementCombination) } -/// Determine the the best block and its block number. +/// Determine the best block and its block number. /// Assumes `block_descriptions` are sorted from the one /// with the lowest `BlockNumber` to the highest. fn determine_undisputed_chain( @@ -1194,19 +1314,3 @@ Ok(last) } - -fn find_controlled_validator_indices( - keystore: &LocalKeystore, - validators: &[ValidatorId], -) -> HashSet<ValidatorIndex> { - let mut controlled = HashSet::new(); - for (index, validator) in validators.iter().enumerate() { - if keystore.key_pair::<ValidatorPair>(validator).ok().flatten().is_none() { - continue - } - - controlled.insert(ValidatorIndex(index as _)); - } - - controlled -} diff --git a/node/core/dispute-coordinator/src/lib.rs b/node/core/dispute-coordinator/src/lib.rs index d04cbf29ca58..03193a9d68ea 100644 --- a/node/core/dispute-coordinator/src/lib.rs +++ b/node/core/dispute-coordinator/src/lib.rs @@ -24,7 +24,7 @@ //! validation results as well as a sink for votes received by other subsystems. When importing a dispute vote from //! another node, this will trigger dispute participation to recover and validate the block. -use std::{collections::HashSet, sync::Arc}; +use std::sync::Arc; use futures::FutureExt; @@ -89,6 +89,9 @@ mod spam_slots; /// if there are lots of them. pub(crate) mod participation; +/// Pure processing of vote imports. +pub(crate) mod import; + /// Metrics types. mod metrics; @@ -302,7 +305,7 @@ impl DisputeCoordinatorSubsystem { }; let n_validators = validators.len(); - let voted_indices: HashSet<_> = votes.voted_indices().into_iter().collect(); + let voted_indices = votes.voted_indices(); // Determine if there are any missing local statements for this dispute. Validators are // filtered if: diff --git a/node/core/dispute-coordinator/src/metrics.rs b/node/core/dispute-coordinator/src/metrics.rs index 40503428c1c8..1fbe7e49e8b8 100644 --- a/node/core/dispute-coordinator/src/metrics.rs +++ b/node/core/dispute-coordinator/src/metrics.rs @@ -22,6 +22,8 @@ struct MetricsInner { open: prometheus::Counter, /// Votes of all disputes. votes: prometheus::CounterVec, + /// Number of approval votes explicitly fetched from approval voting. + approval_votes: prometheus::Counter, /// Conclusion across all disputes.
concluded: prometheus::CounterVec, /// Number of participations that have been queued. @@ -41,15 +43,21 @@ impl Metrics { } } - pub(crate) fn on_valid_vote(&self) { + pub(crate) fn on_valid_votes(&self, vote_count: u32) { if let Some(metrics) = &self.0 { - metrics.votes.with_label_values(&["valid"]).inc(); + metrics.votes.with_label_values(&["valid"]).inc_by(vote_count as _); } } - pub(crate) fn on_invalid_vote(&self) { + pub(crate) fn on_invalid_votes(&self, vote_count: u32) { if let Some(metrics) = &self.0 { - metrics.votes.with_label_values(&["invalid"]).inc(); + metrics.votes.with_label_values(&["invalid"]).inc_by(vote_count as _); + } + } + + pub(crate) fn on_approval_votes(&self, vote_count: u32) { + if let Some(metrics) = &self.0 { + metrics.approval_votes.inc_by(vote_count as _); } } @@ -112,6 +120,13 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + approval_votes: prometheus::register( + prometheus::Counter::with_opts(prometheus::Opts::new( + "polkadot_parachain_dispute_candidate_approval_votes_fetched_total", + "Number of approval votes fetched from approval voting.", + ))?, + registry, + )?, queued_participations: prometheus::register( prometheus::CounterVec::new( prometheus::Opts::new( diff --git a/node/core/dispute-coordinator/src/spam_slots.rs b/node/core/dispute-coordinator/src/spam_slots.rs index 76cae0a72197..c0619bf3a1a5 100644 --- a/node/core/dispute-coordinator/src/spam_slots.rs +++ b/node/core/dispute-coordinator/src/spam_slots.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see <http://www.gnu.org/licenses/>. -use std::collections::{HashMap, HashSet}; +use std::collections::{BTreeSet, HashMap}; use polkadot_primitives::v2::{CandidateHash, SessionIndex, ValidatorIndex}; @@ -54,7 +54,7 @@ pub struct SpamSlots { } /// Unconfirmed disputes to be passed at initialization. -pub type UnconfirmedDisputes = HashMap<(SessionIndex, CandidateHash), HashSet<ValidatorIndex>>; +pub type UnconfirmedDisputes = HashMap<(SessionIndex, CandidateHash), BTreeSet<ValidatorIndex>>; impl SpamSlots { /// Recover `SpamSlots` from state on startup.
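Moving the vote storage from `Vec`/`HashSet` to `BTreeMap`/`BTreeSet` keyed by `ValidatorIndex` buys deduplication for free and a deterministic iteration order, which `make_dispute_message` above relies on when it picks an opposing vote via `iter().next()`. A minimal standard-library sketch of both properties (toy types, purely for illustration):

    use std::collections::BTreeMap;

    // Stand-in for the per-candidate `valid` votes map (u32 for the index,
    // &str for the signature):
    let mut valid: BTreeMap<u32, &str> = BTreeMap::new();
    valid.insert(3, "sig-3");
    valid.insert(1, "sig-1");
    // A repeated vote by validator 3 replaces rather than duplicates:
    assert!(valid.insert(3, "sig-3").is_some());
    // Iteration is ordered by validator index, so `iter().next()` is
    // deterministic - validator 1 here:
    assert_eq!(valid.iter().next(), Some((&1, &"sig-1")));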
diff --git a/node/core/dispute-coordinator/src/tests.rs b/node/core/dispute-coordinator/src/tests.rs index f1dbde642c22..39fdc3a037e5 100644 --- a/node/core/dispute-coordinator/src/tests.rs +++ b/node/core/dispute-coordinator/src/tests.rs @@ -34,14 +34,16 @@ use polkadot_node_subsystem_util::database::Database; use polkadot_node_primitives::{SignedDisputeStatement, SignedFullStatement, Statement}; use polkadot_node_subsystem::{ messages::{ - ChainApiMessage, DisputeCoordinatorMessage, DisputeDistributionMessage, - ImportStatementsResult, + ApprovalVotingMessage, ChainApiMessage, DisputeCoordinatorMessage, + DisputeDistributionMessage, ImportStatementsResult, }, overseer::FromOrchestra, OverseerSignal, }; + use polkadot_node_subsystem_util::TimeoutExt; use sc_keystore::LocalKeystore; +use sp_application_crypto::AppKey; use sp_core::{sr25519::Pair, testing::TaskExecutor, Pair as PairT}; use sp_keyring::Sr25519Keyring; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; @@ -54,9 +56,10 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers::{make_subsystem_context, TestSubsystemContextHandle}; use polkadot_primitives::v2::{ - BlockNumber, CandidateCommitments, CandidateHash, CandidateReceipt, Hash, Header, - MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, SessionInfo, SigningContext, - ValidatorId, ValidatorIndex, + ApprovalVote, BlockNumber, CandidateCommitments, CandidateHash, CandidateReceipt, + DisputeStatement, Hash, Header, MultiDisputeStatementSet, ScrapedOnChainVotes, SessionIndex, + SessionInfo, SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, + ValidatorSignature, }; use crate::{ @@ -311,7 +314,7 @@ impl TestState { tx.send(Ok(Vec::new())).unwrap(); } ); - gum::info!("After answering runtime api request"); + gum::trace!("After answering runtime api request"); assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( @@ -326,7 +329,7 @@ impl TestState { }))).unwrap(); } ); - gum::info!("After answering runtime api request (votes)"); + gum::trace!("After answering runtime API request (votes)"); }, msg => { panic!("Received unexpected message in `handle_sync_queries`: {:?}", msg); @@ -385,12 +388,12 @@ impl TestState { async fn issue_explicit_statement_with_index( &self, - index: usize, + index: ValidatorIndex, candidate_hash: CandidateHash, session: SessionIndex, valid: bool, ) -> SignedDisputeStatement { - let public = self.validator_public[index].clone(); + let public = self.validator_public[index.0 as usize].clone(); let keystore = self.master_keystore.clone() as SyncCryptoStorePtr; @@ -402,12 +405,12 @@ impl TestState { async fn issue_backing_statement_with_index( &self, - index: usize, + index: ValidatorIndex, candidate_hash: CandidateHash, session: SessionIndex, ) -> SignedDisputeStatement { let keystore = self.master_keystore.clone() as SyncCryptoStorePtr; - let validator_id = self.validators[index].public().into(); + let validator_id = self.validators[index.0 as usize].public().into(); let context = SigningContext { session_index: session, parent_hash: Hash::repeat_byte(0xac) }; @@ -415,7 +418,7 @@ impl TestState { &keystore, Statement::Valid(candidate_hash), &context, - ValidatorIndex(index as _), + index, &validator_id, ) .await @@ -426,6 +429,35 @@ impl TestState { SignedDisputeStatement::from_backing_statement(&statement, context, validator_id).unwrap() } + fn issue_approval_vote_with_index( + &self, + index: ValidatorIndex, + candidate_hash: CandidateHash, + 
session: SessionIndex, + ) -> SignedDisputeStatement { + let keystore = self.master_keystore.clone() as SyncCryptoStorePtr; + let validator_id = self.validators[index.0 as usize].public(); + + let payload = ApprovalVote(candidate_hash).signing_payload(session); + let signature = SyncCryptoStore::sign_with( + &*keystore, + ValidatorId::ID, + &validator_id.into(), + &payload[..], + ) + .ok() + .flatten() + .unwrap(); + + SignedDisputeStatement::new_unchecked_from_trusted_source( + DisputeStatement::Valid(ValidDisputeStatementKind::ApprovalChecking), + candidate_hash, + session, + validator_id.into(), + signature.try_into().unwrap(), + ) + } + fn resume<F>(mut self, test: F) -> Self where F: FnOnce(TestState, VirtualOverseer) -> BoxFuture<'static, TestState>, @@ -482,6 +514,24 @@ fn make_invalid_candidate_receipt() -> CandidateReceipt { dummy_candidate_receipt_bad_sig(Default::default(), Some(Default::default())) } +/// Handle request for approval votes: +pub async fn handle_approval_vote_request( + ctx_handle: &mut VirtualOverseer, + expected_hash: &CandidateHash, + votes_to_send: HashMap<ValidatorIndex, ValidatorSignature>, +) { + assert_matches!( + ctx_handle.recv().await, + AllMessages::ApprovalVoting( + ApprovalVotingMessage::GetApprovalSignaturesForCandidate(hash, tx) + ) => { + assert_eq!(&hash, expected_hash); + tx.send(votes_to_send).unwrap(); + }, + "overseer did not receive `GetApprovalSignaturesForCandidate` message.", + ); +} + #[test] fn too_many_unconfirmed_statements_are_considered_spam() { test_harness(|mut test_state, mut virtual_overseer| { @@ -497,24 +547,36 @@ fn too_many_unconfirmed_statements_are_considered_spam() { test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; - let valid_vote1 = - test_state.issue_backing_statement_with_index(3, candidate_hash1, session).await; + let valid_vote1 = test_state + .issue_backing_statement_with_index(ValidatorIndex(3), candidate_hash1, session) + .await; let invalid_vote1 = test_state - .issue_explicit_statement_with_index(1, candidate_hash1, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash1, + session, + false, + ) .await; - let valid_vote2 = - test_state.issue_backing_statement_with_index(3, candidate_hash1, session).await; + let valid_vote2 = test_state + .issue_backing_statement_with_index(ValidatorIndex(3), candidate_hash2, session) + .await; let invalid_vote2 = test_state - .issue_explicit_statement_with_index(1, candidate_hash1, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash2, + session, + false, + ) .await; + gum::trace!("Before sending `ImportStatements`"); virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash: candidate_hash1, candidate_receipt: candidate_receipt1.clone(), session, statements: vec![ @@ -525,6 +587,10 @@ }, }) .await; + gum::trace!("After sending `ImportStatements`"); + + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash1, HashMap::new()) + .await; // Participation has to fail, otherwise the dispute will be confirmed.
participation_missing_availability(&mut virtual_overseer).await; @@ -558,7 +624,6 @@ fn too_many_unconfirmed_statements_are_considered_spam() { virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash: candidate_hash2, candidate_receipt: candidate_receipt2.clone(), session, statements: vec![ @@ -570,6 +635,9 @@ fn too_many_unconfirmed_statements_are_considered_spam() { }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash2, HashMap::new()) + .await; + { let (tx, rx) = oneshot::channel(); virtual_overseer @@ -597,6 +665,99 @@ fn too_many_unconfirmed_statements_are_considered_spam() { }); } +#[test] +fn approval_vote_import_works() { + test_harness(|mut test_state, mut virtual_overseer| { + Box::pin(async move { + let session = 1; + + test_state.handle_resume_sync(&mut virtual_overseer, session).await; + + let candidate_receipt1 = make_valid_candidate_receipt(); + let candidate_hash1 = candidate_receipt1.hash(); + + test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + + let valid_vote1 = test_state + .issue_backing_statement_with_index(ValidatorIndex(3), candidate_hash1, session) + .await; + + let invalid_vote1 = test_state + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash1, + session, + false, + ) + .await; + + let approval_vote = test_state.issue_approval_vote_with_index( + ValidatorIndex(4), + candidate_hash1, + session, + ); + + gum::trace!("Before sending `ImportStatements`"); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt1.clone(), + session, + statements: vec![ + (valid_vote1, ValidatorIndex(3)), + (invalid_vote1, ValidatorIndex(1)), + ], + pending_confirmation: None, + }, + }) + .await; + gum::trace!("After sending `ImportStatements`"); + + let approval_votes = [(ValidatorIndex(4), approval_vote.into_validator_signature())] + .into_iter() + .collect(); + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash1, approval_votes) + .await; + + // Participation has to fail, otherwise the dispute will be confirmed. 
+ participation_missing_availability(&mut virtual_overseer).await; + + { + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ActiveDisputes(tx), + }) + .await; + + assert_eq!(rx.await.unwrap(), vec![(session, candidate_hash1)]); + + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::QueryCandidateVotes( + vec![(session, candidate_hash1)], + tx, + ), + }) + .await; + + let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone(); + assert_eq!(votes.valid.len(), 2); + assert!(votes.valid.get(&ValidatorIndex(4)).is_some(), "Approval vote is missing!"); + assert_eq!(votes.invalid.len(), 1); + } + + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + + // No more messages expected: + assert!(virtual_overseer.try_recv().await.is_none()); + + test_state + }) + }); +} + #[test] fn dispute_gets_confirmed_via_participation() { test_harness(|mut test_state, mut virtual_overseer| { @@ -613,25 +774,44 @@ fn dispute_gets_confirmed_via_participation() { test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; let valid_vote1 = test_state - .issue_explicit_statement_with_index(3, candidate_hash1, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(3), + candidate_hash1, + session, + true, + ) .await; let invalid_vote1 = test_state - .issue_explicit_statement_with_index(1, candidate_hash1, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash1, + session, + false, + ) .await; let valid_vote2 = test_state - .issue_explicit_statement_with_index(3, candidate_hash1, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(3), + candidate_hash2, + session, + true, + ) .await; let invalid_vote2 = test_state - .issue_explicit_statement_with_index(1, candidate_hash1, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash2, + session, + false, + ) .await; virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash: candidate_hash1, candidate_receipt: candidate_receipt1.clone(), session, statements: vec![ @@ -642,6 +822,9 @@ fn dispute_gets_confirmed_via_participation() { }, }) .await; + gum::debug!("After First import!"); + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash1, HashMap::new()) + .await; participation_with_distribution( &mut virtual_overseer, @@ -649,6 +832,7 @@ fn dispute_gets_confirmed_via_participation() { candidate_receipt1.commitments_hash, ) .await; + gum::debug!("After Participation!"); { let (tx, rx) = oneshot::channel(); @@ -679,7 +863,6 @@ fn dispute_gets_confirmed_via_participation() { virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash: candidate_hash2, candidate_receipt: candidate_receipt2.clone(), session, statements: vec![ @@ -690,6 +873,8 @@ fn dispute_gets_confirmed_via_participation() { }, }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash2, HashMap::new()) + .await; participation_missing_availability(&mut virtual_overseer).await; @@ -738,33 +923,62 @@ fn dispute_gets_confirmed_at_byzantine_threshold() { test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; let valid_vote1 = test_state - .issue_explicit_statement_with_index(3, candidate_hash1, session, true) + 
.issue_explicit_statement_with_index( + ValidatorIndex(3), + candidate_hash1, + session, + true, + ) .await; let invalid_vote1 = test_state - .issue_explicit_statement_with_index(1, candidate_hash1, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash1, + session, + false, + ) .await; let valid_vote1a = test_state - .issue_explicit_statement_with_index(4, candidate_hash1, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(4), + candidate_hash1, + session, + true, + ) .await; let invalid_vote1a = test_state - .issue_explicit_statement_with_index(5, candidate_hash1, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(5), + candidate_hash1, + session, + false, + ) .await; let valid_vote2 = test_state - .issue_explicit_statement_with_index(3, candidate_hash1, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(3), + candidate_hash2, + session, + true, + ) .await; let invalid_vote2 = test_state - .issue_explicit_statement_with_index(1, candidate_hash1, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash2, + session, + false, + ) .await; virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash: candidate_hash1, candidate_receipt: candidate_receipt1.clone(), session, statements: vec![ @@ -777,6 +991,8 @@ fn dispute_gets_confirmed_at_byzantine_threshold() { }, }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash1, HashMap::new()) + .await; participation_missing_availability(&mut virtual_overseer).await; @@ -809,7 +1025,6 @@ fn dispute_gets_confirmed_at_byzantine_threshold() { virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash: candidate_hash2, candidate_receipt: candidate_receipt2.clone(), session, statements: vec![ @@ -820,6 +1035,8 @@ fn dispute_gets_confirmed_at_byzantine_threshold() { }, }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash2, HashMap::new()) + .await; participation_missing_availability(&mut virtual_overseer).await; @@ -866,17 +1083,18 @@ fn backing_statements_import_works_and_no_spam() { test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; - let valid_vote1 = - test_state.issue_backing_statement_with_index(3, candidate_hash, session).await; + let valid_vote1 = test_state + .issue_backing_statement_with_index(ValidatorIndex(3), candidate_hash, session) + .await; - let valid_vote2 = - test_state.issue_backing_statement_with_index(4, candidate_hash, session).await; + let valid_vote2 = test_state + .issue_backing_statement_with_index(ValidatorIndex(4), candidate_hash, session) + .await; let (pending_confirmation, confirmation_rx) = oneshot::channel(); virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![ @@ -918,11 +1136,13 @@ fn backing_statements_import_works_and_no_spam() { let candidate_receipt = make_invalid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); - let valid_vote1 = - test_state.issue_backing_statement_with_index(3, candidate_hash, session).await; + let valid_vote1 = test_state + .issue_backing_statement_with_index(ValidatorIndex(3), candidate_hash, session) + .await; - let valid_vote2 = - test_state.issue_backing_statement_with_index(4, candidate_hash, 
session).await; + let valid_vote2 = test_state + .issue_backing_statement_with_index(ValidatorIndex(4), candidate_hash, session) + .await; let (pending_confirmation, confirmation_rx) = oneshot::channel(); // Backing vote import should not have accounted to spam slots, so this should succeed @@ -930,7 +1150,6 @@ fn backing_statements_import_works_and_no_spam() { virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![ @@ -969,21 +1188,35 @@ fn conflicting_votes_lead_to_dispute_participation() { test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; let valid_vote = test_state - .issue_explicit_statement_with_index(3, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(3), + candidate_hash, + session, + true, + ) .await; let invalid_vote = test_state - .issue_explicit_statement_with_index(1, candidate_hash, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash, + session, + false, + ) .await; let invalid_vote_2 = test_state - .issue_explicit_statement_with_index(2, candidate_hash, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(2), + candidate_hash, + session, + false, + ) .await; virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![ @@ -994,6 +1227,8 @@ fn conflicting_votes_lead_to_dispute_participation() { }, }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; participation_with_distribution( &mut virtual_overseer, @@ -1030,7 +1265,6 @@ fn conflicting_votes_lead_to_dispute_participation() { virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![(invalid_vote_2, ValidatorIndex(2))], @@ -1079,17 +1313,26 @@ fn positive_votes_dont_trigger_participation() { test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; let valid_vote = test_state - .issue_explicit_statement_with_index(2, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(2), + candidate_hash, + session, + true, + ) .await; let valid_vote_2 = test_state - .issue_explicit_statement_with_index(1, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash, + session, + true, + ) .await; virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![(valid_vote, ValidatorIndex(2))], @@ -1126,7 +1369,6 @@ fn positive_votes_dont_trigger_participation() { virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![(valid_vote_2, ValidatorIndex(1))], @@ -1184,17 +1426,26 @@ fn wrong_validator_index_is_ignored() { test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; let valid_vote = test_state - .issue_explicit_statement_with_index(2, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(2), + candidate_hash, + session, + true, 
+ ) .await; let invalid_vote = test_state - .issue_explicit_statement_with_index(1, candidate_hash, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash, + session, + false, + ) .await; virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![ @@ -1226,9 +1477,7 @@ fn wrong_validator_index_is_ignored() { }) .await; - let (_, _, votes) = rx.await.unwrap().get(0).unwrap().clone(); - assert!(votes.valid.is_empty()); - assert!(votes.invalid.is_empty()); + assert_matches!(rx.await.unwrap().get(0), None); } virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; @@ -1255,17 +1504,26 @@ fn finality_votes_ignore_disputed_candidates() { test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; let valid_vote = test_state - .issue_explicit_statement_with_index(2, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(2), + candidate_hash, + session, + true, + ) .await; let invalid_vote = test_state - .issue_explicit_statement_with_index(1, candidate_hash, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash, + session, + false, + ) .await; virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![ @@ -1276,6 +1534,8 @@ fn finality_votes_ignore_disputed_candidates() { }, }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; participation_with_distribution( &mut virtual_overseer, @@ -1357,17 +1617,26 @@ fn supermajority_valid_dispute_may_be_finalized() { polkadot_primitives::v2::supermajority_threshold(test_state.validators.len()); let valid_vote = test_state - .issue_explicit_statement_with_index(2, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(2), + candidate_hash, + session, + true, + ) .await; let invalid_vote = test_state - .issue_explicit_statement_with_index(1, candidate_hash, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash, + session, + false, + ) .await; virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![ @@ -1378,6 +1647,8 @@ fn supermajority_valid_dispute_may_be_finalized() { }, }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; participation_with_distribution( &mut virtual_overseer, @@ -1387,9 +1658,14 @@ fn supermajority_valid_dispute_may_be_finalized() { .await; let mut statements = Vec::new(); - for i in (0..supermajority_threshold - 1).map(|i| i + 3) { + for i in (0_u32..supermajority_threshold as u32 - 1).map(|i| i + 3) { let vote = test_state - .issue_explicit_statement_with_index(i, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(i), + candidate_hash, + session, + true, + ) .await; statements.push((vote, ValidatorIndex(i as _))); @@ -1398,7 +1674,6 @@ fn supermajority_valid_dispute_may_be_finalized() { virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, 
statements, @@ -1406,6 +1681,8 @@ fn supermajority_valid_dispute_may_be_finalized() { }, }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; { let (tx, rx) = oneshot::channel(); @@ -1480,17 +1757,26 @@ fn concluded_supermajority_for_non_active_after_time() { polkadot_primitives::v2::supermajority_threshold(test_state.validators.len()); let valid_vote = test_state - .issue_explicit_statement_with_index(2, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(2), + candidate_hash, + session, + true, + ) .await; let invalid_vote = test_state - .issue_explicit_statement_with_index(1, candidate_hash, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash, + session, + false, + ) .await; virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![ @@ -1501,6 +1787,8 @@ fn concluded_supermajority_for_non_active_after_time() { }, }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; participation_with_distribution( &mut virtual_overseer, @@ -1511,9 +1799,14 @@ fn concluded_supermajority_for_non_active_after_time() { let mut statements = Vec::new(); // -2: 1 for already imported vote and one for local vote (which is valid). - for i in (0..supermajority_threshold - 2).map(|i| i + 3) { + for i in (0_u32..supermajority_threshold as u32 - 2).map(|i| i + 3) { let vote = test_state - .issue_explicit_statement_with_index(i, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(i), + candidate_hash, + session, + true, + ) .await; statements.push((vote, ValidatorIndex(i as _))); @@ -1522,7 +1815,6 @@ fn concluded_supermajority_for_non_active_after_time() { virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements, @@ -1530,6 +1822,8 @@ fn concluded_supermajority_for_non_active_after_time() { }, }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; test_state.clock.set(ACTIVE_DURATION_SECS + 1); @@ -1581,18 +1875,27 @@ fn concluded_supermajority_against_non_active_after_time() { polkadot_primitives::v2::supermajority_threshold(test_state.validators.len()); let valid_vote = test_state - .issue_explicit_statement_with_index(2, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(2), + candidate_hash, + session, + true, + ) .await; let invalid_vote = test_state - .issue_explicit_statement_with_index(1, candidate_hash, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash, + session, + false, + ) .await; let (pending_confirmation, confirmation_rx) = oneshot::channel(); virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![ @@ -1603,6 +1906,8 @@ fn concluded_supermajority_against_non_active_after_time() { }, }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; assert_matches!(confirmation_rx.await.unwrap(), ImportStatementsResult::ValidImport => {} ); @@ -1617,9 +1922,14 @@ fn 
concluded_supermajority_against_non_active_after_time() { let mut statements = Vec::new(); // minus 2, because of local vote and one previously imported invalid vote. - for i in (0..supermajority_threshold - 2).map(|i| i + 3) { + for i in (0_u32..supermajority_threshold as u32 - 2).map(|i| i + 3) { let vote = test_state - .issue_explicit_statement_with_index(i, candidate_hash, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(i), + candidate_hash, + session, + false, + ) .await; statements.push((vote, ValidatorIndex(i as _))); @@ -1628,7 +1938,6 @@ fn concluded_supermajority_against_non_active_after_time() { virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements, @@ -1636,6 +1945,8 @@ fn concluded_supermajority_against_non_active_after_time() { }, }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; test_state.clock.set(ACTIVE_DURATION_SECS + 1); @@ -1685,18 +1996,27 @@ fn resume_dispute_without_local_statement() { test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; let valid_vote = test_state - .issue_explicit_statement_with_index(1, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash, + session, + true, + ) .await; let invalid_vote = test_state - .issue_explicit_statement_with_index(2, candidate_hash, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(2), + candidate_hash, + session, + false, + ) .await; let (pending_confirmation, confirmation_rx) = oneshot::channel(); virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![ @@ -1707,6 +2027,8 @@ fn resume_dispute_without_local_statement() { }, }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; // Missing availability -> No local vote. 
participation_missing_availability(&mut virtual_overseer).await; @@ -1748,28 +2070,57 @@ fn resume_dispute_without_local_statement() { .await; let valid_vote0 = test_state - .issue_explicit_statement_with_index(0, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(0), + candidate_hash, + session, + true, + ) .await; let valid_vote3 = test_state - .issue_explicit_statement_with_index(3, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(3), + candidate_hash, + session, + true, + ) .await; let valid_vote4 = test_state - .issue_explicit_statement_with_index(4, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(4), + candidate_hash, + session, + true, + ) .await; let valid_vote5 = test_state - .issue_explicit_statement_with_index(5, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(5), + candidate_hash, + session, + true, + ) .await; let valid_vote6 = test_state - .issue_explicit_statement_with_index(6, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(6), + candidate_hash, + session, + true, + ) .await; let valid_vote7 = test_state - .issue_explicit_statement_with_index(7, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(7), + candidate_hash, + session, + true, + ) .await; virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![ @@ -1784,6 +2135,8 @@ fn resume_dispute_without_local_statement() { }, }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; // Advance the clock far enough so that the concluded dispute will be omitted from an // ActiveDisputes query. 
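The conclusion tests above and below size their vote sets with the threshold helpers from `polkadot_primitives::v2`. A sketch of the arithmetic, assuming the usual definitions rather than quoting the crate:

    // f = maximum tolerated byzantine validators, supermajority = n - f.
    fn byzantine_threshold(n: usize) -> usize {
        n.saturating_sub(1) / 3
    }
    fn supermajority_threshold(n: usize) -> usize {
        n - byzantine_threshold(n)
    }
    // With 7 validators: tolerate 2 byzantine ones, need 5 matching votes
    // for a dispute to conclude.
    assert_eq!(byzantine_threshold(7), 2);
    assert_eq!(supermajority_threshold(7), 5);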
@@ -1823,22 +2176,36 @@ fn resume_dispute_with_local_statement() { test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; let local_valid_vote = test_state - .issue_explicit_statement_with_index(0, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(0), + candidate_hash, + session, + true, + ) .await; let valid_vote = test_state - .issue_explicit_statement_with_index(1, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash, + session, + true, + ) .await; let invalid_vote = test_state - .issue_explicit_statement_with_index(2, candidate_hash, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(2), + candidate_hash, + session, + false, + ) .await; let (pending_confirmation, confirmation_rx) = oneshot::channel(); virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![ @@ -1850,6 +2217,8 @@ fn resume_dispute_with_local_statement() { }, }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; assert_eq!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport)); @@ -1905,18 +2274,27 @@ fn resume_dispute_without_local_statement_or_local_key() { test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; let valid_vote = test_state - .issue_explicit_statement_with_index(1, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash, + session, + true, + ) .await; let invalid_vote = test_state - .issue_explicit_statement_with_index(2, candidate_hash, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(2), + candidate_hash, + session, + false, + ) .await; let (pending_confirmation, confirmation_rx) = oneshot::channel(); virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![ @@ -1927,6 +2305,12 @@ fn resume_dispute_without_local_statement_or_local_key() { }, }) .await; + handle_approval_vote_request( + &mut virtual_overseer, + &candidate_hash, + HashMap::new(), + ) + .await; assert_eq!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport)); @@ -1983,22 +2367,36 @@ fn resume_dispute_with_local_statement_without_local_key() { test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; let local_valid_vote = test_state - .issue_explicit_statement_with_index(0, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(0), + candidate_hash, + session, + true, + ) .await; let valid_vote = test_state - .issue_explicit_statement_with_index(1, candidate_hash, session, true) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash, + session, + true, + ) .await; let invalid_vote = test_state - .issue_explicit_statement_with_index(2, candidate_hash, session, false) + .issue_explicit_statement_with_index( + ValidatorIndex(2), + candidate_hash, + session, + false, + ) .await; let (pending_confirmation, confirmation_rx) = oneshot::channel(); virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![ @@ -2010,6 +2408,8 @@ fn 
resume_dispute_with_local_statement_without_local_key() { }, }) .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; assert_eq!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport)); @@ -2075,14 +2475,18 @@ fn issue_local_statement_does_cause_distribution_but_not_duplicate_participation test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; let other_vote = test_state - .issue_explicit_statement_with_index(1, candidate_hash, session, !validity) + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash, + session, + !validity, + ) .await; let (pending_confirmation, confirmation_rx) = oneshot::channel(); virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![(other_vote, ValidatorIndex(1))], @@ -2116,6 +2520,9 @@ fn issue_local_statement_does_cause_distribution_but_not_duplicate_participation } ); + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; + // Make sure we won't participate: assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none()); @@ -2128,44 +2535,87 @@ fn issue_local_statement_does_cause_distribution_but_not_duplicate_participation } #[test] -fn negative_issue_local_statement_only_triggers_import() { +fn own_approval_vote_gets_distributed_on_dispute() { test_harness(|mut test_state, mut virtual_overseer| { Box::pin(async move { let session = 1; test_state.handle_resume_sync(&mut virtual_overseer, session).await; - let candidate_receipt = make_invalid_candidate_receipt(); + let candidate_receipt = make_valid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; + let statement = test_state.issue_approval_vote_with_index( + ValidatorIndex(0), + candidate_hash, + session, + ); + + // Import our approval vote: virtual_overseer .send(FromOrchestra::Communication { - msg: DisputeCoordinatorMessage::IssueLocalStatement( + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), session, - candidate_hash, - candidate_receipt.clone(), - false, - ), + statements: vec![(statement, ValidatorIndex(0))], + pending_confirmation: None, + }, }) .await; - let backend = DbBackend::new( - test_state.db.clone(), - test_state.config.column_config(), - Metrics::default(), - ); + // Trigger dispute: + let invalid_vote = test_state + .issue_explicit_statement_with_index( + ValidatorIndex(1), + candidate_hash, + session, + false, + ) + .await; + let valid_vote = test_state + .issue_explicit_statement_with_index( + ValidatorIndex(2), + candidate_hash, + session, + true, + ) + .await; - let votes = backend.load_candidate_votes(session, &candidate_hash).unwrap().unwrap(); - assert_eq!(votes.invalid.len(), 1); - assert_eq!(votes.valid.len(), 0); + let (pending_confirmation, confirmation_rx) = oneshot::channel(); + virtual_overseer + .send(FromOrchestra::Communication { + msg: DisputeCoordinatorMessage::ImportStatements { + candidate_receipt: candidate_receipt.clone(), + session, + statements: vec![ + (invalid_vote, ValidatorIndex(1)), + (valid_vote, ValidatorIndex(2)), + ], + pending_confirmation: Some(pending_confirmation), + }, + }) + .await; + handle_approval_vote_request(&mut virtual_overseer, &candidate_hash, HashMap::new()) + .await; - let disputes = 
backend.load_recent_disputes().unwrap(); - assert_eq!(disputes, None); + assert_eq!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport)); - // Assert that subsystem is not participating. - assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none()); + // Dispute distribution should get notified now (without participation, as we already + // have an approval vote): + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::DisputeDistribution( + DisputeDistributionMessage::SendDispute(msg) + ) => { + assert_eq!(msg.session_index(), session); + assert_eq!(msg.candidate_receipt(), &candidate_receipt); + } + ); + + // No participation should occur: + assert_matches!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await, None); virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; assert!(virtual_overseer.try_recv().await.is_none()); @@ -2176,33 +2626,29 @@ fn negative_issue_local_statement_only_triggers_import() { } #[test] -fn empty_import_still_writes_candidate_receipt() { +fn negative_issue_local_statement_only_triggers_import() { test_harness(|mut test_state, mut virtual_overseer| { Box::pin(async move { let session = 1; test_state.handle_resume_sync(&mut virtual_overseer, session).await; - let candidate_receipt = make_valid_candidate_receipt(); + let candidate_receipt = make_invalid_candidate_receipt(); let candidate_hash = candidate_receipt.hash(); test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; - let (tx, rx) = oneshot::channel(); virtual_overseer .send(FromOrchestra::Communication { - msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, - candidate_receipt: candidate_receipt.clone(), + msg: DisputeCoordinatorMessage::IssueLocalStatement( session, - statements: Vec::new(), - pending_confirmation: Some(tx), - }, + candidate_hash, + candidate_receipt.clone(), + false, + ), }) .await; - rx.await.unwrap(); - let backend = DbBackend::new( test_state.db.clone(), test_state.config.column_config(), @@ -2210,9 +2656,14 @@ fn empty_import_still_writes_candidate_receipt() { ); let votes = backend.load_candidate_votes(session, &candidate_hash).unwrap().unwrap(); - assert_eq!(votes.invalid.len(), 0); + assert_eq!(votes.invalid.len(), 1); assert_eq!(votes.valid.len(), 0); - assert_eq!(votes.candidate_receipt, candidate_receipt); + + let disputes = backend.load_recent_disputes().unwrap(); + assert_eq!(disputes, None); + + // Assert that subsystem is not participating. 
+ assert!(virtual_overseer.recv().timeout(TEST_TIMEOUT).await.is_none()); virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; assert!(virtual_overseer.try_recv().await.is_none()); @@ -2235,11 +2686,13 @@ fn redundant_votes_ignored() { test_state.activate_leaf_at_session(&mut virtual_overseer, session, 1).await; - let valid_vote = - test_state.issue_backing_statement_with_index(1, candidate_hash, session).await; + let valid_vote = test_state + .issue_backing_statement_with_index(ValidatorIndex(1), candidate_hash, session) + .await; - let valid_vote_2 = - test_state.issue_backing_statement_with_index(1, candidate_hash, session).await; + let valid_vote_2 = test_state + .issue_backing_statement_with_index(ValidatorIndex(1), candidate_hash, session) + .await; assert!(valid_vote.validator_signature() != valid_vote_2.validator_signature()); @@ -2247,7 +2700,6 @@ fn redundant_votes_ignored() { virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![(valid_vote.clone(), ValidatorIndex(1))], @@ -2262,7 +2714,6 @@ fn redundant_votes_ignored() { virtual_overseer .send(FromOrchestra::Communication { msg: DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt: candidate_receipt.clone(), session, statements: vec![(valid_vote_2, ValidatorIndex(1))], diff --git a/node/core/parachains-inherent/src/lib.rs b/node/core/parachains-inherent/src/lib.rs index f458504a0ef5..af14216749ff 100644 --- a/node/core/parachains-inherent/src/lib.rs +++ b/node/core/parachains-inherent/src/lib.rs @@ -97,7 +97,7 @@ impl ParachainsInherentDataProvider { Err(err) => { gum::debug!( target: LOG_TARGET, - ?err, + %err, "Could not get provisioner inherent data; injecting default data", ); ParachainsInherentData { diff --git a/node/core/provisioner/src/error.rs b/node/core/provisioner/src/error.rs index 4589ab02cf31..05e437854eac 100644 --- a/node/core/provisioner/src/error.rs +++ b/node/core/provisioner/src/error.rs @@ -58,6 +58,9 @@ pub enum Error { #[error("failed to send message to CandidateBacking to get backed candidates")] GetBackedCandidatesSend(#[source] mpsc::SendError), + #[error("Send inherent data timeout.")] + SendInherentDataTimeout, + #[error("failed to send return message with Inherents")] InherentDataReturnChannel, diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index 66602ac60583..58725a7706e3 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -35,7 +35,9 @@ use polkadot_node_subsystem::{ overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, LeafStatus, OverseerSignal, PerLeafSpan, SpawnedSubsystem, SubsystemError, }; -use polkadot_node_subsystem_util::{request_availability_cores, request_persisted_validation_data}; +use polkadot_node_subsystem_util::{ + request_availability_cores, request_persisted_validation_data, TimeoutExt, +}; use polkadot_primitives::v2::{ BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreState, DisputeState, DisputeStatement, DisputeStatementSet, Hash, MultiDisputeStatementSet, OccupiedCoreAssumption, @@ -55,6 +57,8 @@ mod tests; /// How long to wait before proposing. const PRE_PROPOSE_TIMEOUT: std::time::Duration = core::time::Duration::from_millis(2000); +/// Some timeout to ensure task won't hang around in the background forever on issues. 
+const SEND_INHERENT_DATA_TIMEOUT: std::time::Duration = core::time::Duration::from_millis(500); const LOG_TARGET: &str = "parachain::provisioner"; @@ -153,6 +157,12 @@ async fn run_iteration( if let Some(state) = per_relay_parent.get_mut(&hash) { state.is_inherent_ready = true; + gum::trace!( + target: LOG_TARGET, + relay_parent = ?hash, + "Inherent Data became ready" + ); + let return_senders = std::mem::take(&mut state.awaiting_inherent); if !return_senders.is_empty() { send_inherent_data_bg(ctx, &state, return_senders, metrics.clone()).await?; @@ -188,11 +198,19 @@ async fn handle_communication( ) -> Result<(), Error> { match message { ProvisionerMessage::RequestInherentData(relay_parent, return_sender) => { + gum::trace!(target: LOG_TARGET, ?relay_parent, "Inherent data got requested."); + if let Some(state) = per_relay_parent.get_mut(&relay_parent) { if state.is_inherent_ready { + gum::trace!(target: LOG_TARGET, ?relay_parent, "Calling send_inherent_data."); send_inherent_data_bg(ctx, &state, vec![return_sender], metrics.clone()) .await?; } else { + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + "Queuing inherent data request (inherent data not yet ready)." + ); state.awaiting_inherent.push(return_sender); } } @@ -202,6 +220,8 @@ async fn handle_communication( let span = state.span.child("provisionable-data"); let _timer = metrics.time_provisionable_data(); + gum::trace!(target: LOG_TARGET, ?relay_parent, "Received provisionable data."); + note_provisionable_data(state, &span, data); } }, @@ -228,28 +248,42 @@ async fn send_inherent_data_bg( let _span = span; let _timer = metrics.time_request_inherent_data(); - if let Err(err) = send_inherent_data( + gum::trace!( + target: LOG_TARGET, + relay_parent = ?leaf.hash, + "Sending inherent data in background." 
+ ); + + let send_result = send_inherent_data( &leaf, &signed_bitfields, &backed_candidates, return_senders, &mut sender, &metrics, - ) - .await - { - gum::warn!(target: LOG_TARGET, err = ?err, "failed to assemble or send inherent data"); - metrics.on_inherent_data_request(Err(())); - } else { - metrics.on_inherent_data_request(Ok(())); - gum::debug!( - target: LOG_TARGET, - signed_bitfield_count = signed_bitfields.len(), - backed_candidates_count = backed_candidates.len(), - leaf_hash = ?leaf.hash, - "inherent data sent successfully" - ); - metrics.observe_inherent_data_bitfields_count(signed_bitfields.len()); + ) // Make sure call is not taking forever: + .timeout(SEND_INHERENT_DATA_TIMEOUT) + .map(|v| match v { + Some(r) => r, + None => Err(Error::SendInherentDataTimeout), + }); + + match send_result.await { + Err(err) => { + gum::warn!(target: LOG_TARGET, err = ?err, "failed to assemble or send inherent data"); + metrics.on_inherent_data_request(Err(())); + }, + Ok(()) => { + metrics.on_inherent_data_request(Ok(())); + gum::debug!( + target: LOG_TARGET, + signed_bitfield_count = signed_bitfields.len(), + backed_candidates_count = backed_candidates.len(), + leaf_hash = ?leaf.hash, + "inherent data sent successfully" + ); + metrics.observe_inherent_data_bitfields_count(signed_bitfields.len()); + }, } }; @@ -312,12 +346,27 @@ async fn send_inherent_data( from_job: &mut impl overseer::ProvisionerSenderTrait, metrics: &Metrics, ) -> Result<(), Error> { + gum::trace!( + target: LOG_TARGET, + relay_parent = ?leaf.hash, + "Requesting availability cores" + ); let availability_cores = request_availability_cores(leaf.hash, from_job) .await .await .map_err(|err| Error::CanceledAvailabilityCores(err))??; + gum::trace!( + target: LOG_TARGET, + relay_parent = ?leaf.hash, + "Selecting disputes" + ); let disputes = select_disputes(from_job, metrics, leaf).await?; + gum::trace!( + target: LOG_TARGET, + relay_parent = ?leaf.hash, + "Selected disputes" + ); // Only include bitfields on fresh leaves. On chain reversions, we want to make sure that // there will be at least one block, which cannot get disputed, so the chain can make progress. @@ -326,9 +375,21 @@ async fn send_inherent_data( select_availability_bitfields(&availability_cores, bitfields, &leaf.hash), LeafStatus::Stale => Vec::new(), }; + + gum::trace!( + target: LOG_TARGET, + relay_parent = ?leaf.hash, + "Selected bitfields" + ); let candidates = select_candidates(&availability_cores, &bitfields, candidates, leaf.hash, from_job).await?; + gum::trace!( + target: LOG_TARGET, + relay_parent = ?leaf.hash, + "Selected candidates" + ); + gum::debug!( target: LOG_TARGET, availability_cores_len = availability_cores.len(), @@ -342,6 +403,12 @@ async fn send_inherent_data( let inherent_data = ProvisionerInherentData { bitfields, backed_candidates: candidates, disputes }; + gum::trace!( + target: LOG_TARGET, + relay_parent = ?leaf.hash, + "Sending back inherent data to requesters." + ); + for return_sender in return_senders { return_sender .send(inherent_data.clone()) @@ -765,6 +832,12 @@ async fn select_disputes( active }; + gum::trace!( + target: LOG_TARGET, + relay_parent = ?_leaf.hash, + "Request recent disputes" + ); + // We use `RecentDisputes` instead of `ActiveDisputes` because redundancy is fine. // It's heavier than `ActiveDisputes` but ensures that everything from the dispute // window gets on-chain, unlike `ActiveDisputes`. 
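The `timeout` call above comes from the `TimeoutExt` import added earlier in this file; the pattern is simply "turn a hung future into a typed error". A minimal standalone sketch of the same idea, with `Error::SendInherentDataTimeout` being the variant added in `error.rs` above:

```rust
use polkadot_node_subsystem_util::TimeoutExt;

// Sketch: run a fallible future, but fail with a dedicated error instead of
// hanging forever. `TimeoutExt::timeout` resolves to `None` once the duration
// elapses without the inner future completing.
async fn with_send_deadline<F>(fut: F) -> Result<(), Error>
where
	F: std::future::Future<Output = Result<(), Error>>,
{
	match fut.timeout(SEND_INHERENT_DATA_TIMEOUT).await {
		Some(result) => result,
		None => Err(Error::SendInherentDataTimeout),
	}
}
```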
@@ -773,6 +846,18 @@ async fn select_disputes(
	// If the active ones are already exceeding the bounds, randomly select a subset.
	let recent = request_disputes(sender, RequestType::Recent).await;

+	gum::trace!(
+		target: LOG_TARGET,
+		relay_parent = ?_leaf.hash,
+		"Received recent disputes"
+	);
+
+	gum::trace!(
+		target: LOG_TARGET,
+		relay_parent = ?_leaf.hash,
+		"Request on chain disputes"
+	);
+
	// On chain disputes are fetched from the runtime. We want to prioritise the inclusion of unknown
	// disputes in the inherent data. The call relies on staging Runtime API. If the staging API is not
	// enabled in the binary an empty set is generated which doesn't affect the rest of the logic.
@@ -788,6 +873,18 @@ async fn select_disputes(
		},
	};

+	gum::trace!(
+		target: LOG_TARGET,
+		relay_parent = ?_leaf.hash,
+		"Received on chain disputes"
+	);
+
+	gum::trace!(
+		target: LOG_TARGET,
+		relay_parent = ?_leaf.hash,
+		"Filtering disputes"
+	);
+
	let disputes = if recent.len() > MAX_DISPUTES_FORWARDED_TO_RUNTIME {
		gum::warn!(
			target: LOG_TARGET,
@@ -805,20 +902,34 @@ async fn select_disputes(
		recent
	};

+	gum::trace!(
+		target: LOG_TARGET,
+		relay_parent = ?_leaf.hash,
+		"Calling `request_votes`"
+	);
+
	// Load all votes for all disputes from the coordinator.
	let dispute_candidate_votes = request_votes(sender, disputes).await;

+	gum::trace!(
+		target: LOG_TARGET,
+		relay_parent = ?_leaf.hash,
+		"Finished `request_votes`"
+	);
+
	// Transform all `CandidateVotes` into `MultiDisputeStatementSet`.
	Ok(dispute_candidate_votes
		.into_iter()
		.map(|(session_index, candidate_hash, votes)| {
-			let valid_statements =
-				votes.valid.into_iter().map(|(s, i, sig)| (DisputeStatement::Valid(s), i, sig));
+			let valid_statements = votes
+				.valid
+				.into_iter()
+				.map(|(i, (s, sig))| (DisputeStatement::Valid(s), i, sig));

			let invalid_statements = votes
				.invalid
				.into_iter()
-				.map(|(s, i, sig)| (DisputeStatement::Invalid(s), i, sig));
+				.map(|(i, (s, sig))| (DisputeStatement::Invalid(s), i, sig));

			metrics.inc_valid_statements_by(valid_statements.len());
			metrics.inc_invalid_statements_by(invalid_statements.len());
diff --git a/node/core/provisioner/src/tests.rs b/node/core/provisioner/src/tests.rs
index a58e22d7efc2..d0ca425210ed 100644
--- a/node/core/provisioner/src/tests.rs
+++ b/node/core/provisioner/src/tests.rs
@@ -571,8 +571,8 @@ mod select_disputes {
		let mut res = Vec::new();
		let v = CandidateVotes {
			candidate_receipt: test_helpers::dummy_candidate_receipt(leaf.hash.clone()),
-			valid: vec![],
-			invalid: vec![],
+			valid: BTreeMap::new(),
+			invalid: BTreeMap::new(),
		};
		for r in disputes.iter() {
			res.push((r.0, r.1, v.clone()));
diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs
index b839a120bef9..f0cb4fc24ff8 100644
--- a/node/network/approval-distribution/src/lib.rs
+++ b/node/network/approval-distribution/src/lib.rs
@@ -1210,6 +1210,49 @@ impl State {
		}
	}

+	/// Retrieve approval signatures from state for the given relay block/indices:
+	fn get_approval_signatures(
+		&mut self,
+		indices: HashSet<(Hash, CandidateIndex)>,
+	) -> HashMap<ValidatorIndex, ValidatorSignature> {
+		let mut all_sigs = HashMap::new();
+		for (hash, index) in indices {
+			let block_entry = match self.blocks.get(&hash) {
+				None => {
+					gum::debug!(
+						target: LOG_TARGET,
+						?hash,
+						"`get_approval_signatures`: could not find block entry for given hash!"
+ ); + continue + }, + Some(e) => e, + }; + + let candidate_entry = match block_entry.candidates.get(index as usize) { + None => { + gum::debug!( + target: LOG_TARGET, + ?hash, + ?index, + "`get_approval_signatures`: could not find candidate entry for given hash and index!" + ); + continue + }, + Some(e) => e, + }; + let sigs = + candidate_entry.messages.iter().filter_map(|(validator_index, message_state)| { + match &message_state.approval_state { + ApprovalState::Approved(_, sig) => Some((*validator_index, sig.clone())), + ApprovalState::Assigned(_) => None, + } + }); + all_sigs.extend(sigs); + } + all_sigs + } + async fn unify_with_peer( sender: &mut impl overseer::ApprovalDistributionSenderTrait, metrics: &Metrics, @@ -1681,6 +1724,15 @@ impl ApprovalDistribution { .import_and_circulate_approval(ctx, metrics, MessageSource::Local, vote) .await; }, + ApprovalDistributionMessage::GetApprovalSignatures(indices, tx) => { + let sigs = state.get_approval_signatures(indices); + if let Err(_) = tx.send(sigs) { + gum::debug!( + target: LOG_TARGET, + "Sending back approval signatures failed, oneshot got closed" + ); + } + }, } } } diff --git a/node/network/dispute-distribution/src/receiver/mod.rs b/node/network/dispute-distribution/src/receiver/mod.rs index e061e67f4301..9193947e78d1 100644 --- a/node/network/dispute-distribution/src/receiver/mod.rs +++ b/node/network/dispute-distribution/src/receiver/mod.rs @@ -264,10 +264,8 @@ where }; let (pending_confirmation, confirmation_rx) = oneshot::channel(); - let candidate_hash = candidate_receipt.hash(); self.sender .send_message(DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt, session: valid_vote.0.session_index(), statements: vec![valid_vote, invalid_vote], diff --git a/node/network/dispute-distribution/src/sender/mod.rs b/node/network/dispute-distribution/src/sender/mod.rs index 150e79eda108..5312528b413e 100644 --- a/node/network/dispute-distribution/src/sender/mod.rs +++ b/node/network/dispute-distribution/src/sender/mod.rs @@ -231,24 +231,25 @@ impl DisputeSender { Some(votes) => votes, }; - let our_valid_vote = votes.valid.iter().find(|(_, i, _)| *i == our_index); + let our_valid_vote = votes.valid.get(&our_index); - let our_invalid_vote = votes.invalid.iter().find(|(_, i, _)| *i == our_index); + let our_invalid_vote = votes.invalid.get(&our_index); let (valid_vote, invalid_vote) = if let Some(our_valid_vote) = our_valid_vote { // Get some invalid vote as well: let invalid_vote = - votes.invalid.get(0).ok_or(JfyiError::MissingVotesFromCoordinator)?; - (our_valid_vote, invalid_vote) + votes.invalid.iter().next().ok_or(JfyiError::MissingVotesFromCoordinator)?; + ((&our_index, our_valid_vote), invalid_vote) } else if let Some(our_invalid_vote) = our_invalid_vote { // Get some valid vote as well: - let valid_vote = votes.valid.get(0).ok_or(JfyiError::MissingVotesFromCoordinator)?; - (valid_vote, our_invalid_vote) + let valid_vote = + votes.valid.iter().next().ok_or(JfyiError::MissingVotesFromCoordinator)?; + (valid_vote, (&our_index, our_invalid_vote)) } else { // There is no vote from us yet - nothing to do. 
return Ok(()) }; - let (kind, valid_index, signature) = valid_vote; + let (valid_index, (kind, signature)) = valid_vote; let valid_public = info .session_info .validators @@ -263,7 +264,7 @@ impl DisputeSender { ) .map_err(|()| JfyiError::InvalidStatementFromCoordinator)?; - let (kind, invalid_index, signature) = invalid_vote; + let (invalid_index, (kind, signature)) = invalid_vote; let invalid_public = info .session_info .validators diff --git a/node/network/dispute-distribution/src/tests/mod.rs b/node/network/dispute-distribution/src/tests/mod.rs index 8f78b664de82..8ef8286ea197 100644 --- a/node/network/dispute-distribution/src/tests/mod.rs +++ b/node/network/dispute-distribution/src/tests/mod.rs @@ -274,16 +274,19 @@ fn disputes_are_recovered_at_startup() { let unchecked: UncheckedDisputeMessage = message.into(); tx.send(vec![(session_index, candidate_hash, CandidateVotes { candidate_receipt: candidate, - valid: vec![( - unchecked.valid_vote.kind, + valid: [( unchecked.valid_vote.validator_index, + (unchecked.valid_vote.kind, unchecked.valid_vote.signature - )], - invalid: vec![( - unchecked.invalid_vote.kind, + ), + )].into_iter().collect(), + invalid: [( unchecked.invalid_vote.validator_index, + ( + unchecked.invalid_vote.kind, unchecked.invalid_vote.signature - )], + ), + )].into_iter().collect(), })]) .expect("Receiver should stay alive."); } @@ -522,16 +525,15 @@ async fn nested_network_dispute_request<'a, F, O>( handle.recv().await, AllMessages::DisputeCoordinator( DisputeCoordinatorMessage::ImportStatements { - candidate_hash, candidate_receipt, session, statements, pending_confirmation: Some(pending_confirmation), } ) => { + let candidate_hash = candidate_receipt.hash(); assert_eq!(session, MOCK_SESSION_INDEX); assert_eq!(candidate_hash, message.0.candidate_receipt.hash()); - assert_eq!(candidate_hash, candidate_receipt.hash()); assert_eq!(statements.len(), 2); pending_confirmation } diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 43386352b77a..21160bddaecb 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -467,7 +467,6 @@ pub struct Overseer { StatementDistributionMessage, ProvisionerMessage, RuntimeApiMessage, - DisputeCoordinatorMessage, ])] candidate_backing: CandidateBacking, @@ -562,13 +561,13 @@ pub struct Overseer { approval_distribution: ApprovalDistribution, #[subsystem(blocking, ApprovalVotingMessage, sends: [ - RuntimeApiMessage, + ApprovalDistributionMessage, + AvailabilityRecoveryMessage, + CandidateValidationMessage, ChainApiMessage, ChainSelectionMessage, DisputeCoordinatorMessage, - AvailabilityRecoveryMessage, - ApprovalDistributionMessage, - CandidateValidationMessage, + RuntimeApiMessage, ])] approval_voting: ApprovalVoting, @@ -585,6 +584,7 @@ pub struct Overseer { ChainApiMessage, DisputeDistributionMessage, CandidateValidationMessage, + ApprovalVotingMessage, AvailabilityStoreMessage, AvailabilityRecoveryMessage, ])] diff --git a/node/primitives/src/disputes/mod.rs b/node/primitives/src/disputes/mod.rs index 4b2d636dc10e..ec7bb6abc3b7 100644 --- a/node/primitives/src/disputes/mod.rs +++ b/node/primitives/src/disputes/mod.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +use std::collections::{BTreeMap, BTreeSet}; + use parity_scale_codec::{Decode, Encode}; use sp_application_crypto::AppKey; @@ -45,21 +47,23 @@ pub struct CandidateVotes { /// The receipt of the candidate itself. 
 pub candidate_receipt: CandidateReceipt,
 	/// Votes of validity, sorted by validator index.
-	pub valid: Vec<(ValidDisputeStatementKind, ValidatorIndex, ValidatorSignature)>,
+	pub valid: BTreeMap<ValidatorIndex, (ValidDisputeStatementKind, ValidatorSignature)>,
 	/// Votes of invalidity, sorted by validator index.
-	pub invalid: Vec<(InvalidDisputeStatementKind, ValidatorIndex, ValidatorSignature)>,
+	pub invalid: BTreeMap<ValidatorIndex, (InvalidDisputeStatementKind, ValidatorSignature)>,
 }
 
-impl CandidateVotes {
-	/// Get the set of all validators who have votes in the set, ascending.
-	pub fn voted_indices(&self) -> Vec<ValidatorIndex> {
-		let mut v: Vec<_> =
-			self.valid.iter().map(|x| x.1).chain(self.invalid.iter().map(|x| x.1)).collect();
+/// Type alias for retrieving valid votes from `CandidateVotes`
+pub type ValidVoteData = (ValidatorIndex, (ValidDisputeStatementKind, ValidatorSignature));
 
-		v.sort();
-		v.dedup();
+/// Type alias for retrieving invalid votes from `CandidateVotes`
+pub type InvalidVoteData = (ValidatorIndex, (InvalidDisputeStatementKind, ValidatorSignature));
 
-		v
+impl CandidateVotes {
+	/// Get the set of all validators who have votes in the set, ascending.
+	pub fn voted_indices(&self) -> BTreeSet<ValidatorIndex> {
+		let mut keys: BTreeSet<_> = self.valid.keys().cloned().collect();
+		keys.extend(self.invalid.keys().cloned());
+		keys
 	}
 }
 
@@ -162,6 +166,11 @@ impl SignedDisputeStatement {
 		&self.validator_signature
 	}
 
+	/// Consume self to return the signature.
+	pub fn into_validator_signature(self) -> ValidatorSignature {
+		self.validator_signature
+	}
+
 	/// Access the underlying session index.
 	pub fn session_index(&self) -> SessionIndex {
 		self.session_index
diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs
index b79785ce406f..10a5201cc524 100644
--- a/node/subsystem-types/src/messages.rs
+++ b/node/subsystem-types/src/messages.rs
@@ -243,8 +243,6 @@ pub enum DisputeCoordinatorMessage {
 	///
 	/// This does not do any checking of the message signature.
 	ImportStatements {
-		/// The hash of the candidate.
-		candidate_hash: CandidateHash,
 		/// The candidate receipt itself.
 		candidate_receipt: CandidateReceipt,
 		/// The session the candidate appears in.
@@ -275,7 +273,7 @@ pub enum DisputeCoordinatorMessage {
 	/// and which may have already concluded.
 	RecentDisputes(oneshot::Sender<Vec<(SessionIndex, CandidateHash)>>),
 	/// Fetch a list of all active disputes that the coordinator is aware of.
-	/// These disputes are either unconcluded or recently concluded.
+	/// These disputes are either not yet concluded or recently concluded.
 	ActiveDisputes(oneshot::Sender<Vec<(SessionIndex, CandidateHash)>>),
 	/// Get candidate votes for a candidate.
 	QueryCandidateVotes(
@@ -908,6 +906,15 @@ pub enum ApprovalVotingMessage {
 	/// It can also return the same block hash, if that is acceptable to vote upon.
 	/// Return `None` if the input hash is unrecognized.
 	ApprovedAncestor(Hash, BlockNumber, oneshot::Sender<Option<HighestApprovedAncestorBlock>>),
+
+	/// Retrieve all available approval signatures for a candidate from approval-voting.
+	///
+	/// This message involves a linear search for candidates on each relay chain fork and also
+	/// requires calling into `approval-distribution`: Calls should be infrequent and bounded.
+	GetApprovalSignaturesForCandidate(
+		CandidateHash,
+		oneshot::Sender<HashMap<ValidatorIndex, ValidatorSignature>>,
+	),
 }
 
 /// Message to the Approval Distribution subsystem.
@@ -926,6 +933,12 @@ pub enum ApprovalDistributionMessage {
 	/// An update from the network bridge.
 	#[from]
 	NetworkBridgeUpdate(NetworkBridgeEvent<net_protocol::ApprovalDistributionMessage>),
+
+	/// Get all approval signatures for all chains a candidate appeared in.
+	GetApprovalSignatures(
+		HashSet<(Hash, CandidateIndex)>,
+		oneshot::Sender<HashMap<ValidatorIndex, ValidatorSignature>>,
+	),
 }
 
 /// Message to the Gossip Support subsystem.
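For reference, a hedged sketch of how a consumer (such as the dispute coordinator) might drive the new `GetApprovalSignaturesForCandidate` variant; the exact `sender` bound is an assumption, any overseer sender allowed to route `ApprovalVotingMessage` would do:

```rust
use std::collections::HashMap;
use futures::channel::oneshot;

// Sketch: ask approval-voting for all known approval signatures on a candidate.
// An empty map comes back if the candidate is unknown; a dropped sender
// (subsystem shutting down) is treated the same way here.
async fn fetch_approval_signatures<Sender>(
	sender: &mut Sender,
	candidate_hash: CandidateHash,
) -> HashMap<ValidatorIndex, ValidatorSignature>
where
	Sender: overseer::SubsystemSender<ApprovalVotingMessage>,
{
	let (tx, rx) = oneshot::channel();
	sender
		.send_message(ApprovalVotingMessage::GetApprovalSignaturesForCandidate(
			candidate_hash,
			tx,
		))
		.await;
	rx.await.unwrap_or_default()
}
```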
diff --git a/roadmap/implementers-guide/src/node/approval/approval-voting.md b/roadmap/implementers-guide/src/node/approval/approval-voting.md
index adb95e1f6389..2761f21b1c2c 100644
--- a/roadmap/implementers-guide/src/node/approval/approval-voting.md
+++ b/roadmap/implementers-guide/src/node/approval/approval-voting.md
@@ -6,7 +6,7 @@ Approval votes are split into two parts: Assignments and Approvals. Validators f
 
 The core of this subsystem is a Tick-based timer loop, where Ticks are 500ms. We also reason about time in terms of `DelayTranche`s, which measure the number of ticks elapsed since a block was produced. We track metadata for all un-finalized but included candidates. We compute our local assignments to check each candidate, as well as which `DelayTranche` those assignments may be minimally triggered at. As the same candidate may appear in more than one block, we must produce our potential assignments for each (Block, Candidate) pair. The timing loop is based on waiting for assignments to become no-shows or waiting to broadcast and begin our own assignment to check.
 
-Another main component of this subsystem is the logic for determining when a (Block, Candidate) pair has been approved and when to broadcast and trigger our own assignment. Once a (Block, Candidate) pair has been approved, we mark a corresponding bit in the `BlockEntry` that indicates the candidate has been approved under the block. When we trigger our own assignment, we broadcast it via Approval Distribution, begin fetching the data from Availability Recovery, and then pass it through to the Candidate Validation. Once these steps are successful, we issue our approval vote. If any of these steps fail, we don't issue any vote and will "no-show" from the perspective of other validators. In the future we will initiate disputes as well.
+Another main component of this subsystem is the logic for determining when a (Block, Candidate) pair has been approved and when to broadcast and trigger our own assignment. Once a (Block, Candidate) pair has been approved, we mark a corresponding bit in the `BlockEntry` that indicates the candidate has been approved under the block. When we trigger our own assignment, we broadcast it via Approval Distribution, begin fetching the data from Availability Recovery, and then pass it through to the Candidate Validation. Once these steps are successful, we issue our approval vote. If any of these steps fail, we don't issue any vote and will "no-show" from the perspective of other validators. In addition, a dispute is raised via the dispute-coordinator by sending `IssueLocalStatement`.
 
 Where this all fits into Polkadot is via block finality. Our goal is to not finalize any block containing a candidate that is not approved. We provide a hook for a custom GRANDPA voting rule - GRANDPA makes requests of the form (target, minimum) consisting of a target block (i.e. longest chain) that it would like to finalize, and a minimum block which, due to the rules of GRANDPA, must be voted on. The minimum is typically the last finalized block, but may be beyond it, in the case of having a last-round-estimate beyond the last finalized. Thus, our goal is to inform GRANDPA of some block between target and minimum which we believe can be finalized safely. We do this by iterating backwards from the target to the minimum and finding the longest continuous chain from minimum where all candidates included by those blocks have been approved.
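In code, the dispute raised on a failed approval check amounts to a single message to the dispute coordinator; a sketch of that send, matching the `IssueLocalStatement` shape exercised by the tests earlier in this diff (surrounding context assumed):

```rust
// Sketch: approval-voting found the candidate invalid during an approval check,
// so it asks the dispute coordinator to cast an explicit `invalid` vote.
ctx.send_message(DisputeCoordinatorMessage::IssueLocalStatement(
	session,                   // session the candidate appeared in
	candidate_hash,            // candidate that failed validation
	candidate_receipt.clone(), // full receipt, so a dispute can be recorded
	false,                     // `false` == we consider the candidate invalid
))
.await;
```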
@@ -142,7 +142,7 @@ struct State {
 
   // A scheduler which keeps at most one wakeup per hash, candidate hash pair and
   // maps such pairs to `Tick`s.
-  wakeups: Wakeups, 
+  wakeups: Wakeups,
 
   // These are connected to each other.
   background_tx: mpsc::Sender<BackgroundRequest>,
@@ -220,7 +220,6 @@
 On receiving a `CheckAndImportApproval(indirect_approval_vote, response_channel)` message:
   * Fetch the `CandidateEntry` from the indirect approval vote's `candidate_index`. If the block did not trigger inclusion of enough candidates, return `ApprovalCheckResult::Bad`.
   * Construct a `SignedApprovalVote` using the candidate hash and check against the validator's approval key, based on the session info of the block. If invalid or no such validator, return `ApprovalCheckResult::Bad`.
   * Send `ApprovalCheckResult::Accepted`
-  * Dispatch a [`DisputeCoordinatorMessage::ImportStatement`](../../types/overseer-protocol.md#dispute-coordinator-message) with the approval statement.
   * [Import the checked approval vote](#import-checked-approval)
 
 #### `ApprovalVotingMessage::ApprovedAncestor`
@@ -256,7 +255,7 @@
 On receiving an `ApprovedAncestor(Hash, BlockNumber, response_channel)`:
   * Determine if we should trigger our assignment.
     * If we've already triggered or `OurAssignment` is `None`, we do not trigger.
     * If we have `RequiredTranches::All`, then we trigger if the candidate is [not approved](#check-approval). We have no next wakeup as we assume that other validators are doing the same and we will be implicitly woken up by handling new votes.
-    * If we have `RequiredTranches::Pending { considered, next_no_show, uncovered, maximum_broadcast, clock_drift }`, then we trigger if our assignment's tranche is less than or equal to `maximum_broadcast` and the current tick, with `clock_drift` applied, is at least the tick of our tranche. 
+    * If we have `RequiredTranches::Pending { considered, next_no_show, uncovered, maximum_broadcast, clock_drift }`, then we trigger if our assignment's tranche is less than or equal to `maximum_broadcast` and the current tick, with `clock_drift` applied, is at least the tick of our tranche.
     * If we have `RequiredTranches::Exact { .. }` then we do not trigger, because this value indicates that no new assignments are needed at the moment.
   * If we should trigger our assignment
     * Import the assignment to the `ApprovalEntry`
@@ -348,7 +347,7 @@ enum RequiredTranches {
 
 Our vote-counting procedure depends heavily on how we interpret time based on the presence of no-shows - assignments which have no corresponding approval after some time.
 
-We have this is because of how we handle no-shows: we keep track of the depth of no-shows we are covering. 
+We have this because of how we handle no-shows: we keep track of the depth of no-shows we are covering.
 
 As an example: there may be initial no-shows in tranche 0. It'll take `no_show_duration` ticks before those are considered no-shows. Then, we don't want to immediately take `no_show_duration` more tranches. Instead, we want to take one tranche for each uncovered no-show. However, as we take those tranches, there may be further no-shows. Since these depth-1 no-shows should have only been triggered after the depth-0 no-shows were already known to be no-shows, we need to discount the local clock by `no_show_duration` to see whether these should be considered no-shows or not. There may be malicious parties who broadcast their assignment earlier than they were meant to, who shouldn't be counted as instant no-shows.
We continue onwards to cover all depth-1 no-shows which may lead to depth-2 no-shows and so on. @@ -373,8 +372,8 @@ Likewise, when considering how many tranches to take, the no-show depth should b * If we have `3 * n_approvals > n_validators`, return true. This is because any set with f+1 validators must have at least one honest validator, who has approved the candidate. * If `n_tranches` is `RequiredTranches::Pending`, return false * If `n_tranches` is `RequiredTranches::All`, return false. - * If `n_tranches` is `RequiredTranches::Exact { tranche, tolerated_missing, latest_assignment_tick, .. }`, then we return whether all assigned validators up to `tranche` less `tolerated_missing` have approved and `latest_assignment_tick + APPROVAL_DELAY >= tick_now`. - * e.g. if we had 5 tranches and 1 tolerated missing, we would accept only if all but 1 of assigned validators in tranches 0..=5 have approved. In that example, we also accept all validators in tranches 0..=5 having approved, but that would indicate that the `RequiredTranches` value was incorrectly constructed, so it is not realistic. `tolerated_missing` actually represents covered no-shows. If there are more missing approvals than there are tolerated missing, that indicates that there are some assignments which are not yet no-shows, but may become no-shows, and we should wait for the validators to either approve or become no-shows. + * If `n_tranches` is `RequiredTranches::Exact { tranche, tolerated_missing, latest_assignment_tick, .. }`, then we return whether all assigned validators up to `tranche` less `tolerated_missing` have approved and `latest_assignment_tick + APPROVAL_DELAY >= tick_now`. + * e.g. if we had 5 tranches and 1 tolerated missing, we would accept only if all but 1 of assigned validators in tranches 0..=5 have approved. In that example, we also accept all validators in tranches 0..=5 having approved, but that would indicate that the `RequiredTranches` value was incorrectly constructed, so it is not realistic. `tolerated_missing` actually represents covered no-shows. If there are more missing approvals than there are tolerated missing, that indicates that there are some assignments which are not yet no-shows, but may become no-shows, and we should wait for the validators to either approve or become no-shows. * e.g. If the above passes and the `latest_assignment_tick` was 5 and the current tick was 6, then we'd return false. ### Time diff --git a/roadmap/implementers-guide/src/node/backing/candidate-backing.md b/roadmap/implementers-guide/src/node/backing/candidate-backing.md index 6637ef431b62..6c3eace313c3 100644 --- a/roadmap/implementers-guide/src/node/backing/candidate-backing.md +++ b/roadmap/implementers-guide/src/node/backing/candidate-backing.md @@ -94,7 +94,10 @@ match msg { Add `Seconded` statements and `Valid` statements to a quorum. If the quorum reaches a pre-defined threshold, send a [`ProvisionerMessage`][PM]`::ProvisionableData(ProvisionableData::BackedCandidate(CandidateReceipt))` message. `Invalid` statements that conflict with already witnessed `Seconded` and `Valid` statements for the given candidate, statements that are double-votes, self-contradictions and so on, should result in issuing a [`ProvisionerMessage`][PM]`::MisbehaviorReport` message for each newly detected case of this kind. -On each incoming statement, [`DisputeCoordinatorMessage::ImportStatement`][DCM] should be issued. 
+Backing does not need to concern itself with providing statements to the dispute
+coordinator as the dispute coordinator scrapes them from chain. This way the
+import is batched and contains only statements that actually made it on some
+chain.
 
 ### Validating Candidates.
 
diff --git a/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md b/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md
index 52c6f565337e..b35fc0df2ca1 100644
--- a/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md
+++ b/roadmap/implementers-guide/src/node/disputes/dispute-coordinator.md
@@ -1,13 +1,425 @@
 # Dispute Coordinator
 
-The coordinator is the central subsystem of the node-side components which participate in disputes. It
-wraps a database, which used to track all statements observed by _all_ validators over some window
-of sessions. Votes older than this session window are pruned.
-
-This subsystem will be the point which produce dispute votes, either positive or negative, based on
-locally-observed validation results as well as a sink for votes received by other subsystems. When
-a statement import makes it clear that a dispute has been raised (there are opposing votes for a
-candidate), the dispute coordinator will make sure the local node will participate in the dispute.
+The coordinator is the central subsystem of the node-side components which
+participate in disputes. It wraps a database, which is used to track statements
+observed by _all_ validators over some window of sessions. Votes older than this
+session window are pruned.
+
+In particular the dispute-coordinator is responsible for:
+
+- Ensuring that the node is able to raise a dispute in case an invalid candidate
+  is found during approval checking.
+- Ensuring that malicious approval votes will be recorded, so nodes can get
+  slashed properly.
+- Coordinating actual participation in a dispute, ensuring that the node
+  participates in any justified dispute in a way that ensures resolution of
+  disputes on the network even in the case of many disputes raised (flood/DoS
+  scenario).
+- Providing an API for chain selection, so we can prevent finalization of any
+  chain which has included candidates for which a dispute is either ongoing or
+  concluded invalid, and avoid building on chains with an included invalid
+  candidate.
+- Providing an API for retrieving (resolved) disputes, including all votes, both
+  implicit (approval, backing) and explicit dispute votes, so validators can get
+  rewarded/slashed accordingly.
+
+## Ensuring That Disputes Can Be Raised
+
+If a candidate turns out invalid in approval checking, the `approval-voting`
+subsystem will try to issue a dispute. For this, it will send a message
+`DisputeCoordinatorMessage::IssueLocalStatement` to the dispute coordinator,
+indicating to cast an explicit invalid vote. It is the responsibility of the
+dispute coordinator on reception of such a message to create and sign that
+explicit invalid vote and trigger a dispute if none for that candidate is
+already ongoing.
+
+In order to raise a dispute, a node has to be able to provide two opposing votes.
+Given that the point of the backing phase is to have validators with skin in
+the game, the opposing valid vote will very likely be a backing vote. It could
+also be some already cast approval vote, but the significant point here is: As
+long as we have backing votes available, any node will be able to raise a
+dispute.
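Mechanically, raising a dispute is just an `ImportStatements` call carrying a pair of opposing votes; a sketch using the message shape from this PR (variable names are illustrative):

```rust
// Sketch: importing one valid (e.g. backing) and one explicit invalid statement
// for the same candidate is what makes the coordinator record - and act on - a
// dispute. Note: after this PR the candidate hash is derived from the receipt.
let (pending_confirmation, confirmation_rx) = oneshot::channel();
sender
	.send_message(DisputeCoordinatorMessage::ImportStatements {
		candidate_receipt: candidate_receipt.clone(),
		session,
		statements: vec![
			(valid_backing_vote, backer_index),      // the "skin in the game" side
			(explicit_invalid_vote, disputer_index), // the side raising the dispute
		],
		pending_confirmation: Some(pending_confirmation),
	})
	.await;
assert_eq!(confirmation_rx.await, Ok(ImportStatementsResult::ValidImport));
```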
+
+Therefore a vital responsibility of the dispute coordinator is to make sure
+backing votes are available for all candidates that might still get disputed. To
+accomplish this task in an efficient way, the dispute-coordinator relies on
+chain scraping. Whenever a candidate gets backed on chain, we record in chain
+storage the backing votes imported in that block. This way, given the chain
+state for a given relay chain block, we can retrieve via a provided runtime API
+the backing votes imported by that block. The dispute coordinator makes sure to
+query those votes for any non-finalized blocks: In case of missed blocks, it
+will do chain traversal as necessary.
+
+Relying on chain scraping is very efficient for two reasons:
+
+1. Votes are already batched. We import all available backing votes for a
+   candidate all at once. If instead we imported votes from candidate-backing as
+   they came along, we would import each vote individually, which is
+   inefficient in the current dispute coordinator implementation (quadratic
+   complexity).
+2. We also import fewer votes in total, as we avoid importing statements for
+   candidates that never got successfully backed on any chain.
+
+It is also secure, because disputes are only ever raised in the approval voting
+phase. A node only starts the approval process after it has seen a candidate
+included on some chain; for that to happen, it must have been backed previously.
+Therefore backing votes are available at that point in time. Signals are
+processed first, so even if a block is skipped and we only start importing
+backing votes on the including block, we will have seen the backing votes by the
+time we process messages from approval voting.
+
+In summary, for making it possible for a dispute to be raised, recording of
+backing votes from chain is sufficient and efficient. In particular there is no
+need to preemptively import approval votes, which has proven to be a very
+inefficient process (quadratic complexity adds up, with 35 votes in total per
+candidate).
+
+Approval votes are very relevant nonetheless as we are going to see in the next
+section.
+
+## Ensuring Malicious Approval Votes Will Be Recorded
+
+### Ensuring Recording
+
+While there is no need to record approval votes in the dispute coordinator
+preemptively, we do need to make sure they are recorded when a dispute
+actually happens. This is because only votes recorded by the dispute
+coordinator will be considered for slashing. While the backing group always gets
+slashed, a serious attack attempt will likely also consist of malicious approval
+checkers which will cast approval votes, even though the candidate is invalid.
+If we did not import those votes, those nodes would likely cast an `invalid`
+explicit vote as part of the dispute in addition to their approval vote and thus
+avoid a slash. With the 2/3rd honest assumption it seems unrealistic that
+malicious actors will keep sending approval votes once they become aware of a
+raised dispute. Hence the most crucial approval votes to import are the early
+ones (tranche 0). To take network latencies and the like into account, we still
+want to import approval votes at a later point in time as well (in particular,
+we need to make sure the dispute can conclude, but more on that later).
+
+As already mentioned, importing votes is most efficient when batched. At the
+same time approval voting and disputes run concurrently, so approval votes are
+still expected to trickle in while a dispute is already ongoing.
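The batching argument maps directly onto the new `BTreeMap`-based `CandidateVotes` from earlier in this diff; a hedged sketch of what a batched, idempotent import of scraped backing votes could look like:

```rust
// Sketch: merge a batch of backing votes scraped from one block into the stored
// `CandidateVotes`. Keying by `ValidatorIndex` makes duplicates a no-op and
// keeps the set sorted, avoiding the quadratic cost of one-by-one Vec imports.
fn import_scraped_backing_votes(
	votes: &mut CandidateVotes,
	scraped: impl IntoIterator<Item = (ValidatorIndex, ValidDisputeStatementKind, ValidatorSignature)>,
) {
	for (index, kind, signature) in scraped {
		votes.valid.entry(index).or_insert((kind, signature));
	}
}
```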
+
+Hence, we have the following requirements for importing approval votes:
+
+1. Only import them when there is a dispute, because otherwise we are
+   wasting lots of resources _always_ for the exceptional case of a dispute.
+2. Import votes batched when possible, to avoid quadratic import complexity.
+3. Take into account that approval voting is still ongoing while a dispute is
+   already running.
+
+With a design where approval voting sends votes to the dispute-coordinator by
+itself, we would need to make approval voting aware of ongoing disputes; once
+aware, it could start sending all already existing votes batched and then
+trickle in votes as they come. The problem with this is that it adds some
+unnecessary complexity to approval-voting, and we might still import most of
+the votes unbatched, one by one, depending on the point in time the dispute
+was raised.
+
+Instead of the dispute coordinator informing approval-voting of an ongoing
+dispute for it to begin forwarding votes to the dispute coordinator, it makes
+more sense for the dispute-coordinator to just ask approval-voting for votes of
+candidates in dispute. This way, the dispute coordinator can also pick the best
+time for maximizing the number of votes in the batch.
+
+Now the question remains: when should the dispute coordinator ask
+approval-voting for votes? As argued above already, querying approval votes at
+the beginning of the dispute will likely already take care of most malicious
+votes. Still, we would like to have a record of all of them, if possible. So
+what are other points in time we might query approval votes?
+
+In fact, for slashing it is only relevant to have them once the dispute has
+concluded, so we can query approval voting the moment the dispute concludes!
+There are two potential caveats with this though:
+
+1. Timing: We would like to rely as little as possible on implementation details
+   of approval voting. In particular, if the dispute is ongoing for a long time,
+   do we have any guarantees that approval votes are kept around long enough by
+   approval voting? Will approval votes still be present by the time the
+   dispute concludes in all cases? The answer is nuanced, but in general we
+   cannot rely on it. The problem is, first, that finalization and
+   approval-voting are an off-chain process, so there is no global consensus: As
+   soon as at least f+1 honest (f = n/3, where n is the number of
+   validators/nodes) nodes have seen the dispute conclude, finalization will
+   take place and approval votes will be cleared. This would still be fine if
+   we had some guarantees that those honest nodes will be able to include those
+   votes in a block. This guarantee unfortunately does not exist; we will
+   discuss the problem and solutions in more detail [below](#ensuring-chain-import).
+
+   The second problem is that approval-voting will abandon votes as soon as a
+   chain can no longer be finalized (some other/better fork already has been).
+   This second problem can somewhat be mitigated by also importing votes as soon
+   as a dispute is detected, but that does not fully resolve it - the situation
+   remains inherently racy. The problem can be solved in at least two ways:
+   Either go back to fully eager import of approval votes into the
+   dispute-coordinator in some more efficient manner, or change the requirements
+   on approval-voting, making it hold on to votes longer than necessary for
+   approval-voting itself. Conceptually both solutions are equivalent, as we
+   make sure votes are available even without an ongoing dispute.
+   For now, in the interest of time, we punt on this issue: If nodes import
+   votes as soon as a dispute is raised in addition to when it concludes, we
+   have a good chance of getting the relevant votes, and even if not, the
+   fundamental security properties will still hold: Backers are getting
+   slashed, therefore gambler's ruin is maintained. We would still like to fix
+   this at [some point](https://github.com/paritytech/polkadot/issues/5864).
+2. There could be a chicken and egg problem: If we waited with approval vote
+   import until the dispute concludes, we would run into a problem if we needed
+   those approval votes to get enough votes to conclude the dispute in the
+   first place. Luckily it turns out that this is not quite true, or at least
+   can easily be made untrue: As already mentioned, approval voting and
+   disputes not only run concurrently, they race with each other! A node might
+   simultaneously start participating in a dispute via the dispute coordinator,
+   due to learning about a dispute via dispute-distribution, while also
+   participating in approval voting. By distributing our own approval vote we
+   make sure the dispute can conclude regardless of how the race ends (we
+   either participate explicitly anyway, or we have sent our already present
+   approval vote). By importing all approval votes we make it possible to slash
+   malicious approval voters, even if they also cast an invalid explicit vote.
+
+Conclusion: As long as we make sure to also distribute our own approval vote via
+dispute-distribution whenever it gets imported (which would prevent dispute
+participation), disputes can conclude. To mitigate the raciness with
+approval-voting deleting votes, we will import approval votes twice during a
+dispute: once when it is raised, to be as sure as possible to also see approval
+votes for abandoned forks, and a second time when the dispute concludes, to
+maximize the amount of potentially malicious approval votes recorded. This
+obviously does not fully resolve the raciness; [a
+ticket](https://github.com/paritytech/polkadot/issues/5864) exists.
+
+Ensuring vote import on chain is covered in the next section.
+
+As already touched upon: Honest nodes will likely validate twice, once in
+approval voting and once via dispute-participation. Avoiding that does not
+really seem worthwhile though: for one, disputes are exceptional, so a little
+wasted effort won't affect everyday performance - second, even with eager
+importing of approval votes, that doubled work is still present, as disputes
+and approvals are racing. Every time participation is faster than approval, a
+node would do double work.
+
+### Ensuring Chain Import
+
+While in the previous section we discussed means for nodes to ensure relevant
+votes are recorded so attackers get slashed properly, it is crucial to also
+discuss the actual chain import. Only if we guarantee that recorded votes also
+get imported on chain (on all potential chains, really) will we succeed in
+executing slashes. Again, approval votes prove to be our weak spot here, but
+backing votes might get missed as well.
+
+Dispute distribution will make sure all explicit dispute votes get distributed
+among nodes. This includes current block producers (the current authority set),
+which is an important property: If the dispute carries on across an era change,
+we need to ensure that the new validator set will learn about any disputes and
+their votes, so they can put that information on chain.
+Dispute-distribution luckily has this property and always sends votes to the
+current authority set. The issue is that, in dispute-distribution, nodes send
+only their own explicit vote (or in some cases their approval vote) in addition
+to some opposing vote. This guarantees that at least some backing or approval
+vote will be present at the block producer, but we have no 100% guarantee to
+have votes for all backers, even less so for approval checkers.
+
+The reason for backing votes: While backing votes will be present on at least
+some chain, that does not mean that any such chain is still considered for
+block production in the current set - they might only exist on an already
+abandoned fork. This means a block producer that just joined the set might not
+have seen any of them.
+
+For approvals it is even more tricky: Approval voting together with
+finalization is a completely off-chain process, therefore those protocols don't
+care about block production at all. Approval votes only have a guarantee of
+being propagated between the nodes that are responsible for finalizing the
+concerned blocks. This implies that on an era change the current authority set
+will not necessarily get informed about any approval votes for the previous
+era. Hence, even if all validators of the previous era successfully recorded
+all approval votes in the dispute coordinator, they won't get a chance to put
+them on chain, and thus those votes won't be considered for slashing.
+
+It is important to note that the essential properties of the system still hold:
+Dispute-distribution will distribute at _least one_ "valid" vote to the current
+authority set, hence at least one node will get slashed in case of outcome
+"invalid". Also, in reality the validator set is rarely exchanged 100%, so in
+practice some validators in the current authority set will overlap with the
+ones in the previous set and will be able to record votes on chain.
+
+Still, for maximum accountability we need to make sure a previous authority set
+can communicate votes to the next one, regardless of any chain. This is yet to
+be implemented; see section "Resiliency" in dispute-distribution and
+[this](https://github.com/paritytech/polkadot/issues/3398) ticket.
+
+## Coordinating Actual Dispute Participation
+
+Once the dispute coordinator learns about a dispute, it is its responsibility
+to make sure the local node participates in that dispute.
+
+The dispute coordinator learns about a dispute by importing votes from either
+chain scraping or from dispute-distribution. If it finds opposing votes (always
+the case when coming from dispute-distribution), it records the presence of a
+dispute. Then, in case it does not find any local vote for that dispute
+already, it needs to trigger participation in the dispute (see previous section
+for considerations when the found local vote is an approval vote).
+
+Participation means recovering availability and re-evaluating the PoV. The
+result of that validation (either valid or invalid) will be the node's vote on
+that dispute: either explicit "invalid" or "valid". The dispute coordinator
+will inform `dispute-distribution` about our vote, and `dispute-distribution`
+will make sure that our vote gets distributed to all other validators.
+
+Nothing is ever that easy though. We cannot blindly import anything that comes
+along and trigger participation no matter what.
+
+### Spam Considerations
+
+In Polkadot's security model, it is important that attempts to attack the
+system result in a slash of the offenders.
+Therefore we need to make sure that this slash actually happens. Attackers
+could try to prevent the slashing from taking place by overwhelming validators
+with disputes in such a way that no single dispute ever concludes, because
+nodes are busy processing newly incoming ones. Other attacks are imaginable as
+well, like raising disputes for candidates that don't exist, slowly filling up
+everyone's disk, or, worse, making nodes try to participate, which would result
+in lots of network requests for recovering availability.
+
+The last point brings up a significant general consideration: Disputes are
+about escalation - every node will suddenly want to check, instead of only a
+few. A single message will trigger the whole network to start a significant
+amount of work and will cause lots of network traffic and messages. Hence the
+dispute system is very susceptible to being a brutal amplifier for DoS attacks,
+making DoS attacks very easy and cheap if we are not careful.
+
+One countermeasure we are taking is making the raising of disputes a costly
+thing: If you raise a dispute because you claim a candidate is invalid,
+although it is in fact valid, you will get slashed - you pay for consuming
+those resources. The issue is: This only works if the dispute concerns a
+candidate that actually exists!
+
+If a node raises a dispute for a candidate that never got included (became
+available) on any chain, then the dispute can never conclude, hence nobody gets
+slashed. It makes sense to point out that this is less bad than it might sound
+at first, as trying to participate in a dispute for a non-existing candidate is
+"relatively" cheap. Each node will send out a few hundred tiny request messages
+for availability chunks, which will all end up in a tiny "NoSuchChunk" response,
+and then no participation will actually happen as there is nothing to
+participate in. Malicious nodes could provide chunks, which would make things
+more costly, but at the full expense of the attacker's bandwidth - no
+amplification here. I am bringing that up for completeness only: Triggering a
+thousand nodes to send out a thousand tiny network messages by just sending out
+a single garbage message is still a significant amplification and nothing to
+ignore - this could absolutely be used to cause harm!
+
+#### Participation
+
+As explained, just blindly participating in any "dispute" that comes along is
+not a good idea. First, we would like to make sure the dispute is actually
+genuine, to prevent cheap DoS attacks. Secondly, in case of genuine disputes,
+we would like to conclude them one after the other, in contrast to processing
+all of them at the same time, which slows down progress on all of them and in
+the worst case brings individual processing to a complete halt (nodes get
+overwhelmed at some stage in the pipeline).
+
+To ensure we only spend significant work on genuine disputes, we only trigger
+participation at all on any _vote import_ if any of the following holds true
+(a minimal sketch of this gate follows the list):
+
+- We saw the disputed candidate included on at least one fork of the chain.
+- We have "our" availability chunk available for that candidate, as this
+  suggests that either the availability process was at least run (although it
+  might not have succeeded) or we have been a backing node of the candidate. In
+  both cases the candidate is at least not completely made up and some effort
+  has already gone into it.
+- The dispute is already confirmed: Meaning that 1/3+1 nodes already
+  participated, as this suggests in our threat model that there was at least
+  one honest node that already voted, so the dispute must be genuine.
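A minimal sketch of that participation gate, with all inputs assumed to be tracked elsewhere in the coordinator:

```rust
// Sketch: only trigger (potentially expensive) participation on vote import if
// at least one of the cheap authenticity signals from the list above holds.
fn should_trigger_participation(
	seen_included: bool,     // candidate included on at least one fork
	have_own_chunk: bool,    // our availability chunk exists locally
	dispute_confirmed: bool, // 1/3+1 validators already participated
) -> bool {
	seen_included || have_own_chunk || dispute_confirmed
}
```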
+
+With this, nodes won't waste significant resources on completely made-up
+candidates. The next step is to process dispute participation in a (globally)
+ordered fashion, meaning a majority of validators should arrive at least
+roughly at the same ordering of participation, for disputes to get resolved one
+after another. This order is only relevant if there are lots of disputes, so we
+obviously only need to worry about order if participations start queuing up.
+
+We treat participation for candidates that we have seen included with priority
+and put them on a priority queue, which sorts participation first by the block
+number of the relay parent of the candidate and, for candidates with the same
+relay parent height, further by the `CandidateHash`. This ordering is globally
+unique and also prioritizes older candidates.
+
+The latter property makes sense, because if an older candidate turns out
+invalid, we can roll back the full chain at once. If we resolved disputes for
+newer candidates first and they turned out invalid as well, we might need to
+roll back a couple of times instead of just once to the oldest offender. This
+is obviously a good idea, in particular it makes it impossible for an attacker
+to prevent rolling back a very old candidate by continuously raising disputes
+for newer candidates.
+
+For candidates we have not seen included, but for which we have our
+availability piece available, we put participation on a best-effort queue,
+which at the moment is processed on the basis of how often we requested
+participation locally, which equals the number of times we imported votes for
+that dispute. The idea is: if we have not seen the candidate included, but the
+dispute is valid, other nodes will have seen it included - so the more votes
+there are, the more likely it is a valid dispute and we should implicitly
+arrive at a similar ordering as the nodes that are able to sort based on the
+relay parent block height.
+
+#### Import
+
+In the last section we looked at how to queue participations in order to handle
+heavy dispute load well. This already ensures that honest nodes won't amplify
+cheap DoS attacks. There is one minor issue remaining: Even if we delay
+participation until we have some confirmation of the authenticity of the
+dispute, we should also not blindly import all arriving votes into the
+database, as this might be used to just slowly fill up disk space until the
+node is no longer functional. This leads to our last protection mechanism at
+the dispute coordinator level (dispute-distribution also has its own), which is
+spam slots. For each import where we don't know whether it might be spam or
+not, we increment a counter for each signing participant of explicit `invalid`
+votes.
+
+The reason this works is that we only need to worry about actual dispute votes:
+Import of backing votes is already rate limited and concerns only real
+candidates; for approval votes a similar argument holds (if they come from
+approval-voting), but we also don't import them until a dispute has already
+concluded. For actual dispute votes we need two opposing votes, so there must
+be an explicit `invalid` vote in the import. Only a third of the validators can
+be malicious, so spam disk usage is limited to
+`2 * vote_size * n/3 * NUM_SPAM_SLOTS`, with `n` being the number of
+validators.
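+
+To get a feel for that bound, here is a back-of-the-envelope calculation as a
+small Rust program. All concrete numbers (validator count, vote size, slot
+count) are assumptions for illustration only and not the actual constants:
+
+```rust
+fn main() {
+    let n: u64 = 1_000; // number of validators (assumed)
+    let vote_size: u64 = 150; // bytes per stored vote (assumed)
+    let num_spam_slots: u64 = 20; // per-validator unconfirmed-dispute limit (assumed)
+
+    // Two votes per import (the explicit `invalid` vote plus an opposing one),
+    // at most n/3 malicious signers, each capped at `num_spam_slots` disputes:
+    let max_spam_bytes = 2 * vote_size * (n / 3) * num_spam_slots;
+
+    // With the numbers above this is roughly 2 MB - a small, bounded amount.
+    println!("worst-case spam disk usage: {max_spam_bytes} bytes");
+}
+```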
+
+More reasoning behind spam considerations can be found in
+this sr-lab ticket: https://github.com/paritytech/srlabs_findings/issues/179
+
+## Disputes for Non Included Candidates
+
+We only ever care about disputes for candidates that have been included on at
+least some chain (became available). This is because the availability system
+was designed for precisely that: Only with inclusion (availability) do we have
+guarantees that the candidate is actually available, and only then do we have
+guarantees that malicious backers can be reliably checked and slashed. The
+system was also designed so that non-included candidates do not pose any
+threat to the system.
+
+One could think of an (additional) dispute system making it possible to
+dispute any candidate that has been proposed by a validator, no matter whether
+it got successfully included or even backed. Unfortunately, it would be very
+brittle (no availability) and also spam protection would be way harder than
+for the disputes handled by the dispute-coordinator. In fact, all of the spam
+handling strategies described above would simply not be available.
+
+It is worth thinking about who could actually raise such disputes anyway:
+Approval checkers certainly not, as they will only ever check once
+availability succeeded. The only other nodes that meaningfully could/would are
+honest backing nodes or collators. For collators, spam considerations would be
+even worse, as there can be an unlimited number of them and we cannot charge
+them for spam, so trying to handle disputes raised by collators would be even
+more complex. For honest backers: It actually makes more sense for them to
+wait until availability is reached as well, as only then do they have
+guarantees that other nodes will be able to check. If they disputed before,
+all nodes would need to recover the data from them, so they would be an easy
+DoS target.
+
+In summary: The availability system was designed for raising disputes in a
+meaningful and secure way after availability was reached. Trying to raise
+disputes before does not meaningfully contribute to the system's security and
+might even weaken it, as attackers are warned before availability is reached,
+while at the same time adding a significant amount of complexity. We therefore
+punt on such disputes and concentrate on the disputes the system was designed
+to handle.

## Database Schema
@@ -63,117 +475,6 @@ pub enum DisputeStatus {
 }
 ```
-## Internal modules
-Dispute coordinator subsystem includes a few internal modules - `ordering`, `participation` and
-`spam_slots`.
-
-### Ordering
-Ordering module contains two structs - `OrderingProvider` and `CandidateComparator`. The former
-keeps track of included blocks and their ancestors. It also generates `CandidateComparator`
-instances for candidates.
-
-`CandidateComparator` wraps the candidate hash and its parent block number:
-
-```rust
-pub struct CandidateComparator {
-    /// Block number of the relay parent.
-    ///
-    /// Important, so we will be participating in oldest disputes first.
-    ///
-    /// Note: In theory it would make more sense to use the `BlockNumber` of the including
-    /// block, as inclusion time is the actual relevant event when it comes to ordering. 
The - /// problem is, that a candidate can get included multiple times on forks, so the `BlockNumber` - /// of the including block is not unique. We could theoretically work around that problem, by - /// just using the lowest `BlockNumber` of all available including blocks - the problem is, - /// that is not stable. If a new fork appears after the fact, we would start ordering the same - /// candidate differently, which would result in the same candidate getting queued twice. - relay_parent_block_number: BlockNumber, - /// By adding the `CandidateHash`, we can guarantee a unique ordering across candidates. - candidate_hash: CandidateHash, -} -``` - -It also implements `PartialEq`, `Eq`, `PartialOrd` and `Ord` traits enabling comparison operations -with the comparators. - -`Comparator` is used inside `Participation` module as a key for saving `ParticipationRequest`. It -provides the ordering required to process the most important requests first (check the next section -for details). - -### Participation -This module keeps track of the disputes that the node participates in. At most there are -`MAX_PARALLEL_PARTICIPATIONS` parallel participations in the subsystem. The internal state of the -module is: - -```rust -pub struct Participation { - /// Participations currently being processed. - running_participations: HashSet, - /// Priority and best effort queues. - queue: Queues, - /// Sender to be passed to worker tasks. - worker_sender: WorkerMessageSender, - /// Some recent block for retrieving validation code from chain. - recent_block: Option<(BlockNumber, Hash)>, -} -``` -New candidates are processed immediately if the number of running participations is less than -`MAX_PARALLEL_PARTICIPATIONS` or queued for processing otherwise. `Participation` uses another -internal module `Queues` which provides prioritisation of the disputes. It guarantees that important -disputes will be processed first. The actual decision how important is a given dispute is performed -by the `ordering` module. - -The actual participation is performed by `fn participate()`. First it sends -`AvailabilityRecoveryMessage::RecoverAvailableData` to obtain data from the validators. Then gets -the validation code and stores `AvailableData` with `AvailabilityStoreMessage::StoreAvailableData` -message. Finally Participation module performs the actual validation and sends the result as -`WorkerMessage` to the main subsystem (`DisputeCoordinatorSubsystem`). `Participation` generates -messages which `DisputeCoordinatorSubsystem` consumes. You can find more information how these -events are processed in the next section. - -### SpamSlots - -`struct SpamSlots` aims to protect the validator from malicious peers generating erroneous disputes -with the purpose of overloading the validator with unnecessary work. - -How the spam protection works? Each peer validator has got a spam slot for unconfirmed disputes with -fixed size (`MAX_SPAM_VOTES`). Each unconfirmed dispute is added to one such slot until all slots -for the given validator are filled up. At this point statements from this validator for unconfirmed -disputes are ignored. - -What unconfirmed dispute means? Quote from the source code provides an excellent explanation: - -> Node has not seen the candidate be included on any chain, it has not cast a -> vote itself on that dispute, the dispute has not yet reached more than a third of -> validator's votes and the including relay chain block has not yet been finalized. 
-
-`SpamSlots` has got this internal state:
-
-```rust
-pub struct SpamSlots {
-    /// Counts per validator and session.
-    ///
-    /// Must not exceed `MAX_SPAM_VOTES`.
-    slots: HashMap<(SessionIndex, ValidatorIndex), SpamCount>,
-
-    /// All unconfirmed candidates we are aware of right now.
-    unconfirmed: UnconfirmedDisputes,
-}
-```
-
-It's worth noting that `SpamSlots` provides an interface for adding entries (`fn add_unconfirmed()`)
-and removing them (`fn clear()`). The actual spam protection logic resides in the main subsystem, in
-`fn handle_import_statements()`. It is invoked during `DisputeCoordinatorMessage::ImportStatements`
-message handling (there is a dedicated section for it below). Spam slots are indexed by session id
-and validator index. For each such pair there is a limit of active disputes. If this limit is
-reached - the import is ignored.
-
-Spam protection is performed only on invalid vote statements where the concerned candidate is not
-included on any chain, not confirmed, not local and the votes hasn't reached the byzantine
-threshold. This check is performed by `Ordering` module.
-
-Spam slots are cleared when the session window advances so that the `SpamSlots` state doesn't grow
-indefinitely.

## Protocol
Input: [`DisputeCoordinatorMessage`][DisputeCoordinatorMessage]
@@ -252,7 +553,8 @@ Performs cleanup of the finalized candidate.
 Import statements by validators are processed in `fn handle_import_statements()`. The function has
 got three main responsibilities:
-* Initiate participation in disputes.
+* Initiate participation in disputes and send out any existing own approval
+  vote in case of a raised dispute.
 * Persist all fresh votes in the database. Fresh votes in this context means votes that are not
   already processed by the node.
 * Spam protection on all invalid (`DisputeStatement::Invalid`) votes. Please check the SpamSlots
diff --git a/roadmap/implementers-guide/src/node/disputes/dispute-distribution.md b/roadmap/implementers-guide/src/node/disputes/dispute-distribution.md
index 579a1ce8f143..b63ea2bdcbf0 100644
--- a/roadmap/implementers-guide/src/node/disputes/dispute-distribution.md
+++ b/roadmap/implementers-guide/src/node/disputes/dispute-distribution.md
@@ -297,6 +297,10 @@ cases we also want to have covered:
 - Nodes might have missed votes, especially backing or approval votes.
   Recovering them from chain is difficult and expensive, due to runtime
   upgrades and untyped extrinsics.
+- More importantly, on era changes the new authority set, from the perspective
+  of approval-voting, has no need to see "old" approval votes; hence it might
+  not see them, can therefore not import them into the dispute coordinator,
+  and therefore no authority will put them on chain.
 
 To cover those cases, we introduce a second request/response protocol, which
 can be handled on a lower priority basis as the one above. It consists of the