
Fix transaction pool & network issues #6288

Merged (3 commits, Jun 8, 2020)
4 changes: 3 additions & 1 deletion client/network/src/protocol.rs
@@ -78,7 +78,9 @@ const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900);
/// Maximim number of known block hashes to keep for a peer.
const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead
/// Maximim number of known extrinsic hashes to keep for a peer.
-const MAX_KNOWN_EXTRINSICS: usize = 4096; // ~128kb per peer + overhead
+///
+/// This should be approx. 2 blocks full of transactions for the network to function properly.
+const MAX_KNOWN_EXTRINSICS: usize = 10240; // ~300kb per peer + overhead.

Contributor:
how about chains with longer block time / larger tx per block?

Contributor Author:
@xlc let's make an issue for that

This should not be a const but rather some sort of configuration (runtime/cli or deduced from other values)

Member:
> larger tx per block

It only stores the hash of a transaction.

Contributor:
I mean larger number of tx per block
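
To make the suggestion in this thread concrete, here is a minimal, hypothetical sketch of deriving the limit from chain parameters instead of hard-coding it. `TxPropagationConfig` and its fields are invented for illustration and are not part of Substrate or this PR; the numbers simply mirror the "approx. 2 blocks full of transactions" comment in the diff, assuming 32-byte hashes.

```rust
/// Hypothetical configuration: none of these types or fields exist in
/// Substrate; they only sketch how MAX_KNOWN_EXTRINSICS could be derived
/// from chain parameters instead of being a hard-coded const.
struct TxPropagationConfig {
    /// Rough upper bound on the number of transactions in a full block.
    expected_txs_per_block: usize,
    /// How many blocks' worth of transaction hashes to remember per peer.
    block_window: usize,
}

impl TxPropagationConfig {
    /// Per-peer cap on known extrinsic hashes, with a floor so small chains
    /// still track a reasonable number. At 32 bytes per hash this costs
    /// roughly `cap * 32` bytes per peer, plus LruHashSet overhead.
    fn max_known_extrinsics(&self) -> usize {
        (self.expected_txs_per_block * self.block_window).max(1024)
    }
}

fn main() {
    // A chain expecting ~5120 transactions per block with a two-block window
    // reproduces the 10240 value chosen in this PR (~320kb of hashes per peer).
    let cfg = TxPropagationConfig { expected_txs_per_block: 5120, block_window: 2 };
    assert_eq!(cfg.max_known_extrinsics(), 10240);
}
```

A CLI- or runtime-supplied value along these lines would let chains with longer block times or larger blocks raise the cap without patching the network protocol.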


/// Maximim number of transaction validation request we keep at any moment.
const MAX_PENDING_TRANSACTIONS: usize = 8192;
38 changes: 30 additions & 8 deletions client/transaction-pool/src/lib.rs
@@ -34,8 +34,8 @@ pub mod testing;
pub use sc_transaction_graph as txpool;
pub use crate::api::{FullChainApi, LightChainApi};

-use std::{collections::HashMap, sync::Arc, pin::Pin};
-use futures::{prelude::*, future::{ready, self}, channel::oneshot};
+use std::{collections::{HashMap, HashSet}, sync::Arc, pin::Pin};
+use futures::{prelude::*, future::{self, ready}, channel::oneshot};
use parking_lot::Mutex;

use sp_runtime::{
@@ -47,7 +47,7 @@ use sp_transaction_pool::{
TransactionStatusStreamFor, MaintainedTransactionPool, PoolFuture, ChainEvent,
TransactionSource,
};
-use sc_transaction_graph::ChainApi;
+use sc_transaction_graph::{ChainApi, ExtrinsicHash};
use wasm_timer::Instant;

use prometheus_endpoint::Registry as PrometheusRegistry;
@@ -440,10 +440,10 @@ async fn prune_known_txs_for_block<Block: BlockT, Api: ChainApi<Block = Block>>(
block_id: BlockId<Block>,
api: &Api,
pool: &sc_transaction_graph::Pool<Api>,
-) {
+) -> Vec<ExtrinsicHash<Api>> {
// We don't query block if we won't prune anything
if pool.validated_pool().status().is_empty() {
-return;
+return Vec::new();
}

let hashes = api.block_body(&block_id).await
@@ -459,6 +459,8 @@ async fn prune_known_txs_for_block<Block: BlockT, Api: ChainApi<Block = Block>>(
if let Err(e) = pool.prune_known(&block_id, &hashes) {
log::error!("Cannot prune known in the pool {:?}!", e);
}

hashes
}

impl<PoolApi, Block> MaintainedTransactionPool for BasicPool<PoolApi, Block>
@@ -495,6 +497,10 @@ impl<PoolApi, Block> MaintainedTransactionPool for BasicPool<PoolApi, Block>
let ready_poll = self.ready_poll.clone();

async move {
// We keep track of everything we prune so that later we won't add
// transactions with those hashes from the retracted blocks.
let mut pruned_log = HashSet::<ExtrinsicHash<PoolApi>>::new();

// If there is a tree route, we use this to prune known tx based on the enacted
// blocks.
if let Some(ref tree_route) = tree_route {
@@ -509,12 +515,14 @@ impl<PoolApi, Block> MaintainedTransactionPool for BasicPool<PoolApi, Block>
&*pool,
),
),
-).await;
+).await.into_iter().for_each(|enacted_log|{
+	pruned_log.extend(enacted_log);
+})
}

// If this is a new best block, we need to prune its transactions from the pool.
if is_new_best {
-prune_known_txs_for_block(id.clone(), &*api, &*pool).await;
+pruned_log.extend(prune_known_txs_for_block(id.clone(), &*api, &*pool).await);
}

if let (true, Some(tree_route)) = (next_action.resubmit, tree_route) {
@@ -536,7 +544,21 @@ impl<PoolApi, Block> MaintainedTransactionPool for BasicPool<PoolApi, Block>
.into_iter()
.filter(|tx| tx.is_signed().unwrap_or(true));

-resubmit_transactions.extend(block_transactions);
+resubmit_transactions.extend(
+	block_transactions.into_iter().filter(|tx| {
+		let tx_hash = pool.hash_of(&tx);
+		let contains = pruned_log.contains(&tx_hash);
+		if !contains {
+			log::debug!(
+				target: "txpool",
+				"[{:?}]: Resubmitting from retracted block {:?}",
+				tx_hash,
+				hash,
+			);
+		}
+		!contains
+	})
+);
}

if let Err(e) = pool.submit_at(
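For readers skimming the diff, here is a self-contained sketch of the idea behind the change above, using plain std types and a toy hash in place of the pool's real `ExtrinsicHash` and `pool.hash_of`: hashes pruned while processing the enacted blocks are collected into a set, and any retracted-block transaction whose hash is already in that set is skipped during resubmission.

```rust
use std::collections::HashSet;

/// Toy stand-in for the pool's extrinsic hash; the real code uses
/// `ExtrinsicHash<Api>` values produced by `pool.hash_of(&tx)`.
fn toy_hash(tx: &str) -> u64 {
    tx.bytes().fold(0u64, |acc, b| acc.wrapping_mul(31).wrapping_add(b as u64))
}

/// Keep only the retracted-block transactions whose hash was not already
/// pruned while processing the enacted blocks.
fn filter_resubmit<'a>(retracted: &[&'a str], pruned_log: &HashSet<u64>) -> Vec<&'a str> {
    retracted
        .iter()
        .copied()
        .filter(|tx| !pruned_log.contains(&toy_hash(tx)))
        .collect()
}

fn main() {
    // "transfer A" was included in the enacted fork, so pruning recorded its hash.
    let pruned_log: HashSet<u64> = [toy_hash("transfer A")].into_iter().collect();
    // The retracted block contained both transactions.
    let retracted = ["transfer A", "transfer B"];
    // Only "transfer B" goes back into the pool for resubmission.
    assert_eq!(filter_resubmit(&retracted, &pruned_log), vec!["transfer B"]);
}
```

The new test added below in client/transaction-pool/src/testing/pool.rs exercises exactly this case: the same transaction appears in both the enacted and the retracted fork, and the pool ends up with zero ready transactions after maintenance.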
19 changes: 19 additions & 0 deletions client/transaction-pool/src/testing/pool.rs
@@ -281,6 +281,25 @@ fn should_resubmit_from_retracted_during_maintenance() {
assert_eq!(pool.status().ready, 1);
}


#[test]
fn should_not_resubmit_from_retracted_during_maintenance_if_tx_is_also_in_enacted() {
let xt = uxt(Alice, 209);

let (pool, _guard, _notifier) = maintained_pool();

block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported");
assert_eq!(pool.status().ready, 1);

let header = pool.api.push_block(1, vec![xt.clone()]);
let fork_header = pool.api.push_block(1, vec![xt]);

let event = block_event_with_retracted(header, fork_header.hash(), &*pool.api);

block_on(pool.maintain(event));
assert_eq!(pool.status().ready, 0);
}

#[test]
fn should_not_retain_invalid_hashes_from_retracted() {
let xt = uxt(Alice, 209);