Skip to content

Commit

Permalink
Check block size and retry if too large
Browse files Browse the repository at this point in the history
  • Loading branch information
blckngm authored and jjyr committed Nov 3, 2022
1 parent 7e81328 commit 22700e1
Show file tree
Hide file tree
Showing 2 changed files with 40 additions and 2 deletions.
26 changes: 26 additions & 0 deletions crates/block-producer/src/block_producer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,16 @@ use tracing::instrument;

/// 524_288. We choose this value because it is smaller than MAX_BLOCK_BYTES, which is 597K.
const MAX_ROLLUP_WITNESS_SIZE: usize = 1 << 19;
/// How many extra bytes the rollup WitnessArgs needs compared to the
/// L2Block itself, assuming there are no reverted blocks.
const ROLLUP_WITNESS_OVERHEAD: usize = 48;

pub fn check_block_size(block_size: usize) -> Result<()> {
if block_size >= MAX_ROLLUP_WITNESS_SIZE - ROLLUP_WITNESS_OVERHEAD {
bail!(TransactionSizeError::WitnessTooLarge)
}
Ok(())
}

fn generate_custodian_cells(
rollup_context: &RollupContext,
Expand Down Expand Up @@ -476,3 +486,19 @@ pub enum TransactionSizeError {
#[error("witness too large")]
WitnessTooLarge,
}

/// Verify that `ROLLUP_WITNESS_OVERHEAD` matches the actual byte overhead of
/// wrapping a default `L2Block` into a rollup submit-block WitnessArgs.
#[test]
fn test_witness_size_overhead() {
    let block = L2Block::default();
    // Build the same witness the block producer would submit for this block.
    let action = RollupAction::new_builder()
        .set(
            RollupSubmitBlock::new_builder()
                .block(block.clone())
                .build(),
        )
        .build();
    let witness = WitnessArgs::new_builder()
        .output_type(Some(action.as_bytes()).pack())
        .build();
    let overhead = witness.as_slice().len() - block.as_slice().len();
    assert_eq!(overhead, ROLLUP_WITNESS_OVERHEAD);
}
16 changes: 14 additions & 2 deletions crates/block-producer/src/psc.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ use tracing::instrument;
use tracing_opentelemetry::OpenTelemetrySpanExt;

use crate::{
block_producer::{BlockProducer, ComposeSubmitTxArgs, TransactionSizeError},
block_producer::{check_block_size, BlockProducer, ComposeSubmitTxArgs, TransactionSizeError},
chain_updater::ChainUpdater,
produce_block::ProduceBlockResult,
sync_l1::{revert, sync_l1, SyncL1Context},
Expand Down Expand Up @@ -362,13 +362,25 @@ async fn produce_local_block(ctx: &PSCContext) -> Result<()> {
// quite some pressure on p2p syncing and read-only nodes.
let mut pool = ctx.mem_pool.lock().await;

let mut retry_count = 0;
let ProduceBlockResult {
block,
global_state,
withdrawal_extras,
deposit_cells,
remaining_capacity,
} = ctx.block_producer.produce_next_block(&mut pool, 0).await?;
} = loop {
let result = ctx
.block_producer
.produce_next_block(&mut pool, retry_count)
.await?;

if check_block_size(result.block.as_slice().len()).is_ok() {
break result;
}
retry_count += 1;
log::warn!("block too large, retry {retry_count}");
};

let number: u64 = block.raw().number().unpack();
let block_hash: H256 = block.hash().into();
Expand Down

0 comments on commit 22700e1

Please sign in to comment.