diff --git a/Cargo.lock b/Cargo.lock
index 4638987fbe9..5dc81bca6f8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3671,6 +3671,7 @@ dependencies = [
  "serde_json",
  "strum",
  "thiserror",
+ "tracing",
  "yansi",
 ]
 
diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs
index 682c3e86932..bc3d7c08fc7 100644
--- a/chain/chain/src/chain.rs
+++ b/chain/chain/src/chain.rs
@@ -97,7 +97,7 @@ use std::collections::{HashMap, HashSet};
 use std::fmt::{Debug, Formatter};
 use std::sync::Arc;
 use std::time::{Duration as TimeDuration, Instant};
-use tracing::{debug, error, info, warn, Span};
+use tracing::{debug, debug_span, error, info, warn, Span};
 
 /// Maximum number of orphans chain can store.
 pub const MAX_ORPHAN_SIZE: usize = 1024;
@@ -358,6 +358,15 @@ pub struct BlockMissingChunks {
     pub missing_chunks: Vec<ShardChunkHeader>,
 }
 
+impl Debug for BlockMissingChunks {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("BlockMissingChunks")
+            .field("prev_hash", &self.prev_hash)
+            .field("num_missing_chunks", &self.missing_chunks.len())
+            .finish()
+    }
+}
+
 /// Contains information needed to request chunks for orphans
 /// Fields will be used as arguments for `request_chunks_for_orphan`
 pub struct OrphanMissingChunks {
@@ -370,6 +379,16 @@ pub struct OrphanMissingChunks {
     pub ancestor_hash: CryptoHash,
 }
 
+impl Debug for OrphanMissingChunks {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("OrphanMissingChunks")
+            .field("epoch_id", &self.epoch_id)
+            .field("ancestor_hash", &self.ancestor_hash)
+            .field("num_missing_chunks", &self.missing_chunks.len())
+            .finish()
+    }
+}
+
 /// Check if block header is known
 /// Returns Err(Error) if any error occurs when checking store
 /// Ok(Err(BlockKnownError)) if the block header is known
@@ -1778,6 +1797,8 @@ impl Chain {
         block_processing_artifacts: &mut BlockProcessingArtifact,
         apply_chunks_done_callback: DoneApplyChunkCallback,
     ) -> Result<(), Error> {
+        let _span =
+            debug_span!(target: "chain", "start_process_block_async", ?provenance).entered();
         let block_received_time = StaticClock::instant();
 
         metrics::BLOCK_PROCESSING_ATTEMPTS_TOTAL.inc();
@@ -1818,6 +1839,7 @@ impl Chain {
         block_processing_artifacts: &mut BlockProcessingArtifact,
         apply_chunks_done_callback: DoneApplyChunkCallback,
     ) -> (Vec<AcceptedBlock>, HashMap<CryptoHash, Error>) {
+        let _span = debug_span!(target: "chain", "postprocess_ready_blocks_chain").entered();
         let mut accepted_blocks = vec![];
         let mut errors = HashMap::new();
         while let Ok((block_hash, apply_result)) = self.apply_chunks_receiver.try_recv() {
@@ -2083,11 +2105,8 @@ impl Chain {
         block_received_time: Instant,
     ) -> Result<(), Error> {
         let block_height = block.header().height();
-        let _span = tracing::debug_span!(
-            target: "chain",
-            "start_process_block_impl",
-            height = block_height)
-        .entered();
+        let _span =
+            debug_span!(target: "chain", "start_process_block_impl", block_height).entered();
         // 0) Before we proceed with any further processing, we first check that the block
         // hash and signature matches to make sure the block is indeed produced by the assigned
         // block producer. If not, we drop the block immediately
@@ -2234,7 +2253,7 @@ impl Chain {
     ) {
         let sc = self.apply_chunks_sender.clone();
         spawn(move || {
-            // do_apply_chunks runs `work` parallelly, but still waits for all of them to finish
+            // do_apply_chunks runs `work` in parallel, but still waits for all of them to finish
             let res = do_apply_chunks(block_hash, block_height, work);
             // If we encounter error here, that means the receiver is deallocated and the client
             // thread is already shut down. The node is already crashed, so we can unwrap here
@@ -2803,8 +2822,13 @@ impl Chain {
         block_processing_artifacts: &mut BlockProcessingArtifact,
         apply_chunks_done_callback: DoneApplyChunkCallback,
     ) {
+        let _span = debug_span!(
+            target: "chain",
+            "check_orphans",
+            ?prev_hash,
+            num_orphans = self.orphans.len())
+        .entered();
         // Check if there are orphans we can process.
-        debug!(target: "chain", "Check orphans: from {}, # total orphans {}", prev_hash, self.orphans.len());
         // check within the descendents of `prev_hash` to see if there are orphans there that
         // are ready to request missing chunks for
         let orphans_to_check =
@@ -5264,7 +5288,7 @@ impl<'a> ChainUpdate<'a> {
         let prev_hash = block.header().prev_hash();
         let results = apply_chunks_results.into_iter().map(|x| {
             if let Err(err) = &x {
-                warn!(target:"chain", hash = %block.hash(), error = %err, "Error in applying chunks for block");
+                warn!(target: "chain", hash = %block.hash(), %err, "Error in applying chunks for block");
             }
             x
         }).collect::<Result<Vec<_>, Error>>()?;
@@ -5903,7 +5927,7 @@ pub fn do_apply_chunks(
             // a single span.
             task(&parent_span)
         })
-        .collect::<Vec<_>>()
+        .collect()
 }
 
 pub fn collect_receipts<'a, T>(receipt_proofs: T) -> Vec<Receipt>
diff --git a/chain/chain/src/doomslug.rs b/chain/chain/src/doomslug.rs
index 55b2a549f63..b05c607c619 100644
--- a/chain/chain/src/doomslug.rs
+++ b/chain/chain/src/doomslug.rs
@@ -11,7 +11,7 @@ use near_primitives::hash::CryptoHash;
 use near_primitives::static_clock::StaticClock;
 use near_primitives::types::{AccountId, ApprovalStake, Balance, BlockHeight, BlockHeightDelta};
 use near_primitives::validator_signer::ValidatorSigner;
-use tracing::info;
+use tracing::{debug, debug_span, field, info};
 
 /// Have that many iterations in the timer instead of `loop` to prevent potential bugs from blocking
 /// the node
@@ -699,6 +699,15 @@ impl Doomslug {
         has_enough_chunks: bool,
         log_block_production_info: bool,
     ) -> bool {
+        let span = debug_span!(
+            target: "doomslug",
+            "ready_to_produce_block",
+            has_enough_chunks,
+            target_height,
+            enough_approvals_for = field::Empty,
+            ready_to_produce_block = field::Empty,
+            need_to_wait = field::Empty)
+        .entered();
         let hash_or_height =
             ApprovalInner::new(&self.tip.block_hash, self.tip.height, target_height);
         if let Some(approval_trackers_at_height) = self.approval_tracking.get_mut(&target_height) {
@@ -710,9 +719,16 @@ impl Doomslug {
                 match block_production_readiness {
                     DoomslugBlockProductionReadiness::NotReady => false,
                     DoomslugBlockProductionReadiness::ReadySince(when) => {
+                        let enough_approvals_for = now.saturating_duration_since(when);
+                        span.record("enough_approvals_for", enough_approvals_for.as_secs_f64());
+                        span.record("ready_to_produce_block", true);
                         if has_enough_chunks {
                             if log_block_production_info {
-                                info!("ready to produce block @ {}, has enough approvals for {:?}, has enough chunks", target_height, now.saturating_duration_since(when));
+                                info!(
+                                    target: "doomslug",
+                                    target_height,
+                                    ?enough_approvals_for,
+                                    "ready to produce block, has enough approvals, has enough chunks");
                             }
                             true
                         } else {
@@ -721,11 +737,21 @@ impl Doomslug {
                             ) / 6;
 
                             let ready = now > when + delay;
+                            span.record("need_to_wait", !ready);
                             if log_block_production_info {
                                 if ready {
-                                    info!("ready to produce block @ {}, has enough approvals for {:?}, does not have enough chunks", target_height, now.saturating_duration_since(when));
+                                    info!(
+                                        target: "doomslug",
+                                        target_height,
+                                        ?enough_approvals_for,
+                                        "ready to produce block, but does not have enough chunks");
                                 } else {
-                                    info!("not ready to produce block @ {}, need to wait {:?}, has enough approvals for {:?}", target_height, (when + delay).saturating_duration_since(now), now.saturating_duration_since(when));
+                                    info!(
+                                        target: "doomslug",
+                                        target_height,
+                                        need_to_wait_for = ?(when + delay).saturating_duration_since(now),
+                                        ?enough_approvals_for,
+                                        "not ready to produce block, need to wait");
                                 }
                             }
                             ready
@@ -733,9 +759,11 @@ impl Doomslug {
                     }
                 }
             } else {
+                debug!(target: "doomslug", target_height, ?hash_or_height, "No approval tracker");
                 false
             }
         } else {
+            debug!(target: "doomslug", target_height, "No approval trackers at height");
            false
         }
     }
diff --git a/chain/chunks/src/lib.rs b/chain/chunks/src/lib.rs
index cf66357522d..6f20298d4b4 100644
--- a/chain/chunks/src/lib.rs
+++ b/chain/chunks/src/lib.rs
@@ -127,7 +127,7 @@ use rand::seq::IteratorRandom;
 use rand::Rng;
 use std::collections::{HashMap, HashSet};
 use std::sync::Arc;
-use tracing::{debug, error, warn};
+use tracing::{debug, debug_span, error, warn};
 
 pub mod adapter;
 mod chunk_cache;
@@ -576,6 +576,13 @@ impl ShardsManager {
         let height = chunk_header.height_created();
         let shard_id = chunk_header.shard_id();
         let chunk_hash = chunk_header.chunk_hash();
+        let _span = debug_span!(
+            target: "chunks",
+            "request_chunk_single",
+            ?chunk_hash,
+            shard_id,
+            height_created = height)
+        .entered();
 
         if self.requested_partial_encoded_chunks.contains_key(&chunk_hash) {
             debug!(target: "chunks", height, shard_id, ?chunk_hash, "Not requesting chunk, already being requested.");
@@ -665,6 +672,12 @@ impl ShardsManager {
         chunks_to_request: Vec<ShardChunkHeader>,
         prev_hash: CryptoHash,
     ) {
+        let _span = debug_span!(
+            target: "chunks",
+            "request_chunks",
+            ?prev_hash,
+            num_chunks_to_request = chunks_to_request.len())
+        .entered();
         for chunk_header in chunks_to_request {
             self.request_chunk_single(&chunk_header, prev_hash, false);
         }
@@ -684,6 +697,13 @@ impl ShardsManager {
         epoch_id: &EpochId,
         ancestor_hash: CryptoHash,
     ) {
+        let _span = debug_span!(
+            target: "chunks",
+            "request_chunks_for_orphan",
+            ?ancestor_hash,
+            ?epoch_id,
+            num_chunks_to_request = chunks_to_request.len())
+        .entered();
         let ancestor_epoch_id =
             unwrap_or_return!(self.epoch_manager.get_epoch_id_from_prev_block(&ancestor_hash));
         if epoch_id != &ancestor_epoch_id {
@@ -1017,19 +1037,34 @@ impl ShardsManager {
         chunk: &mut EncodedShardChunk,
         rs: &mut ReedSolomonWrapper,
     ) -> ChunkStatus {
+        let _span = debug_span!(
+            target: "chunks",
+            "check_chunk_complete",
+            height_included = chunk.cloned_header().height_included(),
+            shard_id = chunk.cloned_header().shard_id(),
+            chunk_hash = ?chunk.chunk_hash())
+        .entered();
         let data_parts = rs.data_shard_count();
         if chunk.content().num_fetched_parts() >= data_parts {
             if let Ok(_) = chunk.content_mut().reconstruct(rs) {
                 let (merkle_root, merkle_paths) = chunk.content().get_merkle_hash_and_paths();
                 if merkle_root == chunk.encoded_merkle_root() {
+                    debug!(target: "chunks", "Complete");
                     ChunkStatus::Complete(merkle_paths)
                 } else {
+                    debug!(
+                        target: "chunks",
+                        ?merkle_root,
+                        chunk_encoded_merkle_root = ?chunk.encoded_merkle_root(),
+                        "Invalid: Wrong merkle root");
                     ChunkStatus::Invalid
                 }
             } else {
+                debug!(target: "chunks", "Invalid: Failed to reconstruct");
                 ChunkStatus::Invalid
             }
         } else {
+            debug!(target: "chunks", num_fetched_parts = chunk.content().num_fetched_parts(), data_parts, "Incomplete");
             ChunkStatus::Incomplete
         }
     }
@@ -1229,6 +1264,7 @@ impl ShardsManager {
     // then in `process_partial_encoded_chunk` after checking the previous block is ready
     fn validate_chunk_header(&self, header: &ShardChunkHeader) -> Result<(), Error> {
         let chunk_hash = header.chunk_hash();
+        let _span = debug_span!(target: "chunks", "validate_chunk_header", ?chunk_hash).entered();
         // 1. check signature
         // Ideally, validating the chunk header needs the previous block to be accepted already.
         // However, we want to be able to validate chunk header in advance so we can save
@@ -1359,23 +1395,27 @@ impl ShardsManager {
             partial_encoded_chunk.map(|chunk| PartialEncodedChunkV2::from(chunk));
         let header = &partial_encoded_chunk.header;
         let chunk_hash = header.chunk_hash();
-        debug!(
+        let _span = debug_span!(
             target: "chunks",
+            "process_partial_encoded_chunk",
             ?chunk_hash,
-            height = header.height_created(),
             shard_id = header.shard_id(),
+            height_created = header.height_created(),
+            height_included = header.height_included())
+        .entered();
+        debug!(
+            target: "chunks",
             parts = ?partial_encoded_chunk.get_inner().parts.iter().map(|p| p.part_ord).collect::<Vec<_>>(),
-            "Process partial encoded chunk",
-        );
+            "Process partial encoded chunk");
 
         // Verify the partial encoded chunk is valid and worth processing
         // 1.a Leave if we received known chunk
         if let Some(entry) = self.encoded_chunks.get(&chunk_hash) {
             if entry.complete {
                 return Ok(ProcessPartialEncodedChunkResult::Known);
             }
-            debug!(target: "chunks", "{} parts in cache, total needed: {}", entry.parts.len(), self.rs.data_shard_count());
+            debug!(target: "chunks", num_parts_in_cache = entry.parts.len(), total_needed = self.rs.data_shard_count());
         } else {
-            debug!(target: "chunks", "0 parts in cache, total needed: {}", self.rs.data_shard_count());
+            debug!(target: "chunks", num_parts_in_cache = 0, total_needed = self.rs.data_shard_count());
         }
 
         // 1.b Checking chunk height
@@ -1525,6 +1565,13 @@ impl ShardsManager {
         &mut self,
         header: &ShardChunkHeader,
     ) -> Result<ProcessPartialEncodedChunkResult, Error> {
+        let chunk_hash = header.chunk_hash();
+        let _span = debug_span!(
+            target: "chunks",
+            "try_process_chunk_parts_and_receipts",
+            height_included = header.height_included(),
+            ?chunk_hash)
+        .entered();
         // The logic from now on requires previous block is processed because
         // calculating owner parts requires that, so we first check
         // whether prev_block_hash is in the chain, if not, returns NeedBlock
@@ -1532,10 +1579,10 @@ impl ShardsManager {
         let epoch_id = match self.epoch_manager.get_epoch_id_from_prev_block(&prev_block_hash) {
             Ok(epoch_id) => epoch_id,
             Err(_) => {
+                debug!(target: "chunks", ?prev_block_hash, "NeedBlock");
                 return Ok(ProcessPartialEncodedChunkResult::NeedBlock);
             }
         };
-        let chunk_hash = header.chunk_hash();
         // check the header exists in encoded_chunks and validate it again (full validation)
         // now that prev_block is processed
         if let Some(chunk_entry) = self.encoded_chunks.get(&chunk_hash) {
@@ -1547,10 +1594,14 @@ impl ShardsManager {
                 }
                 Err(err) => {
                     return match err {
-                        Error::ChainError(chain_error) => Err(chain_error.into()),
-                        _ => {
+                        Error::ChainError(chain_error) => {
+                            debug!(target: "chunks", ?chain_error);
+                            Err(chain_error.into())
+                        }
+                        err => {
                             // the chunk header is invalid
                             // remove this entry from the cache and remove the request from the request pool
+                            debug!(target: "chunks", ?err, "Chunk header is invalid");
                             self.encoded_chunks.remove(&chunk_hash);
                             self.requested_partial_encoded_chunks.remove(&chunk_hash);
                             Err(err)
@@ -1560,6 +1611,7 @@ impl ShardsManager {
                     }
                 }
             } else {
+            debug!(target: "chunks", "UnknownChunk");
             return Err(Error::UnknownChunk);
         }
@@ -1598,6 +1650,7 @@ impl ShardsManager {
             &self.shard_tracker,
         );
 
+        debug!(target: "chunks", cares_about_shard, can_reconstruct, have_all_parts, have_all_receipts);
         if !cares_about_shard && have_all_parts && have_all_receipts {
             // If we don't care about the shard, we only need the parts and the receipts that we
             // own, before marking the chunk as completed.
@@ -1652,6 +1705,7 @@ impl ShardsManager {
         partial_chunk: PartialEncodedChunk,
         shard_chunk: Option<ShardChunk>,
     ) {
+        let _span = debug_span!(target: "chunks", "complete_chunk").entered();
         let chunk_hash = partial_chunk.chunk_hash();
         self.encoded_chunks.mark_entry_complete(&chunk_hash);
         self.encoded_chunks.remove_from_cache_if_outside_horizon(&chunk_hash);
@@ -1667,16 +1721,22 @@ impl ShardsManager {
     /// the previous block is accepted. So we need to check if there are any chunks can be marked as
     /// complete when a new block is accepted.
     pub fn check_incomplete_chunks(&mut self, prev_block_hash: &CryptoHash) {
+        let _span =
+            debug_span!(target: "chunks", "check_incomplete_chunks", ?prev_block_hash).entered();
         let mut chunks_to_process = vec![];
         if let Some(chunk_hashes) = self.encoded_chunks.get_incomplete_chunks(prev_block_hash) {
             for chunk_hash in chunk_hashes {
+                debug!(target: "chunks", ?chunk_hash, "incomplete chunk");
                 if let Some(entry) = self.encoded_chunks.get(chunk_hash) {
                     chunks_to_process.push(entry.header.clone());
                 }
             }
         }
         for header in chunks_to_process {
-            debug!(target:"chunks", "try to process incomplete chunk {:?}, prev_block: {:?}", header.chunk_hash(), prev_block_hash);
+            debug!(target: "chunks",
+                chunk_hash = ?header.chunk_hash(),
+                ?prev_block_hash,
+                "try to process incomplete chunk");
             if let Err(err) = self.try_process_chunk_parts_and_receipts(&header) {
                 error!(target:"chunks", "unexpected error processing orphan chunk {:?}", err)
             }
diff --git a/chain/chunks/src/logic.rs b/chain/chunks/src/logic.rs
index 6f07e1510bb..892495911f6 100644
--- a/chain/chunks/src/logic.rs
+++ b/chain/chunks/src/logic.rs
@@ -12,7 +12,7 @@ use near_primitives::{
     },
     types::{AccountId, ShardId},
 };
-use tracing::log::{debug, error};
+use tracing::{debug, debug_span, error};
 
 pub fn need_receipt(
     prev_block_hash: &CryptoHash,
@@ -143,6 +143,13 @@ pub fn decode_encoded_chunk(
     shard_tracker: &ShardTracker,
 ) -> Result<(ShardChunk, PartialEncodedChunk), Error> {
     let chunk_hash = encoded_chunk.chunk_hash();
+    let _span = debug_span!(
+        target: "chunks",
+        "decode_encoded_chunk",
+        height_included = encoded_chunk.cloned_header().height_included(),
+        shard_id = encoded_chunk.cloned_header().shard_id(),
+        ?chunk_hash)
+    .entered();
 
     if let Ok(shard_chunk) = encoded_chunk
         .decode_chunk(epoch_manager.num_data_parts())
@@ -154,7 +161,13 @@ pub fn decode_encoded_chunk(
             Ok(shard_chunk)
         })
     {
-        debug!(target: "chunks", "Reconstructed and decoded chunk {}, encoded length was {}, num txs: {}, I'm {:?}", chunk_hash.0, encoded_chunk.encoded_length(), shard_chunk.transactions().len(), me);
+        debug!(
+            target: "chunks",
+            ?chunk_hash,
+            encoded_length = encoded_chunk.encoded_length(),
+            num_tx = shard_chunk.transactions().len(),
+            ?me,
+            "Reconstructed and decoded");
         let partial_chunk = create_partial_chunk(
             encoded_chunk,
             merkle_paths,
@@ -165,11 +178,11 @@ pub fn decode_encoded_chunk(
         )
         .map_err(|err| Error::ChainError(err.into()))?;
 
-        return Ok((shard_chunk, partial_chunk));
+        Ok((shard_chunk, partial_chunk))
     } else {
         // Can't decode chunk or has invalid proofs, ignore it
-        error!(target: "chunks", "Reconstructed, but failed to decoded chunk {}, I'm {:?}", chunk_hash.0, me);
-        return Err(Error::InvalidChunk);
+        error!(target: "chunks", ?chunk_hash, ?me, "Reconstructed, but failed to decode chunk");
+        Err(Error::InvalidChunk)
     }
 }
diff --git a/chain/client-primitives/Cargo.toml b/chain/client-primitives/Cargo.toml
index 64176f9895e..f0b72459468 100644
--- a/chain/client-primitives/Cargo.toml
+++ b/chain/client-primitives/Cargo.toml
@@ -19,11 +19,11 @@ serde.workspace = true
 serde_json.workspace = true
 strum.workspace = true
 thiserror.workspace = true
+tracing.workspace = true
 yansi.workspace = true
 
 near-chain-primitives.workspace = true
 near-chain-configs.workspace = true
-
 near-chunks-primitives.workspace = true
 near-crypto.workspace = true
 near-primitives.workspace = true
diff --git a/chain/client-primitives/src/types.rs b/chain/client-primitives/src/types.rs
index ada175b5245..aa8a879aabb 100644
--- a/chain/client-primitives/src/types.rs
+++ b/chain/client-primitives/src/types.rs
@@ -21,6 +21,7 @@ pub use near_primitives::views::{StatusResponse, StatusSyncInfo};
 use std::collections::HashMap;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::Arc;
+use tracing::debug_span;
 use yansi::Color::Magenta;
 use yansi::Style;
@@ -321,6 +322,13 @@ impl SyncStatus {
             _ => None,
         }
     }
+
+    pub fn update(&mut self, new_value: Self) {
+        let _span =
+            debug_span!(target: "sync", "update_sync_status", old_value = ?self, ?new_value)
+                .entered();
+        *self = new_value
+    }
 }
 
 impl From<SyncStatus> for SyncStatusView {
diff --git a/chain/client/src/client.rs b/chain/client/src/client.rs
index bc3b8a2bd93..cc0eb9cec1a 100644
--- a/chain/client/src/client.rs
+++ b/chain/client/src/client.rs
@@ -82,7 +82,7 @@ use std::collections::{HashMap, HashSet};
 use std::sync::Arc;
 use std::sync::RwLock;
 use std::time::{Duration, Instant};
-use tracing::{debug, error, info, trace, warn};
+use tracing::{debug, debug_span, error, info, trace, warn};
 
 const NUM_REBROADCAST_BLOCKS: usize = 30;
 const CHUNK_HEADERS_FOR_INCLUSION_CACHE_SIZE: usize = 2048;
@@ -431,8 +431,10 @@ impl Client {
                     .sharded_tx_pool
                     .reintroduce_transactions(shard_uid, &chunk.transactions());
                 if reintroduced_count < chunk.transactions().len() {
-                    debug!(target: "client", "Reintroduced {} transactions out of {}",
-                        reintroduced_count, chunk.transactions().len());
+                    debug!(target: "client",
+                        reintroduced_count,
+                        num_tx = chunk.transactions().len(),
+                        "Reintroduced transactions");
                 }
             }
         }
@@ -513,9 +515,9 @@ impl Client {
                 if banned {
                     warn!(
                         target: "client",
-                        "Not including chunk {:?} from banned validator {}",
-                        chunk_header.chunk_hash(),
-                        chunk_producer);
+                        chunk_hash = ?chunk_header.chunk_hash(),
+                        ?chunk_producer,
+                        "Not including chunk from a banned validator");
                     metrics::CHUNK_DROPPED_BECAUSE_OF_BANNED_CHUNK_PRODUCER.inc();
                 }
                 !banned
@@ -589,6 +591,7 @@ impl Client {
             validator_signer.validator_id(),
             &next_block_proposer,
         )? {
+            debug!(target: "client", "Should reschedule block");
             return Ok(None);
         }
         let (validator_stake, _) = self.epoch_manager.get_validator_by_account_id(
@@ -599,7 +602,10 @@ impl Client {
 
         let validator_pk = validator_stake.take_public_key();
         if validator_pk != validator_signer.public_key() {
-            debug!(target: "client", "Local validator key {} does not match expected validator key {}, skipping block production", validator_signer.public_key(), validator_pk);
+            debug!(target: "client",
+                local_validator_key = ?validator_signer.public_key(),
+                ?validator_pk,
+                "Local validator key does not match expected validator key, skipping block production");
             #[cfg(not(feature = "test_features"))]
             return Ok(None);
             #[cfg(feature = "test_features")]
@@ -796,15 +802,19 @@ impl Client {
         let chunk_proposer =
             self.epoch_manager.get_chunk_producer(epoch_id, next_height, shard_id).unwrap();
         if validator_signer.validator_id() != &chunk_proposer {
-            debug!(target: "client", "Not producing chunk for shard {}: chain at {}, not block producer for next block. Me: {}, proposer: {}", shard_id, next_height, validator_signer.validator_id(), chunk_proposer);
+            debug!(target: "client",
+                me = ?validator_signer.validator_id(),
+                ?chunk_proposer,
+                next_height,
+                shard_id,
+                "Not producing chunk. Not chunk producer for next chunk.");
             return Ok(None);
         }
-
         if self.epoch_manager.is_next_block_epoch_start(&prev_block_hash)? {
             let prev_prev_hash = *self.chain.get_block_header(&prev_block_hash)?.prev_hash();
             if !self.chain.prev_block_is_caught_up(&prev_prev_hash, &prev_block_hash)? {
                 // See comment in similar snipped in `produce_block`
-                debug!(target: "client", "Produce chunk: prev block is not caught up");
+                debug!(target: "client", shard_id, next_height, "Produce chunk: prev block is not caught up");
                 return Err(Error::ChunkProducer(
                     "State for the epoch is not downloaded yet, skipping chunk production"
                         .to_string(),
@@ -812,13 +822,7 @@ impl Client {
             }
         }
 
-        debug!(
-            target: "client",
-            "Producing chunk at height {} for shard {}, I'm {}",
-            next_height,
-            shard_id,
-            validator_signer.validator_id()
-        );
+        debug!(target: "client", me = ?validator_signer.validator_id(), next_height, shard_id, "Producing chunk");
 
         let shard_uid = self.epoch_manager.shard_id_to_uid(shard_id, epoch_id)?;
         let chunk_extra = self
@@ -871,15 +875,13 @@ impl Client {
             protocol_version,
         )?;
 
-        debug!(
-            target: "client",
-            me=%validator_signer.validator_id(),
-            chunk_hash=%encoded_chunk.chunk_hash().0,
+        debug!(target: "client",
+            me = %validator_signer.validator_id(),
+            chunk_hash = ?encoded_chunk.chunk_hash(),
             %prev_block_hash,
-            "Produced chunk with {} txs and {} receipts",
             num_filtered_transactions,
-            outgoing_receipts.len(),
-        );
+            num_outgoing_receipts = outgoing_receipts.len(),
+            "Produced chunk");
         metrics::CHUNK_PRODUCED_TOTAL.inc();
 
         self.chunk_production_info.put(
@@ -982,8 +984,7 @@ impl Client {
         // included into the block.
         let reintroduced_count = sharded_tx_pool.reintroduce_transactions(shard_uid, &transactions);
         if reintroduced_count < transactions.len() {
-            debug!(target: "client", "Reintroduced {} transactions out of {}",
-                reintroduced_count, transactions.len());
+            debug!(target: "client", reintroduced_count, num_tx = transactions.len(), "Reintroduced transactions");
         }
         Ok(transactions)
     }
@@ -1028,7 +1029,7 @@ impl Client {
         // done within process_block_impl, this is just for logging.
         if let Err(err) = res {
             if err.is_bad_data() {
-                warn!(target: "client", "Receive bad block: {}", err);
+                warn!(target: "client", ?err, "Receive bad block");
             } else if err.is_error() {
                 if let near_chain::Error::DBNotFoundErr(msg) = &err {
                     debug_assert!(!msg.starts_with("BLOCK HEIGHT"), "{:?}", err);
@@ -1036,12 +1037,12 @@ impl Client {
                 if self.sync_status.is_syncing() {
                     // While syncing, we may receive blocks that are older or from next epochs.
                     // This leads to Old Block or EpochOutOfBounds errors.
-                    debug!(target: "client", "Error on receival of block: {}", err);
+                    debug!(target: "client", ?err, sync_status = ?self.sync_status, "Error receiving a block. Node is syncing");
                 } else {
-                    error!(target: "client", "Error on receival of block: {}", err);
+                    error!(target: "client", ?err, "Error receiving a block. Node is not syncing");
                 }
             } else {
-                debug!(target: "client", error = %err, "Process block: refused by chain");
+                debug!(target: "client", ?err, "Process block: refused by chain");
             }
             self.chain.blocks_delay_tracker.mark_block_errored(&hash, err.to_string());
         }
@@ -1059,6 +1060,8 @@ impl Client {
         was_requested: bool,
         apply_chunks_done_callback: DoneApplyChunkCallback,
     ) -> Result<(), near_chain::Error> {
+        let _span =
+            debug_span!(target: "chain", "receive_block_impl", was_requested, ?peer_id).entered();
         self.chain.blocks_delay_tracker.mark_block_received(
             &block,
             StaticClock::instant(),
@@ -1091,11 +1094,16 @@ impl Client {
         let res = self.start_process_block(block, provenance, apply_chunks_done_callback);
         match &res {
             Err(near_chain::Error::Orphan) => {
+                debug!(target: "chain", ?prev_hash, "Orphan error");
                 if !self.chain.is_orphan(&prev_hash) {
+                    debug!(target: "chain", "Previous block is not an orphan, requesting it");
                     self.request_block(prev_hash, peer_id)
                 }
             }
+            Err(err) => {
+                debug!(target: "chain", ?err, "Block processing error");
+            }
             _ => {}
         }
         res
     }
@@ -1190,6 +1197,12 @@ impl Client {
         provenance: Provenance,
         apply_chunks_done_callback: DoneApplyChunkCallback,
     ) -> Result<(), near_chain::Error> {
+        let _span = debug_span!(
+            target: "chain",
+            "start_process_block",
+            ?provenance,
+            block_height = block.header().height())
+        .entered();
         let mut block_processing_artifacts = BlockProcessingArtifact::default();
 
         let result = {
@@ -1243,6 +1256,8 @@ impl Client {
         apply_chunks_done_callback: DoneApplyChunkCallback,
         should_produce_chunk: bool,
     ) -> (Vec<AcceptedBlock>, HashMap<CryptoHash, Error>) {
+        let _span = debug_span!(target: "client", "postprocess_ready_blocks", should_produce_chunk)
+            .entered();
         let me = self
             .validator_signer
             .as_ref()
@@ -1307,7 +1322,7 @@ impl Client {
         for chunk_header in invalid_chunks {
             if let Err(err) = self.ban_chunk_producer_for_producing_invalid_chunk(chunk_header) {
-                error!(target: "client", "Failed to ban chunk producer for producing invalid chunk: {:?}", err);
+                error!(target: "client", ?err, "Failed to ban chunk producer for producing invalid chunk");
             }
         }
     }
@@ -1325,10 +1340,10 @@ impl Client {
         )?;
         error!(
             target: "client",
-            "Banning chunk producer {} for epoch {:?} for producing invalid chunk {:?}",
-            chunk_producer,
-            epoch_id,
-            chunk_header.chunk_hash());
+            ?chunk_producer,
+            ?epoch_id,
+            chunk_hash = ?chunk_header.chunk_hash(),
+            "Banning chunk producer for producing invalid chunk");
         metrics::CHUNK_PRODUCER_BANNED_FOR_EPOCH.inc();
         self.do_not_include_chunks_from.put((epoch_id, chunk_producer), ());
         Ok(())
@@ -1368,7 +1383,7 @@ impl Client {
         let mut update = self.chain.mut_store().store_update();
         update.save_invalid_chunk(encoded_chunk);
         if let Err(err) = update.commit() {
-            error!(target: "client", "Error saving invalid chunk: {:?}", err);
+            error!(target: "client", ?err, "Error saving invalid chunk");
         }
     }
@@ -1472,7 +1487,12 @@ impl Client {
         if Some(&next_block_producer) == self.validator_signer.as_ref().map(|x| x.validator_id()) {
             self.collect_block_approval(&approval, ApprovalType::SelfApproval);
         } else {
-            debug!(target: "client", "Sending an approval {:?} from {} to {} for {}", approval.inner, approval.account_id, next_block_producer, approval.target_height);
+            debug!(target: "client",
+                approval_inner = ?approval.inner,
+                account_id = ?approval.account_id,
+                next_bp = ?next_block_producer,
+                target_height = approval.target_height,
+                "Sending an approval");
             let approval_message = ApprovalMessage::new(approval, next_block_producer);
             self.network_adapter.send(PeerManagerMessageRequest::NetworkRequests(
                 NetworkRequests::Approval { approval_message },
@@ -1492,10 +1512,20 @@ impl Client {
         provenance: Provenance,
         skip_produce_chunk: bool,
     ) {
+        let _span = tracing::debug_span!(
+            target: "client",
+            "on_block_accepted_with_optional_chunk_produce",
+            ?block_hash,
+            ?status,
+            ?provenance,
+            skip_produce_chunk,
+            is_syncing = self.sync_status.is_syncing(),
+            sync_status = ?self.sync_status)
+        .entered();
         let block = match self.chain.get_block(&block_hash) {
             Ok(block) => block,
             Err(err) => {
-                error!(target: "client", "Failed to find block {} that was just accepted: {}", block_hash, err);
+                error!(target: "client", ?err, ?block_hash, "Failed to find block that was just accepted");
                 return;
             }
         };
@@ -1582,6 +1612,8 @@ impl Client {
             && !skip_produce_chunk
         {
             self.produce_chunks(&block, validator_id);
+        } else {
+            info!(target: "client", "Not producing a chunk");
         }
     }
@@ -1659,6 +1691,12 @@ impl Client {
     // Produce new chunks
     fn produce_chunks(&mut self, block: &Block, validator_id: AccountId) {
+        let _span = debug_span!(
+            target: "client",
+            "produce_chunks",
+            ?validator_id,
+            block_height = block.header().height())
+        .entered();
         let epoch_id =
             self.epoch_manager.get_epoch_id_from_prev_block(block.header().hash()).unwrap();
         for shard_id in self.epoch_manager.shard_ids(&epoch_id).unwrap() {
@@ -1670,8 +1708,11 @@ impl Client {
                 continue;
             }
 
-            let _span = tracing::debug_span!(
-                target: "client", "on_block_accepted", prev_block_hash = ?*block.hash(), ?shard_id)
+            let _span = debug_span!(
+                target: "client",
+                "on_block_accepted",
+                prev_block_hash = ?*block.hash(),
+                ?shard_id)
             .entered();
             let _timer = metrics::PRODUCE_AND_DISTRIBUTE_CHUNK_TIME
                 .with_label_values(&[&shard_id.to_string()])
@@ -1689,7 +1730,7 @@ impl Client {
                 }
                 Ok(None) => {}
                 Err(err) => {
-                    error!(target: "client", "Error producing chunk {:?}", err);
+                    error!(target: "client", ?err, "Error producing chunk");
                 }
             }
         }
@@ -1725,6 +1766,12 @@ impl Client {
         blocks_missing_chunks: Vec<BlockMissingChunks>,
         orphans_missing_chunks: Vec<OrphanMissingChunks>,
     ) {
+        let _span = debug_span!(
+            target: "client",
+            "request_missing_chunks",
+            ?blocks_missing_chunks,
+            ?orphans_missing_chunks)
+        .entered();
         let now = StaticClock::utc();
         for BlockMissingChunks { prev_hash, missing_chunks } in blocks_missing_chunks {
             for chunk in &missing_chunks {
@@ -1757,6 +1804,7 @@ impl Client {
         &mut self,
         apply_chunks_done_callback: DoneApplyChunkCallback,
     ) {
+        let _span = debug_span!(target: "client", "process_blocks_with_missing_chunks").entered();
         let me =
             self.validator_signer.as_ref().map(|validator_signer| validator_signer.validator_id());
         let mut blocks_processing_artifacts = BlockProcessingArtifact::default();
@@ -1944,7 +1992,7 @@ impl Client {
             match self.epoch_manager.get_epoch_block_approvers_ordered(&parent_hash) {
                 Ok(block_producer_stakes) => block_producer_stakes,
                 Err(err) => {
-                    error!(target: "client", "Block approval error: {}", err);
+                    error!(target: "client", ?err, "Block approval error");
                     return;
                 }
             };
@@ -1982,13 +2030,7 @@ impl Client {
             validators.remove(account_id);
         }
         for validator in validators {
-            trace!(target: "client",
-                "I'm {:?}, routing a transaction {:?} to {}, shard_id = {}",
-                self.validator_signer.as_ref().map(|bp| bp.validator_id()),
-                tx,
-                validator,
-                shard_id
-            );
+            trace!(target: "client", me = ?self.validator_signer.as_ref().map(|bp| bp.validator_id()), ?tx, ?validator, shard_id, "Routing a transaction");
 
             // Send message to network to actually forward transaction.
             self.network_adapter.send(PeerManagerMessageRequest::NetworkRequests(
@@ -2012,7 +2054,7 @@ impl Client {
     ) -> ProcessTxResponse {
         unwrap_or_return!(self.process_tx_internal(&tx, is_forwarded, check_only), {
             let me = self.validator_signer.as_ref().map(|vs| vs.validator_id());
-            warn!(target: "client", "I'm: {:?} Dropping tx: {:?}", me, tx);
+            warn!(target: "client", ?me, ?tx, "Dropping tx");
             ProcessTxResponse::NoResponse
         })
     }
@@ -2068,7 +2110,7 @@ impl Client {
                 &tx.transaction.block_hash,
                 transaction_validity_period,
             ) {
-                debug!(target: "client", "Invalid tx: expired or from a different fork -- {:?}", tx);
+                debug!(target: "client", ?tx, "Invalid tx: expired or from a different fork");
                 return Ok(ProcessTxResponse::InvalidTx(e));
             }
             let gas_price = cur_block_header.next_gas_price();
@@ -2081,7 +2123,7 @@ impl Client {
                 .validate_tx(gas_price, None, tx, true, &epoch_id, protocol_version)
                 .expect("no storage errors")
             {
-                debug!(target: "client", tx=?tx.get_hash(), "Invalid tx during basic validation: {:?}", err);
+                debug!(target: "client", tx_hash = ?tx.get_hash(), ?err, "Invalid tx during basic validation");
                 return Ok(ProcessTxResponse::InvalidTx(err));
             }
@@ -2113,7 +2155,7 @@ impl Client {
                 .validate_tx(gas_price, Some(state_root), tx, false, &epoch_id, protocol_version)
                 .expect("no storage errors")
             {
-                debug!(target: "client", "Invalid tx: {:?}", err);
+                debug!(target: "client", ?err, "Invalid tx");
                 Ok(ProcessTxResponse::InvalidTx(err))
             } else if check_only {
                 Ok(ProcessTxResponse::ValidTx)
@@ -2122,17 +2164,17 @@ impl Client {
                 if me.is_some() {
                     match self.sharded_tx_pool.insert_transaction(shard_uid, tx.clone()) {
                         InsertTransactionResult::Success => {
-                            trace!(target: "client", ?shard_uid, tx=?tx.get_hash(), "Recorded a transaction.");
+                            trace!(target: "client", ?shard_uid, tx_hash = ?tx.get_hash(), "Recorded a transaction.");
                         }
                         InsertTransactionResult::Duplicate => {
-                            trace!(target: "client", ?shard_uid, tx=?tx.get_hash(), "Duplicate transaction, not forwarding it.");
+                            trace!(target: "client", ?shard_uid, tx_hash = ?tx.get_hash(), "Duplicate transaction, not forwarding it.");
                             return Ok(ProcessTxResponse::ValidTx);
                         }
                         InsertTransactionResult::NoSpaceLeft => {
                             if is_forwarded {
-                                trace!(target: "client", ?shard_uid, tx=?tx.get_hash(), "Transaction pool is full, dropping the transaction.");
+                                trace!(target: "client", ?shard_uid, tx_hash = ?tx.get_hash(), "Transaction pool is full, dropping the transaction.");
                             } else {
-                                trace!(target: "client", ?shard_uid, tx=?tx.get_hash(), "Transaction pool is full, trying to forward the transaction.");
+                                trace!(target: "client", ?shard_uid, tx_hash = ?tx.get_hash(), "Transaction pool is full, trying to forward the transaction.");
                             }
                         }
                     }
@@ -2144,7 +2186,7 @@ impl Client {
                 // forward to current epoch validators,
                 // possibly forward to next epoch validators
                 if self.active_validator(shard_id)? {
-                    trace!(target: "client", account = ?me, shard_id, tx=?tx.get_hash(), is_forwarded, "Recording a transaction.");
+                    trace!(target: "client", account = ?me, shard_id, tx_hash = ?tx.get_hash(), is_forwarded, "Recording a transaction.");
                     metrics::TRANSACTION_RECEIVED_VALIDATOR.inc();
 
                     if !is_forwarded {
@@ -2152,12 +2194,12 @@ impl Client {
                 }
                 Ok(ProcessTxResponse::ValidTx)
             } else if !is_forwarded {
-                trace!(target: "client", shard_id, tx=?tx.get_hash(), "Forwarding a transaction.");
+                trace!(target: "client", shard_id, tx_hash = ?tx.get_hash(), "Forwarding a transaction.");
                 metrics::TRANSACTION_RECEIVED_NON_VALIDATOR.inc();
                 self.forward_tx(&epoch_id, tx)?;
                 Ok(ProcessTxResponse::RequestRouted)
             } else {
-                trace!(target: "client", shard_id, tx=?tx.get_hash(), "Non-validator received a forwarded transaction, dropping it.");
+                trace!(target: "client", shard_id, tx_hash = ?tx.get_hash(), "Non-validator received a forwarded transaction, dropping it.");
                 metrics::TRANSACTION_RECEIVED_NON_VALIDATOR_FORWARDED.inc();
                 Ok(ProcessTxResponse::NoResponse)
             }
@@ -2166,7 +2208,7 @@ impl Client {
             Ok(ProcessTxResponse::DoesNotTrackShard)
         } else if is_forwarded {
             // Received forwarded transaction but we are not tracking the shard
-            debug!(target: "client", "Received forwarded transaction but no tracking shard {}, I'm {:?}", shard_id, me);
+            debug!(target: "client", ?me, shard_id, tx_hash = ?tx.get_hash(), "Received forwarded transaction but not tracking the shard");
             Ok(ProcessTxResponse::NoResponse)
         } else {
             // We are not tracking this shard, so there is no way to validate this tx. Just rerouting.
@@ -2206,6 +2248,7 @@ impl Client {
         apply_chunks_done_callback: DoneApplyChunkCallback,
         state_parts_arbiter_handle: &ArbiterHandle,
     ) -> Result<(), Error> {
+        let _span = debug_span!(target: "sync", "run_catchup").entered();
         let mut notify_state_sync = false;
         let me = &self.validator_signer.as_ref().map(|x| x.validator_id().clone());
         for (sync_hash, state_sync_info) in self.chain.store().iterate_state_sync_infos()? {
@@ -2431,6 +2474,7 @@ impl Client {
 /* implements functions used to communicate with network */
 impl Client {
     pub fn request_block(&self, hash: CryptoHash, peer_id: PeerId) {
+        let _span = debug_span!(target: "client", "request_block", ?hash, ?peer_id).entered();
         match self.chain.block_exists(&hash) {
             Ok(false) => {
                 self.network_adapter.send(PeerManagerMessageRequest::NetworkRequests(
@@ -2438,10 +2482,10 @@ impl Client {
                 ));
             }
             Ok(true) => {
-                debug!(target: "client", "send_block_request_to_peer: block {} already known", hash)
+                debug!(target: "client", ?hash, "send_block_request_to_peer: block already known")
             }
-            Err(e) => {
-                error!(target: "client", "send_block_request_to_peer: failed to check block exists: {:?}", e)
+            Err(err) => {
+                error!(target: "client", ?err, "send_block_request_to_peer: failed to check block exists")
             }
         }
     }
diff --git a/chain/client/src/client_actor.rs b/chain/client/src/client_actor.rs
index 2731e041f51..e87e418f10a 100644
--- a/chain/client/src/client_actor.rs
+++ b/chain/client/src/client_actor.rs
@@ -78,7 +78,7 @@ use std::sync::{Arc, RwLock};
 use std::thread;
 use std::time::{Duration, Instant};
 use tokio::sync::broadcast;
-use tracing::{debug, error, info, trace, warn};
+use tracing::{debug, debug_span, error, info, trace, warn};
 
 /// Multiplier on `max_block_time` to wait until deciding that chain stalled.
 const STATUS_WAIT_TIME_MULTIPLIER: u64 = 10;
@@ -421,6 +421,7 @@ impl Handler<WithSpanContext<BlockResponse>> for ClientActor {
     fn handle(&mut self, msg: WithSpanContext<BlockResponse>, ctx: &mut Context<Self>) {
         self.wrap(msg, ctx, "BlockResponse", |this: &mut Self, msg| {
             let BlockResponse { block, peer_id, was_requested } = msg;
+            info!(target: "client", block_height = block.header().height(), block_hash = ?block.header().hash(), "BlockResponse");
             let blocks_at_height = this
                 .client
                 .chain
@@ -1212,13 +1213,17 @@ impl ClientActor {
     /// this function in `check_triggers`, because the actix queue may be blocked by other messages
     /// and we want to prioritize block processing.
     fn try_process_unfinished_blocks(&mut self) {
-        let (accepted_blocks, _errors) =
+        let _span = debug_span!(target: "client", "try_process_unfinished_blocks").entered();
+        let (accepted_blocks, errors) =
             self.client.postprocess_ready_blocks(self.get_apply_chunks_done_callback(), true);
-        // TODO: log the errors
+        if !errors.is_empty() {
+            error!(target: "client", ?errors, "try_process_unfinished_blocks got errors");
+        }
         self.process_accepted_blocks(accepted_blocks);
     }
 
     fn try_handle_block_production(&mut self) {
+        let _span = debug_span!(target: "client", "try_handle_block_production").entered();
         if let Err(err) = self.handle_block_production() {
             tracing::error!(target: "client", ?err, "Handle block production failed")
         }
@@ -1273,12 +1278,13 @@ impl ClientActor {
         if let Err(e) = &res {
             match e {
                 near_chain::Error::ChunksMissing(_) => {
+                    debug!(target: "client", "chunks missing");
                     // missing chunks were already handled in Client::process_block, we don't need to
                     // do anything here
                     return Ok(());
                 }
                 _ => {
-                    error!(target: "client", "Failed to process freshly produced block: {:?}", res);
+                    error!(target: "client", ?res, "Failed to process freshly produced block");
                     byzantine_assert!(false);
                     return res.map_err(|err| err.into());
                 }
@@ -1368,7 +1374,9 @@ impl ClientActor {
     }
 
     fn receive_headers(&mut self, headers: Vec<BlockHeader>, peer_id: PeerId) -> bool {
-        info!(target: "client", "Received {} block headers from {}", headers.len(), peer_id);
+        let _span =
+            debug_span!(target: "client", "receive_headers", num_headers = headers.len(), ?peer_id)
+                .entered();
         if headers.is_empty() {
             return true;
         }
@@ -1585,10 +1593,9 @@ impl ClientActor {
             | SyncRequirement::NoPeers
             | SyncRequirement::AdvHeaderSyncDisabled => {
                 if currently_syncing {
-                    info!(target: "client", "disabling sync: {}", &sync);
-                    self.client.sync_status = SyncStatus::NoSync;
-                    // Initial transition out of "syncing" state.
+                    debug!(target: "sync", prev_sync_status = ?self.client.sync_status, "disabling sync");
+                    self.client.sync_status.update(SyncStatus::NoSync);
 
                     // Announce this client's account id if their epoch is coming up.
                     let head = unwrap_and_report!(self.client.chain.head());
                     self.check_send_announce_account(head.prev_block_hash);
@@ -1598,10 +1605,7 @@ impl ClientActor {
             SyncRequirement::SyncNeeded { highest_height, .. } => {
                 let mut notify_start_sync = false;
                 if !currently_syncing {
-                    info!(
-                        target: "client",
-                        "enabling sync: {}", &sync,
-                    );
+                    info!(target: "client", ?sync, "enabling sync");
                 }
                 // Run each step of syncing separately.
                 unwrap_and_report!(self.client.header_sync.run(
@@ -1641,7 +1645,15 @@ impl ClientActor {
                                 .reset_data_pre_state_sync(sync_hash));
                         }
                         let s = StateSyncStatus { sync_hash, sync_status: HashMap::default() };
-                        self.client.sync_status = SyncStatus::StateSync(s);
+                        // An artificial span to easily detect a node getting into the sync state in Grafana.
+                        {
+                            let _span =
+                                debug_span!(target: "sync", "set_sync", sync = "StateSync")
+                                    .entered();
+                            debug!(target: "sync", prev_sync_status = ?self.client.sync_status);
+                            self.client.sync_status = SyncStatus::StateSync(s);
+                            debug!(target: "sync", sync_status = ?self.client.sync_status);
+                        }
                         // This is the first time we run state sync.
                         notify_start_sync = true;
@@ -1758,12 +1770,11 @@ impl ClientActor {
 
         self.client
             .process_block_processing_artifact(block_processing_artifacts);
-
-        self.client.sync_status = SyncStatus::BodySync {
+        self.client.sync_status.update(SyncStatus::BodySync {
             start_height: 0,
             current_height: 0,
             highest_height: 0,
-        };
+        });
         }
     }
 }
diff --git a/chain/client/src/sync/block.rs b/chain/client/src/sync/block.rs
index 5189dbd1a22..08805ffb865 100644
--- a/chain/client/src/sync/block.rs
+++ b/chain/client/src/sync/block.rs
@@ -74,8 +74,12 @@ impl BlockSync {
             Some(height) => height,
             None => head.height,
         };
-        *sync_status =
-            SyncStatus::BodySync { start_height, current_height: head.height, highest_height };
+
+        sync_status.update(SyncStatus::BodySync {
+            start_height,
+            current_height: head.height,
+            highest_height,
+        });
         Ok(false)
     }
diff --git a/chain/client/src/sync/header.rs b/chain/client/src/sync/header.rs
index 70d7784f35a..d5d8075ac44 100644
--- a/chain/client/src/sync/header.rs
+++ b/chain/client/src/sync/header.rs
@@ -1,6 +1,3 @@
-use std::cmp::min;
-use std::time::Duration as TimeDuration;
-
 use chrono::{DateTime, Duration, Utc};
 use near_async::messaging::CanSend;
 use near_chain::{Chain, ChainStoreAccess};
@@ -14,6 +11,8 @@ use near_primitives::types::BlockHeight;
 use near_primitives::utils::to_timestamp;
 use rand::seq::SliceRandom;
 use rand::thread_rng;
+use std::cmp::min;
+use std::time::Duration as TimeDuration;
 use tracing::{debug, warn};
 
 /// Maximum number of block headers send over the network.
@@ -89,11 +88,12 @@ impl HeaderSync {
             Some(height) => height,
             None => chain.head()?.height,
         };
-        *sync_status = SyncStatus::HeaderSync {
+
+        sync_status.update(SyncStatus::HeaderSync {
             start_height,
             current_height: header_head.height,
             highest_height,
-        };
+        });
         self.syncing_peer = None;
         if let Some(peer) = highest_height_peers.choose(&mut thread_rng()).cloned() {
             if peer.highest_block_height > header_head.height {
diff --git a/chain/epoch-manager/src/types.rs b/chain/epoch-manager/src/types.rs
index 2f3a00b4a0b..d9e40956f10 100644
--- a/chain/epoch-manager/src/types.rs
+++ b/chain/epoch-manager/src/types.rs
@@ -1,7 +1,4 @@
-use std::collections::{BTreeMap, HashMap};
-
 use borsh::{BorshDeserialize, BorshSerialize};
-
 use near_primitives::block_header::BlockHeader;
 use near_primitives::challenge::SlashedValidator;
 use near_primitives::epoch_manager::block_info::BlockInfo;
@@ -12,6 +9,8 @@ use near_primitives::types::{
     AccountId, Balance, BlockHeight, EpochId, ShardId, ValidatorId, ValidatorStats,
 };
 use near_primitives::version::ProtocolVersion;
+use std::collections::{BTreeMap, HashMap};
+use tracing::{debug, debug_span};
 
 use crate::EpochManager;
@@ -106,6 +105,8 @@ impl EpochInfoAggregator {
         epoch_info: &EpochInfo,
         prev_block_height: BlockHeight,
     ) {
+        let _span =
+            debug_span!(target: "epoch_tracker", "update_tail", prev_block_height).entered();
         // Step 1: update block tracer
         let block_info_height = block_info.height();
         for height in prev_block_height + 1..=block_info_height {
@@ -119,6 +120,10 @@ impl EpochInfoAggregator {
                     })
                     .or_insert(ValidatorStats { produced: 1, expected: 1 });
             } else {
+                debug!(
+                    target: "epoch_tracker",
+                    block_producer = ?epoch_info.validator_account_id(block_producer_id),
+                    block_height = height, "Missed block");
                 entry
                     .and_modify(|validator_stats| {
                         validator_stats.expected += 1;
@@ -140,6 +145,13 @@ impl EpochInfoAggregator {
                 .and_modify(|stats| {
                     if *mask {
                         stats.produced += 1;
+                    } else {
+                        debug!(
+                            target: "epoch_tracker",
+                            chunk_validator = ?epoch_info.validator_account_id(chunk_validator_id),
+                            shard_id = i,
+                            block_height = prev_block_height + 1,
+                            "Missed chunk");
                     }
                     stats.expected += 1;
                 })
diff --git a/core/primitives/src/sharding.rs b/core/primitives/src/sharding.rs
index cabe9c1201c..be3d8899626 100644
--- a/core/primitives/src/sharding.rs
+++ b/core/primitives/src/sharding.rs
@@ -13,6 +13,7 @@ use reed_solomon_erasure::galois_8::{Field, ReedSolomon};
 use reed_solomon_erasure::ReconstructShard;
 use std::cmp::Ordering;
 use std::sync::Arc;
+use tracing::debug_span;
 
 #[derive(
     BorshSerialize,
@@ -1164,6 +1165,14 @@ impl EncodedShardChunk {
     }
 
     pub fn decode_chunk(&self, data_parts: usize) -> Result {
+        let _span = debug_span!(
+            target: "sharding",
+            "decode_chunk",
+            data_parts,
+            height_included = self.cloned_header().height_included(),
+            shard_id = self.cloned_header().shard_id(),
+            chunk_hash = ?self.chunk_hash())
+        .entered();
         let parts = match self {
             Self::V1(chunk) => &chunk.content.parts[0..data_parts],
             Self::V2(chunk) => &chunk.content.parts[0..data_parts],
diff --git a/runtime/near-vm/compiler-singlepass/src/compiler.rs b/runtime/near-vm/compiler-singlepass/src/compiler.rs
index 5db8f193908..097bc726321 100644
--- a/runtime/near-vm/compiler-singlepass/src/compiler.rs
+++ b/runtime/near-vm/compiler-singlepass/src/compiler.rs
@@ -155,7 +155,7 @@ impl Compiler for SinglepassCompiler {
                 reader.get_operators_reader()?.into_iter_with_offsets();
             while generator.has_control_frames() {
                 let (op, pos) =
-                    tracing::debug_span!(target: "near_vm", "parsing-next-operator")
+                    tracing::trace_span!(target: "near_vm", "parsing-next-operator")
                         .in_scope(|| operator_reader.next().unwrap())?;
                 generator.set_srcloc(pos as u32);
                 generator.feed_operator(op).map_err(to_compile_error)?;