From 6ec75345d7f5d0fbd2e373ed83b49dac48690615 Mon Sep 17 00:00:00 2001
From: Ben Adams
Date: Tue, 13 Aug 2024 03:20:16 +0100
Subject: [PATCH 1/4] Prewarm txns in order

---
 .../Nethermind.Consensus/Processing/BlockCachePreWarmer.cs | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/Nethermind/Nethermind.Consensus/Processing/BlockCachePreWarmer.cs b/src/Nethermind/Nethermind.Consensus/Processing/BlockCachePreWarmer.cs
index aab8d753950..05c224c059f 100644
--- a/src/Nethermind/Nethermind.Consensus/Processing/BlockCachePreWarmer.cs
+++ b/src/Nethermind/Nethermind.Consensus/Processing/BlockCachePreWarmer.cs
@@ -98,8 +98,13 @@ void WarmupWithdrawals(ParallelOptions parallelOptions, IReleaseSpec spec, Block
 void WarmupTransactions(ParallelOptions parallelOptions, IReleaseSpec spec, Block block, Hash256 stateRoot)
 {
     if (parallelOptions.CancellationToken.IsCancellationRequested) return;
-    Parallel.For(0, block.Transactions.Length, parallelOptions, i =>
+
+    int progress = 0;
+    Parallel.For(1, block.Transactions.Length, parallelOptions, _ =>
     {
+        // Process transactions in order, rather than the partitioning scheme from Parallel.For
+        int i = Interlocked.Increment(ref progress);
+
         // If the transaction has already been processed or being processed, exit early
         if (block.TransactionProcessed >= i) return;

From 87db1c601e2e38ced58fc0ca54d2b97a10b4fa2b Mon Sep 17 00:00:00 2001
From: Ben Adams
Date: Tue, 13 Aug 2024 13:12:31 +0100
Subject: [PATCH 2/4] Skip prewarming if PhysicalCoreCount too low

---
 .../Processing/BlockCachePreWarmer.cs         | 33 +++++++++++++------
 1 file changed, 23 insertions(+), 10 deletions(-)

diff --git a/src/Nethermind/Nethermind.Consensus/Processing/BlockCachePreWarmer.cs b/src/Nethermind/Nethermind.Consensus/Processing/BlockCachePreWarmer.cs
index 05c224c059f..e95fa6a7037 100644
--- a/src/Nethermind/Nethermind.Consensus/Processing/BlockCachePreWarmer.cs
+++ b/src/Nethermind/Nethermind.Consensus/Processing/BlockCachePreWarmer.cs
@@ -54,9 +54,15 @@ private void PreWarmCachesParallel(Block suggestedBlock, Hash256 parentStateRoot
     try
     {
+        var physicalCoreCount = RuntimeInformation.PhysicalCoreCount;
+        if (physicalCoreCount < 2)
+        {
+            if (_logger.IsDebug) _logger.Debug("Physical core count is less than 2. Skipping pre-warming.");
+            return;
+        }
         if (_logger.IsDebug) _logger.Debug($"Started pre-warming caches for block {suggestedBlock.Number}.");
-        ParallelOptions parallelOptions = new() { MaxDegreeOfParallelism = Math.Max(1, RuntimeInformation.PhysicalCoreCount - 2), CancellationToken = cancellationToken };
+        ParallelOptions parallelOptions = new() { MaxDegreeOfParallelism = physicalCoreCount - 1, CancellationToken = cancellationToken };
         IReleaseSpec spec = specProvider.GetSpec(suggestedBlock.Header);
         WarmupTransactions(parallelOptions, spec, suggestedBlock, parentStateRoot);
@@ -74,13 +80,18 @@ void WarmupWithdrawals(ParallelOptions parallelOptions, IReleaseSpec spec, Block
     if (parallelOptions.CancellationToken.IsCancellationRequested) return;
     if (spec.WithdrawalsEnabled && block.Withdrawals is not null)
     {
+        int progress = 0;
         Parallel.For(0, block.Withdrawals.Length, parallelOptions,
-            i =>
+            _ =>
             {
                 IReadOnlyTxProcessorSource env = _envPool.Get();
+                int i = 0;
                 try
                 {
                     using IReadOnlyTxProcessingScope scope = env.Build(stateRoot);
+                    // Process withdrawals in sequential order, rather than partitioning scheme from Parallel.For
+                    // Interlocked.Increment returns the incremented value, so subtract 1 to start at 0
+                    i = Interlocked.Increment(ref progress) - 1;
                     scope.WorldState.WarmUp(block.Withdrawals[i].Address);
                 }
                 catch (Exception ex)
@@ -100,20 +111,22 @@ void WarmupTransactions(ParallelOptions parallelOptions, IReleaseSpec spec, Bloc
     if (parallelOptions.CancellationToken.IsCancellationRequested) return;

     int progress = 0;
+    // We want to start at 1 which is the second transaction, giving total count one less than the length
     Parallel.For(1, block.Transactions.Length, parallelOptions, _ =>
     {
-        // Process transactions in order, rather than the partitioning scheme from Parallel.For
-        int i = Interlocked.Increment(ref progress);
-
-        // If the transaction has already been processed or being processed, exit early
-        if (block.TransactionProcessed >= i) return;
-
         using ThreadExtensions.Disposable handle = Thread.CurrentThread.BoostPriority();
-        Transaction tx = block.Transactions[i];
         IReadOnlyTxProcessorSource env = _envPool.Get();
         SystemTransaction systemTransaction = _systemTransactionPool.Get();
+        Transaction? tx = null;
         try
         {
+            // Process transactions in sequential order, rather than partitioning scheme from Parallel.For
+            // Interlocked.Increment returns the incremented value, so it will start at 1
+            int i = Interlocked.Increment(ref progress);
+            // If the transaction has already been processed or being processed, exit early
+            if (block.TransactionProcessed > i) return;
+
+            tx = block.Transactions[i];
             tx.CopyTo(systemTransaction);
             using IReadOnlyTxProcessingScope scope = env.Build(stateRoot);
             if (spec.UseTxAccessLists)
@@ -125,7 +138,7 @@ void WarmupTransactions(ParallelOptions parallelOptions, IReleaseSpec spec, Bloc
         }
         catch (Exception ex)
         {
-            if (_logger.IsDebug) _logger.Error($"Error pre-warming cache {tx.Hash}", ex);
+            if (_logger.IsDebug) _logger.Error($"Error pre-warming cache {tx?.Hash}", ex);
         }
         finally
         {

From 878e21e8b4bd359cd3f715f9eab15e143331caf9 Mon Sep 17 00:00:00 2001
From: Ben Adams
Date: Tue, 13 Aug 2024 14:06:27 +0100
Subject: [PATCH 3/4] Add tx index to trace

---
 .../Nethermind.Consensus/Processing/BlockCachePreWarmer.cs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Nethermind/Nethermind.Consensus/Processing/BlockCachePreWarmer.cs b/src/Nethermind/Nethermind.Consensus/Processing/BlockCachePreWarmer.cs
index e95fa6a7037..d8b1bc404d3 100644
--- a/src/Nethermind/Nethermind.Consensus/Processing/BlockCachePreWarmer.cs
+++ b/src/Nethermind/Nethermind.Consensus/Processing/BlockCachePreWarmer.cs
@@ -134,7 +134,7 @@ void WarmupTransactions(ParallelOptions parallelOptions, IReleaseSpec spec, Bloc
                 scope.WorldState.WarmUp(tx.AccessList); // eip-2930
             }
             TransactionResult result = scope.TransactionProcessor.Trace(systemTransaction, new BlockExecutionContext(block.Header.Clone()), NullTxTracer.Instance);
-            if (_logger.IsTrace) _logger.Trace($"Finished pre-warming cache for tx {tx.Hash} with {result}");
+            if (_logger.IsTrace) _logger.Trace($"Finished pre-warming cache for tx[{i}] {tx.Hash} with {result}");
         }
         catch (Exception ex)
         {

From 64189d34b0e0f10d9bab7d3dd9c9a65810522c89 Mon Sep 17 00:00:00 2001
From: Ben Adams
Date: Wed, 14 Aug 2024 13:48:04 +0100
Subject: [PATCH 4/4] Feedback

---
 .../Processing/BlockCachePreWarmer.cs         | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/src/Nethermind/Nethermind.Consensus/Processing/BlockCachePreWarmer.cs b/src/Nethermind/Nethermind.Consensus/Processing/BlockCachePreWarmer.cs
index d8b1bc404d3..c972b86f9d2 100644
--- a/src/Nethermind/Nethermind.Consensus/Processing/BlockCachePreWarmer.cs
+++ b/src/Nethermind/Nethermind.Consensus/Processing/BlockCachePreWarmer.cs
@@ -30,7 +30,7 @@ public Task PreWarmCaches(Block suggestedBlock, Hash256? parentStateRoot, Cancel
 {
     if (targetWorldState.ClearCache())
    {
-        if (_logger.IsWarn) _logger.Warn("Cashes are not empty. Clearing them.");
+        if (_logger.IsWarn) _logger.Warn("Caches are not empty. Clearing them.");
     }

     if (!IsGenesisBlock(parentStateRoot) && Environment.ProcessorCount > 2 && !cancellationToken.IsCancellationRequested)
@@ -111,8 +111,7 @@ void WarmupTransactions(ParallelOptions parallelOptions, IReleaseSpec spec, Bloc
     if (parallelOptions.CancellationToken.IsCancellationRequested) return;

     int progress = 0;
-    // We want to start at 1 which is the second transaction, giving total count one less than the length
-    Parallel.For(1, block.Transactions.Length, parallelOptions, _ =>
+    Parallel.For(0, block.Transactions.Length, parallelOptions, _ =>
     {
         using ThreadExtensions.Disposable handle = Thread.CurrentThread.BoostPriority();
         IReadOnlyTxProcessorSource env = _envPool.Get();
@@ -121,8 +120,8 @@ void WarmupTransactions(ParallelOptions parallelOptions, IReleaseSpec spec, Bloc
         try
         {
             // Process transactions in sequential order, rather than partitioning scheme from Parallel.For
-            // Interlocked.Increment returns the incremented value, so it will start at 1
-            int i = Interlocked.Increment(ref progress);
+            // Interlocked.Increment returns the incremented value, so subtract 1 to start at 0
+            int i = Interlocked.Increment(ref progress) - 1;
             // If the transaction has already been processed or being processed, exit early
             if (block.TransactionProcessed > i) return;