
Merge remote-tracking branch 'origin/master' into eth-types
arnetheduck committed Sep 28, 2024
2 parents fda307a + 438e183 commit b260392
Showing 49 changed files with 2,881 additions and 1,493 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -194,7 +194,7 @@ available.)
   cases when the `gc` is involved in a memory corruption or corruption
   camouflage.

-* ENABLE_LINE_NUMBERS=1
+* ENABLE_LINE_NUMBERS=1<br>
   Enables logger to print out source code location with log message

 * ENABLE_EVMC=1<br>
7 changes: 5 additions & 2 deletions fluffy/database/era1_db.nim
@@ -19,7 +19,7 @@ type Era1DB* = ref object
   ## a linear history of pre-merge execution chain data.
   path: string
   network: string
-  accumulator: FinishedAccumulator
+  accumulator: FinishedHistoricalHashesAccumulator
   files: seq[Era1File]

 proc getEra1File(db: Era1DB, era: Era1): Result[Era1File, string] =
@@ -51,7 +51,10 @@ proc getEra1File(db: Era1DB, era: Era1): Result[Era1File, string] =
   ok(f)

 proc new*(
-    T: type Era1DB, path: string, network: string, accumulator: FinishedAccumulator
+    T: type Era1DB,
+    path: string,
+    network: string,
+    accumulator: FinishedHistoricalHashesAccumulator,
 ): Era1DB =
   Era1DB(path: path, network: network, accumulator: accumulator)

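Usage note (not part of this commit): callers now pass the renamed accumulator type to the constructor. A minimal sketch, assuming the baked-in accumulator from loadAccumulator() in fluffy/network_metadata.nim (changed below); the directory and network name are illustrative:

# Hedged sketch only; the era1 directory and network name are made up.
let era1Db = Era1DB.new(
  "data/era1", # assumed on-disk location of the era1 files
  "mainnet",
  loadAccumulator(), # baked-in FinishedHistoricalHashesAccumulator
)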
6 changes: 4 additions & 2 deletions fluffy/eth_data/history_data_ssz_e2s.nim
@@ -19,11 +19,13 @@ export results

 # Reading SSZ data from files

-proc readAccumulator*(file: string): Result[FinishedAccumulator, string] =
+proc readAccumulator*(
+    file: string
+): Result[FinishedHistoricalHashesAccumulator, string] =
   let encodedAccumulator = ?readAllFile(file).mapErr(toString)

   try:
-    ok(SSZ.decode(encodedAccumulator, FinishedAccumulator))
+    ok(SSZ.decode(encodedAccumulator, FinishedHistoricalHashesAccumulator))
   except SerializationError as e:
     err("Failed decoding accumulator: " & e.msg)

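For illustration (not part of this commit), loading a snapshot from disk with the changed proc; the file name is made up:

# Hedged sketch only: decode an SSZ accumulator file with the new return type.
let res = readAccumulator("historical_hashes_accumulator.ssz")
if res.isOk():
  let accumulator: FinishedHistoricalHashesAccumulator = res.get()
  echo "Loaded accumulator with ", accumulator.historicalEpochs.len(), " epoch roots"
else:
  echo "Failed to load accumulator: ", res.error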
2 changes: 1 addition & 1 deletion fluffy/network/history/content/content_deprecated.nim
@@ -52,7 +52,7 @@ func toContentId(contentKey: ContentKeyByteList): ContentId =
   readUintBE[256](idHash.data)

 proc pruneDeprecatedAccumulatorRecords*(
-    accumulator: FinishedAccumulator, contentDB: ContentDB
+    accumulator: FinishedHistoricalHashesAccumulator, contentDB: ContentDB
 ) =
   info "Pruning deprecated accumulator records"
14 changes: 8 additions & 6 deletions fluffy/network/history/content/content_values.nim
@@ -27,18 +27,18 @@ const

 type
   ## BlockHeader types
-  AccumulatorProof* = array[15, Digest]
+  HistoricalHashesAccumulatorProof* = array[15, Digest]

   BlockHeaderProofType* = enum
     none = 0x00 # An SSZ Union None
-    accumulatorProof = 0x01
+    historicalHashesAccumulatorProof = 0x01

   BlockHeaderProof* = object
     case proofType*: BlockHeaderProofType
     of none:
       discard
-    of accumulatorProof:
-      accumulatorProof*: AccumulatorProof
+    of historicalHashesAccumulatorProof:
+      historicalHashesAccumulatorProof*: HistoricalHashesAccumulatorProof

   BlockHeaderWithProof* = object
     header*: ByteList[MAX_HEADER_LENGTH] # RLP data
@@ -68,8 +68,10 @@ type
   ReceiptByteList* = ByteList[MAX_RECEIPT_LENGTH] # RLP data
   PortalReceipts* = List[ReceiptByteList, MAX_TRANSACTION_COUNT]

-func init*(T: type BlockHeaderProof, proof: AccumulatorProof): T =
-  BlockHeaderProof(proofType: accumulatorProof, accumulatorProof: proof)
+func init*(T: type BlockHeaderProof, proof: HistoricalHashesAccumulatorProof): T =
+  BlockHeaderProof(
+    proofType: historicalHashesAccumulatorProof, historicalHashesAccumulatorProof: proof
+  )

 func init*(T: type BlockHeaderProof): T =
   BlockHeaderProof(proofType: none)
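Because BlockHeaderProof is an SSZ union, the renamed init overloads above are how callers select a branch. A construction sketch (not part of this commit):

# Hedged sketch only: building both union variants after the rename.
var proof: HistoricalHashesAccumulatorProof # array[15, Digest], zero-filled here

let preMerge = BlockHeaderProof.init(proof) # carries the accumulator proof
doAssert preMerge.proofType == BlockHeaderProofType.historicalHashesAccumulatorProof

let postMerge = BlockHeaderProof.init() # SSZ Union None branch, post-merge headers
doAssert postMerge.proofType == BlockHeaderProofType.none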
4 changes: 2 additions & 2 deletions fluffy/network/history/history_network.nim
@@ -31,7 +31,7 @@ type
     portalProtocol*: PortalProtocol
     contentDB*: ContentDB
     contentQueue*: AsyncQueue[(Opt[NodeId], ContentKeysList, seq[seq[byte]])]
-    accumulator*: FinishedAccumulator
+    accumulator*: FinishedHistoricalHashesAccumulator
     historicalRoots*: HistoricalRoots
     processContentLoop: Future[void]
     statusLogLoop: Future[void]
@@ -636,7 +636,7 @@ proc new*(
     baseProtocol: protocol.Protocol,
     contentDB: ContentDB,
     streamManager: StreamManager,
-    accumulator: FinishedAccumulator,
+    accumulator: FinishedHistoricalHashesAccumulator,
     historicalRoots: HistoricalRoots = loadHistoricalRoots(),
     bootstrapRecords: openArray[Record] = [],
     portalConfig: PortalProtocolConfig = defaultPortalProtocolConfig,
54 changes: 29 additions & 25 deletions fluffy/network/history/validation/historical_hashes_accumulator.nim
@@ -17,9 +17,8 @@ import

 export ssz_serialization, merkleization, proofs, eth_types_rlp, BlockHash

-# Header Accumulator, as per specification:
-# https://github.com/ethereum/portal-network-specs/blob/master/history-network.md#the-header-accumulator
-# But with the adjustment to finish the accumulator at merge point.
+# HistoricalHashesAccumulator, as per specification:
+# https://github.com/ethereum/portal-network-specs/blob/master/history/history-network.md#the-historical-hashes-accumulator

 const
   EPOCH_SIZE* = 8192 # block roots per epoch record
@@ -60,18 +59,19 @@ type
   # obviously much faster, so this second type is added for this usage.
   EpochRecordCached* = HashList[HeaderRecord, EPOCH_SIZE]

-  # HistoricalHashesAccumulator
-  Accumulator* = object
-    historicalEpochs*: List[common_types.Bytes32, int(MAX_HISTORICAL_EPOCHS)]
+  HistoricalHashesAccumulator* = object
+    historicalEpochs*: List[Bytes32, int(MAX_HISTORICAL_EPOCHS)]
     currentEpoch*: EpochRecord

   # HistoricalHashesAccumulator in its final state
-  FinishedAccumulator* = object
-    historicalEpochs*: List[common_types.Bytes32, int(MAX_HISTORICAL_EPOCHS)]
+  FinishedHistoricalHashesAccumulator* = object
+    historicalEpochs*: List[Bytes32, int(MAX_HISTORICAL_EPOCHS)]
     currentEpoch*: EpochRecord

-func init*(T: type Accumulator): T =
-  Accumulator(
+  Bytes32 = common_types.Bytes32
+
+func init*(T: type HistoricalHashesAccumulator): T =
+  HistoricalHashesAccumulator(
     historicalEpochs: List[Bytes32, int(MAX_HISTORICAL_EPOCHS)].init(@[]),
     currentEpoch: EpochRecord.init(@[]),
   )
@@ -81,7 +81,7 @@ func getEpochRecordRoot*(headerRecords: openArray[HeaderRecord]): Digest =

   hash_tree_root(epochRecord)

-func updateAccumulator*(a: var Accumulator, header: BlockHeader) =
+func updateAccumulator*(a: var HistoricalHashesAccumulator, header: BlockHeader) =
   doAssert(
     header.number < mergeBlockNumber, "No post merge blocks for header accumulator"
   )
@@ -95,7 +95,7 @@ func updateAccumulator*(a: var Accumulator, header: BlockHeader) =
   # TODO: It is a bit annoying to require an extra header + update call to
   # finish an epoch. However, if we were to move this after adding the
   # `HeaderRecord`, there would be no way to get the current total difficulty,
-  # unless another field is introduced in the `Accumulator` object.
+  # unless another field is introduced in the `HistoricalHashesAccumulator` object.
   if a.currentEpoch.len() == EPOCH_SIZE:
     let epochHash = hash_tree_root(a.currentEpoch)

@@ -110,17 +110,19 @@ func updateAccumulator*(a: var Accumulator, header: BlockHeader) =
   let res = a.currentEpoch.add(headerRecord)
   doAssert(res, "Can't fail because of currentEpoch length check")

-func finishAccumulator*(a: var Accumulator): FinishedAccumulator =
+func finishAccumulator*(
+    a: var HistoricalHashesAccumulator
+): FinishedHistoricalHashesAccumulator =
   # doAssert(a.currentEpoch[^2].totalDifficulty < TERMINAL_TOTAL_DIFFICULTY)
   # doAssert(a.currentEpoch[^1].totalDifficulty >= TERMINAL_TOTAL_DIFFICULTY)
   let epochHash = hash_tree_root(a.currentEpoch)

   doAssert(a.historicalEpochs.add(epochHash.data))

-  FinishedAccumulator(historicalEpochs: a.historicalEpochs)
+  FinishedHistoricalHashesAccumulator(historicalEpochs: a.historicalEpochs)

 ## Calls and helper calls for building header proofs and verifying headers
-## against the Accumulator and the header proofs.
+## against the HistoricalHashesAccumulator and the header proofs.

 func getEpochIndex*(blockNumber: uint64): uint64 =
   blockNumber div EPOCH_SIZE
@@ -144,7 +146,9 @@ func isPreMerge*(header: BlockHeader): bool =
   isPreMerge(header.number)

 func verifyProof(
-    a: FinishedAccumulator, header: BlockHeader, proof: openArray[Digest]
+    a: FinishedHistoricalHashesAccumulator,
+    header: BlockHeader,
+    proof: openArray[Digest],
 ): bool =
   let
     epochIndex = getEpochIndex(header)
@@ -153,13 +157,14 @@
     leave = hash_tree_root(BlockHash(data: header.blockHash().data))
     headerRecordIndex = getHeaderRecordIndex(header, epochIndex)

-    # TODO: Implement more generalized `get_generalized_index`
     gIndex = GeneralizedIndex(EPOCH_SIZE * 2 * 2 + (headerRecordIndex * 2))

   verify_merkle_multiproof(@[leave], proof, @[gIndex], epochRecordHash)

 func verifyAccumulatorProof*(
-    a: FinishedAccumulator, header: BlockHeader, proof: AccumulatorProof
+    a: FinishedHistoricalHashesAccumulator,
+    header: BlockHeader,
+    proof: HistoricalHashesAccumulatorProof,
 ): Result[void, string] =
   if header.isPreMerge():
     # Note: The proof is typed with correct depth, so no check on this is
@@ -172,14 +177,14 @@ func verifyAccumulatorProof*(
   err("Cannot verify post merge header with accumulator proof")

 func verifyHeader*(
-    a: FinishedAccumulator, header: BlockHeader, proof: BlockHeaderProof
+    a: FinishedHistoricalHashesAccumulator, header: BlockHeader, proof: BlockHeaderProof
 ): Result[void, string] =
   case proof.proofType
-  of BlockHeaderProofType.accumulatorProof:
-    a.verifyAccumulatorProof(header, proof.accumulatorProof)
+  of BlockHeaderProofType.historicalHashesAccumulatorProof:
+    a.verifyAccumulatorProof(header, proof.historicalHashesAccumulatorProof)
   of BlockHeaderProofType.none:
     if header.isPreMerge():
-      err("Pre merge header requires AccumulatorProof")
+      err("Pre merge header requires HistoricalHashesAccumulatorProof")
     else:
       # TODO:
       # Currently there is no proof solution for verifying headers post-merge.
@@ -191,17 +196,16 @@

 func buildProof*(
     header: BlockHeader, epochRecord: EpochRecord | EpochRecordCached
-): Result[AccumulatorProof, string] =
+): Result[HistoricalHashesAccumulatorProof, string] =
   doAssert(header.isPreMerge(), "Must be pre merge header")

   let
     epochIndex = getEpochIndex(header)
     headerRecordIndex = getHeaderRecordIndex(header, epochIndex)

-    # TODO: Implement more generalized `get_generalized_index`
     gIndex = GeneralizedIndex(EPOCH_SIZE * 2 * 2 + (headerRecordIndex * 2))

-  var proof: AccumulatorProof
+  var proof: HistoricalHashesAccumulatorProof
   ?epochRecord.build_proof(gIndex, proof)

   ok(proof)
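A note on the gIndex arithmetic shared by verifyProof and buildProof: EPOCH_SIZE is 8192 = 2^13, and an epoch record is an SSZ list of HeaderRecords, so merkleization appears to give a 13-level body tree plus one level for the list-length mix-in, putting record i's subtree root at generalized index 2 * EPOCH_SIZE + i; each HeaderRecord is a two-field container (blockHash, totalDifficulty), so its blockHash leaf is the left child at 2 * (2 * EPOCH_SIZE + i) = EPOCH_SIZE * 2 * 2 + i * 2, matching the expression in the code. To see the renamed API end to end, a minimal lifecycle sketch (not part of this commit; `headers` and `epochRecord` are assumed inputs: an ordered seq of pre-merge BlockHeaders and the EpochRecord covering headers[0]):

# Hedged sketch only, not from this diff: build, finish and verify with the
# renamed types. `headers` and `epochRecord` are assumed to be in scope.
var accumulator = HistoricalHashesAccumulator.init()
for header in headers:
  updateAccumulator(accumulator, header) # pre-merge headers only

let finished: FinishedHistoricalHashesAccumulator = finishAccumulator(accumulator)

let proof = buildProof(headers[0], epochRecord).expect("pre-merge header")
doAssert finished.verifyAccumulatorProof(headers[0], proof).isOk()
doAssert finished.verifyHeader(headers[0], BlockHeaderProof.init(proof)).isOk()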
4 changes: 2 additions & 2 deletions fluffy/network_metadata.nim
@@ -52,9 +52,9 @@

   historicalRootsSSZ* = slurp(portalConfigDir / "historical_roots.ssz")

-func loadAccumulator*(): FinishedAccumulator =
+func loadAccumulator*(): FinishedHistoricalHashesAccumulator =
   try:
-    SSZ.decode(historicalHashesAccumulatorSSZ, FinishedAccumulator)
+    SSZ.decode(historicalHashesAccumulatorSSZ, FinishedHistoricalHashesAccumulator)
   except SerializationError as err:
     raiseAssert "Invalid baked-in accumulator: " & err.msg

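Downstream (see fluffy/network/history/history_network.nim above), this baked-in accumulator is what gets passed to the renamed HistoryNetwork.new parameter. A wiring sketch, assuming `node`, `contentDB` and `streamManager` are already constructed, mirroring the call in the test helpers below:

# Hedged sketch only: hand the baked-in accumulator to the history network.
let historyNetwork = HistoryNetwork.new(
  PortalNetwork.none, # assumed sub-network tag, as in the test helpers
  node, contentDB, streamManager,
  loadAccumulator(), # FinishedHistoricalHashesAccumulator
)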
@@ -57,7 +57,7 @@ suite "Historical Hashes Accumulator":

     block: # Test invalid headers
       # Post merge block number must fail (> than latest header in accumulator)
-      var proof: AccumulatorProof
+      var proof: HistoricalHashesAccumulatorProof
       let header = BlockHeader(number: mergeBlockNumber)
       check verifyAccumulatorProof(accumulator, header, proof).isErr()

@@ -72,7 +72,7 @@
       check verifyAccumulatorProof(accumulator, header, proof.get()).isErr()

     block: # Test invalid proofs
-      var proof: AccumulatorProof
+      var proof: HistoricalHashesAccumulatorProof

       for i in headersToTest:
         check verifyAccumulatorProof(accumulator, headers[i], proof).isErr()
@@ -45,7 +45,7 @@ suite "Historical Hashes Accumulator Root":
       let header = res.get()
       headers[header.number] = header

-    var accumulator: Accumulator
+    var accumulator: HistoricalHashesAccumulator

     for i, hash in hashTreeRoots:
       updateAccumulator(accumulator, headers[i])
4 changes: 3 additions & 1 deletion fluffy/tests/history_network_tests/test_history_network.nim
@@ -24,7 +24,9 @@ type HistoryNode = ref object
   historyNetwork*: HistoryNetwork

 proc newHistoryNode(
-    rng: ref HmacDrbgContext, port: int, accumulator: FinishedAccumulator
+    rng: ref HmacDrbgContext,
+    port: int,
+    accumulator: FinishedHistoricalHashesAccumulator,
 ): HistoryNode =
   let
     node = initDiscoveryNode(rng, PrivateKey.random(rng[]), localAddress(port))
12 changes: 7 additions & 5 deletions fluffy/tests/history_network_tests/test_history_util.nim
@@ -31,8 +31,10 @@ proc buildHeadersWithProof*(

   ok(blockHeadersWithProof)

-func buildAccumulator*(headers: seq[BlockHeader]): Result[FinishedAccumulator, string] =
-  var accumulator: Accumulator
+func buildAccumulator*(
+    headers: seq[BlockHeader]
+): Result[FinishedHistoricalHashesAccumulator, string] =
+  var accumulator: HistoricalHashesAccumulator
   for header in headers:
     updateAccumulator(accumulator, header)

@@ -43,8 +45,8 @@ func buildAccumulator*(headers: seq[BlockHeader]): Result[FinishedAccumulator, string] =

 func buildAccumulatorData*(
     headers: seq[BlockHeader]
-): Result[(FinishedAccumulator, seq[EpochRecord]), string] =
-  var accumulator: Accumulator
+): Result[(FinishedHistoricalHashesAccumulator, seq[EpochRecord]), string] =
+  var accumulator: HistoricalHashesAccumulator
   var epochRecords: seq[EpochRecord]
   for header in headers:
     updateAccumulator(accumulator, header)

@@ -61,7 +63,7 @@

 func buildProof*(
     header: BlockHeader, epochRecords: seq[EpochRecord]
-): Result[AccumulatorProof, string] =
+): Result[HistoricalHashesAccumulatorProof, string] =
   let epochIndex = getEpochIndex(header)
   doAssert(epochIndex < uint64(epochRecords.len()))
   let epochRecord = epochRecords[epochIndex]
4 changes: 3 additions & 1 deletion fluffy/tests/state_network_tests/state_test_helpers.nim
@@ -119,7 +119,9 @@ proc newStateNode*(
       "", uint32.high, RadiusConfig(kind: Dynamic), node.localNode.id, inMemory = true
     )
     sm = StreamManager.new(node)
-    hn = HistoryNetwork.new(PortalNetwork.none, node, db, sm, FinishedAccumulator())
+    hn = HistoryNetwork.new(
+      PortalNetwork.none, node, db, sm, FinishedHistoricalHashesAccumulator()
+    )
     sn =
       StateNetwork.new(PortalNetwork.none, node, db, sm, historyNetwork = Opt.some(hn))
