Implement engine_getBlobsV1
#9723
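For orientation, here is a rough sketch of the API surface this PR adds; the trait and method names below are illustrative only, not necessarily reth's actual ones. `engine_getBlobsV1` takes a list of blob versioned hashes and returns, for each position, the matching blob and KZG proof if the node still holds it in its blob pool, or null otherwise.

use reth_primitives::B256;
use reth_rpc_types::BlobAndProofV1;

/// Illustrative trait only; in the PR this is wired through reth's engine API
/// and the `BlobStore` trait shown further down in the diff.
pub trait EngineGetBlobs {
    /// Whatever error the implementation surfaces (a blob store or RPC error).
    type Error;

    /// `engine_getBlobsV1`: for each requested versioned hash, return the blob
    /// and its KZG proof if present locally, or `None` otherwise, preserving
    /// the order of the input hashes.
    fn get_blobs_v1(
        &self,
        versioned_hashes: Vec<B256>,
    ) -> Result<Vec<Option<BlobAndProofV1>>, Self::Error>;
}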
@@ -14,6 +14,7 @@ workspace = true
[dependencies]

# ethereum
alloy-eips = { workspace = true }
alloy-primitives = { workspace = true, features = ["rand", "rlp", "serde"] }
alloy-rpc-types.workspace = true
alloy-rpc-types-admin.workspace = true

@@ -32,6 +33,7 @@ op-alloy-rpc-types-engine.workspace = true

# misc
jsonrpsee-types = { workspace = true, optional = true }
serde = { workspace = true }

[dev-dependencies]
# misc
@@ -61,6 +61,20 @@ pub use eth::{
    },
};

use alloy_eips::eip4844::BYTES_PER_BLOB;
use alloy_primitives::FixedBytes;
use serde::{Deserialize, Serialize};

/// Blob type returned in responses to `engine_getBlobsV1`.
// FIXME(sproul): move to alloy?
Review comment: yeah, we can include it in this PR, but also add it to alloy, and once we have a new release we can phase it out here.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BlobAndProofV1 {
    /// The blob data.
    pub blob: FixedBytes<BYTES_PER_BLOB>,
Review comment: this is a bit tricky, because this can lead to a stack overflow on deserialize in debug builds... I encountered this here as well: see the corresponding test. So we ideally want a sanity test, and perhaps we also need to do …
Review comment: I think we should definitely box this, because we do …
    /// The KZG proof for the blob.
    pub proof: FixedBytes<48>,
}

/// Optimism specific rpc types.
pub mod optimism {
    pub use op_alloy_rpc_types::*;
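To make the reviewers' boxing suggestion concrete, here is a minimal sketch of a boxed variant plus the round-trip sanity test they ask for. The type and test names are illustrative, not the PR's, and serde_json is assumed to be available as a dev-dependency. Boxing mainly keeps the ~128 KiB blob on the heap once constructed; note that serde's default `Box<T>` impl still builds the inner value before boxing it, which is exactly why a debug-build round-trip test is worth having.

use alloy_eips::eip4844::BYTES_PER_BLOB;
use alloy_primitives::FixedBytes;
use serde::{Deserialize, Serialize};

/// Sketch of a boxed variant: the blob is heap-allocated, so moving or
/// cloning the struct does not copy ~128 KiB across the stack.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BoxedBlobAndProofV1 {
    /// The blob data, boxed to keep the large array off the stack.
    pub blob: Box<FixedBytes<BYTES_PER_BLOB>>,
    /// The KZG proof for the blob.
    pub proof: FixedBytes<48>,
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Sanity check: a JSON round-trip of a full-size blob must not
    /// overflow the stack in debug builds and must preserve the data.
    #[test]
    fn blob_and_proof_serde_roundtrip() {
        let value = BoxedBlobAndProofV1 {
            blob: Box::new(FixedBytes::ZERO),
            proof: FixedBytes::ZERO,
        };
        let json = serde_json::to_string(&value).unwrap();
        let decoded: BoxedBlobAndProofV1 = serde_json::from_str(&json).unwrap();
        assert_eq!(value.blob, decoded.blob);
        assert_eq!(value.proof, decoded.proof);
    }
}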
@@ -3,6 +3,7 @@ use crate::blobstore::{
};
use parking_lot::RwLock;
use reth_primitives::B256;
use reth_rpc_types::BlobAndProofV1;
use std::{collections::HashMap, sync::Arc};

/// An in-memory blob store.

@@ -113,6 +114,32 @@ impl BlobStore for InMemoryBlobStore {
        Ok(items)
    }

    fn get_by_versioned_hashes(
        &self,
        versioned_hashes: &[B256],
    ) -> Result<Vec<Option<BlobAndProofV1>>, BlobStoreError> {
        let mut result = vec![None; versioned_hashes.len()];
        for (_tx_hash, blob_sidecar) in self.inner.store.read().iter() {
Review comment: whilst correct, linearly iterating the pool might be a less-than-ideal solution in the long run?
            // FIXME(sproul): these versioned hashes could be cached
            for (i, blob_versioned_hash) in blob_sidecar.versioned_hashes().enumerate() {
                for (j, target_versioned_hash) in versioned_hashes.iter().enumerate() {
                    if blob_versioned_hash == *target_versioned_hash {
                        result[j].get_or_insert_with(|| BlobAndProofV1 {
                            blob: blob_sidecar.blobs[i].clone(),
                            proof: blob_sidecar.proofs[i].clone(),
                        });
                    }
                }
            }

            // Return early if all blobs are found.
            if result.iter().all(|blob| blob.is_some()) {
                break;
            }
        }
        Ok(result)
    }

    fn data_size_hint(&self) -> Option<usize> {
        Some(self.inner.size_tracker.data_size())
    }
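The review comment and the FIXME above both point at the same limitation: the lookup scans every sidecar in the pool. A possible direction, purely as a sketch (none of these names exist in the PR), is a small secondary index from versioned hash to the owning transaction, maintained on insert and delete, so that `get_by_versioned_hashes` becomes one map lookup per requested hash.

use reth_primitives::B256;
use std::collections::HashMap;

/// Hypothetical side index: versioned hash -> (owning tx hash, blob position).
/// Kept in sync with the blob store on insert/delete.
#[derive(Debug, Default)]
struct VersionedHashIndex {
    by_versioned_hash: HashMap<B256, (B256, usize)>,
}

impl VersionedHashIndex {
    /// Record every versioned hash of a newly inserted sidecar.
    fn insert_sidecar(&mut self, tx_hash: B256, versioned_hashes: impl Iterator<Item = B256>) {
        for (i, vh) in versioned_hashes.enumerate() {
            self.by_versioned_hash.insert(vh, (tx_hash, i));
        }
    }

    /// Resolve a queried versioned hash to the sidecar entry holding it,
    /// without touching any other entries in the pool.
    fn lookup(&self, versioned_hash: &B256) -> Option<&(B256, usize)> {
        self.by_versioned_hash.get(versioned_hash)
    }

    /// Drop all hashes belonging to a removed transaction.
    fn remove_tx(&mut self, tx_hash: &B256) {
        self.by_versioned_hash.retain(|_, entry| &entry.0 != tx_hash);
    }
}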
Review comment: I think we can return a server error in this case.
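On the question of what the RPC layer should do when the blob store lookup itself fails: a hedged sketch of "return a server error" using jsonrpsee (the helper name is mine, and the PR's actual handler is not part of this diff). The point is simply to map the failure to the JSON-RPC internal error code rather than, say, an invalid-params error.

use jsonrpsee_types::ErrorObjectOwned;

/// Sketch: turn a blob store failure into a JSON-RPC internal (server) error.
/// Generic over the error type so it would work with reth's `BlobStoreError`
/// without restating its variants here.
fn internal_rpc_error(err: impl std::fmt::Display) -> ErrorObjectOwned {
    // -32603 is the standard JSON-RPC "internal error" code.
    ErrorObjectOwned::owned(-32603, format!("blob store error: {err}"), None::<()>)
}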