Skip to content

Commit

Permalink
Functionality for submitting several blobs in single DA block (#690)
Browse files Browse the repository at this point in the history
* Introduce scripts for testing deferred blob execution

* Clean up logging
* Scripts for make demo
* Improve logging
* Use preferred sequencer
* Log transaction hash in batch builder
  • Loading branch information
citizen-stig authored Aug 22, 2023
1 parent 597f324 commit d00fb58
Show file tree
Hide file tree
Showing 17 changed files with 138 additions and 28 deletions.
6 changes: 3 additions & 3 deletions adapters/celestia/src/da_service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ impl DaService for CelestiaService {

let _span = span!(Level::TRACE, "fetching finalized block", height = height);
// Fetch the header and relevant shares via RPC
info!("Fetching header at height={}...", height);
debug!("Fetching header at height={}...", height);
let header = client
.request::<serde_json::Value, _>("header.GetByHeight", vec![height])
.await?;
Expand Down Expand Up @@ -260,7 +260,7 @@ impl DaService for CelestiaService {
async fn send_transaction(&self, blob: &[u8]) -> Result<(), Self::Error> {
// https://node-rpc-docs.celestia.org/
let client = self.client.clone();
info!("Sending {} bytes of raw data to Celestia.", blob.len());
debug!("Sending {} bytes of raw data to Celestia.", blob.len());
let fee: u64 = 2000;
let namespace = self.rollup_namespace.0.to_vec();
let blob = blob.to_vec();
Expand Down Expand Up @@ -304,7 +304,7 @@ struct CelestiaBasicResponse {
}

impl CelestiaBasicResponse {
/// We assume that absence of `code` indicates that request was successfull
/// We assume that absence of `code` indicates that request was successful
pub fn is_success(&self) -> bool {
self.error_code.is_none()
}
Expand Down
21 changes: 21 additions & 0 deletions docker/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@

# Convenience targets for the local Celestia docker-compose environment.

COMPOSE_FILE=docker-compose.celestia.yaml
# Folder shared with the containers where the bridge nodes write their key files.
MOUNT_FOLDER=keyring-test
NODE_1_KEY_FILE=bridge_1_key.txt

# These targets are commands, not files; declare them phony so a stray
# file named e.g. "logs" or "up" never shadows them.
.PHONY: up down restart generate_configs logs

# Start the Celestia environment in the background.
up:
	docker-compose --file $(COMPOSE_FILE) up -d

# Stop the environment and remove generated key material and rollup configs.
down:
	docker-compose --file "$(COMPOSE_FILE)" down
	rm -rf $(MOUNT_FOLDER)/*.txt
	rm -rf config_*.toml

# Full cycle: tear down, start fresh, then regenerate the rollup configs.
restart: down up generate_configs

# Produce config_1.toml / config_2.toml from template.toml once node keys exist.
generate_configs:
	bash ./generate_configs.sh

# Follow logs of all compose services.
logs:
	docker-compose --file "$(COMPOSE_FILE)" logs --follow
19 changes: 19 additions & 0 deletions docker/generate_configs.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
#!/usr/bin/env bash
# Generates config_1.toml and config_2.toml for two rollup nodes from
# template.toml, injecting each Celestia bridge node's RPC auth token.

# Fail fast on errors and on use of unset variables.
set -euo pipefail

MOUNT_FOLDER=keyring-test
NODE_1_KEY_FILE=bridge_1_key.txt
NODE_2_KEY_FILE=bridge_2_key.txt

# Wait up to 300 seconds for a key file to be written by a bridge container;
# exit with an error instead of silently producing a config with an empty token.
wait_for_key_file() {
    local key_file="$1"
    local count=0
    while [[ ! -f "$key_file" && $count -lt 300 ]]; do
        sleep 1
        count=$((count + 1))
    done
    if [[ ! -f "$key_file" ]]; then
        echo "Timed out waiting for $key_file" >&2
        exit 1
    fi
}

# Extract the auth token from a key file: drop blank lines and the
# WARNING / "**DO NOT ..." banner lines surrounding it.
read_key() {
    grep -E -v '^$|^WARNING|^\*\*DO NOT' "$1"
}

# Node 1: default RPC address and bind port, dedicated storage path.
wait_for_key_file "$MOUNT_FOLDER/$NODE_1_KEY_FILE"
NODE_1_KEY="$(read_key "$MOUNT_FOLDER/$NODE_1_KEY_FILE")"
sed "s/^celestia_rpc_auth_token = .*/celestia_rpc_auth_token = \"$NODE_1_KEY\"/g" template.toml | \
  sed "s/^path = .*/path = \"demo_data_1\"/g" \
  > config_1.toml

# Node 2: second bridge's token, its own storage path, RPC address and port.
wait_for_key_file "$MOUNT_FOLDER/$NODE_2_KEY_FILE"
NODE_2_KEY="$(read_key "$MOUNT_FOLDER/$NODE_2_KEY_FILE")"
sed "s/^celestia_rpc_auth_token = .*/celestia_rpc_auth_token = \"$NODE_2_KEY\"/g" template.toml | \
  sed "s/^path = .*/path = \"demo_data_2\"/g" | \
  sed "s/^celestia_rpc_address = .*/celestia_rpc_address = \"http:\/\/127.0.0.1:46658\"/g" | \
  sed "s/^bind_port = .*/bind_port = 12346/g" \
  > config_2.toml
15 changes: 15 additions & 0 deletions docker/template.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# Rollup node configuration template. generate_configs.sh copies this file
# and rewrites per-node values (auth token, storage path, RPC address, port).

[da]
# Celestia node RPC auth token; left empty here, injected by generate_configs.sh.
celestia_rpc_auth_token = ""
# Default Celestia RPC endpoint; overridden for the second node.
celestia_rpc_address = "http://127.0.0.1:26658"
# Maximum accepted RPC response body, in bytes (100 MiB).
max_celestia_response_body_size = 104_857_600
celestia_rpc_timeout_seconds = 60

[storage]
# Local storage directory; rewritten per node (demo_data_1 / demo_data_2).
path = "demo_data"

[runner]
# First DA block height the runner processes.
start_height = 1

[runner.rpc_config]
bind_host = "127.0.0.1"
# Rollup RPC port; rewritten to 12346 for the second node.
bind_port = 12345
21 changes: 21 additions & 0 deletions examples/demo-rollup/submitting_1.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
#!/usr/bin/env bash

# Demo workload for the first rollup node: creates a token, registers a
# sequencer, then streams transfer transactions, publishing a batch after
# every third transaction.

# Fail fast if any sov-cli invocation errors out; without this a failed
# submit would go unnoticed and later nonces would be rejected anyway.
set -euo pipefail

RPC_ENDPOINT="http://127.0.0.1:12345"
PRIVATE_KEY="../test-data/keys/token_deployer_private_key.json"
SOV_CLI="../../target/debug/sov-cli"

echo "Preparing..."
# Nonces 0 and 1 are one-time setup transactions, published immediately.
$SOV_CLI submit-transaction "$PRIVATE_KEY" Bank ../test-data/requests/create_token.json 0 "$RPC_ENDPOINT"
$SOV_CLI submit-transaction "$PRIVATE_KEY" SequencerRegistry ../test-data/requests/register_sequencer.json 1 "$RPC_ENDPOINT"
$SOV_CLI publish-batch "$RPC_ENDPOINT"


sleep 1
echo "Starting submitting transfers"
for nonce in {2..30}; do
    echo "Submitting transaction with nonce $nonce"
    $SOV_CLI submit-transaction "$PRIVATE_KEY" Bank ../test-data/requests/transfer.json "$nonce" "$RPC_ENDPOINT"
    # Publish a batch after every third transaction so several blobs can
    # land in a single DA block.
    if [ $((nonce % 3)) -eq 0 ]; then
        $SOV_CLI publish-batch "$RPC_ENDPOINT"
    fi
done
15 changes: 15 additions & 0 deletions examples/demo-rollup/submitting_2.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
#!/usr/bin/env bash

# Demo workload for the second rollup node: streams transfer transactions
# from the minter key, publishing a batch after every third transaction.

# Fail fast if any sov-cli invocation errors out; without this a failed
# submit would go unnoticed and later nonces would be rejected anyway.
set -euo pipefail

RPC_ENDPOINT="http://127.0.0.1:12346"
PRIVATE_KEY="../test-data/keys/minter_private_key.json"
SOV_CLI="../../target/debug/sov-cli"

echo "Starting !!!"

for nonce in {0..30}; do
    echo "Submitting transaction with nonce $nonce"
    $SOV_CLI submit-transaction "$PRIVATE_KEY" Bank ../test-data/requests/transfer.json "$nonce" "$RPC_ENDPOINT"
    # Publish a batch after every third transaction so several blobs can
    # land in a single DA block.
    if [ $((nonce % 3)) -eq 0 ]; then
        $SOV_CLI publish-batch "$RPC_ENDPOINT"
    fi
done
2 changes: 1 addition & 1 deletion examples/demo-stf/src/genesis_config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ pub fn create_demo_genesis_config<C: Context>(
amount: LOCKED_AMOUNT,
token_address,
},
is_preferred_sequencer: false,
is_preferred_sequencer: true,
};

let value_setter_config = ValueSetterConfig {
Expand Down
11 changes: 4 additions & 7 deletions examples/demo-stf/src/sov-cli/native.rs
Original file line number Diff line number Diff line change
Expand Up @@ -275,16 +275,13 @@ pub async fn main() -> Result<(), anyhow::Error> {
Commands::PublishBatch { rpc_endpoint } => {
let client = HttpClientBuilder::default().build(rpc_endpoint).unwrap();

let response: String = client
client
.request("sequencer_publishBatch", [1u32])
.await
.context("Unable to publish batch")?;

// Print the result
println!(
"Your batch was submitted to the sequencer for publication. Response: {:?}",
response
);
println!("Your batch was submitted to the sequencer for publication");
}
Commands::MakeBatch { path_list } => {
let mut hex_encoded_txs = vec![];
Expand Down Expand Up @@ -378,8 +375,8 @@ mod test {
test_data.minter_address,
);

// The minted amount was 1000 and we transferred 200 and burned 300.
assert_eq!(balance, Some(500))
// The minted amount was 100_000, and we transferred 200 and burned 300.
assert_eq!(balance, Some(99_500))
}

#[test]
Expand Down
3 changes: 2 additions & 1 deletion examples/demo-stf/src/tests/stf_tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -189,11 +189,12 @@ pub mod test {
let value_setter_admin_private_key = DefaultPrivateKey::generate();
let election_admin_private_key = DefaultPrivateKey::generate();

let config = create_demo_config(
let mut config = create_demo_config(
LOCKED_AMOUNT + 1,
&value_setter_admin_private_key,
&election_admin_private_key,
);
config.sequencer_registry.is_preferred_sequencer = false;

let mut demo = create_new_demo(path);
demo.init_chain(config);
Expand Down
2 changes: 1 addition & 1 deletion examples/test-data/requests/create_token.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
"CreateToken": {
"salt": 11,
"token_name": "sov-test-token",
"initial_balance": 1000,
"initial_balance": 100000,
"minter_address": "sov15vspj48hpttzyvxu8kzq5klhvaczcpyxn6z6k0hwpwtzs4a6wkvqwr57gc",
"authorized_minters": [
"sov1l6n2cku82yfqld30lanm2nfw43n2auc8clw7r5u5m6s7p8jrm4zqrr8r94",
Expand Down
1 change: 0 additions & 1 deletion examples/test-data/requests/transfer.json
Original file line number Diff line number Diff line change
Expand Up @@ -7,4 +7,3 @@
}
}
}

2 changes: 1 addition & 1 deletion full-node/sov-sequencer/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -26,4 +26,4 @@ sov-modules-api = { path = "../../module-system/sov-modules-api", features = ["n
[dev-dependencies]
async-trait = { workspace = true }
sov-rollup-interface = { path = "../../rollup-interface", features = ["mocks"] }
tokio = { workspace = true }
tokio = { workspace = true }
10 changes: 6 additions & 4 deletions full-node/sov-sequencer/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,10 +25,11 @@ impl<B: BatchBuilder + Send + Sync, T: DaService + Send + Sync> Sequencer<B, T>
}

async fn submit_batch(&self) -> anyhow::Result<()> {
// Need to release lock before await, so Future is `Send`.
// But potentially it can create blobs that sent out of order.
// Can be improved with atomics, so new batch is only created after previous was submitted.
tracing::info!("Going to submit batch!");
// Need to release lock before await, so the Future is `Send`.
// But potentially it can create blobs that are sent out of order.
// It can be improved with atomics,
// so a new batch is only created after previous was submitted.
tracing::info!("Submit batch request has been received!");
let blob = {
let mut batch_builder = self
.batch_builder
Expand All @@ -37,6 +38,7 @@ impl<B: BatchBuilder + Send + Sync, T: DaService + Send + Sync> Sequencer<B, T>
batch_builder.get_next_blob()?
};
let blob: Vec<u8> = borsh::to_vec(&blob)?;

match self.da_service.send_transaction(&blob).await {
Ok(_) => Ok(()),
Err(e) => Err(anyhow!("failed to submit batch: {:?}", e)),
Expand Down
10 changes: 8 additions & 2 deletions full-node/sov-stf-runner/src/batch_builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,12 @@ use std::io::Cursor;

use anyhow::bail;
use borsh::BorshDeserialize;
use sov_modules_api::digest::Digest;
use sov_modules_api::transaction::Transaction;
use sov_modules_api::{Context, DispatchCall, PublicKey};
use sov_modules_api::{Context, DispatchCall, PublicKey, Spec};
use sov_rollup_interface::services::batch_builder::BatchBuilder;
use sov_state::WorkingSet;
use tracing::warn;
use tracing::{info, warn};

/// BatchBuilder that creates batches of transactions in the order they were submitted
/// Only transactions that were successfully dispatched are included.
Expand Down Expand Up @@ -110,6 +111,11 @@ where
// In order to fill batch as big as possible,
// we only check if valid tx can fit in the batch.
if current_batch_size + tx_len <= self.max_batch_size_bytes {
let tx_hash: [u8; 32] = <C as Spec>::Hasher::digest(&raw_tx[..]).into();
info!(
"Tx with hash 0x{} has been included in the batch",
hex::encode(tx_hash)
);
txs.push(raw_tx);
} else {
self.mempool.push_front(raw_tx);
Expand Down
16 changes: 12 additions & 4 deletions full-node/sov-stf-runner/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ pub use config::{from_toml_path, RollupConfig, RunnerConfig, StorageConfig};
use jsonrpsee::RpcModule;
pub use ledger_rpc::get_ledger_rpc;
use sov_db::ledger_db::{LedgerDB, SlotCommit};
use sov_rollup_interface::da::DaSpec;
use sov_rollup_interface::da::{BlobReaderTrait, DaSpec};
use sov_rollup_interface::services::da::DaService;
use sov_rollup_interface::stf::StateTransitionFunction;
use sov_rollup_interface::zk::Zkvm;
Expand Down Expand Up @@ -116,15 +116,23 @@ where
/// Runs the rollup.
pub async fn run(&mut self) -> Result<(), anyhow::Error> {
for height in self.start_height.. {
info!("Requesting data for height {}", height,);
debug!("Requesting data for height {}", height,);

let filtered_block = self.da_service.get_finalized_at(height).await?;
let mut blobs = self.da_service.extract_relevant_txs(&filtered_block);

info!(
"Extracted {} relevant blobs at height {}",
"Extracted {} relevant blobs at height {}: {:?}",
blobs.len(),
height
height,
blobs
.iter()
.map(|b| format!(
"sequencer={} blob_hash=0x{}",
b.sender(),
hex::encode(b.hash())
))
.collect::<Vec<_>>()
);

let mut data_to_commit = SlotCommit::new(filtered_block.clone());
Expand Down
4 changes: 2 additions & 2 deletions module-system/sov-modules-api/src/transaction.rs
Original file line number Diff line number Diff line change
Expand Up @@ -50,14 +50,14 @@ impl<C: Context> Transaction<C> {
pub fn new_signed_tx(priv_key: &C::PrivateKey, mut message: Vec<u8>, nonce: u64) -> Self {
// Since we own the message already, try to add the serialized nonce in-place.
// This lets us avoid a copy if the message vec has at least 8 bytes of extra capacity.
let orignal_length = message.len();
let original_length = message.len();
message.extend_from_slice(&nonce.to_le_bytes());

let pub_key = priv_key.pub_key();
let signature = priv_key.sign(&message);

// Don't forget to truncate the message back to its original length!
message.truncate(orignal_length);
message.truncate(original_length);

Self {
signature,
Expand Down
8 changes: 7 additions & 1 deletion module-system/sov-modules-stf-template/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -170,6 +170,11 @@ where
.get_blobs_for_this_slot(blobs, &mut batch_workspace)
.expect("blob selection must succeed, probably serialization failed");

info!(
"Selected {} blob(s) for execution in current slot",
selected_blobs.len()
);

self.checkpoint = Some(batch_workspace.checkpoint());

let mut batch_receipts = vec![];
Expand All @@ -179,8 +184,9 @@ where
.apply_blob(blob.as_mut_ref())
.unwrap_or_else(Into::into);
info!(
"priority blob #{} with blob_hash 0x{} has been applied with #{} transactions, sequencer outcome {:?}",
"blob #{} from sequencer {} with blob_hash 0x{} has been applied with #{} transactions, sequencer outcome {:?}",
blob_idx,
blob.as_mut_ref().sender(),
hex::encode(batch_receipt.batch_hash),
batch_receipt.tx_receipts.len(),
batch_receipt.inner
Expand Down

0 comments on commit d00fb58

Please sign in to comment.