Skip to content
This repository has been archived by the owner on Feb 21, 2022. It is now read-only.

Commit

Permalink
Update service.rs
Browse files Browse the repository at this point in the history
Changes related to: paritytech/cumulus#835
  • Loading branch information
HCastano committed Jan 3, 2022
1 parent 0dbd5c4 commit 70b4c05
Show file tree
Hide file tree
Showing 3 changed files with 92 additions and 73 deletions.
32 changes: 31 additions & 1 deletion Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 2 additions & 0 deletions node/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,8 @@ cumulus-client-network = { git = 'https://github.com/paritytech/cumulus', branch
cumulus-client-service = { git = 'https://github.com/paritytech/cumulus', branch = "master" }
cumulus-primitives-core = { git = 'https://github.com/paritytech/cumulus', branch = "master" }
cumulus-primitives-parachain-inherent = { git = 'https://github.com/paritytech/cumulus', branch = "master" }
cumulus-relay-chain-interface = { git = 'https://github.com/paritytech/cumulus', branch = "master" }
cumulus-relay-chain-local = { git = 'https://github.com/paritytech/cumulus', branch = "master" }

# Polkadot dependencies
polkadot-cli = { git = 'https://github.com/paritytech/polkadot', branch = "master" }
Expand Down
131 changes: 59 additions & 72 deletions node/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,23 +17,23 @@
//! Service and ServiceFactory implementation. Specialized wrapper over substrate service.

// std
use std::sync::Arc;
use std::{sync::Arc, time::Duration};

// Local Runtime Types
use canvas_runtime::{
opaque::Block, AccountId, Balance, BlockNumber, Hash, Index as Nonce, RuntimeApi,
};

// Cumulus Imports
use cumulus_client_consensus_aura::{
build_aura_consensus, BuildAuraConsensusParams, SlotProportion,
};
use cumulus_client_consensus_aura::{AuraConsensus, BuildAuraConsensusParams, SlotProportion};
use cumulus_client_consensus_common::ParachainConsensus;
use cumulus_client_network::build_block_announce_validator;
use cumulus_client_network::BlockAnnounceValidator;
use cumulus_client_service::{
prepare_node_config, start_collator, start_full_node, StartCollatorParams, StartFullNodeParams,
};
use cumulus_primitives_core::ParaId;
use cumulus_relay_chain_interface::RelayChainInterface;
use cumulus_relay_chain_local::build_relay_chain_interface;

// Substrate Imports
use sc_client_api::ExecutorProvider;
Expand Down Expand Up @@ -130,6 +130,7 @@ where
config.wasm_method,
config.default_heap_pages,
config.max_runtime_instances,
config.runtime_cache_size,
);

let (client, backend, keystore_container, task_manager) =
Expand Down Expand Up @@ -232,7 +233,7 @@ where
Option<&Registry>,
Option<TelemetryHandle>,
&TaskManager,
&polkadot_service::NewFull<polkadot_service::Client>,
Arc<dyn RelayChainInterface>,
Arc<
sc_transaction_pool::FullPool<
Block,
Expand All @@ -253,27 +254,23 @@ where
let params = new_partial::<RuntimeApi, Executor, BIQ>(&parachain_config, build_import_queue)?;
let (mut telemetry, telemetry_worker_handle) = params.other;

let relay_chain_full_node =
cumulus_client_service::build_polkadot_full_node(polkadot_config, telemetry_worker_handle)
let client = params.client.clone();
let backend = params.backend.clone();
let mut task_manager = params.task_manager;

let (relay_chain_interface, collator_key) =
build_relay_chain_interface(polkadot_config, telemetry_worker_handle, &mut task_manager)
.map_err(|e| match e {
polkadot_service::Error::Sub(x) => x,
s => format!("{}", s).into(),
})?;

let client = params.client.clone();
let backend = params.backend.clone();
let block_announce_validator = build_block_announce_validator(
relay_chain_full_node.client.clone(),
id,
Box::new(relay_chain_full_node.network.clone()),
relay_chain_full_node.backend.clone(),
);
let block_announce_validator = BlockAnnounceValidator::new(relay_chain_interface.clone(), id);

let force_authoring = parachain_config.force_authoring;
let validator = parachain_config.role.is_authority();
let prometheus_registry = parachain_config.prometheus_registry().cloned();
let transaction_pool = params.transaction_pool.clone();
let mut task_manager = params.task_manager;
let import_queue = cumulus_client_service::SharedImportQueue::new(params.import_queue);
let (network, system_rpc_tx, start_network) =
sc_service::build_network(sc_service::BuildNetworkParams {
Expand All @@ -282,7 +279,9 @@ where
transaction_pool: transaction_pool.clone(),
spawn_handle: task_manager.spawn_handle(),
import_queue: import_queue.clone(),
block_announce_validator_builder: Some(Box::new(|_| block_announce_validator)),
block_announce_validator_builder: Some(Box::new(|_| {
Box::new(block_announce_validator)
})),
warp_sync: None,
})?;

Expand Down Expand Up @@ -325,7 +324,7 @@ where
prometheus_registry.as_ref(),
telemetry.as_ref().map(|t| t.handle()),
&task_manager,
&relay_chain_full_node,
relay_chain_interface.clone(),
transaction_pool,
network,
params.keystore_container.sync_keystore(),
Expand All @@ -340,10 +339,12 @@ where
announce_block,
client: client.clone(),
task_manager: &mut task_manager,
relay_chain_full_node,
relay_chain_interface,
spawner,
parachain_consensus,
import_queue,
collator_key,
slot_duration: Duration::from_secs(6),
};

start_collator(params).await?;
Expand All @@ -353,7 +354,7 @@ where
announce_block,
task_manager: &mut task_manager,
para_id: id,
relay_chain_full_node,
relay_chain_interface,
};

start_full_node(params)?;
Expand Down Expand Up @@ -429,7 +430,7 @@ pub async fn start_parachain_node(
prometheus_registry,
telemetry,
task_manager,
relay_chain_node,
relay_chain_interface,
transaction_pool,
sync_oracle,
keystore,
Expand All @@ -444,62 +445,48 @@ pub async fn start_parachain_node(
telemetry.clone(),
);

let relay_chain_backend = relay_chain_node.backend.clone();
let relay_chain_client = relay_chain_node.client.clone();
Ok(build_aura_consensus::<
sp_consensus_aura::sr25519::AuthorityPair,
_,
_,
_,
_,
_,
_,
_,
_,
_,
>(BuildAuraConsensusParams {
proposer_factory,
create_inherent_data_providers: move |_, (relay_parent, validation_data)| {
let parachain_inherent =
cumulus_primitives_parachain_inherent::ParachainInherentData::create_at_with_client(
relay_parent,
&relay_chain_client,
&*relay_chain_backend,
&validation_data,
id,
);
async move {
let time = sp_timestamp::InherentDataProvider::from_system_time();

let slot =
Ok(AuraConsensus::build::<sp_consensus_aura::sr25519::AuthorityPair, _, _, _, _, _, _>(
BuildAuraConsensusParams {
proposer_factory,
create_inherent_data_providers: move |_, (relay_parent, validation_data)| {
let parachain_inherent =
cumulus_primitives_parachain_inherent::ParachainInherentData::create_at(
relay_parent,
&relay_chain_interface,
&validation_data,
id,
);
async move {
let time = sp_timestamp::InherentDataProvider::from_system_time();

let slot =
sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_duration(
*time,
slot_duration.slot_duration(),
);

let parachain_inherent = parachain_inherent.ok_or_else(|| {
Box::<dyn std::error::Error + Send + Sync>::from(
"Failed to create parachain inherent",
)
})?;
Ok((time, slot, parachain_inherent))
}
let parachain_inherent = parachain_inherent.ok_or_else(|| {
Box::<dyn std::error::Error + Send + Sync>::from(
"Failed to create parachain inherent",
)
})?;
Ok((time, slot, parachain_inherent))
}
},
block_import: client.clone(),
para_client: client,
backoff_authoring_blocks: Option::<()>::None,
sync_oracle,
keystore,
force_authoring,
slot_duration,
// We got around 500ms for proposing
block_proposal_slot_portion: SlotProportion::new(1f32 / 24f32),
// And a maximum of 750ms if slots are skipped
max_block_proposal_slot_portion: Some(SlotProportion::new(1f32 / 16f32)),
telemetry,
},
block_import: client.clone(),
relay_chain_client: relay_chain_node.client.clone(),
relay_chain_backend: relay_chain_node.backend.clone(),
para_client: client,
backoff_authoring_blocks: Option::<()>::None,
sync_oracle,
keystore,
force_authoring,
slot_duration,
// We got around 500ms for proposing
block_proposal_slot_portion: SlotProportion::new(1f32 / 24f32),
// And a maximum of 750ms if slots are skipped
max_block_proposal_slot_portion: Some(SlotProportion::new(1f32 / 16f32)),
telemetry,
}))
))
},
)
.await
Expand Down

0 comments on commit 70b4c05

Please sign in to comment.