diff --git a/bridges/modules/xcm-bridge-hub-router/src/mock.rs b/bridges/modules/xcm-bridge-hub-router/src/mock.rs index bb265e1925a2..095572883920 100644 --- a/bridges/modules/xcm-bridge-hub-router/src/mock.rs +++ b/bridges/modules/xcm-bridge-hub-router/src/mock.rs @@ -141,8 +141,8 @@ impl InspectMessageQueues for TestToBridgeHubSender { .iter() .map(|(location, message)| { ( - VersionedLocation::V4(location.clone()), - vec![VersionedXcm::V4(message.clone())], + VersionedLocation::from(location.clone()), + vec![VersionedXcm::from(message.clone())], ) }) .collect() diff --git a/bridges/snowbridge/pallets/system/src/benchmarking.rs b/bridges/snowbridge/pallets/system/src/benchmarking.rs index 20798b7c3493..939de9d40d13 100644 --- a/bridges/snowbridge/pallets/system/src/benchmarking.rs +++ b/bridges/snowbridge/pallets/system/src/benchmarking.rs @@ -169,7 +169,7 @@ mod benchmarks { T::Token::mint_into(&caller, amount)?; let relay_token_asset_id: Location = Location::parent(); - let asset = Box::new(VersionedLocation::V4(relay_token_asset_id)); + let asset = Box::new(VersionedLocation::from(relay_token_asset_id)); let asset_metadata = AssetMetadata { name: "wnd".as_bytes().to_vec().try_into().unwrap(), symbol: "wnd".as_bytes().to_vec().try_into().unwrap(), diff --git a/bridges/snowbridge/pallets/system/src/lib.rs b/bridges/snowbridge/pallets/system/src/lib.rs index 1e8a788b7a5a..eb3da095fe85 100644 --- a/bridges/snowbridge/pallets/system/src/lib.rs +++ b/bridges/snowbridge/pallets/system/src/lib.rs @@ -269,12 +269,12 @@ pub mod pallet { /// Lookup table for foreign token ID to native location relative to ethereum #[pallet::storage] pub type ForeignToNativeId = - StorageMap<_, Blake2_128Concat, TokenId, xcm::v4::Location, OptionQuery>; + StorageMap<_, Blake2_128Concat, TokenId, xcm::v5::Location, OptionQuery>; /// Lookup table for native location relative to ethereum to foreign token ID #[pallet::storage] pub type NativeToForeignId = - StorageMap<_, Blake2_128Concat, xcm::v4::Location, TokenId, OptionQuery>; + StorageMap<_, Blake2_128Concat, xcm::v5::Location, TokenId, OptionQuery>; #[pallet::genesis_config] #[derive(frame_support::DefaultNoBound)] diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index 6bb7395f6553..91f71558b54a 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -1036,7 +1036,7 @@ impl InspectMessageQueues for Pallet { } ( - VersionedLocation::V4((Parent, Parachain(para_id.into())).into()), + VersionedLocation::from(Location::new(1, Parachain(para_id.into()))), decoded_messages, ) }) diff --git a/cumulus/pallets/xcmp-queue/src/tests.rs b/cumulus/pallets/xcmp-queue/src/tests.rs index 5b02baf2310a..bf042f15ccc0 100644 --- a/cumulus/pallets/xcmp-queue/src/tests.rs +++ b/cumulus/pallets/xcmp-queue/src/tests.rs @@ -456,7 +456,7 @@ fn send_xcm_nested_works() { XcmpQueue::take_outbound_messages(usize::MAX), vec![( HRMP_PARA_ID.into(), - (XcmpMessageFormat::ConcatenatedVersionedXcm, VersionedXcm::V4(good.clone())) + (XcmpMessageFormat::ConcatenatedVersionedXcm, VersionedXcm::from(good.clone())) .encode(), )] ); @@ -512,7 +512,7 @@ fn hrmp_signals_are_prioritized() { // Without a signal we get the messages in order: let mut expected_msg = XcmpMessageFormat::ConcatenatedVersionedXcm.encode(); for _ in 0..31 { - expected_msg.extend(VersionedXcm::V4(message.clone()).encode()); + expected_msg.extend(VersionedXcm::from(message.clone()).encode()); } hypothetically!({ @@ -539,6 +539,7 @@ fn 
maybe_double_encoded_versioned_xcm_works() { // pre conditions assert_eq!(VersionedXcm::<()>::V3(Default::default()).encode(), &[3, 0]); assert_eq!(VersionedXcm::<()>::V4(Default::default()).encode(), &[4, 0]); + assert_eq!(VersionedXcm::<()>::V5(Default::default()).encode(), &[5, 0]); } // Now also testing a page instead of just concat messages. @@ -597,7 +598,7 @@ fn take_first_concatenated_xcm_good_recursion_depth_works() { for _ in 0..MAX_XCM_DECODE_DEPTH - 1 { good = Xcm(vec![SetAppendix(good)]); } - let good = VersionedXcm::V4(good); + let good = VersionedXcm::from(good); let page = good.encode(); assert_ok!(XcmpQueue::take_first_concatenated_xcm(&mut &page[..], &mut WeightMeter::new())); @@ -610,7 +611,7 @@ fn take_first_concatenated_xcm_good_bad_depth_errors() { for _ in 0..MAX_XCM_DECODE_DEPTH { bad = Xcm(vec![SetAppendix(bad)]); } - let bad = VersionedXcm::V4(bad); + let bad = VersionedXcm::from(bad); let page = bad.encode(); assert_err!( @@ -872,18 +873,18 @@ fn get_messages_works() { queued_messages, vec![ ( - VersionedLocation::V4(other_destination), + VersionedLocation::from(other_destination), vec![ - VersionedXcm::V4(Xcm(vec![ClearOrigin])), - VersionedXcm::V4(Xcm(vec![ClearOrigin])), + VersionedXcm::from(Xcm(vec![ClearOrigin])), + VersionedXcm::from(Xcm(vec![ClearOrigin])), ], ), ( - VersionedLocation::V4(destination), + VersionedLocation::from(destination), vec![ - VersionedXcm::V4(Xcm(vec![ClearOrigin])), - VersionedXcm::V4(Xcm(vec![ClearOrigin])), - VersionedXcm::V4(Xcm(vec![ClearOrigin])), + VersionedXcm::from(Xcm(vec![ClearOrigin])), + VersionedXcm::from(Xcm(vec![ClearOrigin])), + VersionedXcm::from(Xcm(vec![ClearOrigin])), ], ), ], diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs index 75b61d6a4cd7..1a075b9fe6be 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-rococo/src/lib.rs @@ -59,7 +59,7 @@ impl_accounts_helpers_for_parachain!(AssetHubRococo); impl_assert_events_helpers_for_parachain!(AssetHubRococo); impl_assets_helpers_for_system_parachain!(AssetHubRococo, Rococo); impl_assets_helpers_for_parachain!(AssetHubRococo); -impl_foreign_assets_helpers_for_parachain!(AssetHubRococo, xcm::v4::Location); +impl_foreign_assets_helpers_for_parachain!(AssetHubRococo, xcm::v5::Location); impl_xcm_helpers_for_parachain!(AssetHubRococo); impl_bridge_helpers_for_chain!( AssetHubRococo, diff --git a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs index c44f4b010c0a..3e240ed67482 100644 --- a/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/chains/parachains/assets/asset-hub-westend/src/lib.rs @@ -59,7 +59,7 @@ impl_accounts_helpers_for_parachain!(AssetHubWestend); impl_assert_events_helpers_for_parachain!(AssetHubWestend); impl_assets_helpers_for_system_parachain!(AssetHubWestend, Westend); impl_assets_helpers_for_parachain!(AssetHubWestend); -impl_foreign_assets_helpers_for_parachain!(AssetHubWestend, xcm::v4::Location); +impl_foreign_assets_helpers_for_parachain!(AssetHubWestend, xcm::v5::Location); 
impl_xcm_helpers_for_parachain!(AssetHubWestend); impl_bridge_helpers_for_chain!( AssetHubWestend, diff --git a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs index 07fde111d3dc..b661e8d1221b 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/lib.rs @@ -41,6 +41,7 @@ use polkadot_primitives::{AssignmentId, ValidatorId}; pub const XCM_V2: u32 = 2; pub const XCM_V3: u32 = 3; pub const XCM_V4: u32 = 4; +pub const XCM_V5: u32 = 5; pub const REF_TIME_THRESHOLD: u64 = 33; pub const PROOF_SIZE_THRESHOLD: u64 = 33; @@ -60,19 +61,19 @@ pub const PENPAL_B_ID: u32 = 2001; pub const ASSETS_PALLET_ID: u8 = 50; parameter_types! { - pub PenpalTeleportableAssetLocation: xcm::v4::Location - = xcm::v4::Location::new(1, [ - xcm::v4::Junction::Parachain(PENPAL_ID), - xcm::v4::Junction::PalletInstance(ASSETS_PALLET_ID), - xcm::v4::Junction::GeneralIndex(TELEPORTABLE_ASSET_ID.into()), + pub PenpalTeleportableAssetLocation: xcm::v5::Location + = xcm::v5::Location::new(1, [ + xcm::v5::Junction::Parachain(PENPAL_ID), + xcm::v5::Junction::PalletInstance(ASSETS_PALLET_ID), + xcm::v5::Junction::GeneralIndex(TELEPORTABLE_ASSET_ID.into()), ] ); pub PenpalSiblingSovereignAccount: AccountId = Sibling::from(PENPAL_ID).into_account_truncating(); - pub PenpalBTeleportableAssetLocation: xcm::v4::Location - = xcm::v4::Location::new(1, [ - xcm::v4::Junction::Parachain(PENPAL_B_ID), - xcm::v4::Junction::PalletInstance(ASSETS_PALLET_ID), - xcm::v4::Junction::GeneralIndex(TELEPORTABLE_ASSET_ID.into()), + pub PenpalBTeleportableAssetLocation: xcm::v5::Location + = xcm::v5::Location::new(1, [ + xcm::v5::Junction::Parachain(PENPAL_B_ID), + xcm::v5::Junction::PalletInstance(ASSETS_PALLET_ID), + xcm::v5::Junction::GeneralIndex(TELEPORTABLE_ASSET_ID.into()), ] ); pub PenpalBSiblingSovereignAccount: AccountId = Sibling::from(PENPAL_B_ID).into_account_truncating(); diff --git a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs index 68926b04bfe6..b2c2ee9bbccb 100644 --- a/cumulus/parachains/integration-tests/emulated/common/src/macros.rs +++ b/cumulus/parachains/integration-tests/emulated/common/src/macros.rs @@ -404,6 +404,230 @@ macro_rules! test_chain_can_claim_assets { }; } +#[macro_export] +macro_rules! test_can_estimate_and_pay_exact_fees { + ( $sender_para:ty, $asset_hub:ty, $receiver_para:ty, ($asset_id:expr, $amount:expr), $owner_prefix:ty ) => { + $crate::macros::paste::paste! { + // We first define the call we'll use throughout the test. 
+ fn get_call( + estimated_local_fees: impl Into<Asset>, + estimated_intermediate_fees: impl Into<Asset>, + estimated_remote_fees: impl Into<Asset>, + ) -> <$sender_para as Chain>::RuntimeCall { + type RuntimeCall = <$sender_para as Chain>::RuntimeCall; + + let beneficiary = [<$receiver_para Receiver>]::get(); + let xcm_in_destination = Xcm::<()>::builder_unsafe() + .pay_fees(estimated_remote_fees) + .deposit_asset(AllCounted(1), beneficiary) + .build(); + let ah_to_receiver = $asset_hub::sibling_location_of($receiver_para::para_id()); + let xcm_in_reserve = Xcm::<()>::builder_unsafe() + .pay_fees(estimated_intermediate_fees) + .deposit_reserve_asset( + AllCounted(1), + ah_to_receiver, + xcm_in_destination, + ) + .build(); + let sender_to_ah = $sender_para::sibling_location_of($asset_hub::para_id()); + let local_xcm = Xcm::<<$sender_para as Chain>::RuntimeCall>::builder() + .withdraw_asset(($asset_id, $amount)) + .pay_fees(estimated_local_fees) + .initiate_reserve_withdraw(AllCounted(1), sender_to_ah, xcm_in_reserve) + .build(); + + RuntimeCall::PolkadotXcm(pallet_xcm::Call::execute { + message: bx!(VersionedXcm::from(local_xcm)), + max_weight: Weight::from_parts(10_000_000_000, 500_000), + }) + } + + let destination = $sender_para::sibling_location_of($receiver_para::para_id()); + let sender = [<$sender_para Sender>]::get(); + let sender_as_seen_by_ah = $asset_hub::sibling_location_of($sender_para::para_id()); + let sov_of_sender_on_ah = $asset_hub::sovereign_account_id_of(sender_as_seen_by_ah.clone()); + let asset_owner = [<$owner_prefix AssetOwner>]::get(); + + // Fund parachain's sender account. + $sender_para::mint_foreign_asset( + <$sender_para as Chain>::RuntimeOrigin::signed(asset_owner.clone()), + $asset_id.clone().into(), + sender.clone(), + $amount * 2, + ); + + // Fund the parachain origin's SA on Asset Hub with the native tokens. + $asset_hub::fund_accounts(vec![(sov_of_sender_on_ah.clone(), $amount * 2)]); + + let beneficiary_id = [<$receiver_para Receiver>]::get(); + + let test_args = TestContext { + sender: sender.clone(), + receiver: beneficiary_id.clone(), + args: TestArgs::new_para( + destination, + beneficiary_id.clone(), + $amount, + ($asset_id, $amount).into(), + None, + 0, + ), + }; + let mut test = ParaToParaThroughAHTest::new(test_args); + + // We get these from the closure. + let mut local_execution_fees = 0; + let mut local_delivery_fees = 0; + let mut remote_message = VersionedXcm::from(Xcm::<()>(Vec::new())); + <$sender_para as TestExt>::execute_with(|| { + type Runtime = <$sender_para as Chain>::Runtime; + type OriginCaller = <$sender_para as Chain>::OriginCaller; + + let call = get_call( + (Parent, 100_000_000_000u128), + (Parent, 100_000_000_000u128), + (Parent, 100_000_000_000u128), + ); + let origin = OriginCaller::system(RawOrigin::Signed(sender.clone())); + let result = Runtime::dry_run_call(origin, call).unwrap(); + let local_xcm = result.local_xcm.unwrap().clone(); + let local_xcm_weight = Runtime::query_xcm_weight(local_xcm).unwrap(); + local_execution_fees = Runtime::query_weight_to_asset_fee( + local_xcm_weight, + VersionedAssetId::from(AssetId(Location::parent())), + ) + .unwrap(); + // We filter the result to get only the messages we are interested in. 
+ let (destination_to_query, messages_to_query) = &result + .forwarded_xcms + .iter() + .find(|(destination, _)| { + *destination == VersionedLocation::from(Location::new(1, [Parachain(1000)])) + }) + .unwrap(); + assert_eq!(messages_to_query.len(), 1); + remote_message = messages_to_query[0].clone(); + let delivery_fees = + Runtime::query_delivery_fees(destination_to_query.clone(), remote_message.clone()) + .unwrap(); + local_delivery_fees = $crate::xcm_helpers::get_amount_from_versioned_assets(delivery_fees); + }); + + // These are set in the AssetHub closure. + let mut intermediate_execution_fees = 0; + let mut intermediate_delivery_fees = 0; + let mut intermediate_remote_message = VersionedXcm::from(Xcm::<()>(Vec::new())); + <$asset_hub as TestExt>::execute_with(|| { + type Runtime = <$asset_hub as Chain>::Runtime; + type RuntimeCall = <$asset_hub as Chain>::RuntimeCall; + + // First we get the execution fees. + let weight = Runtime::query_xcm_weight(remote_message.clone()).unwrap(); + intermediate_execution_fees = Runtime::query_weight_to_asset_fee( + weight, + VersionedAssetId::from(AssetId(Location::new(1, []))), + ) + .unwrap(); + + // We have to do this to turn `VersionedXcm<()>` into `VersionedXcm<RuntimeCall>`. + let xcm_program = + VersionedXcm::from(Xcm::<RuntimeCall>::from(remote_message.clone().try_into().unwrap())); + + // Now we get the delivery fees to the final destination. + let result = + Runtime::dry_run_xcm(sender_as_seen_by_ah.clone().into(), xcm_program).unwrap(); + let (destination_to_query, messages_to_query) = &result + .forwarded_xcms + .iter() + .find(|(destination, _)| { + *destination == VersionedLocation::from(Location::new(1, [Parachain(2001)])) + }) + .unwrap(); + // There are actually two messages here. + // One was created when the message we sent from `$sender_para` arrived and was executed. + // The second one was created when we dry-ran the XCM. + // We could've gotten the message from the queue without having to dry-run, but + // offchain applications would have to dry-run, so we do it here as well. + intermediate_remote_message = messages_to_query[0].clone(); + let delivery_fees = Runtime::query_delivery_fees( + destination_to_query.clone(), + intermediate_remote_message.clone(), + ) + .unwrap(); + intermediate_delivery_fees = $crate::xcm_helpers::get_amount_from_versioned_assets(delivery_fees); + }); + + // Get the final execution fees in the destination. + let mut final_execution_fees = 0; + <$receiver_para as TestExt>::execute_with(|| { + type Runtime = <$sender_para as Chain>::Runtime; + + let weight = Runtime::query_xcm_weight(intermediate_remote_message.clone()).unwrap(); + final_execution_fees = + Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::from(AssetId(Location::parent()))) + .unwrap(); + }); + + // Dry-running is done. + $sender_para::reset_ext(); + $asset_hub::reset_ext(); + $receiver_para::reset_ext(); + + // Fund accounts again. + $sender_para::mint_foreign_asset( + <$sender_para as Chain>::RuntimeOrigin::signed(asset_owner), + $asset_id.clone().into(), + sender.clone(), + $amount * 2, + ); + $asset_hub::fund_accounts(vec![(sov_of_sender_on_ah, $amount * 2)]); + + // Actually run the extrinsic. 
+ let sender_assets_before = $sender_para::execute_with(|| { + type ForeignAssets = <$sender_para as [<$sender_para Pallet>]>::ForeignAssets; + >::balance($asset_id.clone().into(), &sender) + }); + let receiver_assets_before = $receiver_para::execute_with(|| { + type ForeignAssets = <$receiver_para as [<$receiver_para Pallet>]>::ForeignAssets; + >::balance($asset_id.clone().into(), &beneficiary_id) + }); + + test.set_assertion::<$sender_para>(sender_assertions); + test.set_assertion::<$asset_hub>(hop_assertions); + test.set_assertion::<$receiver_para>(receiver_assertions); + let call = get_call( + (Parent, local_execution_fees + local_delivery_fees), + (Parent, intermediate_execution_fees + intermediate_delivery_fees), + (Parent, final_execution_fees), + ); + test.set_call(call); + test.assert(); + + let sender_assets_after = $sender_para::execute_with(|| { + type ForeignAssets = <$sender_para as [<$sender_para Pallet>]>::ForeignAssets; + >::balance($asset_id.clone().into(), &sender) + }); + let receiver_assets_after = $receiver_para::execute_with(|| { + type ForeignAssets = <$receiver_para as [<$receiver_para Pallet>]>::ForeignAssets; + >::balance($asset_id.into(), &beneficiary_id) + }); + + // We know the exact fees on every hop. + assert_eq!(sender_assets_after, sender_assets_before - $amount); + assert_eq!( + receiver_assets_after, + receiver_assets_before + $amount - + local_execution_fees - + local_delivery_fees - + intermediate_execution_fees - + intermediate_delivery_fees - + final_execution_fees + ); + } + }; +} + #[macro_export] macro_rules! test_dry_run_transfer_across_pk_bridge { ( $sender_asset_hub:ty, $sender_bridge_hub:ty, $destination:expr ) => { diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs index 12f440fdefee..453308a0c133 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/lib.rs @@ -26,10 +26,7 @@ mod imports { }; // Polkadot - pub use xcm::{ - prelude::{AccountId32 as AccountId32Junction, *}, - v3, - }; + pub use xcm::prelude::{AccountId32 as AccountId32Junction, *}; pub use xcm_executor::traits::TransferType; // Cumulus diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs index 302f71f89f83..698ef2c9e792 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/reserve_transfer.rs @@ -1599,7 +1599,7 @@ fn reserve_withdraw_from_untrusted_reserve_fails() { ]); let result = ::PolkadotXcm::execute( signed_origin, - bx!(xcm::VersionedXcm::V4(xcm)), + bx!(xcm::VersionedXcm::from(xcm)), Weight::MAX, ); assert!(result.is_err()); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs index 470b4d0f389e..7fde929c0dcb 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/teleport.rs @@ 
-574,7 +574,7 @@ fn teleport_to_untrusted_chain_fails() { ]); let result = ::PolkadotXcm::execute( signed_origin, - bx!(xcm::VersionedXcm::V4(xcm)), + bx!(xcm::VersionedXcm::from(xcm)), Weight::MAX, ); assert!(result.is_err()); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs index 3320392b495d..69111d38bcac 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/treasury.rs @@ -71,11 +71,12 @@ fn spend_roc_on_asset_hub() { let teleport_call = RuntimeCall::Utility(pallet_utility::Call::::dispatch_as { as_origin: bx!(RococoOriginCaller::system(RawOrigin::Signed(treasury_account))), call: bx!(RuntimeCall::XcmPallet(pallet_xcm::Call::::teleport_assets { - dest: bx!(VersionedLocation::V4(asset_hub_location.clone())), - beneficiary: bx!(VersionedLocation::V4(treasury_location)), - assets: bx!(VersionedAssets::V4( - Asset { id: native_asset.clone().into(), fun: treasury_balance.into() }.into() - )), + dest: bx!(VersionedLocation::from(asset_hub_location.clone())), + beneficiary: bx!(VersionedLocation::from(treasury_location)), + assets: bx!(VersionedAssets::from(Assets::from(Asset { + id: native_asset.clone().into(), + fun: treasury_balance.into() + }))), fee_asset_item: 0, })), }); @@ -110,12 +111,12 @@ fn spend_roc_on_asset_hub() { let native_asset = Location::parent(); let treasury_spend_call = RuntimeCall::Treasury(pallet_treasury::Call::::spend { - asset_kind: bx!(VersionedLocatableAsset::V4 { - location: asset_hub_location.clone(), - asset_id: native_asset.into(), - }), + asset_kind: bx!(VersionedLocatableAsset::from(( + asset_hub_location.clone(), + native_asset.into() + ))), amount: treasury_spend_balance, - beneficiary: bx!(VersionedLocation::V4(alice_location)), + beneficiary: bx!(VersionedLocation::from(alice_location)), valid_from: None, }); @@ -170,16 +171,12 @@ fn create_and_claim_treasury_spend_in_usdt() { // treasury account on a sibling parachain. let treasury_account = ahr_xcm_config::LocationToAccountId::convert_location(&treasury_location).unwrap(); - let asset_hub_location = - v3::Location::new(0, v3::Junction::Parachain(AssetHubRococo::para_id().into())); + let asset_hub_location = Location::new(0, Parachain(AssetHubRococo::para_id().into())); let root = ::RuntimeOrigin::root(); - // asset kind to be spend from the treasury. - let asset_kind = VersionedLocatableAsset::V3 { - location: asset_hub_location, - asset_id: v3::AssetId::Concrete( - (v3::Junction::PalletInstance(50), v3::Junction::GeneralIndex(USDT_ID.into())).into(), - ), - }; + // asset kind to be spent from the treasury. + let asset_kind: VersionedLocatableAsset = + (asset_hub_location, AssetId((PalletInstance(50), GeneralIndex(USDT_ID.into())).into())) + .into(); // treasury spend beneficiary. 
let alice: AccountId = Rococo::account_id_of(ALICE); let bob: AccountId = Rococo::account_id_of(BOB); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/xcm_fee_estimation.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/xcm_fee_estimation.rs index aa0e183ecdda..ea210d4f3b65 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/xcm_fee_estimation.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-rococo/src/tests/xcm_fee_estimation.rs @@ -16,10 +16,8 @@ //! Tests for XCM fee estimation in the runtime. use crate::imports::*; -use frame_support::{ - dispatch::RawOrigin, - sp_runtime::{traits::Dispatchable, DispatchResult}, -}; +use emulated_integration_tests_common::test_can_estimate_and_pay_exact_fees; +use frame_support::dispatch::RawOrigin; use xcm_runtime_apis::{ dry_run::runtime_decl_for_dry_run_api::DryRunApiV1, fees::runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, @@ -76,16 +74,6 @@ fn receiver_assertions(test: ParaToParaThroughAHTest) { ); } -fn transfer_assets_para_to_para_through_ah_dispatchable( - test: ParaToParaThroughAHTest, -) -> DispatchResult { - let call = transfer_assets_para_to_para_through_ah_call(test.clone()); - match call.dispatch(test.signed_origin) { - Ok(_) => Ok(()), - Err(error_with_post_info) => Err(error_with_post_info.error), - } -} - fn transfer_assets_para_to_para_through_ah_call( test: ParaToParaThroughAHTest, ) -> ::RuntimeCall { @@ -100,7 +88,7 @@ fn transfer_assets_para_to_para_through_ah_call( dest: bx!(test.args.dest.into()), assets: bx!(test.args.assets.clone().into()), assets_transfer_type: bx!(TransferType::RemoteReserve(asset_hub_location.clone().into())), - remote_fees_id: bx!(VersionedAssetId::V4(AssetId(Location::new(1, [])))), + remote_fees_id: bx!(VersionedAssetId::from(AssetId(Location::new(1, [])))), fees_transfer_type: bx!(TransferType::RemoteReserve(asset_hub_location.into())), custom_xcm_on_dest: bx!(VersionedXcm::from(custom_xcm_on_dest)), weight_limit: test.args.weight_limit, @@ -151,7 +139,7 @@ fn multi_hop_works() { // We get them from the PenpalA closure. let mut delivery_fees_amount = 0; - let mut remote_message = VersionedXcm::V4(Xcm(Vec::new())); + let mut remote_message = VersionedXcm::from(Xcm(Vec::new())); ::execute_with(|| { type Runtime = ::Runtime; type OriginCaller = ::OriginCaller; @@ -164,7 +152,7 @@ fn multi_hop_works() { .forwarded_xcms .iter() .find(|(destination, _)| { - *destination == VersionedLocation::V4(Location::new(1, [Parachain(1000)])) + *destination == VersionedLocation::from(Location::new(1, [Parachain(1000)])) }) .unwrap(); assert_eq!(messages_to_query.len(), 1); @@ -178,7 +166,7 @@ fn multi_hop_works() { // These are set in the AssetHub closure. 
let mut intermediate_execution_fees = 0; let mut intermediate_delivery_fees_amount = 0; - let mut intermediate_remote_message = VersionedXcm::V4(Xcm::<()>(Vec::new())); + let mut intermediate_remote_message = VersionedXcm::from(Xcm::<()>(Vec::new())); ::execute_with(|| { type Runtime = ::Runtime; type RuntimeCall = ::RuntimeCall; @@ -187,13 +175,14 @@ fn multi_hop_works() { let weight = Runtime::query_xcm_weight(remote_message.clone()).unwrap(); intermediate_execution_fees = Runtime::query_weight_to_asset_fee( weight, - VersionedAssetId::V4(Location::new(1, []).into()), + VersionedAssetId::from(AssetId(Location::new(1, []))), ) .unwrap(); // We have to do this to turn `VersionedXcm<()>` into `VersionedXcm`. - let xcm_program = - VersionedXcm::V4(Xcm::::from(remote_message.clone().try_into().unwrap())); + let xcm_program = VersionedXcm::from(Xcm::::from( + remote_message.clone().try_into().unwrap(), + )); // Now we get the delivery fees to the final destination. let result = @@ -202,7 +191,7 @@ fn multi_hop_works() { .forwarded_xcms .iter() .find(|(destination, _)| { - *destination == VersionedLocation::V4(Location::new(1, [Parachain(2001)])) + *destination == VersionedLocation::from(Location::new(1, [Parachain(2001)])) }) .unwrap(); // There's actually two messages here. @@ -225,9 +214,11 @@ fn multi_hop_works() { type Runtime = ::Runtime; let weight = Runtime::query_xcm_weight(intermediate_remote_message.clone()).unwrap(); - final_execution_fees = - Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::V4(Parent.into())) - .unwrap(); + final_execution_fees = Runtime::query_weight_to_asset_fee( + weight, + VersionedAssetId::from(AssetId(Location::parent())), + ) + .unwrap(); }); // Dry-running is done. @@ -257,7 +248,8 @@ fn multi_hop_works() { test.set_assertion::(sender_assertions); test.set_assertion::(hop_assertions); test.set_assertion::(receiver_assertions); - test.set_dispatchable::(transfer_assets_para_to_para_through_ah_dispatchable); + let call = transfer_assets_para_to_para_through_ah_call(test.clone()); + test.set_call(call); test.assert(); let sender_assets_after = PenpalA::execute_with(|| { @@ -284,3 +276,14 @@ fn multi_hop_works() { final_execution_fees ); } + +#[test] +fn multi_hop_pay_fees_works() { + test_can_estimate_and_pay_exact_fees!( + PenpalA, + AssetHubRococo, + PenpalB, + (Parent, 1_000_000_000_000u128), + Penpal + ); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs index 9520659712fc..124ec2ec1f66 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/fellowship_treasury.rs @@ -34,10 +34,9 @@ fn create_and_claim_treasury_spend() { let asset_hub_location = Location::new(1, [Parachain(AssetHubWestend::para_id().into())]); let root = ::RuntimeOrigin::root(); // asset kind to be spent from the treasury. - let asset_kind = VersionedLocatableAsset::V4 { - location: asset_hub_location, - asset_id: AssetId((PalletInstance(50), GeneralIndex(USDT_ID.into())).into()), - }; + let asset_kind: VersionedLocatableAsset = + (asset_hub_location, AssetId((PalletInstance(50), GeneralIndex(USDT_ID.into())).into())) + .into(); // treasury spend beneficiary. 
let alice: AccountId = Westend::account_id_of(ALICE); let bob: AccountId = CollectivesWestend::account_id_of(BOB); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs index 10c27c338ec7..de510e5696a4 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/reserve_transfer.rs @@ -1603,7 +1603,7 @@ fn reserve_withdraw_from_untrusted_reserve_fails() { ]); let result = ::PolkadotXcm::execute( signed_origin, - bx!(xcm::VersionedXcm::V4(xcm)), + bx!(xcm::VersionedXcm::from(xcm)), Weight::MAX, ); assert!(result.is_err()); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs index ee0f297792f8..d3adfdc36dff 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/teleport.rs @@ -577,7 +577,7 @@ fn teleport_to_untrusted_chain_fails() { ]); let result = ::PolkadotXcm::execute( signed_origin, - bx!(xcm::VersionedXcm::V4(xcm)), + bx!(xcm::VersionedXcm::from(xcm)), Weight::MAX, ); assert!(result.is_err()); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs index b70967184387..c303e6411d33 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/treasury.rs @@ -32,11 +32,10 @@ fn create_and_claim_treasury_spend() { ahw_xcm_config::LocationToAccountId::convert_location(&treasury_location).unwrap(); let asset_hub_location = Location::new(0, Parachain(AssetHubWestend::para_id().into())); let root = ::RuntimeOrigin::root(); - // asset kind to be spend from the treasury. - let asset_kind = VersionedLocatableAsset::V4 { - location: asset_hub_location, - asset_id: AssetId([PalletInstance(50), GeneralIndex(USDT_ID.into())].into()), - }; + // asset kind to be spent from the treasury. + let asset_kind: VersionedLocatableAsset = + (asset_hub_location, AssetId([PalletInstance(50), GeneralIndex(USDT_ID.into())].into())) + .into(); // treasury spend beneficiary. 
let alice: AccountId = Westend::account_id_of(ALICE); let bob: AccountId = Westend::account_id_of(BOB); diff --git a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs index 037d6604ea4d..ec05a074c5ac 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/assets/asset-hub-westend/src/tests/xcm_fee_estimation.rs @@ -17,10 +17,8 @@ use crate::imports::*; -use frame_support::{ - dispatch::RawOrigin, - sp_runtime::{traits::Dispatchable, DispatchResult}, -}; +use emulated_integration_tests_common::test_can_estimate_and_pay_exact_fees; +use frame_support::dispatch::RawOrigin; use xcm_runtime_apis::{ dry_run::runtime_decl_for_dry_run_api::DryRunApiV1, fees::runtime_decl_for_xcm_payment_api::XcmPaymentApiV1, @@ -77,22 +75,12 @@ fn receiver_assertions(test: ParaToParaThroughAHTest) { ); } -fn transfer_assets_para_to_para_through_ah_dispatchable( - test: ParaToParaThroughAHTest, -) -> DispatchResult { - let call = transfer_assets_para_to_para_through_ah_call(test.clone()); - match call.dispatch(test.signed_origin) { - Ok(_) => Ok(()), - Err(error_with_post_info) => Err(error_with_post_info.error), - } -} - fn transfer_assets_para_to_para_through_ah_call( test: ParaToParaThroughAHTest, ) -> ::RuntimeCall { type RuntimeCall = ::RuntimeCall; - let asset_hub_location: Location = PenpalB::sibling_location_of(AssetHubWestend::para_id()); + let asset_hub_location: Location = PenpalA::sibling_location_of(AssetHubWestend::para_id()); let custom_xcm_on_dest = Xcm::<()>(vec![DepositAsset { assets: Wild(AllCounted(test.args.assets.len() as u32)), beneficiary: test.args.beneficiary, @@ -101,7 +89,7 @@ fn transfer_assets_para_to_para_through_ah_call( dest: bx!(test.args.dest.into()), assets: bx!(test.args.assets.clone().into()), assets_transfer_type: bx!(TransferType::RemoteReserve(asset_hub_location.clone().into())), - remote_fees_id: bx!(VersionedAssetId::V4(AssetId(Location::new(1, [])))), + remote_fees_id: bx!(VersionedAssetId::from(AssetId(Location::parent()))), fees_transfer_type: bx!(TransferType::RemoteReserve(asset_hub_location.into())), custom_xcm_on_dest: bx!(VersionedXcm::from(custom_xcm_on_dest)), weight_limit: test.args.weight_limit, @@ -153,7 +141,7 @@ fn multi_hop_works() { // We get them from the PenpalA closure. let mut delivery_fees_amount = 0; - let mut remote_message = VersionedXcm::V4(Xcm(Vec::new())); + let mut remote_message = VersionedXcm::from(Xcm(Vec::new())); ::execute_with(|| { type Runtime = ::Runtime; type OriginCaller = ::OriginCaller; @@ -166,7 +154,7 @@ fn multi_hop_works() { .forwarded_xcms .iter() .find(|(destination, _)| { - *destination == VersionedLocation::V4(Location::new(1, [Parachain(1000)])) + *destination == VersionedLocation::from(Location::new(1, [Parachain(1000)])) }) .unwrap(); assert_eq!(messages_to_query.len(), 1); @@ -180,7 +168,7 @@ fn multi_hop_works() { // These are set in the AssetHub closure. 
let mut intermediate_execution_fees = 0; let mut intermediate_delivery_fees_amount = 0; - let mut intermediate_remote_message = VersionedXcm::V4(Xcm::<()>(Vec::new())); + let mut intermediate_remote_message = VersionedXcm::from(Xcm::<()>(Vec::new())); ::execute_with(|| { type Runtime = ::Runtime; type RuntimeCall = ::RuntimeCall; @@ -189,13 +177,14 @@ fn multi_hop_works() { let weight = Runtime::query_xcm_weight(remote_message.clone()).unwrap(); intermediate_execution_fees = Runtime::query_weight_to_asset_fee( weight, - VersionedAssetId::V4(Location::new(1, []).into()), + VersionedAssetId::from(AssetId(Location::new(1, []))), ) .unwrap(); // We have to do this to turn `VersionedXcm<()>` into `VersionedXcm`. - let xcm_program = - VersionedXcm::V4(Xcm::::from(remote_message.clone().try_into().unwrap())); + let xcm_program = VersionedXcm::from(Xcm::::from( + remote_message.clone().try_into().unwrap(), + )); // Now we get the delivery fees to the final destination. let result = @@ -204,7 +193,7 @@ fn multi_hop_works() { .forwarded_xcms .iter() .find(|(destination, _)| { - *destination == VersionedLocation::V4(Location::new(1, [Parachain(2001)])) + *destination == VersionedLocation::from(Location::new(1, [Parachain(2001)])) }) .unwrap(); // There's actually two messages here. @@ -228,7 +217,7 @@ fn multi_hop_works() { let weight = Runtime::query_xcm_weight(intermediate_remote_message.clone()).unwrap(); final_execution_fees = - Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::V4(Parent.into())) + Runtime::query_weight_to_asset_fee(weight, VersionedAssetId::from(Location::parent())) .unwrap(); }); @@ -259,7 +248,8 @@ fn multi_hop_works() { test.set_assertion::(sender_assertions); test.set_assertion::(hop_assertions); test.set_assertion::(receiver_assertions); - test.set_dispatchable::(transfer_assets_para_to_para_through_ah_dispatchable); + let call = transfer_assets_para_to_para_through_ah_call(test.clone()); + test.set_call(call); test.assert(); let sender_assets_after = PenpalA::execute_with(|| { @@ -286,3 +276,14 @@ fn multi_hop_works() { final_execution_fees ); } + +#[test] +fn multi_hop_pay_fees_works() { + test_can_estimate_and_pay_exact_fees!( + PenpalA, + AssetHubWestend, + PenpalB, + (Parent, 1_000_000_000_000u128), + Penpal + ); +} diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs index 77e4c8183e65..0c8f2eac5956 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/lib.rs @@ -24,7 +24,7 @@ mod imports { pub use xcm::{ latest::ParentThen, prelude::{AccountId32 as AccountId32Junction, *}, - v4::{self, NetworkId::Westend as WestendId}, + v5::{self, NetworkId::Westend as WestendId}, }; pub use xcm_executor::traits::TransferType; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs index 0e1cfdd82aaf..bd602f819ac3 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/asset_transfers.rs @@ -39,7 +39,7 @@ fn send_assets_over_bridge(send_fn: F) { fn 
set_up_rocs_for_penpal_rococo_through_ahr_to_ahw( sender: &AccountId, amount: u128, -) -> (Location, v4::Location) { +) -> (Location, v5::Location) { let roc_at_rococo_parachains = roc_at_ah_rococo(); let roc_at_asset_hub_westend = bridged_roc_at_ah_westend(); create_foreign_on_ah_westend(roc_at_asset_hub_westend.clone(), true); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs index 767f74f6ad7f..80836095eef6 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/mod.rs @@ -73,7 +73,7 @@ pub(crate) fn weth_at_asset_hubs() -> Location { } pub(crate) fn create_foreign_on_ah_rococo( - id: v4::Location, + id: v5::Location, sufficient: bool, prefund_accounts: Vec<(AccountId, u128)>, ) { @@ -82,18 +82,18 @@ pub(crate) fn create_foreign_on_ah_rococo( AssetHubRococo::force_create_foreign_asset(id, owner, sufficient, min, prefund_accounts); } -pub(crate) fn create_foreign_on_ah_westend(id: v4::Location, sufficient: bool) { +pub(crate) fn create_foreign_on_ah_westend(id: v5::Location, sufficient: bool) { let owner = AssetHubWestend::account_id_of(ALICE); AssetHubWestend::force_create_foreign_asset(id, owner, sufficient, ASSET_MIN_BALANCE, vec![]); } -pub(crate) fn foreign_balance_on_ah_rococo(id: v4::Location, who: &AccountId) -> u128 { +pub(crate) fn foreign_balance_on_ah_rococo(id: v5::Location, who: &AccountId) -> u128 { AssetHubRococo::execute_with(|| { type Assets = ::ForeignAssets; >::balance(id, who) }) } -pub(crate) fn foreign_balance_on_ah_westend(id: v4::Location, who: &AccountId) -> u128 { +pub(crate) fn foreign_balance_on_ah_westend(id: v5::Location, who: &AccountId) -> u128 { AssetHubWestend::execute_with(|| { type Assets = ::ForeignAssets; >::balance(id, who) @@ -101,8 +101,8 @@ pub(crate) fn foreign_balance_on_ah_westend(id: v4::Location, who: &AccountId) - } // set up pool -pub(crate) fn set_up_pool_with_wnd_on_ah_westend(asset: v4::Location, is_foreign: bool) { - let wnd: v4::Location = v4::Parent.into(); +pub(crate) fn set_up_pool_with_wnd_on_ah_westend(asset: v5::Location, is_foreign: bool) { + let wnd: v5::Location = v5::Parent.into(); AssetHubWestend::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; let owner = AssetHubWestendSender::get(); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs index d91a0c6895f9..0ef41e0f053f 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-rococo/src/tests/snowbridge.rs @@ -310,11 +310,13 @@ fn send_token_from_ethereum_to_penpal() { )); }); + let ethereum_network_v5: NetworkId = EthereumNetwork::get().into(); + // The Weth asset location, identified by the contract address on Ethereum let weth_asset_location: Location = - (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); + (Parent, Parent, ethereum_network_v5, AccountKey20 { network: None, key: WETH }).into(); - let origin_location = (Parent, Parent, EthereumNetwork::get()).into(); + let origin_location = (Parent, Parent, 
ethereum_network_v5).into(); // Fund ethereum sovereign on AssetHub let ethereum_sovereign: AccountId = @@ -448,14 +450,14 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { )), fun: Fungible(WETH_AMOUNT), }]; - let multi_assets = VersionedAssets::V4(Assets::from(assets)); + let multi_assets = VersionedAssets::from(Assets::from(assets)); - let destination = VersionedLocation::V4(Location::new( + let destination = VersionedLocation::from(Location::new( 2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })], )); - let beneficiary = VersionedLocation::V4(Location::new( + let beneficiary = VersionedLocation::from(Location::new( 0, [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], )); @@ -564,10 +566,9 @@ fn register_weth_token_in_asset_hub_fail_for_insufficient_fee() { } fn send_token_from_ethereum_to_asset_hub_with_fee(account_id: [u8; 32], fee: u128) { - let weth_asset_location: Location = Location::new( - 2, - [EthereumNetwork::get().into(), AccountKey20 { network: None, key: WETH }], - ); + let ethereum_network_v5: NetworkId = EthereumNetwork::get().into(); + let weth_asset_location: Location = + Location::new(2, [ethereum_network_v5.into(), AccountKey20 { network: None, key: WETH }]); // Fund asset hub sovereign on bridge hub let asset_hub_sovereign = BridgeHubRococo::sovereign_account_id_of(Location::new( 1, diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs index 76e8312921de..8041610dfd43 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/lib.rs @@ -24,7 +24,7 @@ mod imports { pub use xcm::{ latest::ParentThen, prelude::{AccountId32 as AccountId32Junction, *}, - v4::{self, NetworkId::Rococo as RococoId}, + v5::{self, NetworkId::Rococo as RococoId}, }; pub use xcm_executor::traits::TransferType; diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs index 0856c9526009..8085ec98c846 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/asset_transfers.rs @@ -38,7 +38,7 @@ fn send_assets_over_bridge(send_fn: F) { fn set_up_wnds_for_penpal_westend_through_ahw_to_ahr( sender: &AccountId, amount: u128, -) -> (Location, v4::Location) { +) -> (Location, v5::Location) { let wnd_at_westend_parachains = wnd_at_ah_westend(); let wnd_at_asset_hub_rococo = bridged_wnd_at_ah_rococo(); create_foreign_on_ah_rococo(wnd_at_asset_hub_rococo.clone(), true); diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs index af11f0f7ba72..169e7d9649da 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/mod.rs @@ -74,13 +74,13 @@ pub(crate) fn weth_at_asset_hubs() -> Location { ) } -pub(crate) fn create_foreign_on_ah_rococo(id: v4::Location, sufficient: bool) { +pub(crate) fn 
create_foreign_on_ah_rococo(id: v5::Location, sufficient: bool) { let owner = AssetHubRococo::account_id_of(ALICE); AssetHubRococo::force_create_foreign_asset(id, owner, sufficient, ASSET_MIN_BALANCE, vec![]); } pub(crate) fn create_foreign_on_ah_westend( - id: v4::Location, + id: v5::Location, sufficient: bool, prefund_accounts: Vec<(AccountId, u128)>, ) { @@ -89,13 +89,13 @@ pub(crate) fn create_foreign_on_ah_westend( AssetHubWestend::force_create_foreign_asset(id, owner, sufficient, min, prefund_accounts); } -pub(crate) fn foreign_balance_on_ah_rococo(id: v4::Location, who: &AccountId) -> u128 { +pub(crate) fn foreign_balance_on_ah_rococo(id: v5::Location, who: &AccountId) -> u128 { AssetHubRococo::execute_with(|| { type Assets = ::ForeignAssets; >::balance(id, who) }) } -pub(crate) fn foreign_balance_on_ah_westend(id: v4::Location, who: &AccountId) -> u128 { +pub(crate) fn foreign_balance_on_ah_westend(id: v5::Location, who: &AccountId) -> u128 { AssetHubWestend::execute_with(|| { type Assets = ::ForeignAssets; >::balance(id, who) @@ -103,8 +103,8 @@ pub(crate) fn foreign_balance_on_ah_westend(id: v4::Location, who: &AccountId) - } // set up pool -pub(crate) fn set_up_pool_with_roc_on_ah_rococo(asset: v4::Location, is_foreign: bool) { - let roc: v4::Location = v4::Parent.into(); +pub(crate) fn set_up_pool_with_roc_on_ah_rococo(asset: v5::Location, is_foreign: bool) { + let roc: v5::Location = v5::Parent.into(); AssetHubRococo::execute_with(|| { type RuntimeEvent = ::RuntimeEvent; let owner = AssetHubRococoSender::get(); @@ -119,7 +119,7 @@ pub(crate) fn set_up_pool_with_roc_on_ah_rococo(asset: v4::Location, is_foreign: )); } else { let asset_id = match asset.interior.last() { - Some(v4::Junction::GeneralIndex(id)) => *id as u32, + Some(v5::Junction::GeneralIndex(id)) => *id as u32, _ => unreachable!(), }; assert_ok!(::Assets::mint( diff --git a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs index 4e9dd5a77dd7..c2978786ad9b 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/bridges/bridge-hub-westend/src/tests/snowbridge.rs @@ -96,8 +96,10 @@ fn send_token_from_ethereum_to_asset_hub() { // Fund ethereum sovereign on AssetHub AssetHubWestend::fund_accounts(vec![(AssetHubWestendReceiver::get(), INITIAL_FUND)]); + let ethereum_network_v5: NetworkId = EthereumNetwork::get().into(); + let weth_asset_location: Location = - (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); + (Parent, Parent, ethereum_network_v5, AccountKey20 { network: None, key: WETH }).into(); AssetHubWestend::execute_with(|| { type RuntimeOrigin = ::RuntimeOrigin; @@ -156,8 +158,9 @@ fn send_token_from_ethereum_to_asset_hub() { fn send_weth_asset_from_asset_hub_to_ethereum() { let assethub_location = BridgeHubWestend::sibling_location_of(AssetHubWestend::para_id()); let assethub_sovereign = BridgeHubWestend::sovereign_account_id_of(assethub_location); + let ethereum_network_v5: NetworkId = EthereumNetwork::get().into(); let weth_asset_location: Location = - (Parent, Parent, EthereumNetwork::get(), AccountKey20 { network: None, key: WETH }).into(); + (Parent, Parent, ethereum_network_v5, AccountKey20 { network: None, key: WETH }).into(); BridgeHubWestend::fund_accounts(vec![(assethub_sovereign.clone(), 
INITIAL_FUND)]); @@ -218,14 +221,14 @@ fn send_weth_asset_from_asset_hub_to_ethereum() { )), fun: Fungible(TOKEN_AMOUNT), }]; - let versioned_assets = VersionedAssets::V4(Assets::from(assets)); + let versioned_assets = VersionedAssets::from(Assets::from(assets)); - let destination = VersionedLocation::V4(Location::new( + let destination = VersionedLocation::from(Location::new( 2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })], )); - let beneficiary = VersionedLocation::V4(Location::new( + let beneficiary = VersionedLocation::from(Location::new( 0, [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], )); @@ -317,7 +320,7 @@ fn transfer_relay_token() { assert_ok!(::EthereumSystem::register_token( RuntimeOrigin::root(), - Box::new(VersionedLocation::V4(asset_id.clone())), + Box::new(VersionedLocation::from(asset_id.clone())), AssetMetadata { name: "wnd".as_bytes().to_vec().try_into().unwrap(), symbol: "wnd".as_bytes().to_vec().try_into().unwrap(), @@ -337,14 +340,14 @@ fn transfer_relay_token() { type RuntimeEvent = ::RuntimeEvent; let assets = vec![Asset { id: AssetId(Location::parent()), fun: Fungible(TOKEN_AMOUNT) }]; - let versioned_assets = VersionedAssets::V4(Assets::from(assets)); + let versioned_assets = VersionedAssets::from(Assets::from(assets)); - let destination = VersionedLocation::V4(Location::new( + let destination = VersionedLocation::from(Location::new( 2, [GlobalConsensus(Ethereum { chain_id: CHAIN_ID })], )); - let beneficiary = VersionedLocation::V4(Location::new( + let beneficiary = VersionedLocation::from(Location::new( 0, [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], )); @@ -475,7 +478,7 @@ fn transfer_ah_token() { assert_ok!(::EthereumSystem::register_token( RuntimeOrigin::root(), - Box::new(VersionedLocation::V4(asset_id_in_bh.clone())), + Box::new(VersionedLocation::from(asset_id_in_bh.clone())), AssetMetadata { name: "ah_asset".as_bytes().to_vec().try_into().unwrap(), symbol: "ah_asset".as_bytes().to_vec().try_into().unwrap(), @@ -500,9 +503,9 @@ fn transfer_ah_token() { // Send partial of the token, will fail if send all let assets = vec![Asset { id: AssetId(asset_id.clone()), fun: Fungible(TOKEN_AMOUNT / 10) }]; - let versioned_assets = VersionedAssets::V4(Assets::from(assets)); + let versioned_assets = VersionedAssets::from(Assets::from(assets)); - let beneficiary = VersionedLocation::V4(Location::new( + let beneficiary = VersionedLocation::from(Location::new( 0, [AccountKey20 { network: None, key: ETHEREUM_DESTINATION_ADDRESS.into() }], )); diff --git a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs index 943f8965540d..8418e3da3bba 100644 --- a/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs +++ b/cumulus/parachains/integration-tests/emulated/tests/collectives/collectives-westend/src/tests/fellowship_treasury.rs @@ -64,11 +64,12 @@ fn fellowship_treasury_spend() { let teleport_call = RuntimeCall::Utility(pallet_utility::Call::::dispatch_as { as_origin: bx!(WestendOriginCaller::system(RawOrigin::Signed(treasury_account))), call: bx!(RuntimeCall::XcmPallet(pallet_xcm::Call::::teleport_assets { - dest: bx!(VersionedLocation::V4(asset_hub_location.clone())), - beneficiary: bx!(VersionedLocation::V4(treasury_location)), - assets: bx!(VersionedAssets::V4( - 
Asset { id: native_asset.clone().into(), fun: treasury_balance.into() }.into() - )), + dest: bx!(VersionedLocation::from(asset_hub_location.clone())), + beneficiary: bx!(VersionedLocation::from(treasury_location)), + assets: bx!(VersionedAssets::from(Assets::from(Asset { + id: native_asset.clone().into(), + fun: treasury_balance.into() + }))), fee_asset_item: 0, })), }); @@ -101,12 +102,12 @@ fn fellowship_treasury_spend() { let native_asset = Location::parent(); let treasury_spend_call = RuntimeCall::Treasury(pallet_treasury::Call::::spend { - asset_kind: bx!(VersionedLocatableAsset::V4 { - location: asset_hub_location.clone(), - asset_id: native_asset.into(), - }), + asset_kind: bx!(VersionedLocatableAsset::from(( + asset_hub_location.clone(), + native_asset.into() + ))), amount: fellowship_treasury_balance, - beneficiary: bx!(VersionedLocation::V4(fellowship_treasury_location)), + beneficiary: bx!(VersionedLocation::from(fellowship_treasury_location)), valid_from: None, }); @@ -179,12 +180,12 @@ fn fellowship_treasury_spend() { let fellowship_treasury_spend_call = RuntimeCall::FellowshipTreasury(pallet_treasury::Call::::spend { - asset_kind: bx!(VersionedLocatableAsset::V4 { - location: asset_hub_location, - asset_id: native_asset.into(), - }), + asset_kind: bx!(VersionedLocatableAsset::from(( + asset_hub_location, + native_asset.into() + ))), amount: fellowship_spend_balance, - beneficiary: bx!(VersionedLocation::V4(alice_location)), + beneficiary: bx!(VersionedLocation::from(alice_location)), valid_from: None, }); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 64fdf4883720..75ff74f4caad 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -330,11 +330,11 @@ pub type LocalAndForeignAssets = fungibles::UnionOf< Assets, ForeignAssets, LocalFromLeft< - AssetIdForTrustBackedAssetsConvert, + AssetIdForTrustBackedAssetsConvert, AssetIdForTrustBackedAssets, - xcm::v4::Location, + xcm::v5::Location, >, - xcm::v4::Location, + xcm::v5::Location, AccountId, >; @@ -342,21 +342,21 @@ pub type LocalAndForeignAssets = fungibles::UnionOf< pub type NativeAndAssets = fungible::UnionOf< Balances, LocalAndForeignAssets, - TargetFromLeft, - xcm::v4::Location, + TargetFromLeft, + xcm::v5::Location, AccountId, >; pub type PoolIdToAccountId = pallet_asset_conversion::AccountIdConverter< AssetConversionPalletId, - (xcm::v4::Location, xcm::v4::Location), + (xcm::v5::Location, xcm::v5::Location), >; impl pallet_asset_conversion::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; type HigherPrecisionBalance = sp_core::U256; - type AssetKind = xcm::v4::Location; + type AssetKind = xcm::v5::Location; type Assets = NativeAndAssets; type PoolId = (Self::AssetKind, Self::AssetKind); type PoolLocator = pallet_asset_conversion::WithFirstAsset< @@ -381,7 +381,7 @@ impl pallet_asset_conversion::Config for Runtime { TokenLocation, parachain_info::Pallet, xcm_config::TrustBackedAssetsPalletIndex, - xcm::v4::Location, + xcm::v5::Location, >; } @@ -415,18 +415,18 @@ pub type ForeignAssetsInstance = pallet_assets::Instance2; impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; - type AssetId = xcm::v4::Location; - type AssetIdParameter = xcm::v4::Location; + type AssetId = xcm::v5::Location; + type AssetIdParameter = xcm::v5::Location; type Currency = Balances; 
type CreateOrigin = ForeignCreators< ( - FromSiblingParachain, xcm::v4::Location>, - FromNetwork, + FromSiblingParachain, xcm::v5::Location>, + FromNetwork, xcm_config::bridging::to_westend::WestendOrEthereumAssetFromAssetHubWestend, ), ForeignCreatorsSovereignAccountOf, AccountId, - xcm::v4::Location, + xcm::v5::Location, >; type ForceOrigin = AssetsForceOrigin; type AssetDeposit = ForeignAssetsAssetDeposit; @@ -813,7 +813,7 @@ parameter_types! { impl pallet_asset_conversion_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type AssetId = xcm::v4::Location; + type AssetId = xcm::v5::Location; type OnChargeAssetTransaction = SwapAssetAdapter< TokenLocation, NativeAndAssets, @@ -1310,16 +1310,16 @@ impl_runtime_apis! { impl pallet_asset_conversion::AssetConversionApi< Block, Balance, - xcm::v4::Location, + xcm::v5::Location, > for Runtime { - fn quote_price_exact_tokens_for_tokens(asset1: xcm::v4::Location, asset2: xcm::v4::Location, amount: Balance, include_fee: bool) -> Option { + fn quote_price_exact_tokens_for_tokens(asset1: xcm::v5::Location, asset2: xcm::v5::Location, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_exact_tokens_for_tokens(asset1, asset2, amount, include_fee) } - fn quote_price_tokens_for_exact_tokens(asset1: xcm::v4::Location, asset2: xcm::v4::Location, amount: Balance, include_fee: bool) -> Option { + fn quote_price_tokens_for_exact_tokens(asset1: xcm::v5::Location, asset2: xcm::v5::Location, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_tokens_for_exact_tokens(asset1, asset2, amount, include_fee) } - fn get_reserves(asset1: xcm::v4::Location, asset2: xcm::v4::Location) -> Option<(Balance, Balance)> { + fn get_reserves(asset1: xcm::v5::Location, asset2: xcm::v5::Location) -> Option<(Balance, Balance)> { AssetConversion::get_reserves(asset1, asset2).ok() } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs index 8c52ecd9f1b1..19a10ba370bb 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/mod.rs @@ -138,6 +138,9 @@ impl XcmWeightInfo for AssetHubRococoXcmWeight { fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index f6a883c03e9d..48918767561b 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! 
HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -68,8 +68,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 99_552_000 picoseconds. - Weight::from_parts(101_720_000, 6196) + // Minimum execution time: 99_561_000 picoseconds. + Weight::from_parts(101_317_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -77,8 +77,15 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 659_000 picoseconds. - Weight::from_parts(706_000, 0) + // Minimum execution time: 733_000 picoseconds. + Weight::from_parts(786_000, 0) + } + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_938_000 picoseconds. + Weight::from_parts(4_178_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -86,58 +93,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3568` - // Minimum execution time: 9_665_000 picoseconds. - Weight::from_parts(9_878_000, 3568) + // Minimum execution time: 9_503_000 picoseconds. + Weight::from_parts(10_067_000, 3568) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_959_000 picoseconds. - Weight::from_parts(7_111_000, 0) + // Minimum execution time: 7_143_000 picoseconds. + Weight::from_parts(7_363_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_682_000 picoseconds. - Weight::from_parts(2_799_000, 0) + // Minimum execution time: 2_808_000 picoseconds. + Weight::from_parts(2_916_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 656_000 picoseconds. - Weight::from_parts(683_000, 0) + // Minimum execution time: 758_000 picoseconds. + Weight::from_parts(817_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 687_000 picoseconds. - Weight::from_parts(719_000, 0) + // Minimum execution time: 749_000 picoseconds. + Weight::from_parts(777_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 588_000 picoseconds. - Weight::from_parts(653_000, 0) + // Minimum execution time: 726_000 picoseconds. + Weight::from_parts(770_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 690_000 picoseconds. - Weight::from_parts(714_000, 0) + // Minimum execution time: 763_000 picoseconds. + Weight::from_parts(824_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 671_000 picoseconds. - Weight::from_parts(710_000, 0) + // Minimum execution time: 727_000 picoseconds. 
+ Weight::from_parts(780_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -159,8 +166,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 67_374_000 picoseconds. - Weight::from_parts(68_899_000, 6196) + // Minimum execution time: 65_926_000 picoseconds. + Weight::from_parts(67_107_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -170,8 +177,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 12_896_000 picoseconds. - Weight::from_parts(13_191_000, 3625) + // Minimum execution time: 12_879_000 picoseconds. + Weight::from_parts(13_214_000, 3625) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +186,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 634_000 picoseconds. - Weight::from_parts(677_000, 0) + // Minimum execution time: 684_000 picoseconds. + Weight::from_parts(746_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -200,8 +207,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 28_197_000 picoseconds. - Weight::from_parts(28_752_000, 3610) + // Minimum execution time: 27_664_000 picoseconds. + Weight::from_parts(28_321_000, 3610) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -211,44 +218,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_678_000 picoseconds. - Weight::from_parts(2_803_000, 0) + // Minimum execution time: 2_644_000 picoseconds. + Weight::from_parts(2_714_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 22_806_000 picoseconds. - Weight::from_parts(23_217_000, 0) + // Minimum execution time: 22_472_000 picoseconds. + Weight::from_parts(23_159_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_221_000 picoseconds. - Weight::from_parts(6_347_000, 0) + // Minimum execution time: 6_297_000 picoseconds. + Weight::from_parts(6_480_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 653_000 picoseconds. - Weight::from_parts(676_000, 0) + // Minimum execution time: 712_000 picoseconds. + Weight::from_parts(763_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 621_000 picoseconds. - Weight::from_parts(678_000, 0) + // Minimum execution time: 738_000 picoseconds. + Weight::from_parts(783_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 770_000 picoseconds. - Weight::from_parts(829_000, 0) + // Minimum execution time: 856_000 picoseconds. 
+ Weight::from_parts(919_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -270,8 +277,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 71_654_000 picoseconds. - Weight::from_parts(73_329_000, 6196) + // Minimum execution time: 71_036_000 picoseconds. + Weight::from_parts(72_631_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -279,8 +286,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_999_000 picoseconds. - Weight::from_parts(4_179_000, 0) + // Minimum execution time: 4_518_000 picoseconds. + Weight::from_parts(4_737_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -302,8 +309,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 66_722_000 picoseconds. - Weight::from_parts(68_812_000, 6196) + // Minimum execution time: 66_855_000 picoseconds. + Weight::from_parts(68_087_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -311,22 +318,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 718_000 picoseconds. - Weight::from_parts(745_000, 0) + // Minimum execution time: 742_000 picoseconds. + Weight::from_parts(816_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 623_000 picoseconds. - Weight::from_parts(682_000, 0) + // Minimum execution time: 713_000 picoseconds. + Weight::from_parts(786_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 664_000 picoseconds. - Weight::from_parts(696_000, 0) + // Minimum execution time: 689_000 picoseconds. + Weight::from_parts(744_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -334,22 +341,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 2_495_000 picoseconds. - Weight::from_parts(2_604_000, 1489) + // Minimum execution time: 2_654_000 picoseconds. + Weight::from_parts(2_809_000, 1489) .saturating_add(T::DbWeight::get().reads(1)) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 645_000 picoseconds. - Weight::from_parts(673_000, 0) + // Minimum execution time: 698_000 picoseconds. + Weight::from_parts(758_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 643_000 picoseconds. - Weight::from_parts(701_000, 0) + // Minimum execution time: 757_000 picoseconds. 
+ Weight::from_parts(800_000, 0) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs index 32fbfb6d0199..7d22addf9333 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/xcm_config.rs @@ -175,7 +175,7 @@ pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConverte StartsWithExplicitGlobalConsensus, ), Balance, - xcm::v4::Location, + xcm::v5::Location, >; /// Means for transacting foreign assets from different global consensus. @@ -335,14 +335,14 @@ pub type PoolAssetsExchanger = SingleAssetExchangeAdapter< crate::AssetConversion, crate::NativeAndAssets, ( - TrustBackedAssetsAsLocation, + TrustBackedAssetsAsLocation, ForeignAssetsConvertedConcreteId, // `ForeignAssetsConvertedConcreteId` excludes the relay token, so we add it back here. MatchedConvertedConcreteId< - xcm::v4::Location, + xcm::v5::Location, Balance, Equals, - WithLatestLocationConverter, + WithLatestLocationConverter, TryConvertInto, >, ), @@ -389,7 +389,7 @@ impl xcm_executor::Config for XcmConfig { TrustBackedAssetsAsLocation< TrustBackedAssetsPalletLocation, Balance, - xcm::v4::Location, + xcm::v5::Location, >, ForeignAssetsConvertedConcreteId, ), @@ -523,9 +523,9 @@ pub type ForeignCreatorsSovereignAccountOf = ( /// Simple conversion of `u32` into an `AssetId` for use in benchmarking. pub struct XcmBenchmarkHelper; #[cfg(feature = "runtime-benchmarks")] -impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { - fn create_asset_id_parameter(id: u32) -> xcm::v4::Location { - xcm::v4::Location::new(1, [xcm::v4::Junction::Parachain(id)]) +impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { + fn create_asset_id_parameter(id: u32) -> xcm::v5::Location { + xcm::v5::Location::new(1, [xcm::v5::Junction::Parachain(id)]) } } @@ -660,7 +660,7 @@ pub mod bridging { /// `Option` represents static "base fee" which is used for total delivery fee calculation. 
pub BridgeTable: alloc::vec::Vec = alloc::vec![ NetworkExportTableItem::new( - EthereumNetwork::get(), + EthereumNetwork::get().into(), Some(alloc::vec![Junctions::Here]), SiblingBridgeHub::get(), Some(( @@ -673,7 +673,7 @@ pub mod bridging { /// Universal aliases pub UniversalAliases: BTreeSet<(Location, Junction)> = BTreeSet::from_iter( alloc::vec![ - (SiblingBridgeHubWithEthereumInboundQueueInstance::get(), GlobalConsensus(EthereumNetwork::get())), + (SiblingBridgeHubWithEthereumInboundQueueInstance::get(), GlobalConsensus(EthereumNetwork::get().into())), ] ); } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs index 32d121749534..9afea1281f5f 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/lib.rs @@ -328,11 +328,11 @@ pub type LocalAndForeignAssets = fungibles::UnionOf< Assets, ForeignAssets, LocalFromLeft< - AssetIdForTrustBackedAssetsConvert, + AssetIdForTrustBackedAssetsConvert, AssetIdForTrustBackedAssets, - xcm::v4::Location, + xcm::v5::Location, >, - xcm::v4::Location, + xcm::v5::Location, AccountId, >; @@ -340,21 +340,21 @@ pub type LocalAndForeignAssets = fungibles::UnionOf< pub type NativeAndAssets = fungible::UnionOf< Balances, LocalAndForeignAssets, - TargetFromLeft, - xcm::v4::Location, + TargetFromLeft, + xcm::v5::Location, AccountId, >; pub type PoolIdToAccountId = pallet_asset_conversion::AccountIdConverter< AssetConversionPalletId, - (xcm::v4::Location, xcm::v4::Location), + (xcm::v5::Location, xcm::v5::Location), >; impl pallet_asset_conversion::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; type HigherPrecisionBalance = sp_core::U256; - type AssetKind = xcm::v4::Location; + type AssetKind = xcm::v5::Location; type Assets = NativeAndAssets; type PoolId = (Self::AssetKind, Self::AssetKind); type PoolLocator = pallet_asset_conversion::WithFirstAsset< @@ -379,7 +379,7 @@ impl pallet_asset_conversion::Config for Runtime { WestendLocation, parachain_info::Pallet, xcm_config::TrustBackedAssetsPalletIndex, - xcm::v4::Location, + xcm::v5::Location, >; } @@ -413,18 +413,18 @@ pub type ForeignAssetsInstance = pallet_assets::Instance2; impl pallet_assets::Config for Runtime { type RuntimeEvent = RuntimeEvent; type Balance = Balance; - type AssetId = xcm::v4::Location; - type AssetIdParameter = xcm::v4::Location; + type AssetId = xcm::v5::Location; + type AssetIdParameter = xcm::v5::Location; type Currency = Balances; type CreateOrigin = ForeignCreators< ( - FromSiblingParachain, xcm::v4::Location>, - FromNetwork, + FromSiblingParachain, xcm::v5::Location>, + FromNetwork, xcm_config::bridging::to_rococo::RococoAssetFromAssetHubRococo, ), ForeignCreatorsSovereignAccountOf, AccountId, - xcm::v4::Location, + xcm::v5::Location, >; type ForceOrigin = AssetsForceOrigin; type AssetDeposit = ForeignAssetsAssetDeposit; @@ -806,7 +806,7 @@ parameter_types! { impl pallet_asset_conversion_tx_payment::Config for Runtime { type RuntimeEvent = RuntimeEvent; - type AssetId = xcm::v4::Location; + type AssetId = xcm::v5::Location; type OnChargeAssetTransaction = SwapAssetAdapter< WestendLocation, NativeAndAssets, @@ -1407,18 +1407,18 @@ impl_runtime_apis! 
{ impl pallet_asset_conversion::AssetConversionApi< Block, Balance, - xcm::v4::Location, + xcm::v5::Location, > for Runtime { - fn quote_price_exact_tokens_for_tokens(asset1: xcm::v4::Location, asset2: xcm::v4::Location, amount: Balance, include_fee: bool) -> Option { + fn quote_price_exact_tokens_for_tokens(asset1: xcm::v5::Location, asset2: xcm::v5::Location, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_exact_tokens_for_tokens(asset1, asset2, amount, include_fee) } - fn quote_price_tokens_for_exact_tokens(asset1: xcm::v4::Location, asset2: xcm::v4::Location, amount: Balance, include_fee: bool) -> Option { + fn quote_price_tokens_for_exact_tokens(asset1: xcm::v5::Location, asset2: xcm::v5::Location, amount: Balance, include_fee: bool) -> Option { AssetConversion::quote_price_tokens_for_exact_tokens(asset1, asset2, amount, include_fee) } - fn get_reserves(asset1: xcm::v4::Location, asset2: xcm::v4::Location) -> Option<(Balance, Balance)> { + fn get_reserves(asset1: xcm::v5::Location, asset2: xcm::v5::Location) -> Option<(Balance, Balance)> { AssetConversion::get_reserves(asset1, asset2).ok() } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs index d39052c5c03b..478113359a7e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/mod.rs @@ -138,6 +138,9 @@ impl XcmWeightInfo for AssetHubWestendXcmWeight { fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 98ecd7bd3092..6a960e1a073e 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("asset-hub-westend-dev"), DB CACHE: 1024 // Executed Command: @@ -68,8 +68,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 100_823_000 picoseconds. - Weight::from_parts(103_071_000, 6196) + // Minimum execution time: 97_854_000 picoseconds. + Weight::from_parts(100_164_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -77,8 +77,15 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 600_000 picoseconds. 
- Weight::from_parts(686_000, 0) + // Minimum execution time: 723_000 picoseconds. + Weight::from_parts(769_000, 0) + } + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 3_933_000 picoseconds. + Weight::from_parts(4_168_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -86,58 +93,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `103` // Estimated: `3568` - // Minimum execution time: 8_226_000 picoseconds. - Weight::from_parts(8_650_000, 3568) + // Minimum execution time: 8_228_000 picoseconds. + Weight::from_parts(8_428_000, 3568) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_131_000 picoseconds. - Weight::from_parts(7_600_000, 0) + // Minimum execution time: 7_123_000 picoseconds. + Weight::from_parts(7_371_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_589_000 picoseconds. - Weight::from_parts(2_705_000, 0) + // Minimum execution time: 2_718_000 picoseconds. + Weight::from_parts(2_877_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 667_000 picoseconds. - Weight::from_parts(744_000, 0) + // Minimum execution time: 737_000 picoseconds. + Weight::from_parts(769_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 646_000 picoseconds. - Weight::from_parts(720_000, 0) + // Minimum execution time: 705_000 picoseconds. + Weight::from_parts(766_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 633_000 picoseconds. - Weight::from_parts(669_000, 0) + // Minimum execution time: 688_000 picoseconds. + Weight::from_parts(742_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 671_000 picoseconds. - Weight::from_parts(726_000, 0) + // Minimum execution time: 736_000 picoseconds. + Weight::from_parts(800_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 615_000 picoseconds. - Weight::from_parts(675_000, 0) + // Minimum execution time: 698_000 picoseconds. + Weight::from_parts(730_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -159,8 +166,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 67_236_000 picoseconds. - Weight::from_parts(69_899_000, 6196) + // Minimum execution time: 65_608_000 picoseconds. + Weight::from_parts(67_828_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -170,8 +177,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `160` // Estimated: `3625` - // Minimum execution time: 12_976_000 picoseconds. - Weight::from_parts(13_357_000, 3625) + // Minimum execution time: 12_895_000 picoseconds. 
+ Weight::from_parts(13_134_000, 3625) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +186,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 633_000 picoseconds. - Weight::from_parts(685_000, 0) + // Minimum execution time: 705_000 picoseconds. + Weight::from_parts(741_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -200,8 +207,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `145` // Estimated: `3610` - // Minimum execution time: 28_707_000 picoseconds. - Weight::from_parts(31_790_000, 3610) + // Minimum execution time: 27_604_000 picoseconds. + Weight::from_parts(28_364_000, 3610) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -211,44 +218,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_670_000 picoseconds. - Weight::from_parts(2_833_000, 0) + // Minimum execution time: 2_584_000 picoseconds. + Weight::from_parts(2_706_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 23_459_000 picoseconds. - Weight::from_parts(23_817_000, 0) + // Minimum execution time: 22_537_000 picoseconds. + Weight::from_parts(22_881_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_197_000 picoseconds. - Weight::from_parts(6_338_000, 0) + // Minimum execution time: 6_248_000 picoseconds. + Weight::from_parts(6_464_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 671_000 picoseconds. - Weight::from_parts(715_000, 0) + // Minimum execution time: 734_000 picoseconds. + Weight::from_parts(780_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 655_000 picoseconds. - Weight::from_parts(694_000, 0) + // Minimum execution time: 684_000 picoseconds. + Weight::from_parts(741_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 810_000 picoseconds. - Weight::from_parts(858_000, 0) + // Minimum execution time: 863_000 picoseconds. + Weight::from_parts(930_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -270,8 +277,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 73_136_000 picoseconds. - Weight::from_parts(75_314_000, 6196) + // Minimum execution time: 71_041_000 picoseconds. + Weight::from_parts(72_948_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -279,8 +286,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_515_000 picoseconds. - Weight::from_parts(4_768_000, 0) + // Minimum execution time: 4_267_000 picoseconds. 
+ Weight::from_parts(4_557_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -302,8 +309,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `246` // Estimated: `6196` - // Minimum execution time: 68_072_000 picoseconds. - Weight::from_parts(69_866_000, 6196) + // Minimum execution time: 65_605_000 picoseconds. + Weight::from_parts(67_382_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -311,22 +318,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 696_000 picoseconds. - Weight::from_parts(736_000, 0) + // Minimum execution time: 743_000 picoseconds. + Weight::from_parts(791_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 618_000 picoseconds. - Weight::from_parts(681_000, 0) + // Minimum execution time: 711_000 picoseconds. + Weight::from_parts(751_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 647_000 picoseconds. - Weight::from_parts(672_000, 0) + // Minimum execution time: 722_000 picoseconds. + Weight::from_parts(753_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -334,22 +341,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `1489` - // Minimum execution time: 2_496_000 picoseconds. - Weight::from_parts(2_617_000, 1489) + // Minimum execution time: 2_653_000 picoseconds. + Weight::from_parts(2_720_000, 1489) .saturating_add(T::DbWeight::get().reads(1)) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 637_000 picoseconds. - Weight::from_parts(675_000, 0) + // Minimum execution time: 668_000 picoseconds. + Weight::from_parts(695_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 607_000 picoseconds. - Weight::from_parts(683_000, 0) + // Minimum execution time: 742_000 picoseconds. + Weight::from_parts(773_000, 0) } } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs index cfd9fd2fd463..fd806eeadc07 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/src/xcm_config.rs @@ -171,7 +171,7 @@ pub type ForeignAssetsConvertedConcreteId = assets_common::ForeignAssetsConverte StartsWithExplicitGlobalConsensus, ), Balance, - xcm::v4::Location, + xcm::v5::Location, >; /// Means for transacting foreign assets from different global consensus. @@ -358,14 +358,14 @@ pub type PoolAssetsExchanger = SingleAssetExchangeAdapter< crate::AssetConversion, crate::NativeAndAssets, ( - TrustBackedAssetsAsLocation, + TrustBackedAssetsAsLocation, ForeignAssetsConvertedConcreteId, // `ForeignAssetsConvertedConcreteId` excludes the relay token, so we add it back here. 
MatchedConvertedConcreteId< - xcm::v4::Location, + xcm::v5::Location, Balance, Equals, - WithLatestLocationConverter, + WithLatestLocationConverter, TryConvertInto, >, ), @@ -411,7 +411,7 @@ impl xcm_executor::Config for XcmConfig { TrustBackedAssetsAsLocation< TrustBackedAssetsPalletLocation, Balance, - xcm::v4::Location, + xcm::v5::Location, >, ForeignAssetsConvertedConcreteId, ), @@ -546,9 +546,9 @@ pub type ForeignCreatorsSovereignAccountOf = ( /// Simple conversion of `u32` into an `AssetId` for use in benchmarking. pub struct XcmBenchmarkHelper; #[cfg(feature = "runtime-benchmarks")] -impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { - fn create_asset_id_parameter(id: u32) -> xcm::v4::Location { - xcm::v4::Location::new(1, [xcm::v4::Junction::Parachain(id)]) +impl pallet_assets::BenchmarkHelper for XcmBenchmarkHelper { + fn create_asset_id_parameter(id: u32) -> xcm::v5::Location { + xcm::v5::Location::new(1, [xcm::v5::Junction::Parachain(id)]) } } @@ -675,7 +675,7 @@ pub mod bridging { /// `Option` represents static "base fee" which is used for total delivery fee calculation. pub BridgeTable: sp_std::vec::Vec = sp_std::vec![ NetworkExportTableItem::new( - EthereumNetwork::get(), + EthereumNetwork::get().into(), Some(sp_std::vec![Junctions::Here]), SiblingBridgeHub::get(), Some(( @@ -688,7 +688,7 @@ pub mod bridging { /// Universal aliases pub UniversalAliases: BTreeSet<(Location, Junction)> = BTreeSet::from_iter( sp_std::vec![ - (SiblingBridgeHubWithEthereumInboundQueueInstance::get(), GlobalConsensus(EthereumNetwork::get())), + (SiblingBridgeHubWithEthereumInboundQueueInstance::get(), GlobalConsensus(EthereumNetwork::get().into())), ] ); diff --git a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs index ff84bdea69f4..41d4142cf740 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-westend/tests/tests.rs @@ -88,7 +88,7 @@ fn slot_durations() -> SlotDurations { fn setup_pool_for_paying_fees_with_foreign_assets( (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance): ( AccountId, - xcm::v4::Location, + xcm::v5::Location, Balance, ), ) { @@ -96,7 +96,7 @@ fn setup_pool_for_paying_fees_with_foreign_assets( // setup a pool to pay fees with `foreign_asset_id_location` tokens let pool_owner: AccountId = [14u8; 32].into(); - let native_asset = xcm::v4::Location::parent(); + let native_asset = xcm::v5::Location::parent(); let pool_liquidity: Balance = existential_deposit.max(foreign_asset_id_minimum_balance).mul(100_000); @@ -221,10 +221,10 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { assert_ok!(AssetConversion::create_pool( RuntimeHelper::origin_of(bob.clone()), Box::new( - xcm::v4::Location::try_from(native_location.clone()).expect("conversion works") + xcm::v5::Location::try_from(native_location.clone()).expect("conversion works") ), Box::new( - xcm::v4::Location::try_from(asset_1_location.clone()) + xcm::v5::Location::try_from(asset_1_location.clone()) .expect("conversion works") ) )); @@ -232,10 +232,10 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { assert_ok!(AssetConversion::add_liquidity( RuntimeHelper::origin_of(bob.clone()), Box::new( - xcm::v4::Location::try_from(native_location.clone()).expect("conversion works") + xcm::v5::Location::try_from(native_location.clone()).expect("conversion works") ), Box::new( - 
xcm::v4::Location::try_from(asset_1_location.clone()) + xcm::v5::Location::try_from(asset_1_location.clone()) .expect("conversion works") ), pool_liquidity, @@ -273,8 +273,8 @@ fn test_buy_and_refund_weight_with_swap_local_asset_xcm_trader() { let refund_weight = Weight::from_parts(1_000_000_000, 0); let refund = WeightToFee::weight_to_fee(&refund_weight); let (reserve1, reserve2) = AssetConversion::get_reserves( - xcm::v4::Location::try_from(native_location).expect("conversion works"), - xcm::v4::Location::try_from(asset_1_location.clone()).expect("conversion works"), + xcm::v5::Location::try_from(native_location).expect("conversion works"), + xcm::v5::Location::try_from(asset_1_location.clone()).expect("conversion works"), ) .unwrap(); let asset_refund = @@ -312,12 +312,12 @@ fn test_buy_and_refund_weight_with_swap_foreign_asset_xcm_trader() { let bob: AccountId = SOME_ASSET_ADMIN.into(); let staking_pot = CollatorSelection::account_id(); let native_location = - xcm::v4::Location::try_from(WestendLocation::get()).expect("conversion works"); - let foreign_location = xcm::v4::Location { + xcm::v5::Location::try_from(WestendLocation::get()).expect("conversion works"); + let foreign_location = xcm::v5::Location { parents: 1, interior: ( - xcm::v4::Junction::Parachain(1234), - xcm::v4::Junction::GeneralIndex(12345), + xcm::v5::Junction::Parachain(1234), + xcm::v5::Junction::GeneralIndex(12345), ) .into(), }; @@ -499,11 +499,11 @@ fn test_foreign_asset_xcm_take_first_trader() { .execute_with(|| { // We need root origin to create a sufficient asset let minimum_asset_balance = 3333333_u128; - let foreign_location = xcm::v4::Location { + let foreign_location = xcm::v5::Location { parents: 1, interior: ( - xcm::v4::Junction::Parachain(1234), - xcm::v4::Junction::GeneralIndex(12345), + xcm::v5::Junction::Parachain(1234), + xcm::v5::Junction::GeneralIndex(12345), ) .into(), }; @@ -523,7 +523,7 @@ fn test_foreign_asset_xcm_take_first_trader() { minimum_asset_balance )); - let asset_location_v4: Location = foreign_location.clone().try_into().unwrap(); + let asset_location_v5: Location = foreign_location.clone().try_into().unwrap(); // Set Alice as block author, who will receive fees RuntimeHelper::run_to_block(2, AccountId::from(ALICE)); @@ -538,7 +538,7 @@ fn test_foreign_asset_xcm_take_first_trader() { // Lets pay with: asset_amount_needed + asset_amount_extra let asset_amount_extra = 100_u128; let asset: Asset = - (asset_location_v4.clone(), asset_amount_needed + asset_amount_extra).into(); + (asset_location_v5.clone(), asset_amount_needed + asset_amount_extra).into(); let mut trader = ::Trader::new(); let ctx = XcmContext { origin: None, message_id: XcmHash::default(), topic: None }; @@ -547,7 +547,7 @@ fn test_foreign_asset_xcm_take_first_trader() { let unused_assets = trader.buy_weight(bought, asset.into(), &ctx).expect("Expected Ok"); // Check whether a correct amount of unused assets is returned assert_ok!( - unused_assets.ensure_contains(&(asset_location_v4, asset_amount_extra).into()) + unused_assets.ensure_contains(&(asset_location_v5, asset_amount_extra).into()) ); // Drop trader @@ -835,11 +835,11 @@ fn test_assets_balances_api_works() { .build() .execute_with(|| { let local_asset_id = 1; - let foreign_asset_id_location = xcm::v4::Location { + let foreign_asset_id_location = xcm::v5::Location { parents: 1, interior: [ - xcm::v4::Junction::Parachain(1234), - xcm::v4::Junction::GeneralIndex(12345), + xcm::v5::Junction::Parachain(1234), + xcm::v5::Junction::GeneralIndex(12345), ] .into(), }; 
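// Runtime storage types, `Config::AssetId`, and these test fixtures pin a
// concrete `xcm::v5::Location` rather than the `latest` alias: pinning keeps
// the encoding used for asset identifiers an explicit choice, so moving from
// v4 to v5 is a deliberate, reviewable change (this diff) rather than a side
// effect of a dependency bump. Moving a value between concrete versions goes
// through fallible conversions; a minimal sketch, assuming the cross-version
// `TryFrom` impls the `xcm` crate defines between adjacent versions:
use xcm::{v4, v5};

fn upgrade_location(old: v4::Location) -> Result<v5::Location, ()> {
    // Fallible because a junction valid in one version may have no
    // representation in the other; callers decide how to handle Err(()).
    v5::Location::try_from(old)
}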
@@ -930,7 +930,7 @@ fn test_assets_balances_api_works() { .into()))); // check foreign asset assert!(result.inner().iter().any(|asset| asset.eq(&( - WithLatestLocationConverter::::convert_back( + WithLatestLocationConverter::::convert_back( &foreign_asset_id_location ) .unwrap(), @@ -1023,13 +1023,13 @@ asset_test_utils::include_asset_transactor_transfer_with_pallet_assets_instance_ Runtime, XcmConfig, ForeignAssetsInstance, - xcm::v4::Location, + xcm::v5::Location, JustTry, collator_session_keys(), ExistentialDeposit::get(), - xcm::v4::Location { + xcm::v5::Location { parents: 1, - interior: [xcm::v4::Junction::Parachain(1313), xcm::v4::Junction::GeneralIndex(12345)] + interior: [xcm::v5::Junction::Parachain(1313), xcm::v5::Junction::GeneralIndex(12345)] .into() }, Box::new(|| { @@ -1046,8 +1046,8 @@ asset_test_utils::include_create_and_manage_foreign_assets_for_local_consensus_p WeightToFee, ForeignCreatorsSovereignAccountOf, ForeignAssetsInstance, - xcm::v4::Location, - WithLatestLocationConverter, + xcm::v5::Location, + WithLatestLocationConverter, collator_session_keys(), ExistentialDeposit::get(), AssetDeposit::get(), @@ -1125,7 +1125,7 @@ fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_pool_s let staking_pot = StakingPot::get(); let foreign_asset_id_location = - xcm::v4::Location::new(2, [xcm::v4::Junction::GlobalConsensus(xcm::v4::NetworkId::Rococo)]); + xcm::v5::Location::new(2, [xcm::v5::Junction::GlobalConsensus(xcm::v5::NetworkId::Rococo)]); let foreign_asset_id_minimum_balance = 1_000_000_000; // sovereign account as foreign asset owner (can be whoever for this scenario) let foreign_asset_owner = LocationToAccountId::convert_location(&Location::parent()).unwrap(); @@ -1194,7 +1194,7 @@ fn receive_reserve_asset_deposited_roc_from_asset_hub_rococo_fees_paid_by_suffic let staking_pot = StakingPot::get(); let foreign_asset_id_location = - xcm::v4::Location::new(2, [xcm::v4::Junction::GlobalConsensus(xcm::v4::NetworkId::Rococo)]); + xcm::v5::Location::new(2, [xcm::v5::Junction::GlobalConsensus(xcm::v5::NetworkId::Rococo)]); let foreign_asset_id_minimum_balance = 1_000_000_000; // sovereign account as foreign asset owner (can be whoever for this scenario) let foreign_asset_owner = LocationToAccountId::convert_location(&Location::parent()).unwrap(); diff --git a/cumulus/parachains/runtimes/assets/common/src/lib.rs b/cumulus/parachains/runtimes/assets/common/src/lib.rs index deda5fa4ab9c..a40892668f11 100644 --- a/cumulus/parachains/runtimes/assets/common/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/common/src/lib.rs @@ -260,15 +260,15 @@ mod tests { pub UniversalLocationNetworkId: NetworkId = NetworkId::ByGenesis([9; 32]); } - // set up a converter which uses `xcm::v3::Location` under the hood + // set up a converter which uses `xcm::v4::Location` under the hood type Convert = ForeignAssetsConvertedConcreteId< ( StartsWith, StartsWithExplicitGlobalConsensus, ), u128, - xcm::v3::Location, - WithLatestLocationConverter, + xcm::v4::Location, + WithLatestLocationConverter, >; let test_data = vec![ @@ -315,18 +315,18 @@ mod tests { // ok ( ma_1000(1, [Parachain(200)].into()), - Ok((xcm::v3::Location::new(1, [xcm::v3::Junction::Parachain(200)]), 1000)), + Ok((xcm::v4::Location::new(1, [xcm::v4::Junction::Parachain(200)]), 1000)), ), ( ma_1000(2, [Parachain(200)].into()), - Ok((xcm::v3::Location::new(2, [xcm::v3::Junction::Parachain(200)]), 1000)), + Ok((xcm::v4::Location::new(2, [xcm::v4::Junction::Parachain(200)]), 1000)), ), ( ma_1000(1, [Parachain(200), 
GeneralIndex(1234)].into()), Ok(( - xcm::v3::Location::new( + xcm::v4::Location::new( 1, - [xcm::v3::Junction::Parachain(200), xcm::v3::Junction::GeneralIndex(1234)], + [xcm::v4::Junction::Parachain(200), xcm::v4::Junction::GeneralIndex(1234)], ), 1000, )), @@ -334,9 +334,9 @@ mod tests { ( ma_1000(2, [Parachain(200), GeneralIndex(1234)].into()), Ok(( - xcm::v3::Location::new( + xcm::v4::Location::new( 2, - [xcm::v3::Junction::Parachain(200), xcm::v3::Junction::GeneralIndex(1234)], + [xcm::v4::Junction::Parachain(200), xcm::v4::Junction::GeneralIndex(1234)], ), 1000, )), @@ -344,9 +344,9 @@ mod tests { ( ma_1000(2, [GlobalConsensus(NetworkId::ByGenesis([7; 32]))].into()), Ok(( - xcm::v3::Location::new( + xcm::v4::Location::new( 2, - [xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::ByGenesis( + [xcm::v4::Junction::GlobalConsensus(xcm::v4::NetworkId::ByGenesis( [7; 32], ))], ), @@ -364,14 +364,14 @@ mod tests { .into(), ), Ok(( - xcm::v3::Location::new( + xcm::v4::Location::new( 2, [ - xcm::v3::Junction::GlobalConsensus(xcm::v3::NetworkId::ByGenesis( + xcm::v4::Junction::GlobalConsensus(xcm::v4::NetworkId::ByGenesis( [7; 32], )), - xcm::v3::Junction::Parachain(200), - xcm::v3::Junction::GeneralIndex(1234), + xcm::v4::Junction::Parachain(200), + xcm::v4::Junction::GeneralIndex(1234), ], ), 1000, @@ -381,7 +381,7 @@ mod tests { for (asset, expected_result) in test_data { assert_eq!( - >::matches_fungibles( + >::matches_fungibles( &asset.clone().try_into().unwrap() ), expected_result, diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs index c80222142304..7323e806f34f 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases.rs @@ -367,9 +367,9 @@ pub fn teleports_for_foreign_assets_works< ::Balance: From + Into, SovereignAccountOf: ConvertLocation>, >::AssetId: - From + Into, + From + Into, >::AssetIdParameter: - From + Into, + From + Into, >::Balance: From + Into, ::AccountId: @@ -381,11 +381,11 @@ pub fn teleports_for_foreign_assets_works< { // foreign parachain with the same consensus currency as asset let foreign_para_id = 2222; - let foreign_asset_id_location = xcm::v4::Location { + let foreign_asset_id_location = xcm::v5::Location { parents: 1, interior: [ - xcm::v4::Junction::Parachain(foreign_para_id), - xcm::v4::Junction::GeneralIndex(1234567), + xcm::v5::Junction::Parachain(foreign_para_id), + xcm::v5::Junction::GeneralIndex(1234567), ] .into(), }; diff --git a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs index d86761174740..4f144e24aa30 100644 --- a/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs +++ b/cumulus/parachains/runtimes/assets/test-utils/src/test_cases_over_bridge.rs @@ -331,7 +331,7 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< block_author_account: AccountIdOf, (foreign_asset_owner, foreign_asset_id_location, foreign_asset_id_minimum_balance): ( AccountIdOf, - xcm::v4::Location, + xcm::v5::Location, u128, ), foreign_asset_id_amount_to_transfer: u128, @@ -357,9 +357,9 @@ pub fn receive_reserve_asset_deposited_from_different_consensus_works< BalanceOf: From + Into, XcmConfig: xcm_executor::Config, >::AssetId: - From + Into, + From + Into, >::AssetIdParameter: - From + Into, + From + Into, >::Balance: From + Into + From, 
::AccountId: Into<<::RuntimeOrigin as OriginTrait>::AccountId> diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs index b40cbfeeb8f2..bc1c9980e140 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/mod.rs @@ -139,6 +139,9 @@ impl XcmWeightInfo for BridgeHubRococoXcmWeight { fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 9a9137c18093..3dd636c32a4e 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -68,8 +68,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 70_133_000 picoseconds. - Weight::from_parts(71_765_000, 6196) + // Minimum execution time: 69_010_000 picoseconds. + Weight::from_parts(70_067_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -77,8 +77,15 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 959_000 picoseconds. - Weight::from_parts(996_000, 0) + // Minimum execution time: 1_069_000 picoseconds. + Weight::from_parts(1_116_000, 0) + } + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 2_011_000 picoseconds. + Weight::from_parts(2_095_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -86,58 +93,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 7_537_000 picoseconds. - Weight::from_parts(7_876_000, 3497) + // Minimum execution time: 7_630_000 picoseconds. + Weight::from_parts(7_992_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_774_000 picoseconds. - Weight::from_parts(7_895_000, 0) + // Minimum execution time: 7_909_000 picoseconds. 
+ Weight::from_parts(8_100_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_577_000 picoseconds. - Weight::from_parts(1_622_000, 0) + // Minimum execution time: 1_749_000 picoseconds. + Weight::from_parts(1_841_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 973_000 picoseconds. - Weight::from_parts(1_008_000, 0) + // Minimum execution time: 1_109_000 picoseconds. + Weight::from_parts(1_156_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_027_000 picoseconds. - Weight::from_parts(1_052_000, 0) + // Minimum execution time: 1_073_000 picoseconds. + Weight::from_parts(1_143_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 953_000 picoseconds. - Weight::from_parts(992_000, 0) + // Minimum execution time: 1_050_000 picoseconds. + Weight::from_parts(1_084_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 949_000 picoseconds. - Weight::from_parts(1_020_000, 0) + // Minimum execution time: 1_060_000 picoseconds. + Weight::from_parts(1_114_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 979_000 picoseconds. - Weight::from_parts(1_032_000, 0) + // Minimum execution time: 1_065_000 picoseconds. + Weight::from_parts(1_112_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -159,8 +166,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 66_663_000 picoseconds. - Weight::from_parts(67_728_000, 6196) + // Minimum execution time: 65_538_000 picoseconds. + Weight::from_parts(66_943_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -170,8 +177,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 11_074_000 picoseconds. - Weight::from_parts(11_439_000, 3555) + // Minimum execution time: 10_898_000 picoseconds. + Weight::from_parts(11_262_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +186,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 943_000 picoseconds. - Weight::from_parts(1_021_000, 0) + // Minimum execution time: 1_026_000 picoseconds. + Weight::from_parts(1_104_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -200,8 +207,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_123_000 picoseconds. - Weight::from_parts(25_687_000, 3503) + // Minimum execution time: 25_133_000 picoseconds. 
+ Weight::from_parts(25_526_000, 3503) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -211,44 +218,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_868_000 picoseconds. - Weight::from_parts(3_124_000, 0) + // Minimum execution time: 2_946_000 picoseconds. + Weight::from_parts(3_074_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_378_000 picoseconds. - Weight::from_parts(1_458_000, 0) + // Minimum execution time: 1_428_000 picoseconds. + Weight::from_parts(1_490_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_036_000 picoseconds. - Weight::from_parts(1_105_000, 0) + // Minimum execution time: 1_158_000 picoseconds. + Weight::from_parts(1_222_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 945_000 picoseconds. - Weight::from_parts(1_021_000, 0) + // Minimum execution time: 1_056_000 picoseconds. + Weight::from_parts(1_117_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 931_000 picoseconds. - Weight::from_parts(1_006_000, 0) + // Minimum execution time: 1_045_000 picoseconds. + Weight::from_parts(1_084_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_139_000 picoseconds. - Weight::from_parts(1_206_000, 0) + // Minimum execution time: 1_224_000 picoseconds. + Weight::from_parts(1_268_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -270,8 +277,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 72_884_000 picoseconds. - Weight::from_parts(74_331_000, 6196) + // Minimum execution time: 70_789_000 picoseconds. + Weight::from_parts(72_321_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -279,8 +286,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_432_000 picoseconds. - Weight::from_parts(4_542_000, 0) + // Minimum execution time: 4_521_000 picoseconds. + Weight::from_parts(4_649_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -302,8 +309,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `171` // Estimated: `6196` - // Minimum execution time: 67_102_000 picoseconds. - Weight::from_parts(68_630_000, 6196) + // Minimum execution time: 66_129_000 picoseconds. + Weight::from_parts(68_089_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -311,22 +318,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 995_000 picoseconds. - Weight::from_parts(1_057_000, 0) + // Minimum execution time: 1_094_000 picoseconds. 
+ Weight::from_parts(1_157_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 956_000 picoseconds. - Weight::from_parts(1_021_000, 0) + // Minimum execution time: 1_059_000 picoseconds. + Weight::from_parts(1_109_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 944_000 picoseconds. - Weight::from_parts(986_000, 0) + // Minimum execution time: 1_053_000 picoseconds. + Weight::from_parts(1_080_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -343,12 +350,12 @@ impl WeightInfo { /// The range of component `x` is `[1, 1000]`. pub fn export_message(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `589` - // Estimated: `6529` - // Minimum execution time: 58_111_000 picoseconds. - Weight::from_parts(59_123_071, 6529) - // Standard Error: 167 - .saturating_add(Weight::from_parts(43_658, 0).saturating_mul(x.into())) + // Measured: `190` + // Estimated: `6130` + // Minimum execution time: 42_081_000 picoseconds. + Weight::from_parts(42_977_658, 6130) + // Standard Error: 77 + .saturating_add(Weight::from_parts(44_912, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -356,14 +363,14 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 950_000 picoseconds. - Weight::from_parts(1_002_000, 0) + // Minimum execution time: 1_041_000 picoseconds. + Weight::from_parts(1_084_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 963_000 picoseconds. - Weight::from_parts(1_012_000, 0) + // Minimum execution time: 1_085_000 picoseconds. 
+ Weight::from_parts(1_161_000, 0) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs index 2fb186703a88..b56daeb24d1a 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/xcm_config.rs @@ -296,7 +296,7 @@ impl, FeeHandler: HandleFee> FeeManager fn is_waived(origin: Option<&Location>, fee_reason: FeeReason) -> bool { let Some(loc) = origin else { return false }; if let Export { network, destination: Here } = fee_reason { - if network == EthereumNetwork::get() { + if network == EthereumNetwork::get().into() { return false } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs index 3961cc6d5cdd..35abce1083d9 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/mod.rs @@ -140,6 +140,9 @@ impl XcmWeightInfo for BridgeHubWestendXcmWeight { fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 16c483a21817..893d991958e1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-15, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("bridge-hub-westend-dev"), DB CACHE: 1024 // Executed Command: @@ -68,8 +68,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 70_715_000 picoseconds. - Weight::from_parts(72_211_000, 6196) + // Minimum execution time: 70_353_000 picoseconds. + Weight::from_parts(72_257_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -77,8 +77,15 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 968_000 picoseconds. - Weight::from_parts(1_022_000, 0) + // Minimum execution time: 996_000 picoseconds. + Weight::from_parts(1_027_000, 0) + } + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_926_000 picoseconds. 
+ Weight::from_parts(2_033_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -86,58 +93,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 7_718_000 picoseconds. - Weight::from_parts(7_894_000, 3497) + // Minimum execution time: 7_961_000 picoseconds. + Weight::from_parts(8_256_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_662_000 picoseconds. - Weight::from_parts(7_937_000, 0) + // Minimum execution time: 7_589_000 picoseconds. + Weight::from_parts(7_867_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_699_000 picoseconds. - Weight::from_parts(1_783_000, 0) + // Minimum execution time: 1_602_000 picoseconds. + Weight::from_parts(1_660_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 977_000 picoseconds. - Weight::from_parts(1_045_000, 0) + // Minimum execution time: 1_056_000 picoseconds. + Weight::from_parts(1_096_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 971_000 picoseconds. - Weight::from_parts(1_030_000, 0) + // Minimum execution time: 1_014_000 picoseconds. + Weight::from_parts(1_075_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 958_000 picoseconds. - Weight::from_parts(996_000, 0) + // Minimum execution time: 986_000 picoseconds. + Weight::from_parts(1_031_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 992_000 picoseconds. - Weight::from_parts(1_056_000, 0) + // Minimum execution time: 1_015_000 picoseconds. + Weight::from_parts(1_069_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 975_000 picoseconds. - Weight::from_parts(1_026_000, 0) + // Minimum execution time: 993_000 picoseconds. + Weight::from_parts(1_063_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -159,8 +166,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 67_236_000 picoseconds. - Weight::from_parts(68_712_000, 6196) + // Minimum execution time: 66_350_000 picoseconds. + Weight::from_parts(68_248_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -170,8 +177,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 10_890_000 picoseconds. - Weight::from_parts(11_223_000, 3555) + // Minimum execution time: 11_247_000 picoseconds. + Weight::from_parts(11_468_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -179,8 +186,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 959_000 picoseconds. 
- Weight::from_parts(1_018_000, 0) + // Minimum execution time: 1_060_000 picoseconds. + Weight::from_parts(1_103_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -200,8 +207,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 25_162_000 picoseconds. - Weight::from_parts(25_621_000, 3503) + // Minimum execution time: 25_599_000 picoseconds. + Weight::from_parts(26_336_000, 3503) .saturating_add(T::DbWeight::get().reads(7)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -211,44 +218,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_949_000 picoseconds. - Weight::from_parts(3_119_000, 0) + // Minimum execution time: 2_863_000 picoseconds. + Weight::from_parts(3_090_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_329_000 picoseconds. - Weight::from_parts(1_410_000, 0) + // Minimum execution time: 1_385_000 picoseconds. + Weight::from_parts(1_468_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_063_000 picoseconds. - Weight::from_parts(1_101_000, 0) + // Minimum execution time: 1_087_000 picoseconds. + Weight::from_parts(1_164_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 991_000 picoseconds. - Weight::from_parts(1_041_000, 0) + // Minimum execution time: 1_022_000 picoseconds. + Weight::from_parts(1_066_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 944_000 picoseconds. - Weight::from_parts(998_000, 0) + // Minimum execution time: 1_015_000 picoseconds. + Weight::from_parts(1_070_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_100_000 picoseconds. - Weight::from_parts(1_180_000, 0) + // Minimum execution time: 1_203_000 picoseconds. + Weight::from_parts(1_241_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -270,8 +277,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 71_203_000 picoseconds. - Weight::from_parts(73_644_000, 6196) + // Minimum execution time: 70_773_000 picoseconds. + Weight::from_parts(72_730_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -279,8 +286,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_018_000 picoseconds. - Weight::from_parts(4_267_000, 0) + // Minimum execution time: 4_173_000 picoseconds. 
+ Weight::from_parts(4_445_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -302,8 +309,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `208` // Estimated: `6196` - // Minimum execution time: 67_893_000 picoseconds. - Weight::from_parts(69_220_000, 6196) + // Minimum execution time: 66_471_000 picoseconds. + Weight::from_parts(68_362_000, 6196) .saturating_add(T::DbWeight::get().reads(9)) .saturating_add(T::DbWeight::get().writes(4)) } @@ -311,22 +318,22 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 980_000 picoseconds. - Weight::from_parts(1_043_000, 0) + // Minimum execution time: 1_067_000 picoseconds. + Weight::from_parts(1_108_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 944_000 picoseconds. - Weight::from_parts(981_000, 0) + // Minimum execution time: 997_000 picoseconds. + Weight::from_parts(1_043_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 930_000 picoseconds. - Weight::from_parts(962_000, 0) + // Minimum execution time: 1_000_000 picoseconds. + Weight::from_parts(1_056_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -343,12 +350,12 @@ impl WeightInfo { /// The range of component `x` is `[1, 1000]`. pub fn export_message(x: u32, ) -> Weight { // Proof Size summary in bytes: - // Measured: `552` - // Estimated: `6492` - // Minimum execution time: 56_762_000 picoseconds. - Weight::from_parts(58_320_046, 6492) - // Standard Error: 162 - .saturating_add(Weight::from_parts(51_730, 0).saturating_mul(x.into())) + // Measured: `225` + // Estimated: `6165` + // Minimum execution time: 43_316_000 picoseconds. + Weight::from_parts(45_220_843, 6165) + // Standard Error: 169 + .saturating_add(Weight::from_parts(44_459, 0).saturating_mul(x.into())) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -356,14 +363,14 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 971_000 picoseconds. - Weight::from_parts(1_018_000, 0) + // Minimum execution time: 998_000 picoseconds. + Weight::from_parts(1_054_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 979_000 picoseconds. - Weight::from_parts(1_026_000, 0) + // Minimum execution time: 995_000 picoseconds. 
+ Weight::from_parts(1_060_000, 0) } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs index ae31ca4cedf2..1182eca3dce1 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/xcm_config.rs @@ -290,7 +290,7 @@ impl, FeeHandler: HandleFee> FeeManager fn is_waived(origin: Option<&Location>, fee_reason: FeeReason) -> bool { let Some(loc) = origin else { return false }; if let Export { network, destination: Here } = fee_reason { - if network == EthereumNetwork::get() { + if network == EthereumNetwork::get().into() { return false } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs b/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs index 5f91897262f4..ad9fff75cc34 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/common/src/message_queue.rs @@ -23,7 +23,7 @@ use frame_support::{ use pallet_message_queue::OnQueueChanged; use scale_info::TypeInfo; use snowbridge_core::ChannelId; -use xcm::v4::{Junction, Location}; +use xcm::v5::{Junction, Location}; /// The aggregate origin of an inbound message. /// This is specialized for BridgeHub, as the snowbridge-outbound-queue-pallet is also using diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs index b8db473f1066..9983aaf4c0ed 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/mod.rs @@ -138,6 +138,9 @@ impl XcmWeightInfo for CoretimeRococoXcmWeight { fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } diff --git a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 676048f92ad9..3a93c80766de 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("coretime-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 23_760_000 picoseconds. - Weight::from_parts(24_411_000, 3571) + // Minimum execution time: 29_263_000 picoseconds. 
+ Weight::from_parts(30_387_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -73,8 +73,15 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 522_000 picoseconds. - Weight::from_parts(546_000, 0) + // Minimum execution time: 603_000 picoseconds. + Weight::from_parts(664_000, 0) + } + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_530_000 picoseconds. + Weight::from_parts(1_662_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -82,58 +89,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 5_830_000 picoseconds. - Weight::from_parts(6_069_000, 3497) + // Minimum execution time: 7_290_000 picoseconds. + Weight::from_parts(7_493_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_508_000 picoseconds. - Weight::from_parts(5_801_000, 0) + // Minimum execution time: 6_785_000 picoseconds. + Weight::from_parts(7_012_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_130_000 picoseconds. - Weight::from_parts(1_239_000, 0) + // Minimum execution time: 1_299_000 picoseconds. + Weight::from_parts(1_380_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 541_000 picoseconds. - Weight::from_parts(567_000, 0) + // Minimum execution time: 655_000 picoseconds. + Weight::from_parts(681_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 560_000 picoseconds. - Weight::from_parts(591_000, 0) + // Minimum execution time: 625_000 picoseconds. + Weight::from_parts(669_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 505_000 picoseconds. - Weight::from_parts(547_000, 0) + // Minimum execution time: 607_000 picoseconds. + Weight::from_parts(650_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 538_000 picoseconds. - Weight::from_parts(565_000, 0) + // Minimum execution time: 655_000 picoseconds. + Weight::from_parts(688_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 514_000 picoseconds. - Weight::from_parts(541_000, 0) + // Minimum execution time: 602_000 picoseconds. + Weight::from_parts(650_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -151,8 +158,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 20_920_000 picoseconds. - Weight::from_parts(21_437_000, 3571) + // Minimum execution time: 26_176_000 picoseconds. 
+ Weight::from_parts(26_870_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -162,8 +169,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 8_549_000 picoseconds. - Weight::from_parts(8_821_000, 3555) + // Minimum execution time: 10_674_000 picoseconds. + Weight::from_parts(10_918_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -171,8 +178,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 525_000 picoseconds. - Weight::from_parts(544_000, 0) + // Minimum execution time: 601_000 picoseconds. + Weight::from_parts(639_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -190,8 +197,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 19_645_000 picoseconds. - Weight::from_parts(20_104_000, 3539) + // Minimum execution time: 24_220_000 picoseconds. + Weight::from_parts(24_910_000, 3539) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -201,44 +208,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_232_000 picoseconds. - Weight::from_parts(2_334_000, 0) + // Minimum execution time: 2_464_000 picoseconds. + Weight::from_parts(2_618_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 883_000 picoseconds. - Weight::from_parts(945_000, 0) + // Minimum execution time: 984_000 picoseconds. + Weight::from_parts(1_041_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 600_000 picoseconds. - Weight::from_parts(645_000, 0) + // Minimum execution time: 730_000 picoseconds. + Weight::from_parts(769_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 527_000 picoseconds. - Weight::from_parts(552_000, 0) + // Minimum execution time: 615_000 picoseconds. + Weight::from_parts(658_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 527_000 picoseconds. - Weight::from_parts(550_000, 0) + // Minimum execution time: 607_000 picoseconds. + Weight::from_parts(637_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 657_000 picoseconds. - Weight::from_parts(703_000, 0) + // Minimum execution time: 791_000 picoseconds. + Weight::from_parts(838_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -256,8 +263,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 24_999_000 picoseconds. - Weight::from_parts(25_671_000, 3571) + // Minimum execution time: 30_210_000 picoseconds. 
+ Weight::from_parts(30_973_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -265,8 +272,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_159_000 picoseconds. - Weight::from_parts(3_296_000, 0) + // Minimum execution time: 3_097_000 picoseconds. + Weight::from_parts(3_277_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -284,8 +291,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 21_052_000 picoseconds. - Weight::from_parts(22_153_000, 3571) + // Minimum execution time: 26_487_000 picoseconds. + Weight::from_parts(27_445_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -293,35 +300,35 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 547_000 picoseconds. - Weight::from_parts(584_000, 0) + // Minimum execution time: 655_000 picoseconds. + Weight::from_parts(689_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 506_000 picoseconds. - Weight::from_parts(551_000, 0) + // Minimum execution time: 627_000 picoseconds. + Weight::from_parts(659_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 508_000 picoseconds. - Weight::from_parts(527_000, 0) + // Minimum execution time: 603_000 picoseconds. + Weight::from_parts(650_000, 0) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 527_000 picoseconds. - Weight::from_parts(558_000, 0) + // Minimum execution time: 594_000 picoseconds. + Weight::from_parts(645_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 514_000 picoseconds. - Weight::from_parts(553_000, 0) + // Minimum execution time: 650_000 picoseconds. + Weight::from_parts(673_000, 0) } } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs index f35f7bfc188d..24787079e4c0 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/mod.rs @@ -138,6 +138,9 @@ impl XcmWeightInfo for CoretimeWestendXcmWeight { fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } diff --git a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 7390f35e3974..c8ba2d8b4ce7 100644 --- a/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/coretime/coretime-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! 
Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-05-07, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-unxyhko3-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("coretime-westend-dev"), DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 23_688_000 picoseconds. - Weight::from_parts(24_845_000, 3571) + // Minimum execution time: 29_463_000 picoseconds. + Weight::from_parts(30_178_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -73,8 +73,15 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 569_000 picoseconds. - Weight::from_parts(619_000, 0) + // Minimum execution time: 568_000 picoseconds. + Weight::from_parts(608_000, 0) + } + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_530_000 picoseconds. + Weight::from_parts(1_585_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -82,58 +89,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 5_851_000 picoseconds. - Weight::from_parts(6_061_000, 3497) + // Minimum execution time: 7_400_000 picoseconds. + Weight::from_parts(7_572_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 5_770_000 picoseconds. - Weight::from_parts(5_916_000, 0) + // Minimum execution time: 6_951_000 picoseconds. + Weight::from_parts(7_173_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_155_000 picoseconds. - Weight::from_parts(1_270_000, 0) + // Minimum execution time: 1_245_000 picoseconds. + Weight::from_parts(1_342_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 558_000 picoseconds. - Weight::from_parts(628_000, 0) + // Minimum execution time: 613_000 picoseconds. + Weight::from_parts(657_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 603_000 picoseconds. - Weight::from_parts(630_000, 0) + // Minimum execution time: 613_000 picoseconds. + Weight::from_parts(656_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 533_000 picoseconds. - Weight::from_parts(563_000, 0) + // Minimum execution time: 570_000 picoseconds. + Weight::from_parts(608_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 597_000 picoseconds. 
- Weight::from_parts(644_000, 0) + // Minimum execution time: 557_000 picoseconds. + Weight::from_parts(607_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 536_000 picoseconds. - Weight::from_parts(588_000, 0) + // Minimum execution time: 557_000 picoseconds. + Weight::from_parts(578_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -151,8 +158,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 21_146_000 picoseconds. - Weight::from_parts(21_771_000, 3571) + // Minimum execution time: 26_179_000 picoseconds. + Weight::from_parts(27_089_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -162,8 +169,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 8_446_000 picoseconds. - Weight::from_parts(8_660_000, 3555) + // Minimum execution time: 10_724_000 picoseconds. + Weight::from_parts(10_896_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -171,8 +178,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 561_000 picoseconds. - Weight::from_parts(594_000, 0) + // Minimum execution time: 567_000 picoseconds. + Weight::from_parts(623_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -190,8 +197,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `74` // Estimated: `3539` - // Minimum execution time: 19_953_000 picoseconds. - Weight::from_parts(20_608_000, 3539) + // Minimum execution time: 24_367_000 picoseconds. + Weight::from_parts(25_072_000, 3539) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -201,44 +208,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_290_000 picoseconds. - Weight::from_parts(2_370_000, 0) + // Minimum execution time: 2_554_000 picoseconds. + Weight::from_parts(2_757_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 943_000 picoseconds. - Weight::from_parts(987_000, 0) + // Minimum execution time: 922_000 picoseconds. + Weight::from_parts(992_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 635_000 picoseconds. - Weight::from_parts(699_000, 0) + // Minimum execution time: 688_000 picoseconds. + Weight::from_parts(723_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 553_000 picoseconds. - Weight::from_parts(609_000, 0) + // Minimum execution time: 607_000 picoseconds. + Weight::from_parts(647_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 547_000 picoseconds. - Weight::from_parts(581_000, 0) + // Minimum execution time: 591_000 picoseconds. 
+ Weight::from_parts(620_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 700_000 picoseconds. - Weight::from_parts(757_000, 0) + // Minimum execution time: 735_000 picoseconds. + Weight::from_parts(802_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -256,8 +263,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 24_953_000 picoseconds. - Weight::from_parts(25_516_000, 3571) + // Minimum execution time: 29_923_000 picoseconds. + Weight::from_parts(30_770_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -265,8 +272,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_746_000 picoseconds. - Weight::from_parts(2_944_000, 0) + // Minimum execution time: 2_884_000 picoseconds. + Weight::from_parts(3_088_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -284,8 +291,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `106` // Estimated: `3571` - // Minimum execution time: 21_325_000 picoseconds. - Weight::from_parts(21_942_000, 3571) + // Minimum execution time: 26_632_000 picoseconds. + Weight::from_parts(27_228_000, 3571) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -293,35 +300,35 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 600_000 picoseconds. - Weight::from_parts(631_000, 0) + // Minimum execution time: 599_000 picoseconds. + Weight::from_parts(655_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 534_000 picoseconds. - Weight::from_parts(566_000, 0) + // Minimum execution time: 587_000 picoseconds. + Weight::from_parts(628_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 540_000 picoseconds. - Weight::from_parts(565_000, 0) + // Minimum execution time: 572_000 picoseconds. + Weight::from_parts(631_000, 0) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 542_000 picoseconds. - Weight::from_parts(581_000, 0) + // Minimum execution time: 570_000 picoseconds. + Weight::from_parts(615_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 568_000 picoseconds. - Weight::from_parts(597_000, 0) + // Minimum execution time: 624_000 picoseconds. 
+ Weight::from_parts(659_000, 0) } } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs index 58007173ae1d..09e3b3732206 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/mod.rs @@ -137,6 +137,9 @@ impl XcmWeightInfo for PeopleRococoXcmWeight { fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } diff --git a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 729a32117041..e4f103e25370 100644 --- a/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/people/people-rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("people-rococo-dev"), DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 29_430_000 picoseconds. - Weight::from_parts(30_111_000, 3535) + // Minimum execution time: 28_898_000 picoseconds. + Weight::from_parts(29_717_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -73,8 +73,15 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 607_000 picoseconds. - Weight::from_parts(672_000, 0) + // Minimum execution time: 690_000 picoseconds. + Weight::from_parts(759_000, 0) + } + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_695_000 picoseconds. + Weight::from_parts(1_799_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -82,58 +89,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 7_445_000 picoseconds. - Weight::from_parts(7_623_000, 3497) + // Minimum execution time: 7_441_000 picoseconds. + Weight::from_parts(7_746_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_749_000 picoseconds. - Weight::from_parts(7_073_000, 0) + // Minimum execution time: 6_881_000 picoseconds. 
+ Weight::from_parts(7_219_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_275_000 picoseconds. - Weight::from_parts(1_409_000, 0) + // Minimum execution time: 1_390_000 picoseconds. + Weight::from_parts(1_471_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 670_000 picoseconds. - Weight::from_parts(709_000, 0) + // Minimum execution time: 698_000 picoseconds. + Weight::from_parts(743_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 635_000 picoseconds. - Weight::from_parts(723_000, 0) + // Minimum execution time: 695_000 picoseconds. + Weight::from_parts(746_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 650_000 picoseconds. + // Minimum execution time: 664_000 picoseconds. Weight::from_parts(699_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 678_000 picoseconds. - Weight::from_parts(728_000, 0) + // Minimum execution time: 698_000 picoseconds. + Weight::from_parts(748_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 657_000 picoseconds. - Weight::from_parts(703_000, 0) + // Minimum execution time: 669_000 picoseconds. + Weight::from_parts(726_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -151,8 +158,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 25_795_000 picoseconds. - Weight::from_parts(26_415_000, 3535) + // Minimum execution time: 25_991_000 picoseconds. + Weight::from_parts(26_602_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -162,8 +169,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 10_792_000 picoseconds. - Weight::from_parts(11_061_000, 3555) + // Minimum execution time: 10_561_000 picoseconds. + Weight::from_parts(10_913_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -171,8 +178,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 624_000 picoseconds. - Weight::from_parts(682_000, 0) + // Minimum execution time: 654_000 picoseconds. + Weight::from_parts(707_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -190,8 +197,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 23_906_000 picoseconds. - Weight::from_parts(24_740_000, 3503) + // Minimum execution time: 23_813_000 picoseconds. + Weight::from_parts(24_352_000, 3503) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -201,44 +208,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_621_000 picoseconds. 
- Weight::from_parts(2_788_000, 0) + // Minimum execution time: 2_499_000 picoseconds. + Weight::from_parts(2_655_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 954_000 picoseconds. - Weight::from_parts(1_046_000, 0) + // Minimum execution time: 1_065_000 picoseconds. + Weight::from_parts(1_108_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 742_000 picoseconds. - Weight::from_parts(790_000, 0) + // Minimum execution time: 747_000 picoseconds. + Weight::from_parts(807_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 664_000 picoseconds. - Weight::from_parts(722_000, 0) + // Minimum execution time: 685_000 picoseconds. + Weight::from_parts(750_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 619_000 picoseconds. - Weight::from_parts(672_000, 0) + // Minimum execution time: 664_000 picoseconds. + Weight::from_parts(711_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 798_000 picoseconds. - Weight::from_parts(851_000, 0) + // Minimum execution time: 830_000 picoseconds. + Weight::from_parts(880_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -256,8 +263,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 29_580_000 picoseconds. - Weight::from_parts(31_100_000, 3535) + // Minimum execution time: 30_051_000 picoseconds. + Weight::from_parts(30_720_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -265,8 +272,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_150_000 picoseconds. - Weight::from_parts(3_326_000, 0) + // Minimum execution time: 3_136_000 picoseconds. + Weight::from_parts(3_265_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -284,8 +291,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 26_152_000 picoseconds. - Weight::from_parts(26_635_000, 3535) + // Minimum execution time: 25_980_000 picoseconds. + Weight::from_parts(26_868_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -293,35 +300,35 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 693_000 picoseconds. - Weight::from_parts(724_000, 0) + // Minimum execution time: 708_000 picoseconds. + Weight::from_parts(755_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 632_000 picoseconds. - Weight::from_parts(678_000, 0) + // Minimum execution time: 667_000 picoseconds. 
+ Weight::from_parts(702_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 646_000 picoseconds. - Weight::from_parts(694_000, 0) + // Minimum execution time: 660_000 picoseconds. + Weight::from_parts(695_000, 0) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 622_000 picoseconds. - Weight::from_parts(656_000, 0) + // Minimum execution time: 669_000 picoseconds. + Weight::from_parts(707_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 639_000 picoseconds. - Weight::from_parts(679_000, 0) + // Minimum execution time: 685_000 picoseconds. + Weight::from_parts(757_000, 0) } } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs index b44e8d4b61b8..3f0bda0f4f57 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/mod.rs @@ -137,6 +137,9 @@ impl XcmWeightInfo for PeopleWestendXcmWeight { fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } diff --git a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 1377d31f2db7..818c2e23a2e9 100644 --- a/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/cumulus/parachains/runtimes/people/people-westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -17,9 +17,9 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 -//! DATE: 2024-08-09, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `runner-696hpswk-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` //! WASM-EXECUTION: Compiled, CHAIN: Some("people-westend-dev"), DB CACHE: 1024 // Executed Command: @@ -64,8 +64,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 29_537_000 picoseconds. - Weight::from_parts(30_513_000, 3535) + // Minimum execution time: 29_015_000 picoseconds. + Weight::from_parts(30_359_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -73,8 +73,15 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 683_000 picoseconds. - Weight::from_parts(738_000, 0) + // Minimum execution time: 572_000 picoseconds. + Weight::from_parts(637_000, 0) + } + pub fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_550_000 picoseconds. 
+ Weight::from_parts(1_604_000, 0) } // Storage: `PolkadotXcm::Queries` (r:1 w:0) // Proof: `PolkadotXcm::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -82,58 +89,58 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `32` // Estimated: `3497` - // Minimum execution time: 7_498_000 picoseconds. - Weight::from_parts(7_904_000, 3497) + // Minimum execution time: 7_354_000 picoseconds. + Weight::from_parts(7_808_000, 3497) .saturating_add(T::DbWeight::get().reads(1)) } pub fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_029_000 picoseconds. - Weight::from_parts(7_325_000, 0) + // Minimum execution time: 6_716_000 picoseconds. + Weight::from_parts(7_067_000, 0) } pub fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_343_000 picoseconds. - Weight::from_parts(1_410_000, 0) + // Minimum execution time: 1_280_000 picoseconds. + Weight::from_parts(1_355_000, 0) } pub fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 696_000 picoseconds. - Weight::from_parts(734_000, 0) + // Minimum execution time: 587_000 picoseconds. + Weight::from_parts(645_000, 0) } pub fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 690_000 picoseconds. - Weight::from_parts(740_000, 0) + // Minimum execution time: 629_000 picoseconds. + Weight::from_parts(662_000, 0) } pub fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 667_000 picoseconds. - Weight::from_parts(697_000, 0) + // Minimum execution time: 590_000 picoseconds. + Weight::from_parts(639_000, 0) } pub fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 692_000 picoseconds. - Weight::from_parts(743_000, 0) + // Minimum execution time: 651_000 picoseconds. + Weight::from_parts(688_000, 0) } pub fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 670_000 picoseconds. - Weight::from_parts(712_000, 0) + // Minimum execution time: 601_000 picoseconds. + Weight::from_parts(630_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -151,8 +158,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 26_405_000 picoseconds. - Weight::from_parts(26_877_000, 3535) + // Minimum execution time: 25_650_000 picoseconds. + Weight::from_parts(26_440_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -162,8 +169,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `90` // Estimated: `3555` - // Minimum execution time: 10_953_000 picoseconds. - Weight::from_parts(11_345_000, 3555) + // Minimum execution time: 10_492_000 picoseconds. + Weight::from_parts(10_875_000, 3555) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -171,8 +178,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 644_000 picoseconds. 
- Weight::from_parts(693_000, 0) + // Minimum execution time: 597_000 picoseconds. + Weight::from_parts(647_000, 0) } // Storage: `PolkadotXcm::VersionNotifyTargets` (r:1 w:1) // Proof: `PolkadotXcm::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) @@ -190,8 +197,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `38` // Estimated: `3503` - // Minimum execution time: 24_157_000 picoseconds. - Weight::from_parts(24_980_000, 3503) + // Minimum execution time: 23_732_000 picoseconds. + Weight::from_parts(24_290_000, 3503) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(3)) } @@ -201,44 +208,44 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_767_000 picoseconds. - Weight::from_parts(2_844_000, 0) + // Minimum execution time: 2_446_000 picoseconds. + Weight::from_parts(2_613_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 1_079_000 picoseconds. - Weight::from_parts(1_141_000, 0) + // Minimum execution time: 960_000 picoseconds. + Weight::from_parts(1_045_000, 0) } pub fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 776_000 picoseconds. - Weight::from_parts(829_000, 0) + // Minimum execution time: 703_000 picoseconds. + Weight::from_parts(739_000, 0) } pub fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 696_000 picoseconds. - Weight::from_parts(740_000, 0) + // Minimum execution time: 616_000 picoseconds. + Weight::from_parts(651_000, 0) } pub fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 655_000 picoseconds. - Weight::from_parts(684_000, 0) + // Minimum execution time: 621_000 picoseconds. + Weight::from_parts(660_000, 0) } pub fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 825_000 picoseconds. - Weight::from_parts(853_000, 0) + // Minimum execution time: 794_000 picoseconds. + Weight::from_parts(831_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -256,8 +263,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 30_222_000 picoseconds. - Weight::from_parts(31_110_000, 3535) + // Minimum execution time: 29_527_000 picoseconds. + Weight::from_parts(30_614_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -265,8 +272,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_108_000 picoseconds. - Weight::from_parts(3_325_000, 0) + // Minimum execution time: 3_189_000 picoseconds. + Weight::from_parts(3_296_000, 0) } // Storage: `ParachainInfo::ParachainId` (r:1 w:0) // Proof: `ParachainInfo::ParachainId` (`max_values`: Some(1), `max_size`: Some(4), added: 499, mode: `MaxEncodedLen`) @@ -284,8 +291,8 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `70` // Estimated: `3535` - // Minimum execution time: 26_548_000 picoseconds. 
- Weight::from_parts(26_911_000, 3535) + // Minimum execution time: 25_965_000 picoseconds. + Weight::from_parts(26_468_000, 3535) .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(2)) } @@ -293,35 +300,35 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 684_000 picoseconds. - Weight::from_parts(726_000, 0) + // Minimum execution time: 618_000 picoseconds. + Weight::from_parts(659_000, 0) } pub fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 649_000 picoseconds. - Weight::from_parts(700_000, 0) + // Minimum execution time: 593_000 picoseconds. + Weight::from_parts(618_000, 0) } pub fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 650_000 picoseconds. - Weight::from_parts(686_000, 0) + // Minimum execution time: 603_000 picoseconds. + Weight::from_parts(634_000, 0) } pub fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 652_000 picoseconds. - Weight::from_parts(703_000, 0) + // Minimum execution time: 568_000 picoseconds. + Weight::from_parts(629_000, 0) } pub fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 673_000 picoseconds. - Weight::from_parts(742_000, 0) + // Minimum execution time: 598_000 picoseconds. + Weight::from_parts(655_000, 0) } } diff --git a/cumulus/xcm/xcm-emulator/src/lib.rs b/cumulus/xcm/xcm-emulator/src/lib.rs index b91246a7bda2..ff14b747973c 100644 --- a/cumulus/xcm/xcm-emulator/src/lib.rs +++ b/cumulus/xcm/xcm-emulator/src/lib.rs @@ -36,7 +36,10 @@ pub use core::{cell::RefCell, fmt::Debug}; pub use cumulus_primitives_core::AggregateMessageOrigin as CumulusAggregateMessageOrigin; pub use frame_support::{ assert_ok, - sp_runtime::{traits::Header as HeaderT, DispatchResult}, + sp_runtime::{ + traits::{Dispatchable, Header as HeaderT}, + DispatchResult, + }, traits::{ EnqueueMessage, ExecuteOverweightError, Get, Hooks, OnInitialize, OriginTrait, ProcessMessage, ProcessMessageError, ServiceQueues, @@ -221,7 +224,7 @@ pub trait Network { pub trait Chain: TestExt { type Network: Network; type Runtime: SystemConfig; - type RuntimeCall; + type RuntimeCall: Clone + Dispatchable; type RuntimeOrigin; type RuntimeEvent; type System; @@ -1221,7 +1224,7 @@ macro_rules! __impl_check_assertion { Args: Clone, { fn check_assertion(test: $crate::Test) { - use $crate::TestExt; + use $crate::{Dispatchable, TestExt}; let chain_name = std::any::type_name::<$chain<$network>>(); @@ -1229,6 +1232,15 @@ macro_rules! __impl_check_assertion { if let Some(dispatchable) = test.hops_dispatchable.get(chain_name) { $crate::assert_ok!(dispatchable(test.clone())); } + if let Some(call) = test.hops_calls.get(chain_name) { + $crate::assert_ok!( + match call.clone().dispatch(test.signed_origin.clone()) { + // We get rid of `post_info`. + Ok(_) => Ok(()), + Err(error_with_post_info) => Err(error_with_post_info.error), + } + ); + } if let Some(assertion) = test.hops_assertion.get(chain_name) { assertion(test); } @@ -1530,11 +1542,12 @@ where pub root_origin: Origin::RuntimeOrigin, pub hops_assertion: HashMap, pub hops_dispatchable: HashMap DispatchResult>, + pub hops_calls: HashMap, pub args: Args, _marker: PhantomData<(Destination, Hops)>, } -/// `Test` implementation +/// `Test` implementation. 
impl Test where Args: Clone, @@ -1544,7 +1557,7 @@ where Destination::RuntimeOrigin: OriginTrait> + Clone, Hops: Clone + CheckAssertion, { - /// Creates a new `Test` instance + /// Creates a new `Test` instance. pub fn new(test_args: TestContext) -> Self { Test { sender: TestAccount { @@ -1559,6 +1572,7 @@ where root_origin: ::RuntimeOrigin::root(), hops_assertion: Default::default(), hops_dispatchable: Default::default(), + hops_calls: Default::default(), args: test_args.args, _marker: Default::default(), } @@ -1573,6 +1587,11 @@ where let chain_name = std::any::type_name::(); self.hops_dispatchable.insert(chain_name.to_string(), dispatchable); } + /// Stores a call in a particular Chain, this will later be dispatched. + pub fn set_call(&mut self, call: Origin::RuntimeCall) { + let chain_name = std::any::type_name::(); + self.hops_calls.insert(chain_name.to_string(), call); + } /// Executes all dispatchables and assertions in order from `Origin` to `Destination` pub fn assert(&mut self) { Origin::check_assertion(self.clone()); diff --git a/polkadot/runtime/common/src/impls.rs b/polkadot/runtime/common/src/impls.rs index b6a93cf53685..ddd124aee9d2 100644 --- a/polkadot/runtime/common/src/impls.rs +++ b/polkadot/runtime/common/src/impls.rs @@ -138,6 +138,15 @@ pub enum VersionedLocatableAsset { V3 { location: xcm::v3::Location, asset_id: xcm::v3::AssetId }, #[codec(index = 4)] V4 { location: xcm::v4::Location, asset_id: xcm::v4::AssetId }, + #[codec(index = 5)] + V5 { location: xcm::v5::Location, asset_id: xcm::v5::AssetId }, +} + +/// A conversion from latest xcm to `VersionedLocatableAsset`. +impl From<(xcm::latest::Location, xcm::latest::AssetId)> for VersionedLocatableAsset { + fn from(value: (xcm::latest::Location, xcm::latest::AssetId)) -> Self { + VersionedLocatableAsset::V5 { location: value.0, asset_id: value.1 } + } } /// Converts the [`VersionedLocatableAsset`] to the [`xcm_builder::LocatableAssetId`]. @@ -149,12 +158,22 @@ impl TryConvert asset: VersionedLocatableAsset, ) -> Result { match asset { - VersionedLocatableAsset::V3 { location, asset_id } => + VersionedLocatableAsset::V3 { location, asset_id } => { + let v4_location: xcm::v4::Location = + location.try_into().map_err(|_| asset.clone())?; + let v4_asset_id: xcm::v4::AssetId = + asset_id.try_into().map_err(|_| asset.clone())?; + Ok(xcm_builder::LocatableAssetId { + location: v4_location.try_into().map_err(|_| asset.clone())?, + asset_id: v4_asset_id.try_into().map_err(|_| asset.clone())?, + }) + }, + VersionedLocatableAsset::V4 { ref location, ref asset_id } => Ok(xcm_builder::LocatableAssetId { - location: location.try_into().map_err(|_| asset.clone())?, - asset_id: asset_id.try_into().map_err(|_| asset.clone())?, + location: location.clone().try_into().map_err(|_| asset.clone())?, + asset_id: asset_id.clone().try_into().map_err(|_| asset.clone())?, }), - VersionedLocatableAsset::V4 { location, asset_id } => + VersionedLocatableAsset::V5 { location, asset_id } => Ok(xcm_builder::LocatableAssetId { location, asset_id }), } } @@ -167,12 +186,12 @@ impl TryConvert<&VersionedLocation, xcm::latest::Location> for VersionedLocation location: &VersionedLocation, ) -> Result { let latest = match location.clone() { - VersionedLocation::V2(l) => { - let v3: xcm::v3::Location = l.try_into().map_err(|_| location)?; - v3.try_into().map_err(|_| location)? + VersionedLocation::V3(l) => { + let v4_location: xcm::v4::Location = l.try_into().map_err(|_| location)?; + v4_location.try_into().map_err(|_| location)? 
}, - VersionedLocation::V3(l) => l.try_into().map_err(|_| location)?, - VersionedLocation::V4(l) => l, + VersionedLocation::V4(l) => l.try_into().map_err(|_| location)?, + VersionedLocation::V5(l) => l, }; Ok(latest) } @@ -188,11 +207,25 @@ where fn contains(asset: &VersionedLocatableAsset) -> bool { use VersionedLocatableAsset::*; let (location, asset_id) = match asset.clone() { - V3 { location, asset_id } => match (location.try_into(), asset_id.try_into()) { + V3 { location, asset_id } => { + let v4_location: xcm::v4::Location = match location.try_into() { + Ok(l) => l, + Err(_) => return false, + }; + let v4_asset_id: xcm::v4::AssetId = match asset_id.try_into() { + Ok(a) => a, + Err(_) => return false, + }; + match (v4_location.try_into(), v4_asset_id.try_into()) { + (Ok(l), Ok(a)) => (l, a), + _ => return false, + } + }, + V4 { location, asset_id } => match (location.try_into(), asset_id.try_into()) { (Ok(l), Ok(a)) => (l, a), _ => return false, }, - V4 { location, asset_id } => (location, asset_id), + V5 { location, asset_id } => (location, asset_id), }; C::contains(&location, &asset_id.0) } @@ -213,17 +246,14 @@ pub mod benchmarks { pub struct AssetRateArguments; impl AssetKindFactory for AssetRateArguments { fn create_asset_kind(seed: u32) -> VersionedLocatableAsset { - VersionedLocatableAsset::V4 { - location: xcm::v4::Location::new(0, [xcm::v4::Junction::Parachain(seed)]), - asset_id: xcm::v4::Location::new( + ( + Location::new(0, [Parachain(seed)]), + AssetId(Location::new( 0, - [ - xcm::v4::Junction::PalletInstance(seed.try_into().unwrap()), - xcm::v4::Junction::GeneralIndex(seed.into()), - ], - ) - .into(), - } + [PalletInstance(seed.try_into().unwrap()), GeneralIndex(seed.into())], + )), + ) + .into() } } @@ -238,26 +268,17 @@ pub mod benchmarks { for TreasuryArguments { fn create_asset_kind(seed: u32) -> VersionedLocatableAsset { - VersionedLocatableAsset::V3 { - location: xcm::v3::Location::new( - Parents::get(), - [xcm::v3::Junction::Parachain(ParaId::get())], - ), - asset_id: xcm::v3::Location::new( + ( + Location::new(Parents::get(), [Junction::Parachain(ParaId::get())]), + AssetId(Location::new( 0, - [ - xcm::v3::Junction::PalletInstance(seed.try_into().unwrap()), - xcm::v3::Junction::GeneralIndex(seed.into()), - ], - ) - .into(), - } + [PalletInstance(seed.try_into().unwrap()), GeneralIndex(seed.into())], + )), + ) + .into() } fn create_beneficiary(seed: [u8; 32]) -> VersionedLocation { - VersionedLocation::V4(xcm::v4::Location::new( - 0, - [xcm::v4::Junction::AccountId32 { network: None, id: seed }], - )) + VersionedLocation::from(Location::new(0, [AccountId32 { network: None, id: seed }])) } } } diff --git a/polkadot/runtime/common/src/xcm_sender.rs b/polkadot/runtime/common/src/xcm_sender.rs index 37fe7f0b59e9..7ff7f69faf14 100644 --- a/polkadot/runtime/common/src/xcm_sender.rs +++ b/polkadot/runtime/common/src/xcm_sender.rs @@ -157,7 +157,7 @@ impl InspectMessageQueues for ChildParachainRouter>, const TIMESLICE_PERIOD: u32, > MigrateToCoretime @@ -97,7 +97,7 @@ mod v_coretime { impl< T: Config + crate::dmp::Config, - SendXcm: xcm::v4::SendXcm, + SendXcm: xcm::v5::SendXcm, LegacyLease: GetLegacyLease>, const TIMESLICE_PERIOD: u32, > OnRuntimeUpgrade for MigrateToCoretime @@ -158,7 +158,7 @@ mod v_coretime { // NOTE: Also migrates `num_cores` config value in configuration::ActiveConfig. 
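Together with the new `V5` arm, the `From<(Location, AssetId)>` impl above is what lets the benchmark helpers build a `VersionedLocatableAsset` from plain latest-version values instead of naming a versioned variant. A small sketch of the same conversion in use (the module path and the concrete junction values are illustrative):

use polkadot_runtime_common::impls::VersionedLocatableAsset;
use xcm::latest::prelude::*;

fn sample_locatable_asset(para: u32) -> VersionedLocatableAsset {
    // A (location, asset id) pair in the latest XCM version converts
    // directly into the `V5` variant via the `From` impl shown above.
    (
        Location::new(0, [Parachain(para)]),
        AssetId(Location::new(0, [PalletInstance(50), GeneralIndex(1984)])),
    )
        .into()
}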
fn migrate_to_coretime< T: Config, - SendXcm: xcm::v4::SendXcm, + SendXcm: xcm::v5::SendXcm, LegacyLease: GetLegacyLease>, const TIMESLICE_PERIOD: u32, >() -> Weight { @@ -216,7 +216,7 @@ mod v_coretime { fn migrate_send_assignments_to_coretime_chain< T: Config, - SendXcm: xcm::v4::SendXcm, + SendXcm: xcm::v5::SendXcm, LegacyLease: GetLegacyLease>, const TIMESLICE_PERIOD: u32, >() -> result::Result<(), SendError> { diff --git a/polkadot/runtime/parachains/src/coretime/mod.rs b/polkadot/runtime/parachains/src/coretime/mod.rs index 9b9bdb86878f..17527bcf4e0a 100644 --- a/polkadot/runtime/parachains/src/coretime/mod.rs +++ b/polkadot/runtime/parachains/src/coretime/mod.rs @@ -30,20 +30,7 @@ use pallet_broker::{CoreAssignment, CoreIndex as BrokerCoreIndex}; use polkadot_primitives::{Balance, BlockNumber, CoreIndex, Id as ParaId}; use sp_arithmetic::traits::SaturatedConversion; use sp_runtime::traits::TryConvert; -use xcm::{ - prelude::{send_xcm, Instruction, Junction, Location, OriginKind, SendXcm, WeightLimit, Xcm}, - v4::{ - Asset, - AssetFilter::Wild, - AssetId, Assets, Error as XcmError, - Fungibility::Fungible, - Instruction::{DepositAsset, ReceiveTeleportedAsset}, - Junctions::Here, - Reanchorable, - WildAsset::AllCounted, - XcmContext, - }, -}; +use xcm::prelude::*; use xcm_executor::traits::TransactAsset; use crate::{ @@ -119,7 +106,7 @@ pub mod pallet { use crate::configuration; use sp_runtime::traits::TryConvert; - use xcm::v4::InteriorLocation; + use xcm::v5::InteriorLocation; use xcm_executor::traits::TransactAsset; use super::*; @@ -362,7 +349,7 @@ fn do_notify_revenue(when: BlockNumber, raw_revenue: Balance) -> Resu weight_limit: WeightLimit::Unlimited, check_origin: None, }]; - let asset = Asset { id: AssetId(Location::here()), fun: Fungible(raw_revenue) }; + let asset = Asset { id: Location::here().into(), fun: Fungible(raw_revenue) }; let dummy_xcm_context = XcmContext { origin: None, message_id: [0; 32], topic: None }; if raw_revenue > 0 { diff --git a/polkadot/runtime/parachains/src/mock.rs b/polkadot/runtime/parachains/src/mock.rs index 80751a2b7a02..989e130235f4 100644 --- a/polkadot/runtime/parachains/src/mock.rs +++ b/polkadot/runtime/parachains/src/mock.rs @@ -56,7 +56,7 @@ use std::{ }; use xcm::{ prelude::XcmVersion, - v4::{Assets, InteriorLocation, Location, SendError, SendResult, SendXcm, Xcm, XcmHash}, + v5::{Assets, InteriorLocation, Location, SendError, SendResult, SendXcm, Xcm, XcmHash}, IntoVersion, VersionedXcm, WrapVersion, }; diff --git a/polkadot/runtime/rococo/src/impls.rs b/polkadot/runtime/rococo/src/impls.rs index f01440ea02bc..8d5d9a737701 100644 --- a/polkadot/runtime/rococo/src/impls.rs +++ b/polkadot/runtime/rococo/src/impls.rs @@ -171,8 +171,8 @@ where // send let _ = >::send( RawOrigin::Root.into(), - Box::new(VersionedLocation::V4(destination)), - Box::new(VersionedXcm::V4(program)), + Box::new(VersionedLocation::from(destination)), + Box::new(VersionedXcm::from(program)), )?; Ok(()) } diff --git a/polkadot/runtime/rococo/src/weights/xcm/mod.rs b/polkadot/runtime/rococo/src/weights/xcm/mod.rs index bd2b0fbb8c06..0c4b7e7c1596 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/mod.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/mod.rs @@ -169,6 +169,9 @@ impl XcmWeightInfo for RococoXcmWeight { fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } diff --git 
a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index df2f9b2d0e8d..53d42a0e5c47 100644 --- a/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/polkadot/runtime/rococo/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -16,28 +16,27 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 // Executed Command: -// ./target/production/polkadot +// target/production/polkadot // benchmark // pallet // --steps=50 // --repeat=20 // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 -// --json-file=/var/lib/gitlab-runner/builds/zyw4fam_/0/parity/mirrors/polkadot/.git/.artifacts/bench.json +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_xcm_benchmarks::generic // --chain=rococo-dev -// --header=./file_header.txt -// --template=./xcm/pallet-xcm-benchmarks/template.hbs -// --output=./runtime/rococo/src/weights/xcm/ +// --header=./polkadot/file_header.txt +// --template=./polkadot/xcm/pallet-xcm-benchmarks/template.hbs +// --output=./polkadot/runtime/rococo/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -50,130 +49,125 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm_benchmarks::generic`. 
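The regenerated functions below all follow the same shape: `Weight::from_parts` carries the benchmarked ref-time (in picoseconds) and the proof-size estimate, and storage traffic is layered on with `saturating_add` over `T::DbWeight`. As a rough illustration (the constants mirror `report_holding` further down):

use frame_support::weights::Weight;

fn example_weight<T: frame_system::Config>() -> Weight {
    // ~65 µs of execution and a 3746-byte PoV estimate, plus 5 reads and 3 writes.
    Weight::from_parts(65_590_000, 3746)
        .saturating_add(T::DbWeight::get().reads(5))
        .saturating_add(T::DbWeight::get().writes(3))
}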
pub struct WeightInfo(PhantomData); impl WeightInfo { - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn report_holding() -> Weight { // Proof Size summary in bytes: - // Measured: `565` - // Estimated: `4030` - // Minimum execution time: 36_305_000 picoseconds. - Weight::from_parts(37_096_000, 4030) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 64_284_000 picoseconds. + Weight::from_parts(65_590_000, 3746) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } pub(crate) fn buy_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_831_000 picoseconds. - Weight::from_parts(2_904_000, 0) + // Minimum execution time: 777_000 picoseconds. + Weight::from_parts(825_000, 0) } - /// Storage: XcmPallet Queries (r:1 w:0) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + pub(crate) fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_543_000 picoseconds. + Weight::from_parts(1_627_000, 0) + } + /// Storage: `XcmPallet::Queries` (r:1 w:0) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn query_response() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 11_769_000 picoseconds. 
- Weight::from_parts(12_122_000, 3634) + // Measured: `0` + // Estimated: `3465` + // Minimum execution time: 5_995_000 picoseconds. + Weight::from_parts(6_151_000, 3465) .saturating_add(T::DbWeight::get().reads(1)) } pub(crate) fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_293_000 picoseconds. - Weight::from_parts(12_522_000, 0) + // Minimum execution time: 7_567_000 picoseconds. + Weight::from_parts(7_779_000, 0) } pub(crate) fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_858_000 picoseconds. - Weight::from_parts(2_965_000, 0) + // Minimum execution time: 1_226_000 picoseconds. + Weight::from_parts(1_322_000, 0) } pub(crate) fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_623_000 picoseconds. - Weight::from_parts(2_774_000, 0) + // Minimum execution time: 768_000 picoseconds. + Weight::from_parts(828_000, 0) } pub(crate) fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_664_000 picoseconds. - Weight::from_parts(2_752_000, 0) + // Minimum execution time: 765_000 picoseconds. + Weight::from_parts(814_000, 0) } pub(crate) fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_646_000 picoseconds. - Weight::from_parts(2_709_000, 0) + // Minimum execution time: 739_000 picoseconds. + Weight::from_parts(820_000, 0) } pub(crate) fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_602_000 picoseconds. - Weight::from_parts(3_669_000, 0) + // Minimum execution time: 806_000 picoseconds. + Weight::from_parts(849_000, 0) } pub(crate) fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_609_000 picoseconds. - Weight::from_parts(2_721_000, 0) + // Minimum execution time: 782_000 picoseconds. 
+ Weight::from_parts(820_000, 0) } - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn report_error() -> Weight { // Proof Size summary in bytes: - // Measured: `565` - // Estimated: `4030` - // Minimum execution time: 31_776_000 picoseconds. - Weight::from_parts(32_354_000, 4030) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 61_410_000 picoseconds. + Weight::from_parts(62_813_000, 3746) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: XcmPallet AssetTraps (r:1 w:1) - /// Proof Skipped: XcmPallet AssetTraps (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) + /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn claim_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `226` - // Estimated: `3691` - // Minimum execution time: 15_912_000 picoseconds. - Weight::from_parts(16_219_000, 3691) + // Measured: `23` + // Estimated: `3488` + // Minimum execution time: 9_315_000 picoseconds. + Weight::from_parts(9_575_000, 3488) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -181,171 +175,151 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_704_000 picoseconds. - Weight::from_parts(2_777_000, 0) + // Minimum execution time: 733_000 picoseconds. 
+ Weight::from_parts(813_000, 0) } - /// Storage: XcmPallet VersionNotifyTargets (r:1 w:1) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn subscribe_version() -> Weight { // Proof Size summary in bytes: - // Measured: `565` - // Estimated: `4030` - // Minimum execution time: 38_690_000 picoseconds. - Weight::from_parts(39_157_000, 4030) - .saturating_add(T::DbWeight::get().reads(9)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `180` + // Estimated: `3645` + // Minimum execution time: 30_641_000 picoseconds. + Weight::from_parts(31_822_000, 3645) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: XcmPallet VersionNotifyTargets (r:0 w:1) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:0 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn unsubscribe_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_943_000 picoseconds. - Weight::from_parts(5_128_000, 0) + // Minimum execution time: 2_978_000 picoseconds. + Weight::from_parts(3_260_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub(crate) fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 6_438_000 picoseconds. 
- Weight::from_parts(6_500_000, 0) + // Minimum execution time: 1_139_000 picoseconds. + Weight::from_parts(1_272_000, 0) } pub(crate) fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_773_000 picoseconds. - Weight::from_parts(4_840_000, 0) + // Minimum execution time: 850_000 picoseconds. + Weight::from_parts(879_000, 0) } pub(crate) fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_818_000 picoseconds. - Weight::from_parts(2_893_000, 0) + // Minimum execution time: 770_000 picoseconds. + Weight::from_parts(834_000, 0) } pub(crate) fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_611_000 picoseconds. - Weight::from_parts(2_708_000, 0) + // Minimum execution time: 756_000 picoseconds. + Weight::from_parts(797_000, 0) } pub(crate) fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_870_000 picoseconds. - Weight::from_parts(2_958_000, 0) + // Minimum execution time: 888_000 picoseconds. + Weight::from_parts(1_000_000, 0) } - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn query_pallet() -> Weight { // Proof Size summary in bytes: - // Measured: `565` - // Estimated: `4030` - // Minimum execution time: 40_735_000 picoseconds. 
- Weight::from_parts(66_023_000, 4030) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 72_138_000 picoseconds. + Weight::from_parts(73_728_000, 3746) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } pub(crate) fn expect_pallet() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 8_293_000 picoseconds. - Weight::from_parts(18_088_000, 0) + // Minimum execution time: 8_482_000 picoseconds. + Weight::from_parts(8_667_000, 0) } - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Configuration ActiveConfig (r:1 w:0) - /// Proof Skipped: Configuration ActiveConfig (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:1 w:1) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn report_transact_status() -> Weight { // Proof Size summary in bytes: - // Measured: `565` - // Estimated: `4030` - // Minimum execution time: 31_438_000 picoseconds. - Weight::from_parts(32_086_000, 4030) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(4)) + // Measured: `281` + // Estimated: `3746` + // Minimum execution time: 61_580_000 picoseconds. + Weight::from_parts(62_928_000, 3746) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } pub(crate) fn clear_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_676_000 picoseconds. - Weight::from_parts(2_746_000, 0) + // Minimum execution time: 807_000 picoseconds. 
+ Weight::from_parts(844_000, 0) } pub(crate) fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_629_000 picoseconds. - Weight::from_parts(2_724_000, 0) + // Minimum execution time: 757_000 picoseconds. + Weight::from_parts(808_000, 0) } pub(crate) fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_602_000 picoseconds. - Weight::from_parts(2_671_000, 0) + // Minimum execution time: 740_000 picoseconds. + Weight::from_parts(810_000, 0) } pub(crate) fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_681_000 picoseconds. - Weight::from_parts(2_768_000, 0) + // Minimum execution time: 752_000 picoseconds. + Weight::from_parts(786_000, 0) } pub(crate) fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_764_000 picoseconds. - Weight::from_parts(2_865_000, 0) + // Minimum execution time: 798_000 picoseconds. + Weight::from_parts(845_000, 0) } } diff --git a/polkadot/runtime/westend/src/impls.rs b/polkadot/runtime/westend/src/impls.rs index ac3f9e679f8d..5e7babf30e25 100644 --- a/polkadot/runtime/westend/src/impls.rs +++ b/polkadot/runtime/westend/src/impls.rs @@ -171,8 +171,8 @@ where // send let _ = >::send( RawOrigin::Root.into(), - Box::new(VersionedLocation::V4(destination)), - Box::new(VersionedXcm::V4(program)), + Box::new(VersionedLocation::from(destination)), + Box::new(VersionedXcm::from(program)), )?; Ok(()) } diff --git a/polkadot/runtime/westend/src/lib.rs b/polkadot/runtime/westend/src/lib.rs index b7dae533224c..1016207458c1 100644 --- a/polkadot/runtime/westend/src/lib.rs +++ b/polkadot/runtime/westend/src/lib.rs @@ -1805,6 +1805,8 @@ pub mod migrations { MaxAgentsToMigrate, >, parachains_shared::migration::MigrateToV1, + // permanent + pallet_xcm::migration::MigrateToLatestXcmVersion, ); } diff --git a/polkadot/runtime/westend/src/weights/xcm/mod.rs b/polkadot/runtime/westend/src/weights/xcm/mod.rs index cb5894ea51e3..8035439e8892 100644 --- a/polkadot/runtime/westend/src/weights/xcm/mod.rs +++ b/polkadot/runtime/westend/src/weights/xcm/mod.rs @@ -172,6 +172,9 @@ impl XcmWeightInfo for WestendXcmWeight { fn buy_execution(_fees: &Asset, _weight_limit: &WeightLimit) -> Weight { XcmGeneric::::buy_execution() } + fn pay_fees(_asset: &Asset) -> Weight { + XcmGeneric::::pay_fees() + } fn refund_surplus() -> Weight { XcmGeneric::::refund_surplus() } diff --git a/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs b/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs index 49beb85c2784..4cc979959483 100644 --- a/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs +++ b/polkadot/runtime/westend/src/weights/xcm/pallet_xcm_benchmarks_generic.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `pallet_xcm_benchmarks::generic` //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2023-06-02, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 32.0.0 +//! DATE: 2024-08-27, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` //! WORST CASE MAP SIZE: `1000000` -//! HOSTNAME: `bm3`, CPU: `Intel(R) Core(TM) i7-7700K CPU @ 4.20GHz` -//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 +//! HOSTNAME: `runner-svzsllib-project-674-concurrent-0`, CPU: `Intel(R) Xeon(R) CPU @ 2.60GHz` +//! WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 // Executed Command: // target/production/polkadot @@ -29,14 +29,14 @@ // --steps=50 // --repeat=20 // --extrinsic=* -// --execution=wasm // --wasm-execution=compiled // --heap-pages=4096 +// --json-file=/builds/parity/mirrors/polkadot-sdk/.git/.artifacts/bench.json // --pallet=pallet_xcm_benchmarks::generic // --chain=westend-dev -// --header=./file_header.txt -// --template=./xcm/pallet-xcm-benchmarks/template.hbs -// --output=./runtime/westend/src/weights/xcm/ +// --header=./polkadot/file_header.txt +// --template=./polkadot/xcm/pallet-xcm-benchmarks/template.hbs +// --output=./polkadot/runtime/westend/src/weights/xcm/ #![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] @@ -49,126 +49,125 @@ use core::marker::PhantomData; /// Weight functions for `pallet_xcm_benchmarks::generic`. pub struct WeightInfo(PhantomData); impl WeightInfo { - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn report_holding() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 30_790_000 picoseconds. - Weight::from_parts(31_265_000, 3634) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `351` + // Estimated: `6196` + // Minimum execution time: 68_015_000 picoseconds. 
+ Weight::from_parts(69_575_000, 6196) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } pub(crate) fn buy_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_741_000 picoseconds. - Weight::from_parts(2_823_000, 0) + // Minimum execution time: 738_000 picoseconds. + Weight::from_parts(780_000, 0) } - /// Storage: XcmPallet Queries (r:1 w:0) - /// Proof Skipped: XcmPallet Queries (max_values: None, max_size: None, mode: Measured) + pub(crate) fn pay_fees() -> Weight { + // Proof Size summary in bytes: + // Measured: `0` + // Estimated: `0` + // Minimum execution time: 1_621_000 picoseconds. + Weight::from_parts(1_750_000, 0) + } + /// Storage: `XcmPallet::Queries` (r:1 w:0) + /// Proof: `XcmPallet::Queries` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn query_response() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 10_848_000 picoseconds. - Weight::from_parts(11_183_000, 3634) + // Measured: `0` + // Estimated: `3465` + // Minimum execution time: 6_548_000 picoseconds. + Weight::from_parts(6_765_000, 3465) .saturating_add(T::DbWeight::get().reads(1)) } pub(crate) fn transact() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 12_145_000 picoseconds. - Weight::from_parts(12_366_000, 0) + // Minimum execution time: 7_049_000 picoseconds. + Weight::from_parts(7_267_000, 0) } pub(crate) fn refund_surplus() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_837_000 picoseconds. - Weight::from_parts(2_939_000, 0) + // Minimum execution time: 1_300_000 picoseconds. + Weight::from_parts(1_408_000, 0) } pub(crate) fn set_error_handler() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_526_000 picoseconds. - Weight::from_parts(2_622_000, 0) + // Minimum execution time: 738_000 picoseconds. + Weight::from_parts(810_000, 0) } pub(crate) fn set_appendix() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_603_000 picoseconds. - Weight::from_parts(2_642_000, 0) + // Minimum execution time: 759_000 picoseconds. + Weight::from_parts(796_000, 0) } pub(crate) fn clear_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_500_000 picoseconds. - Weight::from_parts(2_573_000, 0) + // Minimum execution time: 707_000 picoseconds. + Weight::from_parts(780_000, 0) } pub(crate) fn descend_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 3_323_000 picoseconds. - Weight::from_parts(3_401_000, 0) + // Minimum execution time: 749_000 picoseconds. + Weight::from_parts(835_000, 0) } pub(crate) fn clear_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_557_000 picoseconds. - Weight::from_parts(2_620_000, 0) + // Minimum execution time: 751_000 picoseconds. 
+ Weight::from_parts(799_000, 0) } - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn report_error() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 25_828_000 picoseconds. - Weight::from_parts(26_318_000, 3634) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `351` + // Estimated: `6196` + // Minimum execution time: 65_464_000 picoseconds. + Weight::from_parts(67_406_000, 6196) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } - /// Storage: XcmPallet AssetTraps (r:1 w:1) - /// Proof Skipped: XcmPallet AssetTraps (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::AssetTraps` (r:1 w:1) + /// Proof: `XcmPallet::AssetTraps` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn claim_asset() -> Weight { // Proof Size summary in bytes: - // Measured: `226` - // Estimated: `3691` - // Minimum execution time: 14_794_000 picoseconds. - Weight::from_parts(15_306_000, 3691) + // Measured: `23` + // Estimated: `3488` + // Minimum execution time: 9_887_000 picoseconds. + Weight::from_parts(10_310_000, 3488) .saturating_add(T::DbWeight::get().reads(1)) .saturating_add(T::DbWeight::get().writes(1)) } @@ -176,165 +175,151 @@ impl WeightInfo { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_534_000 picoseconds. - Weight::from_parts(2_574_000, 0) + // Minimum execution time: 737_000 picoseconds. 
+ Weight::from_parts(792_000, 0) } - /// Storage: XcmPallet VersionNotifyTargets (r:1 w:1) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:1 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn subscribe_version() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 32_218_000 picoseconds. - Weight::from_parts(32_945_000, 3634) - .saturating_add(T::DbWeight::get().reads(8)) - .saturating_add(T::DbWeight::get().writes(5)) + // Measured: `147` + // Estimated: `3612` + // Minimum execution time: 30_726_000 picoseconds. + Weight::from_parts(31_268_000, 3612) + .saturating_add(T::DbWeight::get().reads(5)) + .saturating_add(T::DbWeight::get().writes(3)) } - /// Storage: XcmPallet VersionNotifyTargets (r:0 w:1) - /// Proof Skipped: XcmPallet VersionNotifyTargets (max_values: None, max_size: None, mode: Measured) + /// Storage: `XcmPallet::VersionNotifyTargets` (r:0 w:1) + /// Proof: `XcmPallet::VersionNotifyTargets` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn unsubscribe_version() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_983_000 picoseconds. - Weight::from_parts(5_132_000, 0) + // Minimum execution time: 3_051_000 picoseconds. + Weight::from_parts(3_154_000, 0) .saturating_add(T::DbWeight::get().writes(1)) } pub(crate) fn burn_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 4_101_000 picoseconds. - Weight::from_parts(4_228_000, 0) + // Minimum execution time: 1_146_000 picoseconds. 
+ Weight::from_parts(1_223_000, 0) } pub(crate) fn expect_asset() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_740_000 picoseconds. - Weight::from_parts(2_814_000, 0) + // Minimum execution time: 821_000 picoseconds. + Weight::from_parts(901_000, 0) } pub(crate) fn expect_origin() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_716_000 picoseconds. - Weight::from_parts(2_795_000, 0) + // Minimum execution time: 762_000 picoseconds. + Weight::from_parts(808_000, 0) } pub(crate) fn expect_error() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_550_000 picoseconds. - Weight::from_parts(2_601_000, 0) + // Minimum execution time: 727_000 picoseconds. + Weight::from_parts(789_000, 0) } pub(crate) fn expect_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_762_000 picoseconds. - Weight::from_parts(2_849_000, 0) + // Minimum execution time: 867_000 picoseconds. + Weight::from_parts(933_000, 0) } - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn query_pallet() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 31_709_000 picoseconds. - Weight::from_parts(32_288_000, 3634) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `351` + // Estimated: `6196` + // Minimum execution time: 74_949_000 picoseconds. 
+ Weight::from_parts(76_124_000, 6196) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } pub(crate) fn expect_pallet() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 7_209_000 picoseconds. - Weight::from_parts(7_332_000, 0) + // Minimum execution time: 7_553_000 picoseconds. + Weight::from_parts(7_889_000, 0) } - /// Storage: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Proof Skipped: unknown `0x3a696e747261626c6f636b5f656e74726f7079` (r:1 w:1) - /// Storage: Dmp DeliveryFeeFactor (r:1 w:0) - /// Proof Skipped: Dmp DeliveryFeeFactor (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet SupportedVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SupportedVersion (max_values: None, max_size: None, mode: Measured) - /// Storage: XcmPallet VersionDiscoveryQueue (r:1 w:1) - /// Proof Skipped: XcmPallet VersionDiscoveryQueue (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: XcmPallet SafeXcmVersion (r:1 w:0) - /// Proof Skipped: XcmPallet SafeXcmVersion (max_values: Some(1), max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueues (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueues (max_values: None, max_size: None, mode: Measured) - /// Storage: Dmp DownwardMessageQueueHeads (r:1 w:1) - /// Proof Skipped: Dmp DownwardMessageQueueHeads (max_values: None, max_size: None, mode: Measured) + /// Storage: `Dmp::DeliveryFeeFactor` (r:1 w:0) + /// Proof: `Dmp::DeliveryFeeFactor` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `XcmPallet::SupportedVersion` (r:1 w:0) + /// Proof: `XcmPallet::SupportedVersion` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `Dmp::DownwardMessageQueues` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueues` (`max_values`: None, `max_size`: None, mode: `Measured`) + /// Storage: `System::Account` (r:2 w:2) + /// Proof: `System::Account` (`max_values`: None, `max_size`: Some(128), added: 2603, mode: `MaxEncodedLen`) + /// Storage: `Dmp::DownwardMessageQueueHeads` (r:1 w:1) + /// Proof: `Dmp::DownwardMessageQueueHeads` (`max_values`: None, `max_size`: None, mode: `Measured`) pub(crate) fn report_transact_status() -> Weight { // Proof Size summary in bytes: - // Measured: `169` - // Estimated: `3634` - // Minimum execution time: 26_161_000 picoseconds. - Weight::from_parts(26_605_000, 3634) - .saturating_add(T::DbWeight::get().reads(7)) + // Measured: `351` + // Estimated: `6196` + // Minimum execution time: 65_953_000 picoseconds. + Weight::from_parts(67_221_000, 6196) + .saturating_add(T::DbWeight::get().reads(6)) .saturating_add(T::DbWeight::get().writes(4)) } pub(crate) fn clear_transact_status() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_539_000 picoseconds. - Weight::from_parts(2_647_000, 0) + // Minimum execution time: 770_000 picoseconds. + Weight::from_parts(848_000, 0) } pub(crate) fn set_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_494_000 picoseconds. - Weight::from_parts(2_588_000, 0) + // Minimum execution time: 685_000 picoseconds. + Weight::from_parts(766_000, 0) } pub(crate) fn clear_topic() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_510_000 picoseconds. 
- Weight::from_parts(2_590_000, 0) + // Minimum execution time: 693_000 picoseconds. + Weight::from_parts(759_000, 0) } pub(crate) fn set_fees_mode() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_491_000 picoseconds. - Weight::from_parts(2_546_000, 0) + // Minimum execution time: 739_000 picoseconds. + Weight::from_parts(791_000, 0) } pub(crate) fn unpaid_execution() -> Weight { // Proof Size summary in bytes: // Measured: `0` // Estimated: `0` - // Minimum execution time: 2_696_000 picoseconds. - Weight::from_parts(2_816_000, 0) + // Minimum execution time: 735_000 picoseconds. + Weight::from_parts(811_000, 0) } } diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs index 7cb230f6e006..f53499b7b99a 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/parachain/xcm_config.rs @@ -21,7 +21,7 @@ use frame::{ runtime::prelude::*, traits::{Everything, Nothing}, }; -use xcm::v4::prelude::*; +use xcm::v5::prelude::*; use xcm_builder::{ AccountId32Aliases, DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete, diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs index 686f86b37b73..5921d9425aa0 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/mod.rs @@ -23,7 +23,7 @@ use frame::{ traits::{IdentityLookup, ProcessMessage, ProcessMessageError}, }; use polkadot_runtime_parachains::inclusion::{AggregateMessageOrigin, UmpQueueId}; -use xcm::v4::prelude::*; +use xcm::v5::prelude::*; mod xcm_config; pub use xcm_config::LocationToAccountId; diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs index a31e664d8216..64c98c2ffa2b 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/relay_chain/xcm_config.rs @@ -21,7 +21,7 @@ use frame::{ runtime::prelude::*, traits::{Everything, Nothing}, }; -use xcm::v4::prelude::*; +use xcm::v5::prelude::*; use xcm_builder::{ AccountId32Aliases, DescribeAllTerminal, DescribeFamily, EnsureXcmOrigin, FrameTransactionalProcessor, FungibleAdapter, HashedDescription, IsConcrete, diff --git a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/tests.rs b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/tests.rs index 792cf6149e7c..b7fdaa34ec8c 100644 --- a/polkadot/xcm/docs/src/cookbook/relay_token_transactor/tests.rs +++ b/polkadot/xcm/docs/src/cookbook/relay_token_transactor/tests.rs @@ -65,9 +65,9 @@ fn reserve_asset_transfers_work() { let assets: Assets = (Here, 50u128 * CENTS as u128).into(); assert_ok!(relay_chain::XcmPallet::transfer_assets( relay_chain::RuntimeOrigin::signed(ALICE), - Box::new(VersionedLocation::V4(destination.clone())), - Box::new(VersionedLocation::V4(beneficiary)), - Box::new(VersionedAssets::V4(assets)), + Box::new(VersionedLocation::from(destination.clone())), + Box::new(VersionedLocation::from(beneficiary)), + Box::new(VersionedAssets::from(assets)), 0, 
WeightLimit::Unlimited, )); @@ -101,9 +101,9 @@ fn reserve_asset_transfers_work() { let assets: Assets = (Parent, 25u128 * CENTS as u128).into(); assert_ok!(parachain::XcmPallet::transfer_assets( parachain::RuntimeOrigin::signed(BOB), - Box::new(VersionedLocation::V4(destination)), - Box::new(VersionedLocation::V4(beneficiary)), - Box::new(VersionedAssets::V4(assets)), + Box::new(VersionedLocation::from(destination)), + Box::new(VersionedLocation::from(beneficiary)), + Box::new(VersionedAssets::from(assets)), 0, WeightLimit::Unlimited, )); diff --git a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs index f1ec3f604d7b..003882416a5c 100644 --- a/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm-benchmarks/src/generic/benchmarking.rs @@ -98,6 +98,21 @@ benchmarks! { } + pay_fees { + let holding = T::worst_case_holding(0).into(); + + let mut executor = new_executor::(Default::default()); + executor.set_holding(holding); + + let fee_asset = T::fee_asset().unwrap(); + + let instruction = Instruction::>::PayFees { asset: fee_asset }; + + let xcm = Xcm(vec![instruction]); + } : { + executor.bench_process(xcm)?; + } verify {} + query_response { let mut executor = new_executor::(Default::default()); let (query_id, response) = T::worst_case_response(); diff --git a/polkadot/xcm/pallet-xcm/src/benchmarking.rs b/polkadot/xcm/pallet-xcm/src/benchmarking.rs index 404b9358d4d9..e493d4838f5c 100644 --- a/polkadot/xcm/pallet-xcm/src/benchmarking.rs +++ b/polkadot/xcm/pallet-xcm/src/benchmarking.rs @@ -382,8 +382,8 @@ benchmarks! { asset.clone().into(), &XcmContext { origin: None, message_id: [0u8; 32], topic: None } ); - let versioned_assets = VersionedAssets::V4(asset.into()); - }: _>(claim_origin.into(), Box::new(versioned_assets), Box::new(VersionedLocation::V4(claim_location))) + let versioned_assets = VersionedAssets::from(Assets::from(asset)); + }: _>(claim_origin.into(), Box::new(versioned_assets), Box::new(VersionedLocation::from(claim_location))) impl_benchmark_test_suite!( Pallet, diff --git a/polkadot/xcm/pallet-xcm/src/tests/mod.rs b/polkadot/xcm/pallet-xcm/src/tests/mod.rs index c16c1a1ba986..94c21560abfc 100644 --- a/polkadot/xcm/pallet-xcm/src/tests/mod.rs +++ b/polkadot/xcm/pallet-xcm/src/tests/mod.rs @@ -478,14 +478,14 @@ fn claim_assets_works() { // Even though assets are trapped, the extrinsic returns success. assert_ok!(XcmPallet::execute( RuntimeOrigin::signed(ALICE), - Box::new(VersionedXcm::V4(trapping_program)), + Box::new(VersionedXcm::from(trapping_program)), BaseXcmWeight::get() * 2, )); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE - SEND_AMOUNT); // Expected `AssetsTrapped` event info. let source: Location = Junction::AccountId32 { network: None, id: ALICE.into() }.into(); - let versioned_assets = VersionedAssets::V4(Assets::from((Here, SEND_AMOUNT))); + let versioned_assets = VersionedAssets::from(Assets::from((Here, SEND_AMOUNT))); let hash = BlakeTwo256::hash_of(&(source.clone(), versioned_assets.clone())); // Assets were indeed trapped. @@ -508,10 +508,11 @@ fn claim_assets_works() { // Now claim them with the extrinsic. 
assert_ok!(XcmPallet::claim_assets( RuntimeOrigin::signed(ALICE), - Box::new(VersionedAssets::V4((Here, SEND_AMOUNT).into())), - Box::new(VersionedLocation::V4( - AccountId32 { network: None, id: ALICE.clone().into() }.into() - )), + Box::new(VersionedAssets::from(Assets::from((Here, SEND_AMOUNT)))), + Box::new(VersionedLocation::from(Location::from(AccountId32 { + network: None, + id: ALICE.clone().into() + }))), )); assert_eq!(Balances::total_balance(&ALICE), INITIAL_BALANCE); assert_eq!(AssetTraps::::iter().collect::>(), vec![]); diff --git a/polkadot/xcm/procedural/src/builder_pattern.rs b/polkadot/xcm/procedural/src/builder_pattern.rs index 09ead1389d19..b65290332af9 100644 --- a/polkadot/xcm/procedural/src/builder_pattern.rs +++ b/polkadot/xcm/procedural/src/builder_pattern.rs @@ -160,13 +160,16 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result>>()?; @@ -260,50 +263,75 @@ fn generate_builder_impl(name: &Ident, data_enum: &DataEnum) -> Result, _>>()?; // Then we require fees to be paid - let buy_execution_method = data_enum + let pay_fees_variants = data_enum .variants .iter() - .find(|variant| variant.ident == "BuyExecution") - .map_or( - Err(Error::new_spanned(&data_enum.variants, "No BuyExecution instruction")), - |variant| { - let variant_name = &variant.ident; - let method_name_string = &variant_name.to_string().to_snake_case(); - let method_name = syn::Ident::new(method_name_string, variant_name.span()); - let docs = get_doc_comments(variant); - let fields = match &variant.fields { - Fields::Named(fields) => { - let arg_names: Vec<_> = - fields.named.iter().map(|field| &field.ident).collect(); - let arg_types: Vec<_> = - fields.named.iter().map(|field| &field.ty).collect(); - quote! { - #(#docs)* - pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> XcmBuilder { - let mut new_instructions = self.instructions; - #(let #arg_names = #arg_names.into();)* - new_instructions.push(#name::::#variant_name { #(#arg_names),* }); - XcmBuilder { - instructions: new_instructions, - state: core::marker::PhantomData, - } + .map(|variant| { + let maybe_builder_attr = variant.attrs.iter().find(|attr| match attr.meta { + Meta::List(ref list) => list.path.is_ident("builder"), + _ => false, + }); + let builder_attr = match maybe_builder_attr { + Some(builder) => builder.clone(), + None => return Ok(None), /* It's not going to be an instruction that pays fees */ + }; + let Meta::List(ref list) = builder_attr.meta else { unreachable!("We checked before") }; + let inner_ident: Ident = syn::parse2(list.tokens.clone()).map_err(|_| { + Error::new_spanned( + &builder_attr, + "Expected `builder(loads_holding)` or `builder(pays_fees)`", + ) + })?; + let ident_to_match: Ident = syn::parse_quote!(pays_fees); + if inner_ident == ident_to_match { + Ok(Some(variant)) + } else { + Ok(None) // Must have been `loads_holding` instead. + } + }) + .collect::>>()?; + + let pay_fees_methods = pay_fees_variants + .into_iter() + .flatten() + .map(|variant| { + let variant_name = &variant.ident; + let method_name_string = &variant_name.to_string().to_snake_case(); + let method_name = syn::Ident::new(method_name_string, variant_name.span()); + let docs = get_doc_comments(variant); + let fields = match &variant.fields { + Fields::Named(fields) => { + let arg_names: Vec<_> = + fields.named.iter().map(|field| &field.ident).collect(); + let arg_types: Vec<_> = + fields.named.iter().map(|field| &field.ty).collect(); + quote! 
{ + #(#docs)* + pub fn #method_name(self, #(#arg_names: impl Into<#arg_types>),*) -> XcmBuilder { + let mut new_instructions = self.instructions; + #(let #arg_names = #arg_names.into();)* + new_instructions.push(#name::::#variant_name { #(#arg_names),* }); + XcmBuilder { + instructions: new_instructions, + state: core::marker::PhantomData, } } - }, - _ => - return Err(Error::new_spanned( - variant, - "BuyExecution should have named fields", - )), - }; - Ok(fields) - }, - )?; + } + }, + _ => + return Err(Error::new_spanned( + variant, + "Both BuyExecution and PayFees have named fields", + )), + }; + Ok(fields) + }) + .collect::>>()?; let second_impl = quote! { impl XcmBuilder { #(#allowed_after_load_holding_methods)* - #buy_execution_method + #(#pay_fees_methods)* } }; diff --git a/polkadot/xcm/procedural/src/lib.rs b/polkadot/xcm/procedural/src/lib.rs index 4980d84d3282..9971fdceb69a 100644 --- a/polkadot/xcm/procedural/src/lib.rs +++ b/polkadot/xcm/procedural/src/lib.rs @@ -20,25 +20,11 @@ use proc_macro::TokenStream; use syn::{parse_macro_input, DeriveInput}; mod builder_pattern; -mod v2; mod v3; mod v4; +mod v5; mod weight_info; -#[proc_macro] -pub fn impl_conversion_functions_for_multilocation_v2(input: TokenStream) -> TokenStream { - v2::multilocation::generate_conversion_functions(input) - .unwrap_or_else(syn::Error::into_compile_error) - .into() -} - -#[proc_macro] -pub fn impl_conversion_functions_for_junctions_v2(input: TokenStream) -> TokenStream { - v2::junctions::generate_conversion_functions(input) - .unwrap_or_else(syn::Error::into_compile_error) - .into() -} - #[proc_macro_derive(XcmWeightInfoTrait)] pub fn derive_xcm_weight_info(item: TokenStream) -> TokenStream { weight_info::derive(item) @@ -72,6 +58,20 @@ pub fn impl_conversion_functions_for_junctions_v4(input: TokenStream) -> TokenSt .into() } +#[proc_macro] +pub fn impl_conversion_functions_for_junctions_v5(input: TokenStream) -> TokenStream { + v5::junctions::generate_conversion_functions(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} + +#[proc_macro] +pub fn impl_conversion_functions_for_location_v5(input: TokenStream) -> TokenStream { + v5::location::generate_conversion_functions(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} + /// This is called on the `Instruction` enum, not on the `Xcm` struct, /// and allows for the following syntax for building XCMs: /// let message = Xcm::builder() diff --git a/polkadot/xcm/procedural/src/v2.rs b/polkadot/xcm/procedural/src/v2.rs deleted file mode 100644 index 6878f7755cc7..000000000000 --- a/polkadot/xcm/procedural/src/v2.rs +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . 
- -use proc_macro2::{Span, TokenStream}; -use quote::{format_ident, quote}; -use syn::{Result, Token}; - -pub mod multilocation { - use super::*; - - pub fn generate_conversion_functions(input: proc_macro::TokenStream) -> Result { - if !input.is_empty() { - return Err(syn::Error::new(Span::call_site(), "No arguments expected")) - } - - // Support up to 8 Parents in a tuple, assuming that most use cases don't go past 8 parents. - let from_tuples = generate_conversion_from_tuples(8); - let from_v3 = generate_conversion_from_v3(); - - Ok(quote! { - #from_tuples - #from_v3 - }) - } - - fn generate_conversion_from_tuples(max_parents: u8) -> TokenStream { - let mut from_tuples = (0..8usize) - .map(|num_junctions| { - let junctions = - (0..=num_junctions).map(|_| format_ident!("Junction")).collect::>(); - let idents = - (0..=num_junctions).map(|i| format_ident!("j{}", i)).collect::>(); - let variant = &format_ident!("X{}", num_junctions + 1); - let array_size = num_junctions + 1; - - let mut from_tuple = quote! { - impl From<( #(#junctions,)* )> for MultiLocation { - fn from( ( #(#idents,)* ): ( #(#junctions,)* ) ) -> Self { - MultiLocation { parents: 0, interior: Junctions::#variant( #(#idents),* ) } - } - } - - impl From<(u8, #(#junctions),*)> for MultiLocation { - fn from( ( parents, #(#idents),* ): (u8, #(#junctions),* ) ) -> Self { - MultiLocation { parents, interior: Junctions::#variant( #(#idents),* ) } - } - } - - impl From<(Ancestor, #(#junctions),*)> for MultiLocation { - fn from( ( Ancestor(parents), #(#idents),* ): (Ancestor, #(#junctions),* ) ) -> Self { - MultiLocation { parents, interior: Junctions::#variant( #(#idents),* ) } - } - } - - impl From<[Junction; #array_size]> for MultiLocation { - fn from(j: [Junction; #array_size]) -> Self { - let [#(#idents),*] = j; - MultiLocation { parents: 0, interior: Junctions::#variant( #(#idents),* ) } - } - } - }; - - let from_parent_tuples = (1..=max_parents).map(|cur_parents| { - let parents = - (0..cur_parents).map(|_| format_ident!("Parent")).collect::>(); - let underscores = - (0..cur_parents).map(|_| Token![_](Span::call_site())).collect::>(); - - quote! { - impl From<( #(#parents,)* #(#junctions),* )> for MultiLocation { - fn from( (#(#underscores,)* #(#idents),*): ( #(#parents,)* #(#junctions),* ) ) -> Self { - MultiLocation { parents: #cur_parents, interior: Junctions::#variant( #(#idents),* ) } - } - } - } - }); - - from_tuple.extend(from_parent_tuples); - from_tuple - }) - .collect::(); - - let from_parent_junctions_tuples = (1..=max_parents).map(|cur_parents| { - let parents = (0..cur_parents).map(|_| format_ident!("Parent")).collect::>(); - let underscores = - (0..cur_parents).map(|_| Token![_](Span::call_site())).collect::>(); - - quote! { - impl From<( #(#parents,)* Junctions )> for MultiLocation { - fn from( (#(#underscores,)* junctions): ( #(#parents,)* Junctions ) ) -> Self { - MultiLocation { parents: #cur_parents, interior: junctions } - } - } - } - }); - from_tuples.extend(from_parent_junctions_tuples); - - quote! 
{ - impl From for MultiLocation { - fn from(junctions: Junctions) -> Self { - MultiLocation { parents: 0, interior: junctions } - } - } - - impl From<(u8, Junctions)> for MultiLocation { - fn from((parents, interior): (u8, Junctions)) -> Self { - MultiLocation { parents, interior } - } - } - - impl From<(Ancestor, Junctions)> for MultiLocation { - fn from((Ancestor(parents), interior): (Ancestor, Junctions)) -> Self { - MultiLocation { parents, interior } - } - } - - impl From<()> for MultiLocation { - fn from(_: ()) -> Self { - MultiLocation { parents: 0, interior: Junctions::Here } - } - } - - impl From<(u8,)> for MultiLocation { - fn from((parents,): (u8,)) -> Self { - MultiLocation { parents, interior: Junctions::Here } - } - } - - impl From for MultiLocation { - fn from(x: Junction) -> Self { - MultiLocation { parents: 0, interior: Junctions::X1(x) } - } - } - - impl From<[Junction; 0]> for MultiLocation { - fn from(_: [Junction; 0]) -> Self { - MultiLocation { parents: 0, interior: Junctions::Here } - } - } - - #from_tuples - } - } - - fn generate_conversion_from_v3() -> TokenStream { - let match_variants = (0..8u8) - .map(|cur_num| { - let num_ancestors = cur_num + 1; - let variant = format_ident!("X{}", num_ancestors); - let idents = (0..=cur_num).map(|i| format_ident!("j{}", i)).collect::>(); - - quote! { - crate::v3::Junctions::#variant( #(#idents),* ) => - #variant( #( core::convert::TryInto::try_into(#idents)? ),* ), - } - }) - .collect::(); - - quote! { - impl core::convert::TryFrom for Junctions { - type Error = (); - fn try_from(mut new: crate::v3::Junctions) -> core::result::Result { - use Junctions::*; - Ok(match new { - crate::v3::Junctions::Here => Here, - #match_variants - }) - } - } - } - } -} - -pub mod junctions { - use super::*; - - pub fn generate_conversion_functions(input: proc_macro::TokenStream) -> Result { - if !input.is_empty() { - return Err(syn::Error::new(Span::call_site(), "No arguments expected")) - } - - let from_slice_syntax = generate_conversion_from_slice_syntax(); - - Ok(quote! { - #from_slice_syntax - }) - } - - fn generate_conversion_from_slice_syntax() -> TokenStream { - quote! { - macro_rules! impl_junction { - ($count:expr, $variant:ident, ($($index:literal),+)) => { - /// Additional helper for building junctions - /// Useful for converting to future XCM versions - impl From<[Junction; $count]> for Junctions { - fn from(junctions: [Junction; $count]) -> Self { - Self::$variant($(junctions[$index].clone()),*) - } - } - }; - } - - impl_junction!(1, X1, (0)); - impl_junction!(2, X2, (0, 1)); - impl_junction!(3, X3, (0, 1, 2)); - impl_junction!(4, X4, (0, 1, 2, 3)); - impl_junction!(5, X5, (0, 1, 2, 3, 4)); - impl_junction!(6, X6, (0, 1, 2, 3, 4, 5)); - impl_junction!(7, X7, (0, 1, 2, 3, 4, 5, 6)); - impl_junction!(8, X8, (0, 1, 2, 3, 4, 5, 6, 7)); - } - } -} diff --git a/polkadot/xcm/procedural/src/v3.rs b/polkadot/xcm/procedural/src/v3.rs index f0556d5a8d44..1292b56277dd 100644 --- a/polkadot/xcm/procedural/src/v3.rs +++ b/polkadot/xcm/procedural/src/v3.rs @@ -127,12 +127,10 @@ pub mod junctions { } // Support up to 8 Parents in a tuple, assuming that most use cases don't go past 8 parents. - let from_v2 = generate_conversion_from_v2(MAX_JUNCTIONS); let from_v4 = generate_conversion_from_v4(); let from_tuples = generate_conversion_from_tuples(MAX_JUNCTIONS); Ok(quote! 
{ - #from_v2 #from_v4 #from_tuples }) @@ -194,32 +192,4 @@ pub mod junctions { } } } - - fn generate_conversion_from_v2(max_junctions: usize) -> TokenStream { - let match_variants = (0..max_junctions) - .map(|cur_num| { - let num_ancestors = cur_num + 1; - let variant = format_ident!("X{}", num_ancestors); - let idents = (0..=cur_num).map(|i| format_ident!("j{}", i)).collect::>(); - - quote! { - crate::v2::Junctions::#variant( #(#idents),* ) => - #variant( #( core::convert::TryInto::try_into(#idents)? ),* ), - } - }) - .collect::(); - - quote! { - impl core::convert::TryFrom for Junctions { - type Error = (); - fn try_from(mut old: crate::v2::Junctions) -> core::result::Result { - use Junctions::*; - Ok(match old { - crate::v2::Junctions::Here => Here, - #match_variants - }) - } - } - } - } } diff --git a/polkadot/xcm/procedural/src/v4.rs b/polkadot/xcm/procedural/src/v4.rs index 5f5e10d3081b..9bc2f094d021 100644 --- a/polkadot/xcm/procedural/src/v4.rs +++ b/polkadot/xcm/procedural/src/v4.rs @@ -132,10 +132,12 @@ pub mod junctions { // Support up to 8 Parents in a tuple, assuming that most use cases don't go past 8 parents. let from_v3 = generate_conversion_from_v3(MAX_JUNCTIONS); + let from_v5 = generate_conversion_from_v5(MAX_JUNCTIONS); let from_tuples = generate_conversion_from_tuples(MAX_JUNCTIONS); Ok(quote! { #from_v3 + #from_v5 #from_tuples }) } @@ -193,4 +195,43 @@ pub mod junctions { } } } + + fn generate_conversion_from_v5(max_junctions: usize) -> TokenStream { + let match_variants = (0..max_junctions) + .map(|current_number| { + let number_ancestors = current_number + 1; + let variant = format_ident!("X{}", number_ancestors); + let idents = + (0..=current_number).map(|i| format_ident!("j{}", i)).collect::>(); + let convert = idents + .iter() + .map(|ident| { + quote! { let #ident = core::convert::TryInto::try_into(#ident.clone())?; } + }) + .collect::>(); + + quote! { + crate::v5::Junctions::#variant( junctions ) => { + let [#(#idents),*] = &*junctions; + #(#convert);* + [#(#idents),*].into() + }, + } + }) + .collect::(); + + quote! { + impl core::convert::TryFrom for Junctions { + type Error = (); + + fn try_from(mut new: crate::v5::Junctions) -> core::result::Result { + use Junctions::*; + Ok(match new { + crate::v5::Junctions::Here => Here, + #match_variants + }) + } + } + } + } } diff --git a/polkadot/xcm/procedural/src/v5.rs b/polkadot/xcm/procedural/src/v5.rs new file mode 100644 index 000000000000..895a323c1738 --- /dev/null +++ b/polkadot/xcm/procedural/src/v5.rs @@ -0,0 +1,198 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . 
+ +use proc_macro2::{Span, TokenStream}; +use quote::{format_ident, quote}; +use syn::{Result, Token}; + +const MAX_JUNCTIONS: usize = 8; + +pub mod location { + use super::*; + + /// Generates conversion functions from other types to the `Location` type: + /// - [PalletInstance(50), GeneralIndex(1984)].into() + /// - (Parent, Parachain(1000), AccountId32 { .. }).into() + pub fn generate_conversion_functions(input: proc_macro::TokenStream) -> Result { + if !input.is_empty() { + return Err(syn::Error::new(Span::call_site(), "No arguments expected")) + } + + let from_tuples = generate_conversion_from_tuples(8, 8); + + Ok(quote! { + #from_tuples + }) + } + + fn generate_conversion_from_tuples(max_junctions: usize, max_parents: usize) -> TokenStream { + let mut from_tuples = (0..=max_junctions) + .map(|num_junctions| { + let types = (0..num_junctions).map(|i| format_ident!("J{}", i)).collect::>(); + let idents = + (0..num_junctions).map(|i| format_ident!("j{}", i)).collect::>(); + let array_size = num_junctions; + let interior = if num_junctions == 0 { + quote!(Junctions::Here) + } else { + let variant = format_ident!("X{}", num_junctions); + quote! { + Junctions::#variant( alloc::sync::Arc::new( [#(#idents .into()),*] ) ) + } + }; + + let mut from_tuple = quote! { + impl< #(#types : Into,)* > From<( Ancestor, #( #types ),* )> for Location { + fn from( ( Ancestor(parents), #(#idents),* ): ( Ancestor, #( #types ),* ) ) -> Self { + Location { parents, interior: #interior } + } + } + + impl From<[Junction; #array_size]> for Location { + fn from(j: [Junction; #array_size]) -> Self { + let [#(#idents),*] = j; + Location { parents: 0, interior: #interior } + } + } + }; + + let from_parent_tuples = (0..=max_parents).map(|cur_parents| { + let parents = + (0..cur_parents).map(|_| format_ident!("Parent")).collect::>(); + let underscores = + (0..cur_parents).map(|_| Token![_](Span::call_site())).collect::>(); + + quote! { + impl< #(#types : Into,)* > From<( #( #parents , )* #( #types , )* )> for Location { + fn from( ( #(#underscores,)* #(#idents,)* ): ( #(#parents,)* #(#types,)* ) ) -> Self { + Self { parents: #cur_parents as u8, interior: #interior } + } + } + } + }); + + from_tuple.extend(from_parent_tuples); + from_tuple + }) + .collect::(); + + let from_parent_junctions_tuples = (0..=max_parents).map(|cur_parents| { + let parents = (0..cur_parents).map(|_| format_ident!("Parent")).collect::>(); + let underscores = + (0..cur_parents).map(|_| Token![_](Span::call_site())).collect::>(); + + quote! { + impl From<( #(#parents,)* Junctions )> for Location { + fn from( (#(#underscores,)* junctions): ( #(#parents,)* Junctions ) ) -> Self { + Location { parents: #cur_parents as u8, interior: junctions } + } + } + } + }); + from_tuples.extend(from_parent_junctions_tuples); + + quote! { + impl From<(Ancestor, Junctions)> for Location { + fn from((Ancestor(parents), interior): (Ancestor, Junctions)) -> Self { + Location { parents, interior } + } + } + + impl From for Location { + fn from(x: Junction) -> Self { + Location { parents: 0, interior: [x].into() } + } + } + + #from_tuples + } + } +} + +pub mod junctions { + use super::*; + + pub fn generate_conversion_functions(input: proc_macro::TokenStream) -> Result { + if !input.is_empty() { + return Err(syn::Error::new(Span::call_site(), "No arguments expected")) + } + + // Support up to 8 Parents in a tuple, assuming that most use cases don't go past 8 parents. 
+ let from_v4 = generate_conversion_from_v4(MAX_JUNCTIONS); + let from_tuples = generate_conversion_from_tuples(MAX_JUNCTIONS); + + Ok(quote! { + #from_v4 + #from_tuples + }) + } + + fn generate_conversion_from_tuples(max_junctions: usize) -> TokenStream { + (1..=max_junctions) + .map(|num_junctions| { + let idents = + (0..num_junctions).map(|i| format_ident!("j{}", i)).collect::>(); + let types = (0..num_junctions).map(|i| format_ident!("J{}", i)).collect::>(); + + quote! { + impl<#(#types : Into,)*> From<( #(#types,)* )> for Junctions { + fn from( ( #(#idents,)* ): ( #(#types,)* ) ) -> Self { + [#(#idents .into()),*].into() + } + } + } + }) + .collect() + } + + fn generate_conversion_from_v4(max_junctions: usize) -> TokenStream { + let match_variants = (0..max_junctions) + .map(|cur_num| { + let num_ancestors = cur_num + 1; + let variant = format_ident!("X{}", num_ancestors); + let idents = (0..=cur_num).map(|i| format_ident!("j{}", i)).collect::>(); + let convert = idents + .iter() + .enumerate() + .map(|(index, ident)| { + quote! { let #ident = core::convert::TryInto::try_into(slice[#index].clone())?; } + }) + .collect::>(); + + quote! { + crate::v4::Junctions::#variant( arc ) => { + let slice = &arc[..]; + #(#convert);*; + let junctions: Junctions = [#(#idents),*].into(); + junctions + }, + } + }) + .collect::(); + + quote! { + impl core::convert::TryFrom for Junctions { + type Error = (); + fn try_from(mut old: crate::v4::Junctions) -> core::result::Result { + Ok(match old { + crate::v4::Junctions::Here => Junctions::Here, + #match_variants + }) + } + } + } + } +} diff --git a/polkadot/xcm/src/lib.rs b/polkadot/xcm/src/lib.rs index 0b916c87f549..c51717541ed7 100644 --- a/polkadot/xcm/src/lib.rs +++ b/polkadot/xcm/src/lib.rs @@ -21,8 +21,6 @@ // // Hence, `no_std` rather than sp-runtime. #![cfg_attr(not(feature = "std"), no_std)] -// Because of XCMv2. -#![allow(deprecated)] extern crate alloc; @@ -30,19 +28,16 @@ use codec::{Decode, DecodeLimit, Encode, Error as CodecError, Input, MaxEncodedL use derivative::Derivative; use scale_info::TypeInfo; -#[deprecated( - note = "XCMv2 will be removed once XCMv5 is released. Please use XCMv3 or XCMv4 instead." -)] -pub mod v2; pub mod v3; pub mod v4; +pub mod v5; pub mod lts { pub use super::v4::*; } pub mod latest { - pub use super::v4::*; + pub use super::v5::*; } mod double_encoded; @@ -81,12 +76,16 @@ pub trait TryAs { fn try_as(&self) -> Result<&T, ()>; } +// Macro that generated versioned wrapper types. +// NOTE: converting a v4 type into a versioned type will make it v5. macro_rules! versioned_type { ($(#[$attr:meta])* pub enum $n:ident { $(#[$index3:meta])+ V3($v3:ty), $(#[$index4:meta])+ V4($v4:ty), + $(#[$index5:meta])+ + V5($v5:ty), }) => { #[derive(Derivative, Encode, Decode, TypeInfo)] #[derivative( @@ -104,6 +103,8 @@ macro_rules! versioned_type { V3($v3), $(#[$index4])* V4($v4), + $(#[$index5])* + V5($v5), } impl $n { pub fn try_as(&self) -> Result<&T, ()> where Self: TryAs { @@ -126,11 +127,20 @@ macro_rules! versioned_type { } } } + impl TryAs<$v5> for $n { + fn try_as(&self) -> Result<&$v5, ()> { + match &self { + Self::V5(ref x) => Ok(x), + _ => Err(()), + } + } + } impl IntoVersion for $n { fn into_version(self, n: Version) -> Result { Ok(match n { 3 => Self::V3(self.try_into()?), 4 => Self::V4(self.try_into()?), + 5 => Self::V5(self.try_into()?), _ => return Err(()), }) } @@ -140,9 +150,9 @@ macro_rules! 
versioned_type { $n::V3(x.into()) } } - impl From<$v4> for $n { - fn from(x: $v4) -> Self { - $n::V4(x.into()) + impl> From for $n { + fn from(x: T) -> Self { + $n::V5(x.into()) } } impl TryFrom<$n> for $v3 { @@ -151,7 +161,11 @@ macro_rules! versioned_type { use $n::*; match x { V3(x) => Ok(x), - V4(x) => x.try_into(), + V4(x) => x.try_into().map_err(|_| ()), + V5(x) => { + let v4: $v4 = x.try_into().map_err(|_| ())?; + v4.try_into().map_err(|_| ()) + } } } } @@ -162,137 +176,21 @@ macro_rules! versioned_type { match x { V3(x) => x.try_into().map_err(|_| ()), V4(x) => Ok(x), + V5(x) => x.try_into().map_err(|_| ()), } } } - impl MaxEncodedLen for $n { - fn max_encoded_len() -> usize { - <$v3>::max_encoded_len() - } - } - impl IdentifyVersion for $n { - fn identify_version(&self) -> Version { - use $n::*; - match self { - V3(_) => v3::VERSION, - V4(_) => v4::VERSION, - } - } - } - }; - - ($(#[$attr:meta])* pub enum $n:ident { - $(#[$index2:meta])+ - V2($v2:ty), - $(#[$index3:meta])+ - V3($v3:ty), - $(#[$index4:meta])+ - V4($v4:ty), - }) => { - #[derive(Derivative, Encode, Decode, TypeInfo)] - #[derivative( - Clone(bound = ""), - Eq(bound = ""), - PartialEq(bound = ""), - Debug(bound = "") - )] - #[codec(encode_bound())] - #[codec(decode_bound())] - #[scale_info(replace_segment("staging_xcm", "xcm"))] - $(#[$attr])* - pub enum $n { - $(#[$index2])* - V2($v2), - $(#[$index3])* - V3($v3), - $(#[$index4])* - V4($v4), - } - impl $n { - pub fn try_as(&self) -> Result<&T, ()> where Self: TryAs { - >::try_as(&self) - } - } - impl TryAs<$v2> for $n { - fn try_as(&self) -> Result<&$v2, ()> { - match &self { - Self::V2(ref x) => Ok(x), - _ => Err(()), - } - } - } - impl TryAs<$v3> for $n { - fn try_as(&self) -> Result<&$v3, ()> { - match &self { - Self::V3(ref x) => Ok(x), - _ => Err(()), - } - } - } - impl TryAs<$v4> for $n { - fn try_as(&self) -> Result<&$v4, ()> { - match &self { - Self::V4(ref x) => Ok(x), - _ => Err(()), - } - } - } - impl IntoVersion for $n { - fn into_version(self, n: Version) -> Result { - Ok(match n { - 1 | 2 => Self::V2(self.try_into()?), - 3 => Self::V3(self.try_into()?), - 4 => Self::V4(self.try_into()?), - _ => return Err(()), - }) - } - } - impl From<$v2> for $n { - fn from(x: $v2) -> Self { - $n::V2(x) - } - } - impl> From for $n { - fn from(x: T) -> Self { - $n::V4(x.into()) - } - } - impl TryFrom<$n> for $v2 { + impl TryFrom<$n> for $v5 { type Error = (); fn try_from(x: $n) -> Result { use $n::*; match x { - V2(x) => Ok(x), - V3(x) => x.try_into(), - V4(x) => { - let v3: $v3 = x.try_into().map_err(|_| ())?; - v3.try_into() + V3(x) => { + let v4: $v4 = x.try_into().map_err(|_| ())?; + v4.try_into().map_err(|_| ()) }, - } - } - } - impl TryFrom<$n> for $v3 { - type Error = (); - fn try_from(x: $n) -> Result { - use $n::*; - match x { - V2(x) => x.try_into(), - V3(x) => Ok(x), V4(x) => x.try_into().map_err(|_| ()), - } - } - } - impl TryFrom<$n> for $v4 { - type Error = (); - fn try_from(x: $n) -> Result { - use $n::*; - match x { - V2(x) => { - let v3: $v3 = x.try_into().map_err(|_| ())?; - v3.try_into().map_err(|_| ()) - }, - V3(x) => x.try_into().map_err(|_| ()), - V4(x) => Ok(x), + V5(x) => Ok(x), } } } @@ -305,9 +203,9 @@ macro_rules! versioned_type { fn identify_version(&self) -> Version { use $n::*; match self { - V2(_) => v2::VERSION, V3(_) => v3::VERSION, V4(_) => v4::VERSION, + V5(_) => v5::VERSION, } } } @@ -321,42 +219,44 @@ versioned_type! { V3(v3::AssetId), #[codec(index = 4)] V4(v4::AssetId), + #[codec(index = 5)] + V5(v5::AssetId), } } versioned_type! 
{ /// A single version's `Response` value, together with its version code. pub enum VersionedResponse { - #[codec(index = 2)] - V2(v2::Response), #[codec(index = 3)] V3(v3::Response), #[codec(index = 4)] V4(v4::Response), + #[codec(index = 5)] + V5(v5::Response), } } versioned_type! { /// A single `NetworkId` value, together with its version code. pub enum VersionedNetworkId { - #[codec(index = 2)] - V2(v2::NetworkId), #[codec(index = 3)] V3(v3::NetworkId), #[codec(index = 4)] V4(v4::NetworkId), + #[codec(index = 5)] + V5(v5::NetworkId), } } versioned_type! { /// A single `Junction` value, together with its version code. pub enum VersionedJunction { - #[codec(index = 2)] - V2(v2::Junction), #[codec(index = 3)] V3(v3::Junction), #[codec(index = 4)] V4(v4::Junction), + #[codec(index = 5)] + V5(v5::Junction), } } @@ -364,63 +264,51 @@ versioned_type! { /// A single `Location` value, together with its version code. #[derive(Ord, PartialOrd)] pub enum VersionedLocation { - #[codec(index = 1)] // v2 is same as v1 and therefore re-using the v1 index - V2(v2::MultiLocation), #[codec(index = 3)] V3(v3::MultiLocation), #[codec(index = 4)] V4(v4::Location), + #[codec(index = 5)] + V5(v5::Location), } } -#[deprecated(note = "Use `VersionedLocation` instead")] -pub type VersionedMultiLocation = VersionedLocation; - versioned_type! { /// A single `InteriorLocation` value, together with its version code. pub enum VersionedInteriorLocation { - #[codec(index = 2)] // while this is same as v1::Junctions, VersionedInteriorLocation is introduced in v3 - V2(v2::InteriorMultiLocation), #[codec(index = 3)] V3(v3::InteriorMultiLocation), #[codec(index = 4)] V4(v4::InteriorLocation), + #[codec(index = 5)] + V5(v5::InteriorLocation), } } -#[deprecated(note = "Use `VersionedInteriorLocation` instead")] -pub type VersionedInteriorMultiLocation = VersionedInteriorLocation; - versioned_type! { /// A single `Asset` value, together with its version code. pub enum VersionedAsset { - #[codec(index = 1)] // v2 is same as v1 and therefore re-using the v1 index - V2(v2::MultiAsset), #[codec(index = 3)] V3(v3::MultiAsset), #[codec(index = 4)] V4(v4::Asset), + #[codec(index = 5)] + V5(v5::Asset), } } -#[deprecated(note = "Use `VersionedAsset` instead")] -pub type VersionedMultiAsset = VersionedAsset; - versioned_type! { /// A single `MultiAssets` value, together with its version code. pub enum VersionedAssets { - #[codec(index = 1)] // v2 is same as v1 and therefore re-using the v1 index - V2(v2::MultiAssets), #[codec(index = 3)] V3(v3::MultiAssets), #[codec(index = 4)] V4(v4::Assets), + #[codec(index = 5)] + V5(v5::Assets), } } -#[deprecated(note = "Use `VersionedAssets` instead")] -pub type VersionedMultiAssets = VersionedAssets; - /// A single XCM message, together with its version code. 
#[derive(Derivative, Encode, Decode, TypeInfo)] #[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] @@ -429,21 +317,20 @@ pub type VersionedMultiAssets = VersionedAssets; #[scale_info(bounds(), skip_type_params(RuntimeCall))] #[scale_info(replace_segment("staging_xcm", "xcm"))] pub enum VersionedXcm { - #[codec(index = 2)] - #[deprecated] - V2(v2::Xcm), #[codec(index = 3)] V3(v3::Xcm), #[codec(index = 4)] V4(v4::Xcm), + #[codec(index = 5)] + V5(v5::Xcm), } impl IntoVersion for VersionedXcm { fn into_version(self, n: Version) -> Result { Ok(match n { - 2 => Self::V2(self.try_into()?), 3 => Self::V3(self.try_into()?), 4 => Self::V4(self.try_into()?), + 5 => Self::V5(self.try_into()?), _ => return Err(()), }) } @@ -452,9 +339,9 @@ impl IntoVersion for VersionedXcm { impl IdentifyVersion for VersionedXcm { fn identify_version(&self) -> Version { match self { - Self::V2(_) => v2::VERSION, Self::V3(_) => v3::VERSION, Self::V4(_) => v4::VERSION, + Self::V5(_) => v5::VERSION, } } } @@ -476,12 +363,6 @@ impl VersionedXcm { } } -impl From> for VersionedXcm { - fn from(x: v2::Xcm) -> Self { - VersionedXcm::V2(x) - } -} - impl From> for VersionedXcm { fn from(x: v3::Xcm) -> Self { VersionedXcm::V3(x) @@ -494,18 +375,9 @@ impl From> for VersionedXcm { } } -impl TryFrom> for v2::Xcm { - type Error = (); - fn try_from(x: VersionedXcm) -> Result { - use VersionedXcm::*; - match x { - V2(x) => Ok(x), - V3(x) => x.try_into(), - V4(x) => { - let v3: v3::Xcm = x.try_into()?; - v3.try_into() - }, - } +impl From> for VersionedXcm { + fn from(x: v5::Xcm) -> Self { + VersionedXcm::V5(x) } } @@ -514,9 +386,12 @@ impl TryFrom> for v3::Xcm { fn try_from(x: VersionedXcm) -> Result { use VersionedXcm::*; match x { - V2(x) => x.try_into(), V3(x) => Ok(x), V4(x) => x.try_into(), + V5(x) => { + let v4: v4::Xcm = x.try_into()?; + v4.try_into() + }, } } } @@ -526,12 +401,24 @@ impl TryFrom> for v4::Xcm { fn try_from(x: VersionedXcm) -> Result { use VersionedXcm::*; match x { - V2(x) => { - let v3: v3::Xcm = x.try_into()?; - v3.try_into() - }, V3(x) => x.try_into(), V4(x) => Ok(x), + V5(x) => x.try_into(), + } + } +} + +impl TryFrom> for v5::Xcm { + type Error = (); + fn try_from(x: VersionedXcm) -> Result { + use VersionedXcm::*; + match x { + V3(x) => { + let v4: v4::Xcm = x.try_into()?; + v4.try_into() + }, + V4(x) => x.try_into(), + V5(x) => Ok(x), } } } @@ -568,23 +455,6 @@ impl WrapVersion for () { } } -/// `WrapVersion` implementation which attempts to always convert the XCM to version 2 before -/// wrapping it. -pub struct AlwaysV2; -impl WrapVersion for AlwaysV2 { - fn wrap_version( - _: &latest::Location, - xcm: impl Into>, - ) -> Result, ()> { - Ok(VersionedXcm::::V2(xcm.into().try_into()?)) - } -} -impl GetVersion for AlwaysV2 { - fn get_version_for(_dest: &latest::Location) -> Option { - Some(v2::VERSION) - } -} - /// `WrapVersion` implementation which attempts to always convert the XCM to version 3 before /// wrapping it. pub struct AlwaysV3; @@ -619,9 +489,26 @@ impl GetVersion for AlwaysV4 { } } +/// `WrapVersion` implementation which attempts to always convert the XCM to version 5 before +/// wrapping it.
+pub struct AlwaysV5; +impl WrapVersion for AlwaysV5 { + fn wrap_version( + _: &latest::Location, + xcm: impl Into>, + ) -> Result, ()> { + Ok(VersionedXcm::::V5(xcm.into().try_into()?)) + } +} +impl GetVersion for AlwaysV5 { + fn get_version_for(_dest: &latest::Location) -> Option { + Some(v5::VERSION) + } +} + /// `WrapVersion` implementation which attempts to always convert the XCM to the latest version /// before wrapping it. -pub type AlwaysLatest = AlwaysV4; +pub type AlwaysLatest = AlwaysV5; /// `WrapVersion` implementation which attempts to always convert the XCM to the most recent Long- /// Term-Support version before wrapping it. @@ -629,7 +516,7 @@ pub type AlwaysLts = AlwaysV4; pub mod prelude { pub use super::{ - latest::prelude::*, AlwaysLatest, AlwaysLts, AlwaysV2, AlwaysV3, AlwaysV4, GetVersion, + latest::prelude::*, AlwaysLatest, AlwaysLts, AlwaysV3, AlwaysV4, AlwaysV5, GetVersion, IdentifyVersion, IntoVersion, Unsupported, Version as XcmVersion, VersionedAsset, VersionedAssetId, VersionedAssets, VersionedInteriorLocation, VersionedLocation, VersionedResponse, VersionedXcm, WrapVersion, @@ -637,12 +524,6 @@ pub mod prelude { } pub mod opaque { - pub mod v2 { - // Everything from v2 - pub use crate::v2::*; - // Then override with the opaque types in v2 - pub use crate::v2::opaque::{Instruction, Xcm}; - } pub mod v3 { // Everything from v3 pub use crate::v3::*; @@ -655,9 +536,15 @@ pub mod opaque { // Then override with the opaque types in v4 pub use crate::v4::opaque::{Instruction, Xcm}; } + pub mod v5 { + // Everything from v5 + pub use crate::v5::*; + // Then override with the opaque types in v5 + pub use crate::v5::opaque::{Instruction, Xcm}; + } pub mod latest { - pub use super::v4::*; + pub use super::v5::*; } pub mod lts { diff --git a/polkadot/xcm/src/tests.rs b/polkadot/xcm/src/tests.rs index 4c666063f3f4..5a267b3a9048 100644 --- a/polkadot/xcm/src/tests.rs +++ b/polkadot/xcm/src/tests.rs @@ -34,43 +34,43 @@ fn encode_decode_versioned_asset_id_v3() { } #[test] -fn encode_decode_versioned_response_v2() { - let response = VersionedResponse::V2(v2::Response::Null); +fn encode_decode_versioned_response_v3() { + let response = VersionedResponse::V3(v3::Response::Null); let encoded = response.encode(); - assert_eq!(encoded, hex_literal::hex!("0200"), "encode format changed"); - assert_eq!(encoded[0], 2, "bad version number"); + assert_eq!(encoded, hex_literal::hex!("0300"), "encode format changed"); + assert_eq!(encoded[0], 3, "bad version number"); let decoded = VersionedResponse::decode(&mut &encoded[..]).unwrap(); assert_eq!(response, decoded); } #[test] -fn encode_decode_versioned_response_v3() { - let response = VersionedResponse::V3(v3::Response::Null); +fn encode_decode_versioned_response_v4() { + let response = VersionedResponse::V4(v4::Response::Null); let encoded = response.encode(); - assert_eq!(encoded, hex_literal::hex!("0300"), "encode format changed"); - assert_eq!(encoded[0], 3, "bad version number"); + assert_eq!(encoded, hex_literal::hex!("0400"), "encode format changed"); + assert_eq!(encoded[0], 4, "bad version number"); let decoded = VersionedResponse::decode(&mut &encoded[..]).unwrap(); assert_eq!(response, decoded); } #[test] -fn encode_decode_versioned_multi_location_v2() { - let location = VersionedLocation::V2(v2::MultiLocation::new(0, v2::Junctions::Here)); - let encoded = location.encode(); +fn encode_decode_versioned_response_v5() { + let response = VersionedResponse::V5(v5::Response::Null); + let encoded = response.encode(); -
assert_eq!(encoded, hex_literal::hex!("010000"), "encode format changed"); - assert_eq!(encoded[0], 1, "bad version number"); // this is introduced in v1 + assert_eq!(encoded, hex_literal::hex!("0500"), "encode format changed"); + assert_eq!(encoded[0], 5, "bad version number"); - let decoded = VersionedLocation::decode(&mut &encoded[..]).unwrap(); - assert_eq!(location, decoded); + let decoded = VersionedResponse::decode(&mut &encoded[..]).unwrap(); + assert_eq!(response, decoded); } #[test] -fn encode_decode_versioned_multi_location_v3() { +fn encode_decode_versioned_location_v3() { let location = VersionedLocation::V3(v3::MultiLocation::new(0, v3::Junctions::Here)); let encoded = location.encode(); @@ -82,19 +82,31 @@ fn encode_decode_versioned_multi_location_v3() { } #[test] -fn encode_decode_versioned_interior_multi_location_v2() { - let location = VersionedInteriorLocation::V2(v2::InteriorMultiLocation::Here); +fn encode_decode_versioned_location_v4() { + let location = VersionedLocation::V4(v4::Location::new(0, v4::Junctions::Here)); let encoded = location.encode(); - assert_eq!(encoded, hex_literal::hex!("0200"), "encode format changed"); - assert_eq!(encoded[0], 2, "bad version number"); + assert_eq!(encoded, hex_literal::hex!("040000"), "encode format changed"); + assert_eq!(encoded[0], 4, "bad version number"); - let decoded = VersionedInteriorLocation::decode(&mut &encoded[..]).unwrap(); + let decoded = VersionedLocation::decode(&mut &encoded[..]).unwrap(); assert_eq!(location, decoded); } #[test] -fn encode_decode_versioned_interior_multi_location_v3() { +fn encode_decode_versioned_location_v5() { + let location = VersionedLocation::V5(v5::Location::new(0, v5::Junctions::Here)); + let encoded = location.encode(); + + assert_eq!(encoded, hex_literal::hex!("050000"), "encode format changed"); + assert_eq!(encoded[0], 5, "bad version number"); + + let decoded = VersionedLocation::decode(&mut &encoded[..]).unwrap(); + assert_eq!(location, decoded); +} + +#[test] +fn encode_decode_versioned_interior_location_v3() { let location = VersionedInteriorLocation::V3(v3::InteriorMultiLocation::Here); let encoded = location.encode(); @@ -106,19 +118,31 @@ fn encode_decode_versioned_interior_multi_location_v3() { } #[test] -fn encode_decode_versioned_multi_asset_v2() { - let asset = VersionedAsset::V2(v2::MultiAsset::from(((0, v2::Junctions::Here), 1))); - let encoded = asset.encode(); +fn encode_decode_versioned_interior_location_v4() { + let location = VersionedInteriorLocation::V4(v4::InteriorLocation::Here); + let encoded = location.encode(); - assert_eq!(encoded, hex_literal::hex!("010000000004"), "encode format changed"); - assert_eq!(encoded[0], 1, "bad version number"); + assert_eq!(encoded, hex_literal::hex!("0400"), "encode format changed"); + assert_eq!(encoded[0], 4, "bad version number"); - let decoded = VersionedAsset::decode(&mut &encoded[..]).unwrap(); - assert_eq!(asset, decoded); + let decoded = VersionedInteriorLocation::decode(&mut &encoded[..]).unwrap(); + assert_eq!(location, decoded); } #[test] -fn encode_decode_versioned_multi_asset_v3() { +fn encode_decode_versioned_interior_location_v5() { + let location = VersionedInteriorLocation::V5(v5::InteriorLocation::Here); + let encoded = location.encode(); + + assert_eq!(encoded, hex_literal::hex!("0500"), "encode format changed"); + assert_eq!(encoded[0], 5, "bad version number"); + + let decoded = VersionedInteriorLocation::decode(&mut &encoded[..]).unwrap(); + assert_eq!(location, decoded); +} + +#[test] +fn 
encode_decode_versioned_asset_v3() { let asset = VersionedAsset::V3(v3::MultiAsset::from((v3::MultiLocation::default(), 1))); let encoded = asset.encode(); @@ -130,22 +154,31 @@ fn encode_decode_versioned_multi_asset_v3() { } #[test] -fn encode_decode_versioned_multi_assets_v2() { - let assets = VersionedAssets::V2(v2::MultiAssets::from(vec![v2::MultiAsset::from(( - (0, v2::Junctions::Here), - 1, - ))])); - let encoded = assets.encode(); +fn encode_decode_versioned_asset_v4() { + let asset = VersionedAsset::V4(v4::Asset::from((v4::Location::default(), 1))); + let encoded = asset.encode(); - assert_eq!(encoded, hex_literal::hex!("01040000000004"), "encode format changed"); - assert_eq!(encoded[0], 1, "bad version number"); + assert_eq!(encoded, hex_literal::hex!("0400000004"), "encode format changed"); + assert_eq!(encoded[0], 4, "bad version number"); - let decoded = VersionedAssets::decode(&mut &encoded[..]).unwrap(); - assert_eq!(assets, decoded); + let decoded = VersionedAsset::decode(&mut &encoded[..]).unwrap(); + assert_eq!(asset, decoded); } #[test] -fn encode_decode_versioned_multi_assets_v3() { +fn encode_decode_versioned_asset_v5() { + let asset = VersionedAsset::V5(v5::Asset::from((v5::Location::default(), 1))); + let encoded = asset.encode(); + + assert_eq!(encoded, hex_literal::hex!("0500000004"), "encode format changed"); + assert_eq!(encoded[0], 5, "bad version number"); + + let decoded = VersionedAsset::decode(&mut &encoded[..]).unwrap(); + assert_eq!(asset, decoded); +} + +#[test] +fn encode_decode_versioned_assets_v3() { let assets = VersionedAssets::V3(v3::MultiAssets::from(vec![ (v3::MultiAsset::from((v3::MultiLocation::default(), 1))), ])); @@ -158,6 +191,34 @@ fn encode_decode_versioned_multi_assets_v3() { assert_eq!(assets, decoded); } +#[test] +fn encode_decode_versioned_assets_v4() { + let assets = VersionedAssets::V4(v4::Assets::from(vec![ + (v4::Asset::from((v4::Location::default(), 1))), + ])); + let encoded = assets.encode(); + + assert_eq!(encoded, hex_literal::hex!("040400000004"), "encode format changed"); + assert_eq!(encoded[0], 4, "bad version number"); + + let decoded = VersionedAssets::decode(&mut &encoded[..]).unwrap(); + assert_eq!(assets, decoded); +} + +#[test] +fn encode_decode_versioned_assets_v5() { + let assets = VersionedAssets::V5(v5::Assets::from(vec![ + (v5::Asset::from((v5::Location::default(), 1))), + ])); + let encoded = assets.encode(); + + assert_eq!(encoded, hex_literal::hex!("050400000004"), "encode format changed"); + assert_eq!(encoded[0], 5, "bad version number"); + + let decoded = VersionedAssets::decode(&mut &encoded[..]).unwrap(); + assert_eq!(assets, decoded); +} + #[test] fn encode_decode_versioned_xcm_v3() { let xcm = VersionedXcm::V3(v3::Xcm::<()>::new()); @@ -170,6 +231,30 @@ fn encode_decode_versioned_xcm_v3() { assert_eq!(xcm, decoded); } +#[test] +fn encode_decode_versioned_xcm_v4() { + let xcm = VersionedXcm::V4(v4::Xcm::<()>::new()); + let encoded = xcm.encode(); + + assert_eq!(encoded, hex_literal::hex!("0400"), "encode format changed"); + assert_eq!(encoded[0], 4, "bad version number"); + + let decoded = VersionedXcm::decode(&mut &encoded[..]).unwrap(); + assert_eq!(xcm, decoded); +} + +#[test] +fn encode_decode_versioned_xcm_v5() { + let xcm = VersionedXcm::V5(v5::Xcm::<()>::new()); + let encoded = xcm.encode(); + + assert_eq!(encoded, hex_literal::hex!("0500"), "encode format changed"); + assert_eq!(encoded[0], 5, "bad version number"); + + let decoded = VersionedXcm::decode(&mut &encoded[..]).unwrap(); + 
assert_eq!(xcm, decoded); +} + // With the renaming of the crate to `staging-xcm` the naming in the metadata changed as well and // this broke downstream users. This test ensures that the name in the metadata isn't changed. #[test] diff --git a/polkadot/xcm/src/v3/junction.rs b/polkadot/xcm/src/v3/junction.rs index 24348bf2e672..24e9c16bf699 100644 --- a/polkadot/xcm/src/v3/junction.rs +++ b/polkadot/xcm/src/v3/junction.rs @@ -18,10 +18,6 @@ use super::{Junctions, MultiLocation}; use crate::{ - v2::{ - BodyId as OldBodyId, BodyPart as OldBodyPart, Junction as OldJunction, - NetworkId as OldNetworkId, - }, v4::{Junction as NewJunction, NetworkId as NewNetworkId}, VersionedLocation, }; @@ -80,30 +76,6 @@ pub enum NetworkId { PolkadotBulletin, } -impl From for Option { - fn from(old: OldNetworkId) -> Option { - use OldNetworkId::*; - match old { - Any => None, - Named(_) => None, - Polkadot => Some(NetworkId::Polkadot), - Kusama => Some(NetworkId::Kusama), - } - } -} - -impl TryFrom for NetworkId { - type Error = (); - fn try_from(old: OldNetworkId) -> Result { - use OldNetworkId::*; - match old { - Any | Named(_) => Err(()), - Polkadot => Ok(NetworkId::Polkadot), - Kusama => Ok(NetworkId::Kusama), - } - } -} - impl From for Option { fn from(new: NewNetworkId) -> Self { Some(NetworkId::from(new)) @@ -175,32 +147,6 @@ pub enum BodyId { Treasury, } -impl TryFrom for BodyId { - type Error = (); - fn try_from(value: OldBodyId) -> Result { - use OldBodyId::*; - Ok(match value { - Unit => Self::Unit, - Named(n) => - if n.len() == 4 { - let mut r = [0u8; 4]; - r.copy_from_slice(&n[..]); - Self::Moniker(r) - } else { - return Err(()) - }, - Index(n) => Self::Index(n), - Executive => Self::Executive, - Technical => Self::Technical, - Legislative => Self::Legislative, - Judicial => Self::Judicial, - Defense => Self::Defense, - Administration => Self::Administration, - Treasury => Self::Treasury, - }) - } -} - /// A part of a pluralistic body. #[derive( Copy, @@ -262,20 +208,6 @@ impl BodyPart { } } -impl TryFrom for BodyPart { - type Error = (); - fn try_from(value: OldBodyPart) -> Result { - use OldBodyPart::*; - Ok(match value { - Voice => Self::Voice, - Members { count } => Self::Members { count }, - Fraction { nom, denom } => Self::Fraction { nom, denom }, - AtLeastProportion { nom, denom } => Self::AtLeastProportion { nom, denom }, - MoreThanProportion { nom, denom } => Self::MoreThanProportion { nom, denom }, - }) - } -} - /// A single item in a path to describe the relative location of a consensus system. /// /// Each item assumes a pre-existing location as its context and is defined in terms of it. 
@@ -409,36 +341,6 @@ impl From for Junction { } } -impl TryFrom for Junction { - type Error = (); - fn try_from(value: OldJunction) -> Result { - use OldJunction::*; - Ok(match value { - Parachain(id) => Self::Parachain(id), - AccountId32 { network, id } => Self::AccountId32 { network: network.into(), id }, - AccountIndex64 { network, index } => - Self::AccountIndex64 { network: network.into(), index }, - AccountKey20 { network, key } => Self::AccountKey20 { network: network.into(), key }, - PalletInstance(index) => Self::PalletInstance(index), - GeneralIndex(id) => Self::GeneralIndex(id), - GeneralKey(key) => match key.len() { - len @ 0..=32 => Self::GeneralKey { - length: len as u8, - data: { - let mut data = [0u8; 32]; - data[..len].copy_from_slice(&key[..]); - data - }, - }, - _ => return Err(()), - }, - OnlyChild => Self::OnlyChild, - Plurality { id, part } => - Self::Plurality { id: id.try_into()?, part: part.try_into()? }, - }) - } -} - impl TryFrom for Junction { type Error = (); @@ -496,30 +398,3 @@ impl Junction { } } } - -#[cfg(test)] -mod tests { - use super::*; - use alloc::vec; - - #[test] - fn junction_round_trip_works() { - let j = Junction::GeneralKey { length: 32, data: [1u8; 32] }; - let k = Junction::try_from(OldJunction::try_from(j).unwrap()).unwrap(); - assert_eq!(j, k); - - let j = OldJunction::GeneralKey(vec![1u8; 32].try_into().unwrap()); - let k = OldJunction::try_from(Junction::try_from(j.clone()).unwrap()).unwrap(); - assert_eq!(j, k); - - let j = Junction::from(BoundedVec::try_from(vec![1u8, 2, 3, 4]).unwrap()); - let k = Junction::try_from(OldJunction::try_from(j).unwrap()).unwrap(); - assert_eq!(j, k); - let s: BoundedSlice<_, _> = (&k).try_into().unwrap(); - assert_eq!(s, &[1u8, 2, 3, 4][..]); - - let j = OldJunction::GeneralKey(vec![1u8, 2, 3, 4].try_into().unwrap()); - let k = OldJunction::try_from(Junction::try_from(j.clone()).unwrap()).unwrap(); - assert_eq!(j, k); - } -} diff --git a/polkadot/xcm/src/v3/mod.rs b/polkadot/xcm/src/v3/mod.rs index ff64c98e15b3..b60209a440c6 100644 --- a/polkadot/xcm/src/v3/mod.rs +++ b/polkadot/xcm/src/v3/mod.rs @@ -16,11 +16,6 @@ //! Version 3 of the Cross-Consensus Message format data structures. -#[allow(deprecated)] -use super::v2::{ - Instruction as OldInstruction, OriginKind as OldOriginKind, Response as OldResponse, - WeightLimit as OldWeightLimit, Xcm as OldXcm, -}; use super::v4::{ Instruction as NewInstruction, PalletInfo as NewPalletInfo, QueryResponseInfo as NewQueryResponseInfo, Response as NewResponse, Xcm as NewXcm, @@ -56,43 +51,6 @@ pub use traits::{ SendError, SendResult, SendXcm, Weight, XcmHash, }; -/// Basically just the XCM (more general) version of `ParachainDispatchOrigin`. -#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] -#[scale_info(replace_segment("staging_xcm", "xcm"))] -#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] -pub enum OriginKind { - /// Origin should just be the native dispatch origin representation for the sender in the - /// local runtime framework. For Cumulus/Frame chains this is the `Parachain` or `Relay` origin - /// if coming from a chain, though there may be others if the `MultiLocation` XCM origin has a - /// primary/native dispatch origin form. - Native, - - /// Origin should just be the standard account-based origin with the sovereign account of - /// the sender. For Cumulus/Frame chains, this is the `Signed` origin. - SovereignAccount, - - /// Origin should be the super-user. For Cumulus/Frame chains, this is the `Root` origin. 
- /// This will not usually be an available option. - Superuser, - - /// Origin should be interpreted as an XCM native origin and the `MultiLocation` should be - /// encoded directly in the dispatch origin unchanged. For Cumulus/Frame chains, this will be - /// the `pallet_xcm::Origin::Xcm` type. - Xcm, -} - -impl From for OriginKind { - fn from(old: OldOriginKind) -> Self { - use OldOriginKind::*; - match old { - Native => Self::Native, - SovereignAccount => Self::SovereignAccount, - Superuser => Self::Superuser, - Xcm => Self::Xcm, - } - } -} - /// This module's XCM version. pub const VERSION: super::Version = 3; @@ -456,14 +414,29 @@ impl From for Option { } } -impl From for WeightLimit { - fn from(x: OldWeightLimit) -> Self { - use OldWeightLimit::*; - match x { - Limited(w) => Self::Limited(Weight::from_parts(w, DEFAULT_PROOF_SIZE)), - Unlimited => Self::Unlimited, - } - } +/// Basically just the XCM (more general) version of `ParachainDispatchOrigin`. +#[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] +#[scale_info(replace_segment("staging_xcm", "xcm"))] +#[cfg_attr(feature = "json-schema", derive(schemars::JsonSchema))] +pub enum OriginKind { + /// Origin should just be the native dispatch origin representation for the sender in the + /// local runtime framework. For Cumulus/Frame chains this is the `Parachain` or `Relay` origin + /// if coming from a chain, though there may be others if the `MultiLocation` XCM origin has a + /// primary/native dispatch origin form. + Native, + + /// Origin should just be the standard account-based origin with the sovereign account of + /// the sender. For Cumulus/Frame chains, this is the `Signed` origin. + SovereignAccount, + + /// Origin should be the super-user. For Cumulus/Frame chains, this is the `Root` origin. + /// This will not usually be an available option. + Superuser, + + /// Origin should be interpreted as an XCM native origin and the `MultiLocation` should be + /// encoded directly in the dispatch origin unchanged. For Cumulus/Frame chains, this will be + /// the `pallet_xcm::Origin::Xcm` type. + Xcm, } /// Contextual data pertaining to a specific list of XCM instructions. @@ -819,6 +792,7 @@ pub enum Instruction { /// Kind: *Command* /// /// Errors: + #[builder(pays_fees)] BuyExecution { fees: MultiAsset, weight_limit: WeightLimit }, /// Refund any surplus weight previously bought with `BuyExecution`. @@ -1327,31 +1301,6 @@ pub mod opaque { pub type Instruction = super::Instruction<()>; } -// Convert from a v2 response to a v3 response. -impl TryFrom for Response { - type Error = (); - fn try_from(old_response: OldResponse) -> result::Result { - match old_response { - OldResponse::Assets(assets) => Ok(Self::Assets(assets.try_into()?)), - OldResponse::Version(version) => Ok(Self::Version(version)), - OldResponse::ExecutionResult(error) => Ok(Self::ExecutionResult(match error { - Some((i, e)) => Some((i, e.try_into()?)), - None => None, - })), - OldResponse::Null => Ok(Self::Null), - } - } -} - -// Convert from a v2 XCM to a v3 XCM. -#[allow(deprecated)] -impl TryFrom> for Xcm { - type Error = (); - fn try_from(old_xcm: OldXcm) -> result::Result { - Ok(Xcm(old_xcm.0.into_iter().map(TryInto::try_into).collect::>()?)) - } -} - // Convert from a v4 XCM to a v3 XCM. impl TryFrom> for Xcm { type Error = (); @@ -1501,109 +1450,6 @@ impl TryFrom> for Instruction { } } -/// Default value for the proof size weight component when converting from V2. Set at 64 KB. 
-/// NOTE: Make sure this is removed after we properly account for PoV weights. -const DEFAULT_PROOF_SIZE: u64 = 64 * 1024; - -// Convert from a v2 instruction to a v3 instruction. -impl TryFrom> for Instruction { - type Error = (); - fn try_from(old_instruction: OldInstruction) -> result::Result { - use OldInstruction::*; - Ok(match old_instruction { - WithdrawAsset(assets) => Self::WithdrawAsset(assets.try_into()?), - ReserveAssetDeposited(assets) => Self::ReserveAssetDeposited(assets.try_into()?), - ReceiveTeleportedAsset(assets) => Self::ReceiveTeleportedAsset(assets.try_into()?), - QueryResponse { query_id, response, max_weight } => Self::QueryResponse { - query_id, - response: response.try_into()?, - max_weight: Weight::from_parts(max_weight, DEFAULT_PROOF_SIZE), - querier: None, - }, - TransferAsset { assets, beneficiary } => Self::TransferAsset { - assets: assets.try_into()?, - beneficiary: beneficiary.try_into()?, - }, - TransferReserveAsset { assets, dest, xcm } => Self::TransferReserveAsset { - assets: assets.try_into()?, - dest: dest.try_into()?, - xcm: xcm.try_into()?, - }, - HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity } => - Self::HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity }, - HrmpChannelAccepted { recipient } => Self::HrmpChannelAccepted { recipient }, - HrmpChannelClosing { initiator, sender, recipient } => - Self::HrmpChannelClosing { initiator, sender, recipient }, - Transact { origin_type, require_weight_at_most, call } => Self::Transact { - origin_kind: origin_type.into(), - require_weight_at_most: Weight::from_parts( - require_weight_at_most, - DEFAULT_PROOF_SIZE, - ), - call: call.into(), - }, - ReportError { query_id, dest, max_response_weight } => { - let response_info = QueryResponseInfo { - destination: dest.try_into()?, - query_id, - max_weight: Weight::from_parts(max_response_weight, DEFAULT_PROOF_SIZE), - }; - Self::ReportError(response_info) - }, - DepositAsset { assets, max_assets, beneficiary } => Self::DepositAsset { - assets: (assets, max_assets).try_into()?, - beneficiary: beneficiary.try_into()?, - }, - DepositReserveAsset { assets, max_assets, dest, xcm } => { - let assets = (assets, max_assets).try_into()?; - Self::DepositReserveAsset { assets, dest: dest.try_into()?, xcm: xcm.try_into()? } - }, - ExchangeAsset { give, receive } => { - let give = give.try_into()?; - let want = receive.try_into()?; - Self::ExchangeAsset { give, want, maximal: true } - }, - InitiateReserveWithdraw { assets, reserve, xcm } => Self::InitiateReserveWithdraw { - assets: assets.try_into()?, - reserve: reserve.try_into()?, - xcm: xcm.try_into()?, - }, - InitiateTeleport { assets, dest, xcm } => Self::InitiateTeleport { - assets: assets.try_into()?, - dest: dest.try_into()?, - xcm: xcm.try_into()?, - }, - QueryHolding { query_id, dest, assets, max_response_weight } => { - let response_info = QueryResponseInfo { - destination: dest.try_into()?, - query_id, - max_weight: Weight::from_parts(max_response_weight, DEFAULT_PROOF_SIZE), - }; - Self::ReportHolding { response_info, assets: assets.try_into()? 
} - }, - BuyExecution { fees, weight_limit } => - Self::BuyExecution { fees: fees.try_into()?, weight_limit: weight_limit.into() }, - ClearOrigin => Self::ClearOrigin, - DescendOrigin(who) => Self::DescendOrigin(who.try_into()?), - RefundSurplus => Self::RefundSurplus, - SetErrorHandler(xcm) => Self::SetErrorHandler(xcm.try_into()?), - SetAppendix(xcm) => Self::SetAppendix(xcm.try_into()?), - ClearError => Self::ClearError, - ClaimAsset { assets, ticket } => { - let assets = assets.try_into()?; - let ticket = ticket.try_into()?; - Self::ClaimAsset { assets, ticket } - }, - Trap(code) => Self::Trap(code), - SubscribeVersion { query_id, max_response_weight } => Self::SubscribeVersion { - query_id, - max_response_weight: Weight::from_parts(max_response_weight, DEFAULT_PROOF_SIZE), - }, - UnsubscribeVersion => Self::UnsubscribeVersion, - }) - } -} - #[cfg(test)] mod tests { use super::{prelude::*, *}; diff --git a/polkadot/xcm/src/v3/multiasset.rs b/polkadot/xcm/src/v3/multiasset.rs index 56b46b1d921e..e8bd3e167f61 100644 --- a/polkadot/xcm/src/v3/multiasset.rs +++ b/polkadot/xcm/src/v3/multiasset.rs @@ -27,18 +27,10 @@ //! filtering an XCM holding account. use super::{InteriorMultiLocation, MultiLocation}; -use crate::{ - v2::{ - AssetId as OldAssetId, AssetInstance as OldAssetInstance, Fungibility as OldFungibility, - MultiAsset as OldMultiAsset, MultiAssetFilter as OldMultiAssetFilter, - MultiAssets as OldMultiAssets, WildFungibility as OldWildFungibility, - WildMultiAsset as OldWildMultiAsset, - }, - v4::{ - Asset as NewMultiAsset, AssetFilter as NewMultiAssetFilter, AssetId as NewAssetId, - AssetInstance as NewAssetInstance, Assets as NewMultiAssets, Fungibility as NewFungibility, - WildAsset as NewWildMultiAsset, WildFungibility as NewWildFungibility, - }, +use crate::v4::{ + Asset as NewMultiAsset, AssetFilter as NewMultiAssetFilter, AssetId as NewAssetId, + AssetInstance as NewAssetInstance, Assets as NewMultiAssets, Fungibility as NewFungibility, + WildAsset as NewWildMultiAsset, WildFungibility as NewWildFungibility, }; use alloc::{vec, vec::Vec}; use bounded_collections::{BoundedVec, ConstU32}; @@ -85,22 +77,6 @@ pub enum AssetInstance { Array32([u8; 32]), } -impl TryFrom for AssetInstance { - type Error = (); - fn try_from(value: OldAssetInstance) -> Result { - use OldAssetInstance::*; - Ok(match value { - Undefined => Self::Undefined, - Index(n) => Self::Index(n), - Array4(n) => Self::Array4(n), - Array8(n) => Self::Array8(n), - Array16(n) => Self::Array16(n), - Array32(n) => Self::Array32(n), - Blob(_) => return Err(()), - }) - } -} - impl TryFrom for AssetInstance { type Error = (); fn try_from(value: NewAssetInstance) -> Result { @@ -340,17 +316,6 @@ impl> From for Fungibility { } } -impl TryFrom for Fungibility { - type Error = (); - fn try_from(value: OldFungibility) -> Result { - use OldFungibility::*; - Ok(match value { - Fungible(n) => Self::Fungible(n), - NonFungible(i) => Self::NonFungible(i.try_into()?), - }) - } -} - impl TryFrom for Fungibility { type Error = (); fn try_from(value: NewFungibility) -> Result { @@ -387,17 +352,6 @@ pub enum WildFungibility { NonFungible, } -impl TryFrom for WildFungibility { - type Error = (); - fn try_from(value: OldWildFungibility) -> Result { - use OldWildFungibility::*; - Ok(match value { - Fungible => Self::Fungible, - NonFungible => Self::NonFungible, - }) - } -} - impl TryFrom for WildFungibility { type Error = (); fn try_from(value: NewWildFungibility) -> Result { @@ -447,22 +401,6 @@ impl From<[u8; 32]> for AssetId { } } -impl 
TryFrom for AssetId { - type Error = (); - fn try_from(old: OldAssetId) -> Result { - use OldAssetId::*; - Ok(match old { - Concrete(l) => Self::Concrete(l.try_into()?), - Abstract(v) if v.len() <= 32 => { - let mut r = [0u8; 32]; - r[..v.len()].copy_from_slice(&v[..]); - Self::Abstract(r) - }, - _ => return Err(()), - }) - } -} - impl TryFrom for AssetId { type Error = (); fn try_from(new: NewAssetId) -> Result { @@ -601,13 +539,6 @@ impl MultiAsset { } } -impl TryFrom for MultiAsset { - type Error = (); - fn try_from(old: OldMultiAsset) -> Result { - Ok(Self { id: old.id.try_into()?, fun: old.fun.try_into()? }) - } -} - impl TryFrom for MultiAsset { type Error = (); fn try_from(new: NewMultiAsset) -> Result { @@ -657,18 +588,6 @@ impl Decode for MultiAssets { } } -impl TryFrom for MultiAssets { - type Error = (); - fn try_from(old: OldMultiAssets) -> Result { - let v = old - .drain() - .into_iter() - .map(MultiAsset::try_from) - .collect::, ()>>()?; - Ok(MultiAssets(v)) - } -} - impl TryFrom for MultiAssets { type Error = (); fn try_from(new: NewMultiAssets) -> Result { @@ -882,17 +801,6 @@ pub enum WildMultiAsset { }, } -impl TryFrom for WildMultiAsset { - type Error = (); - fn try_from(old: OldWildMultiAsset) -> Result { - use OldWildMultiAsset::*; - Ok(match old { - AllOf { id, fun } => Self::AllOf { id: id.try_into()?, fun: fun.try_into()? }, - All => Self::All, - }) - } -} - impl TryFrom for WildMultiAsset { type Error = (); fn try_from(new: NewWildMultiAsset) -> Result { @@ -907,19 +815,6 @@ impl TryFrom for WildMultiAsset { } } -impl TryFrom<(OldWildMultiAsset, u32)> for WildMultiAsset { - type Error = (); - fn try_from(old: (OldWildMultiAsset, u32)) -> Result { - use OldWildMultiAsset::*; - let count = old.1; - Ok(match old.0 { - AllOf { id, fun } => - Self::AllOfCounted { id: id.try_into()?, fun: fun.try_into()?, count }, - All => Self::AllCounted(count), - }) - } -} - impl WildMultiAsset { /// Returns true if `self` is a super-set of the given `inner` asset. pub fn contains(&self, inner: &MultiAsset) -> bool { @@ -1079,16 +974,6 @@ impl MultiAssetFilter { } } -impl TryFrom for MultiAssetFilter { - type Error = (); - fn try_from(old: OldMultiAssetFilter) -> Result { - Ok(match old { - OldMultiAssetFilter::Definite(x) => Self::Definite(x.try_into()?), - OldMultiAssetFilter::Wild(x) => Self::Wild(x.try_into()?), - }) - } -} - impl TryFrom for MultiAssetFilter { type Error = (); fn try_from(new: NewMultiAssetFilter) -> Result { @@ -1100,19 +985,6 @@ impl TryFrom for MultiAssetFilter { } } -impl TryFrom<(OldMultiAssetFilter, u32)> for MultiAssetFilter { - type Error = (); - fn try_from(old: (OldMultiAssetFilter, u32)) -> Result { - let count = old.1; - Ok(match old.0 { - OldMultiAssetFilter::Definite(x) if count >= x.len() as u32 => - Self::Definite(x.try_into()?), - OldMultiAssetFilter::Wild(x) => Self::Wild((x, count).try_into()?), - _ => return Err(()), - }) - } -} - #[cfg(test)] mod tests { use super::super::prelude::*; diff --git a/polkadot/xcm/src/v3/multilocation.rs b/polkadot/xcm/src/v3/multilocation.rs index e51981204d96..8f18312046f8 100644 --- a/polkadot/xcm/src/v3/multilocation.rs +++ b/polkadot/xcm/src/v3/multilocation.rs @@ -17,9 +17,7 @@ //! XCM `MultiLocation` datatype. 
use super::{Junction, Junctions}; -use crate::{ - v2::MultiLocation as OldMultiLocation, v4::Location as NewMultiLocation, VersionedLocation, -}; +use crate::{v4::Location as NewMultiLocation, VersionedLocation}; use codec::{Decode, Encode, MaxEncodedLen}; use core::result; use scale_info::TypeInfo; @@ -464,13 +462,6 @@ impl MultiLocation { } } -impl TryFrom for MultiLocation { - type Error = (); - fn try_from(x: OldMultiLocation) -> result::Result { - Ok(MultiLocation { parents: x.parents, interior: x.interior.try_into()? }) - } -} - impl TryFrom for Option { type Error = (); fn try_from(new: NewMultiLocation) -> result::Result { @@ -759,37 +750,4 @@ mod tests { let expected = MultiLocation::new(2, (GlobalConsensus(Kusama), Parachain(42))); assert_eq!(para_to_remote_para.chain_location(), expected); } - - #[test] - fn conversion_from_other_types_works() { - use crate::v2; - - fn takes_multilocation>(_arg: Arg) {} - - takes_multilocation(Parent); - takes_multilocation(Here); - takes_multilocation(X1(Parachain(42))); - takes_multilocation((Ancestor(255), PalletInstance(8))); - takes_multilocation((Ancestor(5), Parachain(1), PalletInstance(3))); - takes_multilocation((Ancestor(2), Here)); - takes_multilocation(AncestorThen( - 3, - X2(Parachain(43), AccountIndex64 { network: None, index: 155 }), - )); - takes_multilocation((Parent, AccountId32 { network: None, id: [0; 32] })); - takes_multilocation((Parent, Here)); - takes_multilocation(ParentThen(X1(Parachain(75)))); - takes_multilocation([Parachain(100), PalletInstance(3)]); - - assert_eq!( - v2::MultiLocation::from(v2::Junctions::Here).try_into(), - Ok(MultiLocation::here()) - ); - assert_eq!(v2::MultiLocation::from(v2::Parent).try_into(), Ok(MultiLocation::parent())); - assert_eq!( - v2::MultiLocation::from((v2::Parent, v2::Parent, v2::Junction::GeneralIndex(42u128),)) - .try_into(), - Ok(MultiLocation { parents: 2, interior: X1(GeneralIndex(42u128)) }), - ); - } } diff --git a/polkadot/xcm/src/v3/traits.rs b/polkadot/xcm/src/v3/traits.rs index 34c46453b9a8..aa42244b4964 100644 --- a/polkadot/xcm/src/v3/traits.rs +++ b/polkadot/xcm/src/v3/traits.rs @@ -16,20 +16,18 @@ //! Cross-Consensus Message format data structures. -use crate::v2::Error as OldError; -use codec::{Decode, Encode, MaxEncodedLen}; use core::result; use scale_info::TypeInfo; pub use sp_weights::Weight; -use super::*; - // A simple trait to get the weight of some object. pub trait GetWeight { fn weight(&self) -> sp_weights::Weight; } +use super::*; + /// Error codes used in XCM. The first errors codes have explicit indices and are part of the XCM /// format. Those trailing are merely part of the XCM implementation; there is no expectation that /// they will retain the same index over time. 
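Editorial sketch (not part of the patch): with the v2 conversion paths removed, a v3 `MultiLocation` can now only be derived from its v4 counterpart, and that downgrade remains fallible. A minimal illustration, written as if inside the crate; the function name is illustrative.

use crate::{v3::MultiLocation, v4::Location as NewLocation};

// Try to express a v4 location in v3 terms; yields `Err(())` if it has no v3 representation.
fn downgrade_to_v3(new: NewLocation) -> Result<Option<MultiLocation>, ()> {
	new.try_into()
}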
@@ -174,38 +172,6 @@ impl MaxEncodedLen for Error { } } -impl TryFrom for Error { - type Error = (); - fn try_from(old_error: OldError) -> result::Result { - use OldError::*; - Ok(match old_error { - Overflow => Self::Overflow, - Unimplemented => Self::Unimplemented, - UntrustedReserveLocation => Self::UntrustedReserveLocation, - UntrustedTeleportLocation => Self::UntrustedTeleportLocation, - MultiLocationFull => Self::LocationFull, - MultiLocationNotInvertible => Self::LocationNotInvertible, - BadOrigin => Self::BadOrigin, - InvalidLocation => Self::InvalidLocation, - AssetNotFound => Self::AssetNotFound, - FailedToTransactAsset(s) => Self::FailedToTransactAsset(s), - NotWithdrawable => Self::NotWithdrawable, - LocationCannotHold => Self::LocationCannotHold, - ExceedsMaxMessageSize => Self::ExceedsMaxMessageSize, - DestinationUnsupported => Self::DestinationUnsupported, - Transport(s) => Self::Transport(s), - Unroutable => Self::Unroutable, - UnknownClaim => Self::UnknownClaim, - FailedToDecode => Self::FailedToDecode, - MaxWeightInvalid => Self::MaxWeightInvalid, - NotHoldingFees => Self::NotHoldingFees, - TooExpensive => Self::TooExpensive, - Trap(i) => Self::Trap(i), - _ => return Err(()), - }) - } -} - impl From for Error { fn from(e: SendError) -> Self { match e { diff --git a/polkadot/xcm/src/v4/asset.rs b/polkadot/xcm/src/v4/asset.rs index 41f1f82f828c..d7a9297d6932 100644 --- a/polkadot/xcm/src/v4/asset.rs +++ b/polkadot/xcm/src/v4/asset.rs @@ -27,10 +27,17 @@ //! holding account. use super::{InteriorLocation, Location, Reanchorable}; -use crate::v3::{ - AssetId as OldAssetId, AssetInstance as OldAssetInstance, Fungibility as OldFungibility, - MultiAsset as OldAsset, MultiAssetFilter as OldAssetFilter, MultiAssets as OldAssets, - WildFungibility as OldWildFungibility, WildMultiAsset as OldWildAsset, +use crate::{ + v3::{ + AssetId as OldAssetId, AssetInstance as OldAssetInstance, Fungibility as OldFungibility, + MultiAsset as OldAsset, MultiAssetFilter as OldAssetFilter, MultiAssets as OldAssets, + WildFungibility as OldWildFungibility, WildMultiAsset as OldWildAsset, + }, + v5::{ + Asset as NewAsset, AssetFilter as NewAssetFilter, AssetId as NewAssetId, + AssetInstance as NewAssetInstance, Assets as NewAssets, Fungibility as NewFungibility, + WildAsset as NewWildAsset, WildFungibility as NewWildFungibility, + }, }; use alloc::{vec, vec::Vec}; use bounded_collections::{BoundedVec, ConstU32}; @@ -90,6 +97,21 @@ impl TryFrom for AssetInstance { } } +impl TryFrom for AssetInstance { + type Error = (); + fn try_from(value: NewAssetInstance) -> Result { + use NewAssetInstance::*; + Ok(match value { + Undefined => Self::Undefined, + Index(n) => Self::Index(n), + Array4(n) => Self::Array4(n), + Array8(n) => Self::Array8(n), + Array16(n) => Self::Array16(n), + Array32(n) => Self::Array32(n), + }) + } +} + impl From<()> for AssetInstance { fn from(_: ()) -> Self { Self::Undefined @@ -244,6 +266,17 @@ impl TryFrom for u128 { } } +impl TryFrom for Fungibility { + type Error = (); + fn try_from(value: NewFungibility) -> Result { + use NewFungibility::*; + Ok(match value { + Fungible(n) => Self::Fungible(n), + NonFungible(i) => Self::NonFungible(i.try_into()?), + }) + } +} + /// Classification of whether an asset is fungible or not, along with a mandatory amount or /// instance. 
#[derive( @@ -357,6 +390,17 @@ impl TryFrom for WildFungibility { } } +impl TryFrom for WildFungibility { + type Error = (); + fn try_from(value: NewWildFungibility) -> Result { + use NewWildFungibility::*; + Ok(match value { + Fungible => Self::Fungible, + NonFungible => Self::NonFungible, + }) + } +} + /// Location to identify an asset. #[derive( Clone, @@ -391,6 +435,13 @@ impl TryFrom for AssetId { } } +impl TryFrom for AssetId { + type Error = (); + fn try_from(new: NewAssetId) -> Result { + Ok(Self(new.0.try_into()?)) + } +} + impl AssetId { /// Prepend a `Location` to an asset id, giving it a new root location. pub fn prepend_with(&mut self, prepend: &Location) -> Result<(), ()> { @@ -526,6 +577,13 @@ impl TryFrom for Asset { } } +impl TryFrom for Asset { + type Error = (); + fn try_from(new: NewAsset) -> Result { + Ok(Self { id: new.id.try_into()?, fun: new.fun.try_into()? }) + } +} + /// A `Vec` of `Asset`s. /// /// There are a number of invariants which the construction and mutation functions must ensure are @@ -579,6 +637,18 @@ impl TryFrom for Assets { } } +impl TryFrom for Assets { + type Error = (); + fn try_from(new: NewAssets) -> Result { + let v = new + .into_inner() + .into_iter() + .map(Asset::try_from) + .collect::, ()>>()?; + Ok(Assets(v)) + } +} + impl From> for Assets { fn from(mut assets: Vec) -> Self { let mut res = Vec::with_capacity(assets.len()); @@ -795,6 +865,20 @@ impl TryFrom for WildAsset { } } +impl TryFrom for WildAsset { + type Error = (); + fn try_from(new: NewWildAsset) -> Result { + use NewWildAsset::*; + Ok(match new { + AllOf { id, fun } => Self::AllOf { id: id.try_into()?, fun: fun.try_into()? }, + AllOfCounted { id, fun, count } => + Self::AllOfCounted { id: id.try_into()?, fun: fun.try_into()?, count }, + All => Self::All, + AllCounted(count) => Self::AllCounted(count), + }) + } +} + impl WildAsset { /// Returns true if `self` is a super-set of the given `inner` asset. pub fn contains(&self, inner: &Asset) -> bool { @@ -944,6 +1028,17 @@ impl AssetFilter { } } +impl TryFrom for AssetFilter { + type Error = (); + fn try_from(new: NewAssetFilter) -> Result { + use NewAssetFilter::*; + Ok(match new { + Definite(x) => Self::Definite(x.try_into()?), + Wild(x) => Self::Wild(x.try_into()?), + }) + } +} + impl TryFrom for AssetFilter { type Error = (); fn try_from(old: OldAssetFilter) -> Result { diff --git a/polkadot/xcm/src/v4/junction.rs b/polkadot/xcm/src/v4/junction.rs index 36fb616d2dc5..88125b03ef0c 100644 --- a/polkadot/xcm/src/v4/junction.rs +++ b/polkadot/xcm/src/v4/junction.rs @@ -20,6 +20,7 @@ use super::Location; pub use crate::v3::{BodyId, BodyPart}; use crate::{ v3::{Junction as OldJunction, NetworkId as OldNetworkId}, + v5::{Junction as NewJunction, NetworkId as NewNetworkId}, VersionedLocation, }; use bounded_collections::{BoundedSlice, BoundedVec, ConstU32}; @@ -72,7 +73,6 @@ pub enum Junction { /// An instanced, indexed pallet that forms a constituent part of the context. /// /// Generally used when the context is a Frame-based chain. - // TODO XCMv4 inner should be `Compact`. PalletInstance(u8), /// A non-descript index within the context location. 
/// @@ -103,6 +103,31 @@ pub enum Junction { GlobalConsensus(NetworkId), } +impl From for Option { + fn from(new: NewNetworkId) -> Self { + Some(NetworkId::from(new)) + } +} + +impl From for NetworkId { + fn from(new: NewNetworkId) -> Self { + use NewNetworkId::*; + match new { + ByGenesis(hash) => Self::ByGenesis(hash), + ByFork { block_number, block_hash } => Self::ByFork { block_number, block_hash }, + Polkadot => Self::Polkadot, + Kusama => Self::Kusama, + Westend => Self::Westend, + Rococo => Self::Rococo, + Wococo => Self::Wococo, + Ethereum { chain_id } => Self::Ethereum { chain_id }, + BitcoinCore => Self::BitcoinCore, + BitcoinCash => Self::BitcoinCash, + PolkadotBulletin => Self::PolkadotBulletin, + } + } +} + /// A global identifier of a data structure existing within consensus. /// /// Maintenance note: Networks with global consensus and which are practically bridgeable within the @@ -253,6 +278,29 @@ impl TryFrom for Junction { } } +impl TryFrom for Junction { + type Error = (); + + fn try_from(value: NewJunction) -> Result { + use NewJunction::*; + Ok(match value { + Parachain(id) => Self::Parachain(id), + AccountId32 { network: maybe_network, id } => + Self::AccountId32 { network: maybe_network.map(|network| network.into()), id }, + AccountIndex64 { network: maybe_network, index } => + Self::AccountIndex64 { network: maybe_network.map(|network| network.into()), index }, + AccountKey20 { network: maybe_network, key } => + Self::AccountKey20 { network: maybe_network.map(|network| network.into()), key }, + PalletInstance(index) => Self::PalletInstance(index), + GeneralIndex(id) => Self::GeneralIndex(id), + GeneralKey { length, data } => Self::GeneralKey { length, data }, + OnlyChild => Self::OnlyChild, + Plurality { id, part } => Self::Plurality { id, part }, + GlobalConsensus(network) => Self::GlobalConsensus(network.into()), + }) + } +} + impl Junction { /// Convert `self` into a `Location` containing 0 parents. /// diff --git a/polkadot/xcm/src/v4/location.rs b/polkadot/xcm/src/v4/location.rs index f2c302495c73..3a44b0696be4 100644 --- a/polkadot/xcm/src/v4/location.rs +++ b/polkadot/xcm/src/v4/location.rs @@ -17,7 +17,7 @@ //! XCM `Location` datatype. use super::{traits::Reanchorable, Junction, Junctions}; -use crate::{v3::MultiLocation as OldLocation, VersionedLocation}; +use crate::{v3::MultiLocation as OldLocation, v5::Location as NewLocation, VersionedLocation}; use codec::{Decode, Encode, MaxEncodedLen}; use core::result; use scale_info::TypeInfo; @@ -489,6 +489,20 @@ impl TryFrom for Location { } } +impl TryFrom for Option { + type Error = (); + fn try_from(new: NewLocation) -> result::Result { + Ok(Some(Location::try_from(new)?)) + } +} + +impl TryFrom for Location { + type Error = (); + fn try_from(new: NewLocation) -> result::Result { + Ok(Location { parents: new.parent_count(), interior: new.interior().clone().try_into()? }) + } +} + /// A unit struct which can be converted into a `Location` of `parents` value 1. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] pub struct Parent; diff --git a/polkadot/xcm/src/v4/mod.rs b/polkadot/xcm/src/v4/mod.rs index a2b12dcc54ce..034c4dab8417 100644 --- a/polkadot/xcm/src/v4/mod.rs +++ b/polkadot/xcm/src/v4/mod.rs @@ -17,9 +17,15 @@ //! Version 4 of the Cross-Consensus Message format data structures. 
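Editorial sketch (not part of the patch): the `TryFrom` impl added to `v4/location.rs` above gives the v5-to-v4 downgrade the same shape as the existing v4-to-v3 path. Written as if inside the crate; the function name is illustrative.

use crate::{v4::Location, v5::Location as NewLocation};

// Succeeds as long as every junction in the v5 location also exists in v4.
fn downgrade_to_v4(new: NewLocation) -> Result<Location, ()> {
	Location::try_from(new)
}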
pub use super::v3::GetWeight; -use super::v3::{ - Instruction as OldInstruction, PalletInfo as OldPalletInfo, - QueryResponseInfo as OldQueryResponseInfo, Response as OldResponse, Xcm as OldXcm, +use super::{ + v3::{ + Instruction as OldInstruction, PalletInfo as OldPalletInfo, + QueryResponseInfo as OldQueryResponseInfo, Response as OldResponse, Xcm as OldXcm, + }, + v5::{ + Instruction as NewInstruction, PalletInfo as NewPalletInfo, + QueryResponseInfo as NewQueryResponseInfo, Response as NewResponse, Xcm as NewXcm, + }, }; use crate::DoubleEncoded; use alloc::{vec, vec::Vec}; @@ -258,6 +264,22 @@ impl TryInto for PalletInfo { } } +impl TryInto for PalletInfo { + type Error = (); + + fn try_into(self) -> result::Result { + NewPalletInfo::new( + self.index, + self.name.into_inner(), + self.module_name.into_inner(), + self.major, + self.minor, + self.patch, + ) + .map_err(|_| ()) + } +} + impl PalletInfo { pub fn new( index: u32, @@ -322,6 +344,32 @@ impl TryFrom for Response { } } +impl TryFrom for Response { + type Error = (); + + fn try_from(new: NewResponse) -> result::Result { + use NewResponse::*; + Ok(match new { + Null => Self::Null, + Assets(assets) => Self::Assets(assets.try_into()?), + ExecutionResult(result) => + Self::ExecutionResult(result.map(|(num, old_error)| (num, old_error.into()))), + Version(version) => Self::Version(version), + PalletsInfo(pallet_info) => { + let inner = pallet_info + .into_iter() + .map(TryInto::try_into) + .collect::, _>>()?; + Self::PalletsInfo( + BoundedVec::::try_from(inner).map_err(|_| ())?, + ) + }, + DispatchResult(maybe_error) => + Self::DispatchResult(maybe_error.try_into().map_err(|_| ())?), + }) + } +} + /// Information regarding the composition of a query response. #[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] pub struct QueryResponseInfo { @@ -334,6 +382,18 @@ pub struct QueryResponseInfo { pub max_weight: Weight, } +impl TryFrom for QueryResponseInfo { + type Error = (); + + fn try_from(new: NewQueryResponseInfo) -> result::Result { + Ok(Self { + destination: new.destination.try_into()?, + query_id: new.query_id, + max_weight: new.max_weight, + }) + } +} + impl TryFrom for QueryResponseInfo { type Error = (); @@ -690,6 +750,7 @@ pub enum Instruction { /// Kind: *Command* /// /// Errors: + #[builder(pays_fees)] BuyExecution { fees: Asset, weight_limit: WeightLimit }, /// Refund any surplus weight previously bought with `BuyExecution`. @@ -1206,6 +1267,158 @@ impl TryFrom> for Xcm { } } +// Convert from a v5 XCM to a v4 XCM. +impl TryFrom> for Xcm { + type Error = (); + fn try_from(new_xcm: NewXcm) -> result::Result { + Ok(Xcm(new_xcm.0.into_iter().map(TryInto::try_into).collect::>()?)) + } +} + +// Convert from a v5 instruction to a v4 instruction. 
+impl<Call> TryFrom<NewInstruction<Call>> for Instruction<Call> {
+	type Error = ();
+	fn try_from(new_instruction: NewInstruction<Call>) -> result::Result<Self, Self::Error> {
+		use NewInstruction::*;
+		Ok(match new_instruction {
+			WithdrawAsset(assets) => Self::WithdrawAsset(assets.try_into()?),
+			ReserveAssetDeposited(assets) => Self::ReserveAssetDeposited(assets.try_into()?),
+			ReceiveTeleportedAsset(assets) => Self::ReceiveTeleportedAsset(assets.try_into()?),
+			QueryResponse { query_id, response, max_weight, querier: Some(querier) } =>
+				Self::QueryResponse {
+					query_id,
+					querier: querier.try_into()?,
+					response: response.try_into()?,
+					max_weight,
+				},
+			QueryResponse { query_id, response, max_weight, querier: None } =>
+				Self::QueryResponse {
+					query_id,
+					querier: None,
+					response: response.try_into()?,
+					max_weight,
+				},
+			TransferAsset { assets, beneficiary } => Self::TransferAsset {
+				assets: assets.try_into()?,
+				beneficiary: beneficiary.try_into()?,
+			},
+			TransferReserveAsset { assets, dest, xcm } => Self::TransferReserveAsset {
+				assets: assets.try_into()?,
+				dest: dest.try_into()?,
+				xcm: xcm.try_into()?,
+			},
+			HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity } =>
+				Self::HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity },
+			HrmpChannelAccepted { recipient } => Self::HrmpChannelAccepted { recipient },
+			HrmpChannelClosing { initiator, sender, recipient } =>
+				Self::HrmpChannelClosing { initiator, sender, recipient },
+			Transact { origin_kind, require_weight_at_most, call } =>
+				Self::Transact { origin_kind, require_weight_at_most, call: call.into() },
+			ReportError(response_info) => Self::ReportError(QueryResponseInfo {
+				query_id: response_info.query_id,
+				destination: response_info.destination.try_into().map_err(|_| ())?,
+				max_weight: response_info.max_weight,
+			}),
+			DepositAsset { assets, beneficiary } => {
+				let beneficiary = beneficiary.try_into()?;
+				let assets = assets.try_into()?;
+				Self::DepositAsset { assets, beneficiary }
+			},
+			DepositReserveAsset { assets, dest, xcm } => {
+				let dest = dest.try_into()?;
+				let xcm = xcm.try_into()?;
+				let assets = assets.try_into()?;
+				Self::DepositReserveAsset { assets, dest, xcm }
+			},
+			ExchangeAsset { give, want, maximal } => {
+				let give = give.try_into()?;
+				let want = want.try_into()?;
+				Self::ExchangeAsset { give, want, maximal }
+			},
+			InitiateReserveWithdraw { assets, reserve, xcm } => {
+				// No `max_assets` here, so if there's a count, then we cannot translate.
+				let assets = assets.try_into()?;
+				let reserve = reserve.try_into()?;
+				let xcm = xcm.try_into()?;
+				Self::InitiateReserveWithdraw { assets, reserve, xcm }
+			},
+			InitiateTeleport { assets, dest, xcm } => {
+				// No `max_assets` here, so if there's a count, then we cannot translate.
+				let assets = assets.try_into()?;
+				let dest = dest.try_into()?;
+				let xcm = xcm.try_into()?;
+				Self::InitiateTeleport { assets, dest, xcm }
+			},
+			ReportHolding { response_info, assets } => {
+				let response_info = QueryResponseInfo {
+					destination: response_info.destination.try_into().map_err(|_| ())?,
+					query_id: response_info.query_id,
+					max_weight: response_info.max_weight,
+				};
+				Self::ReportHolding { response_info, assets: assets.try_into()?
} + }, + BuyExecution { fees, weight_limit } => { + let fees = fees.try_into()?; + let weight_limit = weight_limit.into(); + Self::BuyExecution { fees, weight_limit } + }, + ClearOrigin => Self::ClearOrigin, + DescendOrigin(who) => Self::DescendOrigin(who.try_into()?), + RefundSurplus => Self::RefundSurplus, + SetErrorHandler(xcm) => Self::SetErrorHandler(xcm.try_into()?), + SetAppendix(xcm) => Self::SetAppendix(xcm.try_into()?), + ClearError => Self::ClearError, + ClaimAsset { assets, ticket } => { + let assets = assets.try_into()?; + let ticket = ticket.try_into()?; + Self::ClaimAsset { assets, ticket } + }, + Trap(code) => Self::Trap(code), + SubscribeVersion { query_id, max_response_weight } => + Self::SubscribeVersion { query_id, max_response_weight }, + UnsubscribeVersion => Self::UnsubscribeVersion, + BurnAsset(assets) => Self::BurnAsset(assets.try_into()?), + ExpectAsset(assets) => Self::ExpectAsset(assets.try_into()?), + ExpectOrigin(maybe_origin) => + Self::ExpectOrigin(maybe_origin.map(|origin| origin.try_into()).transpose()?), + ExpectError(maybe_error) => Self::ExpectError(maybe_error), + ExpectTransactStatus(maybe_error_code) => Self::ExpectTransactStatus(maybe_error_code), + QueryPallet { module_name, response_info } => + Self::QueryPallet { module_name, response_info: response_info.try_into()? }, + ExpectPallet { index, name, module_name, crate_major, min_crate_minor } => + Self::ExpectPallet { index, name, module_name, crate_major, min_crate_minor }, + ReportTransactStatus(response_info) => + Self::ReportTransactStatus(response_info.try_into()?), + ClearTransactStatus => Self::ClearTransactStatus, + UniversalOrigin(junction) => Self::UniversalOrigin(junction.try_into()?), + ExportMessage { network, destination, xcm } => Self::ExportMessage { + network: network.into(), + destination: destination.try_into()?, + xcm: xcm.try_into()?, + }, + LockAsset { asset, unlocker } => + Self::LockAsset { asset: asset.try_into()?, unlocker: unlocker.try_into()? }, + UnlockAsset { asset, target } => + Self::UnlockAsset { asset: asset.try_into()?, target: target.try_into()? }, + NoteUnlockable { asset, owner } => + Self::NoteUnlockable { asset: asset.try_into()?, owner: owner.try_into()? }, + RequestUnlock { asset, locker } => + Self::RequestUnlock { asset: asset.try_into()?, locker: locker.try_into()? }, + SetFeesMode { jit_withdraw } => Self::SetFeesMode { jit_withdraw }, + SetTopic(topic) => Self::SetTopic(topic), + ClearTopic => Self::ClearTopic, + AliasOrigin(location) => Self::AliasOrigin(location.try_into()?), + UnpaidExecution { weight_limit, check_origin } => Self::UnpaidExecution { + weight_limit, + check_origin: check_origin.map(|origin| origin.try_into()).transpose()?, + }, + PayFees { .. } => { + return Err(()); + }, + }) + } +} + // Convert from a v3 instruction to a v4 instruction impl TryFrom> for Instruction { type Error = (); diff --git a/polkadot/xcm/src/v5/asset.rs b/polkadot/xcm/src/v5/asset.rs new file mode 100644 index 000000000000..0b5f6e8b6df9 --- /dev/null +++ b/polkadot/xcm/src/v5/asset.rs @@ -0,0 +1,1116 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Cross-Consensus Message format asset data structures. +//! +//! This encompasses four types for representing assets: +//! - `Asset`: A description of a single asset, either an instance of a non-fungible or some amount +//! of a fungible. +//! - `Assets`: A collection of `Asset`s. These are stored in a `Vec` and sorted with fungibles +//! first. +//! - `Wild`: A single asset wildcard, this can either be "all" assets, or all assets of a specific +//! kind. +//! - `AssetFilter`: A combination of `Wild` and `Assets` designed for efficiently filtering an XCM +//! holding account. + +use super::{InteriorLocation, Location, Reanchorable}; +use crate::v4::{ + Asset as OldAsset, AssetFilter as OldAssetFilter, AssetId as OldAssetId, + AssetInstance as OldAssetInstance, Assets as OldAssets, Fungibility as OldFungibility, + WildAsset as OldWildAsset, WildFungibility as OldWildFungibility, +}; +use alloc::{vec, vec::Vec}; +use bounded_collections::{BoundedVec, ConstU32}; +use codec::{self as codec, Decode, Encode, MaxEncodedLen}; +use core::cmp::Ordering; +use scale_info::TypeInfo; + +/// A general identifier for an instance of a non-fungible asset class. +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + Debug, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub enum AssetInstance { + /// Undefined - used if the non-fungible asset class has only one instance. + Undefined, + + /// A compact index. Technically this could be greater than `u128`, but this implementation + /// supports only values up to `2**128 - 1`. + Index(#[codec(compact)] u128), + + /// A 4-byte fixed-length datum. + Array4([u8; 4]), + + /// An 8-byte fixed-length datum. + Array8([u8; 8]), + + /// A 16-byte fixed-length datum. + Array16([u8; 16]), + + /// A 32-byte fixed-length datum. 
+ Array32([u8; 32]), +} + +impl TryFrom for AssetInstance { + type Error = (); + fn try_from(value: OldAssetInstance) -> Result { + use OldAssetInstance::*; + Ok(match value { + Undefined => Self::Undefined, + Index(n) => Self::Index(n), + Array4(n) => Self::Array4(n), + Array8(n) => Self::Array8(n), + Array16(n) => Self::Array16(n), + Array32(n) => Self::Array32(n), + }) + } +} + +impl From<()> for AssetInstance { + fn from(_: ()) -> Self { + Self::Undefined + } +} + +impl From<[u8; 4]> for AssetInstance { + fn from(x: [u8; 4]) -> Self { + Self::Array4(x) + } +} + +impl From<[u8; 8]> for AssetInstance { + fn from(x: [u8; 8]) -> Self { + Self::Array8(x) + } +} + +impl From<[u8; 16]> for AssetInstance { + fn from(x: [u8; 16]) -> Self { + Self::Array16(x) + } +} + +impl From<[u8; 32]> for AssetInstance { + fn from(x: [u8; 32]) -> Self { + Self::Array32(x) + } +} + +impl From for AssetInstance { + fn from(x: u8) -> Self { + Self::Index(x as u128) + } +} + +impl From for AssetInstance { + fn from(x: u16) -> Self { + Self::Index(x as u128) + } +} + +impl From for AssetInstance { + fn from(x: u32) -> Self { + Self::Index(x as u128) + } +} + +impl From for AssetInstance { + fn from(x: u64) -> Self { + Self::Index(x as u128) + } +} + +impl TryFrom for () { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Undefined => Ok(()), + _ => Err(()), + } + } +} + +impl TryFrom for [u8; 4] { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Array4(x) => Ok(x), + _ => Err(()), + } + } +} + +impl TryFrom for [u8; 8] { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Array8(x) => Ok(x), + _ => Err(()), + } + } +} + +impl TryFrom for [u8; 16] { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Array16(x) => Ok(x), + _ => Err(()), + } + } +} + +impl TryFrom for [u8; 32] { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Array32(x) => Ok(x), + _ => Err(()), + } + } +} + +impl TryFrom for u8 { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Index(x) => x.try_into().map_err(|_| ()), + _ => Err(()), + } + } +} + +impl TryFrom for u16 { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Index(x) => x.try_into().map_err(|_| ()), + _ => Err(()), + } + } +} + +impl TryFrom for u32 { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Index(x) => x.try_into().map_err(|_| ()), + _ => Err(()), + } + } +} + +impl TryFrom for u64 { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Index(x) => x.try_into().map_err(|_| ()), + _ => Err(()), + } + } +} + +impl TryFrom for u128 { + type Error = (); + fn try_from(x: AssetInstance) -> Result { + match x { + AssetInstance::Index(x) => Ok(x), + _ => Err(()), + } + } +} + +/// Classification of whether an asset is fungible or not, along with a mandatory amount or +/// instance. +#[derive( + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Debug, + Encode, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub enum Fungibility { + /// A fungible asset; we record a number of units, as a `u128` in the inner item. + Fungible(#[codec(compact)] u128), + /// A non-fungible asset. We record the instance identifier in the inner item. 
Only one asset + /// of each instance identifier may ever be in existence at once. + NonFungible(AssetInstance), +} + +#[derive(Decode)] +enum UncheckedFungibility { + Fungible(#[codec(compact)] u128), + NonFungible(AssetInstance), +} + +impl Decode for Fungibility { + fn decode(input: &mut I) -> Result { + match UncheckedFungibility::decode(input)? { + UncheckedFungibility::Fungible(a) if a != 0 => Ok(Self::Fungible(a)), + UncheckedFungibility::NonFungible(i) => Ok(Self::NonFungible(i)), + UncheckedFungibility::Fungible(_) => + Err("Fungible asset of zero amount is not allowed".into()), + } + } +} + +impl Fungibility { + pub fn is_kind(&self, w: WildFungibility) -> bool { + use Fungibility::*; + use WildFungibility::{Fungible as WildFungible, NonFungible as WildNonFungible}; + matches!((self, w), (Fungible(_), WildFungible) | (NonFungible(_), WildNonFungible)) + } +} + +impl From for Fungibility { + fn from(amount: i32) -> Fungibility { + debug_assert_ne!(amount, 0); + Fungibility::Fungible(amount as u128) + } +} + +impl From for Fungibility { + fn from(amount: u128) -> Fungibility { + debug_assert_ne!(amount, 0); + Fungibility::Fungible(amount) + } +} + +impl> From for Fungibility { + fn from(instance: T) -> Fungibility { + Fungibility::NonFungible(instance.into()) + } +} + +impl TryFrom for Fungibility { + type Error = (); + fn try_from(value: OldFungibility) -> Result { + use OldFungibility::*; + Ok(match value { + Fungible(n) => Self::Fungible(n), + NonFungible(i) => Self::NonFungible(i.try_into()?), + }) + } +} + +/// Classification of whether an asset is fungible or not. +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Debug, + Encode, + Decode, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub enum WildFungibility { + /// The asset is fungible. + Fungible, + /// The asset is not fungible. + NonFungible, +} + +impl TryFrom for WildFungibility { + type Error = (); + fn try_from(value: OldWildFungibility) -> Result { + use OldWildFungibility::*; + Ok(match value { + Fungible => Self::Fungible, + NonFungible => Self::NonFungible, + }) + } +} + +/// Location to identify an asset. +#[derive( + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Debug, + Encode, + Decode, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub struct AssetId(pub Location); + +impl> From for AssetId { + fn from(x: T) -> Self { + Self(x.into()) + } +} + +impl TryFrom for AssetId { + type Error = (); + fn try_from(old: OldAssetId) -> Result { + Ok(Self(old.0.try_into()?)) + } +} + +impl AssetId { + /// Prepend a `Location` to an asset id, giving it a new root location. + pub fn prepend_with(&mut self, prepend: &Location) -> Result<(), ()> { + self.0.prepend_with(prepend.clone()).map_err(|_| ())?; + Ok(()) + } + + /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding + /// `Asset` value. + pub fn into_asset(self, fun: Fungibility) -> Asset { + Asset { fun, id: self } + } + + /// Use the value of `self` along with a `fun` fungibility specifier to create the corresponding + /// `WildAsset` wildcard (`AllOf`) value. + pub fn into_wild(self, fun: WildFungibility) -> WildAsset { + WildAsset::AllOf { fun, id: self } + } +} + +impl Reanchorable for AssetId { + type Error = (); + + /// Mutate the asset to represent the same value from the perspective of a new `target` + /// location. The local chain's location is provided in `context`. 
+ fn reanchor(&mut self, target: &Location, context: &InteriorLocation) -> Result<(), ()> { + self.0.reanchor(target, context)?; + Ok(()) + } + + fn reanchored(mut self, target: &Location, context: &InteriorLocation) -> Result { + match self.reanchor(target, context) { + Ok(()) => Ok(self), + Err(()) => Err(()), + } + } +} + +/// Either an amount of a single fungible asset, or a single well-identified non-fungible asset. +#[derive( + Clone, + Eq, + PartialEq, + Debug, + Encode, + Decode, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub struct Asset { + /// The overall asset identity (aka *class*, in the case of a non-fungible). + pub id: AssetId, + /// The fungibility of the asset, which contains either the amount (in the case of a fungible + /// asset) or the *instance ID*, the secondary asset identifier. + pub fun: Fungibility, +} + +impl PartialOrd for Asset { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for Asset { + fn cmp(&self, other: &Self) -> Ordering { + match (&self.fun, &other.fun) { + (Fungibility::Fungible(..), Fungibility::NonFungible(..)) => Ordering::Less, + (Fungibility::NonFungible(..), Fungibility::Fungible(..)) => Ordering::Greater, + _ => (&self.id, &self.fun).cmp(&(&other.id, &other.fun)), + } + } +} + +impl, B: Into> From<(A, B)> for Asset { + fn from((id, fun): (A, B)) -> Asset { + Asset { fun: fun.into(), id: id.into() } + } +} + +impl Asset { + pub fn is_fungible(&self, maybe_id: Option) -> bool { + use Fungibility::*; + matches!(self.fun, Fungible(..)) && maybe_id.map_or(true, |i| i == self.id) + } + + pub fn is_non_fungible(&self, maybe_id: Option) -> bool { + use Fungibility::*; + matches!(self.fun, NonFungible(..)) && maybe_id.map_or(true, |i| i == self.id) + } + + /// Prepend a `Location` to a concrete asset, giving it a new root location. + pub fn prepend_with(&mut self, prepend: &Location) -> Result<(), ()> { + self.id.prepend_with(prepend) + } + + /// Returns true if `self` is a super-set of the given `inner` asset. + pub fn contains(&self, inner: &Asset) -> bool { + use Fungibility::*; + if self.id == inner.id { + match (&self.fun, &inner.fun) { + (Fungible(a), Fungible(i)) if a >= i => return true, + (NonFungible(a), NonFungible(i)) if a == i => return true, + _ => (), + } + } + false + } +} + +impl Reanchorable for Asset { + type Error = (); + + /// Mutate the location of the asset identifier if concrete, giving it the same location + /// relative to a `target` context. The local context is provided as `context`. + fn reanchor(&mut self, target: &Location, context: &InteriorLocation) -> Result<(), ()> { + self.id.reanchor(target, context) + } + + /// Mutate the location of the asset identifier if concrete, giving it the same location + /// relative to a `target` context. The local context is provided as `context`. + fn reanchored(mut self, target: &Location, context: &InteriorLocation) -> Result { + self.id.reanchor(target, context)?; + Ok(self) + } +} + +impl TryFrom for Asset { + type Error = (); + fn try_from(old: OldAsset) -> Result { + Ok(Self { id: old.id.try_into()?, fun: old.fun.try_into()? }) + } +} + +/// A `Vec` of `Asset`s. +/// +/// There are a number of invariants which the construction and mutation functions must ensure are +/// maintained: +/// - It may contain no items of duplicate asset class; +/// - All items must be ordered; +/// - The number of items should grow no larger than `MAX_ITEMS_IN_ASSETS`. 
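Editorial sketch (not part of the patch): the invariants listed above are why the infallible `From<Vec<Asset>>` constructor sorts and merges duplicates, while `from_sorted_and_deduplicated` rejects input that already violates them. Assuming the `v5::prelude` re-exports used by the tests further down:

use crate::v5::prelude::*;
use alloc::{vec, vec::Vec};

fn assets_invariants_sketch() {
	// `From<Vec<Asset>>` sorts and merges duplicate fungibles, so it is infallible.
	let list: Vec<Asset> = vec![(Here, 100u128).into(), (Parent, 10u128).into()];
	let assets: Assets = list.into();

	// The checked constructor insists the input already satisfies the invariants.
	assert!(Assets::from_sorted_and_deduplicated(assets.into_inner()).is_ok());
	let out_of_order: Vec<Asset> = vec![(Parent, 10u128).into(), (Here, 10u128).into()];
	assert!(Assets::from_sorted_and_deduplicated(out_of_order).is_err());
}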
+#[derive( + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Debug, + Encode, + TypeInfo, + Default, + serde::Serialize, + serde::Deserialize, +)] +pub struct Assets(Vec); + +/// Maximum number of items we expect in a single `Assets` value. Note this is not (yet) +/// enforced, and just serves to provide a sensible `max_encoded_len` for `Assets`. +pub const MAX_ITEMS_IN_ASSETS: usize = 20; + +impl MaxEncodedLen for Assets { + fn max_encoded_len() -> usize { + Asset::max_encoded_len() * MAX_ITEMS_IN_ASSETS + } +} + +impl Decode for Assets { + fn decode(input: &mut I) -> Result { + let bounded_instructions = + BoundedVec::>::decode(input)?; + Self::from_sorted_and_deduplicated(bounded_instructions.into_inner()) + .map_err(|()| "Out of order".into()) + } +} + +impl TryFrom for Assets { + type Error = (); + fn try_from(old: OldAssets) -> Result { + let v = old + .into_inner() + .into_iter() + .map(Asset::try_from) + .collect::, ()>>()?; + Ok(Assets(v)) + } +} + +impl From> for Assets { + fn from(mut assets: Vec) -> Self { + let mut res = Vec::with_capacity(assets.len()); + if !assets.is_empty() { + assets.sort(); + let mut iter = assets.into_iter(); + if let Some(first) = iter.next() { + let last = iter.fold(first, |a, b| -> Asset { + match (a, b) { + ( + Asset { fun: Fungibility::Fungible(a_amount), id: a_id }, + Asset { fun: Fungibility::Fungible(b_amount), id: b_id }, + ) if a_id == b_id => Asset { + id: a_id, + fun: Fungibility::Fungible(a_amount.saturating_add(b_amount)), + }, + ( + Asset { fun: Fungibility::NonFungible(a_instance), id: a_id }, + Asset { fun: Fungibility::NonFungible(b_instance), id: b_id }, + ) if a_id == b_id && a_instance == b_instance => + Asset { fun: Fungibility::NonFungible(a_instance), id: a_id }, + (to_push, to_remember) => { + res.push(to_push); + to_remember + }, + } + }); + res.push(last); + } + } + Self(res) + } +} + +impl> From for Assets { + fn from(x: T) -> Self { + Self(vec![x.into()]) + } +} + +impl Assets { + /// A new (empty) value. + pub fn new() -> Self { + Self(Vec::new()) + } + + /// Create a new instance of `Assets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. + /// + /// Returns `Ok` if the operation succeeds and `Err` if `r` is out of order or had duplicates. + /// If you can't guarantee that `r` is sorted and deduplicated, then use + /// `From::>::from` which is infallible. + pub fn from_sorted_and_deduplicated(r: Vec) -> Result { + if r.is_empty() { + return Ok(Self(Vec::new())) + } + r.iter().skip(1).try_fold(&r[0], |a, b| -> Result<&Asset, ()> { + if a.id < b.id || a < b && (a.is_non_fungible(None) || b.is_non_fungible(None)) { + Ok(b) + } else { + Err(()) + } + })?; + Ok(Self(r)) + } + + /// Create a new instance of `Assets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. + /// + /// In release mode, this skips any checks to ensure that `r` is correct, making it a + /// negligible-cost operation. Generally though you should avoid using it unless you have a + /// strict proof that `r` is valid. + #[cfg(test)] + pub fn from_sorted_and_deduplicated_skip_checks(r: Vec) -> Self { + Self::from_sorted_and_deduplicated(r).expect("Invalid input r is not sorted/deduped") + } + /// Create a new instance of `Assets` from a `Vec` whose contents are sorted + /// and which contain no duplicates. + /// + /// In release mode, this skips any checks to ensure that `r` is correct, making it a + /// negligible-cost operation. 
Generally though you should avoid using it unless you have a + /// strict proof that `r` is valid. + /// + /// In test mode, this checks anyway and panics on fail. + #[cfg(not(test))] + pub fn from_sorted_and_deduplicated_skip_checks(r: Vec) -> Self { + Self(r) + } + + /// Add some asset onto the list, saturating. This is quite a laborious operation since it + /// maintains the ordering. + pub fn push(&mut self, a: Asset) { + for asset in self.0.iter_mut().filter(|x| x.id == a.id) { + match (&a.fun, &mut asset.fun) { + (Fungibility::Fungible(amount), Fungibility::Fungible(balance)) => { + *balance = balance.saturating_add(*amount); + return + }, + (Fungibility::NonFungible(inst1), Fungibility::NonFungible(inst2)) + if inst1 == inst2 => + return, + _ => (), + } + } + self.0.push(a); + self.0.sort(); + } + + /// Returns `true` if this definitely represents no asset. + pub fn is_none(&self) -> bool { + self.0.is_empty() + } + + /// Returns true if `self` is a super-set of the given `inner` asset. + pub fn contains(&self, inner: &Asset) -> bool { + self.0.iter().any(|i| i.contains(inner)) + } + + /// Consume `self` and return the inner vec. + #[deprecated = "Use `into_inner()` instead"] + pub fn drain(self) -> Vec { + self.0 + } + + /// Consume `self` and return the inner vec. + pub fn into_inner(self) -> Vec { + self.0 + } + + /// Return a reference to the inner vec. + pub fn inner(&self) -> &Vec { + &self.0 + } + + /// Return the number of distinct asset instances contained. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Prepend a `Location` to any concrete asset items, giving it a new root location. + pub fn prepend_with(&mut self, prefix: &Location) -> Result<(), ()> { + self.0.iter_mut().try_for_each(|i| i.prepend_with(prefix))?; + self.0.sort(); + Ok(()) + } + + /// Return a reference to an item at a specific index or `None` if it doesn't exist. + pub fn get(&self, index: usize) -> Option<&Asset> { + self.0.get(index) + } +} + +impl Reanchorable for Assets { + type Error = (); + + fn reanchor(&mut self, target: &Location, context: &InteriorLocation) -> Result<(), ()> { + self.0.iter_mut().try_for_each(|i| i.reanchor(target, context))?; + self.0.sort(); + Ok(()) + } + + fn reanchored(mut self, target: &Location, context: &InteriorLocation) -> Result { + match self.reanchor(target, context) { + Ok(()) => Ok(self), + Err(()) => Err(()), + } + } +} + +/// A wildcard representing a set of assets. +#[derive( + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Debug, + Encode, + Decode, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub enum WildAsset { + /// All assets in Holding. + All, + /// All assets in Holding of a given fungibility and ID. + AllOf { id: AssetId, fun: WildFungibility }, + /// All assets in Holding, up to `u32` individual assets (different instances of non-fungibles + /// are separate assets). + AllCounted(#[codec(compact)] u32), + /// All assets in Holding of a given fungibility and ID up to `count` individual assets + /// (different instances of non-fungibles are separate assets). + AllOfCounted { + id: AssetId, + fun: WildFungibility, + #[codec(compact)] + count: u32, + }, +} + +impl TryFrom for WildAsset { + type Error = (); + fn try_from(old: OldWildAsset) -> Result { + use OldWildAsset::*; + Ok(match old { + AllOf { id, fun } => Self::AllOf { id: id.try_into()?, fun: fun.try_into()? 
}, + All => Self::All, + AllOfCounted { id, fun, count } => + Self::AllOfCounted { id: id.try_into()?, fun: fun.try_into()?, count }, + AllCounted(count) => Self::AllCounted(count), + }) + } +} + +impl WildAsset { + /// Returns true if `self` is a super-set of the given `inner` asset. + pub fn contains(&self, inner: &Asset) -> bool { + use WildAsset::*; + match self { + AllOfCounted { count: 0, .. } | AllCounted(0) => false, + AllOf { fun, id } | AllOfCounted { id, fun, .. } => + inner.fun.is_kind(*fun) && &inner.id == id, + All | AllCounted(_) => true, + } + } + + /// Returns true if the wild element of `self` matches `inner`. + /// + /// Note that for `Counted` variants of wildcards, then it will disregard the count except for + /// always returning `false` when equal to 0. + #[deprecated = "Use `contains` instead"] + pub fn matches(&self, inner: &Asset) -> bool { + self.contains(inner) + } + + /// Mutate the asset to represent the same value from the perspective of a new `target` + /// location. The local chain's location is provided in `context`. + pub fn reanchor(&mut self, target: &Location, context: &InteriorLocation) -> Result<(), ()> { + use WildAsset::*; + match self { + AllOf { ref mut id, .. } | AllOfCounted { ref mut id, .. } => + id.reanchor(target, context), + All | AllCounted(_) => Ok(()), + } + } + + /// Maximum count of assets allowed to match, if any. + pub fn count(&self) -> Option { + use WildAsset::*; + match self { + AllOfCounted { count, .. } | AllCounted(count) => Some(*count), + All | AllOf { .. } => None, + } + } + + /// Explicit limit on number of assets allowed to match, if any. + pub fn limit(&self) -> Option { + self.count() + } + + /// Consume self and return the equivalent version but counted and with the `count` set to the + /// given parameter. + pub fn counted(self, count: u32) -> Self { + use WildAsset::*; + match self { + AllOfCounted { fun, id, .. } | AllOf { fun, id } => AllOfCounted { fun, id, count }, + All | AllCounted(_) => AllCounted(count), + } + } +} + +impl, B: Into> From<(A, B)> for WildAsset { + fn from((id, fun): (A, B)) -> WildAsset { + WildAsset::AllOf { fun: fun.into(), id: id.into() } + } +} + +/// `Asset` collection, defined either by a number of `Assets` or a single wildcard. +#[derive( + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Debug, + Encode, + Decode, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub enum AssetFilter { + /// Specify the filter as being everything contained by the given `Assets` inner. + Definite(Assets), + /// Specify the filter as the given `WildAsset` wildcard. + Wild(WildAsset), +} + +impl> From for AssetFilter { + fn from(x: T) -> Self { + Self::Wild(x.into()) + } +} + +impl From for AssetFilter { + fn from(x: Asset) -> Self { + Self::Definite(vec![x].into()) + } +} + +impl From> for AssetFilter { + fn from(x: Vec) -> Self { + Self::Definite(x.into()) + } +} + +impl From for AssetFilter { + fn from(x: Assets) -> Self { + Self::Definite(x) + } +} + +impl AssetFilter { + /// Returns true if `inner` would be matched by `self`. + /// + /// Note that for `Counted` variants of wildcards, then it will disregard the count except for + /// always returning `false` when equal to 0. 
+ pub fn matches(&self, inner: &Asset) -> bool { + match self { + AssetFilter::Definite(ref assets) => assets.contains(inner), + AssetFilter::Wild(ref wild) => wild.contains(inner), + } + } + + /// Mutate the location of the asset identifier if concrete, giving it the same location + /// relative to a `target` context. The local context is provided as `context`. + pub fn reanchor(&mut self, target: &Location, context: &InteriorLocation) -> Result<(), ()> { + match self { + AssetFilter::Definite(ref mut assets) => assets.reanchor(target, context), + AssetFilter::Wild(ref mut wild) => wild.reanchor(target, context), + } + } + + /// Maximum count of assets it is possible to match, if known. + pub fn count(&self) -> Option { + use AssetFilter::*; + match self { + Definite(x) => Some(x.len() as u32), + Wild(x) => x.count(), + } + } + + /// Explicit limit placed on the number of items, if any. + pub fn limit(&self) -> Option { + use AssetFilter::*; + match self { + Definite(_) => None, + Wild(x) => x.limit(), + } + } +} + +impl TryFrom for AssetFilter { + type Error = (); + fn try_from(old: OldAssetFilter) -> Result { + Ok(match old { + OldAssetFilter::Definite(x) => Self::Definite(x.try_into()?), + OldAssetFilter::Wild(x) => Self::Wild(x.try_into()?), + }) + } +} + +#[cfg(test)] +mod tests { + use super::super::prelude::*; + + #[test] + fn conversion_works() { + let _: Assets = (Here, 1u128).into(); + } + + #[test] + fn from_sorted_and_deduplicated_works() { + use super::*; + use alloc::vec; + + let empty = vec![]; + let r = Assets::from_sorted_and_deduplicated(empty); + assert_eq!(r, Ok(Assets(vec![]))); + + let dup_fun = vec![(Here, 100).into(), (Here, 10).into()]; + let r = Assets::from_sorted_and_deduplicated(dup_fun); + assert!(r.is_err()); + + let dup_nft = vec![(Here, *b"notgood!").into(), (Here, *b"notgood!").into()]; + let r = Assets::from_sorted_and_deduplicated(dup_nft); + assert!(r.is_err()); + + let good_fun = vec![(Here, 10).into(), (Parent, 10).into()]; + let r = Assets::from_sorted_and_deduplicated(good_fun.clone()); + assert_eq!(r, Ok(Assets(good_fun))); + + let bad_fun = vec![(Parent, 10).into(), (Here, 10).into()]; + let r = Assets::from_sorted_and_deduplicated(bad_fun); + assert!(r.is_err()); + + let good_nft = vec![(Here, ()).into(), (Here, *b"good").into()]; + let r = Assets::from_sorted_and_deduplicated(good_nft.clone()); + assert_eq!(r, Ok(Assets(good_nft))); + + let bad_nft = vec![(Here, *b"bad!").into(), (Here, ()).into()]; + let r = Assets::from_sorted_and_deduplicated(bad_nft); + assert!(r.is_err()); + + let mixed_good = vec![(Here, 10).into(), (Here, *b"good").into()]; + let r = Assets::from_sorted_and_deduplicated(mixed_good.clone()); + assert_eq!(r, Ok(Assets(mixed_good))); + + let mixed_bad = vec![(Here, *b"bad!").into(), (Here, 10).into()]; + let r = Assets::from_sorted_and_deduplicated(mixed_bad); + assert!(r.is_err()); + } + + #[test] + fn reanchor_preserves_sorting() { + use super::*; + use alloc::vec; + + let reanchor_context: Junctions = Parachain(2000).into(); + let dest = Location::new(1, []); + + let asset_1: Asset = (Location::new(0, [PalletInstance(50), GeneralIndex(1)]), 10).into(); + let mut asset_1_reanchored = asset_1.clone(); + assert!(asset_1_reanchored.reanchor(&dest, &reanchor_context).is_ok()); + assert_eq!( + asset_1_reanchored, + (Location::new(0, [Parachain(2000), PalletInstance(50), GeneralIndex(1)]), 10).into() + ); + + let asset_2: Asset = (Location::new(1, []), 10).into(); + let mut asset_2_reanchored = asset_2.clone(); + 
assert!(asset_2_reanchored.reanchor(&dest, &reanchor_context).is_ok()); + assert_eq!(asset_2_reanchored, (Location::new(0, []), 10).into()); + + let asset_3: Asset = (Location::new(1, [Parachain(1000)]), 10).into(); + let mut asset_3_reanchored = asset_3.clone(); + assert!(asset_3_reanchored.reanchor(&dest, &reanchor_context).is_ok()); + assert_eq!(asset_3_reanchored, (Location::new(0, [Parachain(1000)]), 10).into()); + + let mut assets: Assets = vec![asset_1.clone(), asset_2.clone(), asset_3.clone()].into(); + assert_eq!(assets.clone(), vec![asset_1.clone(), asset_2.clone(), asset_3.clone()].into()); + + // decoding respects limits and sorting + assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok()); + + assert!(assets.reanchor(&dest, &reanchor_context).is_ok()); + assert_eq!(assets.0, vec![asset_2_reanchored, asset_3_reanchored, asset_1_reanchored]); + + // decoding respects limits and sorting + assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok()); + } + + #[test] + fn prepend_preserves_sorting() { + use super::*; + use alloc::vec; + + let prefix = Location::new(0, [Parachain(1000)]); + + let asset_1: Asset = (Location::new(0, [PalletInstance(50), GeneralIndex(1)]), 10).into(); + let mut asset_1_prepended = asset_1.clone(); + assert!(asset_1_prepended.prepend_with(&prefix).is_ok()); + // changes interior X2->X3 + assert_eq!( + asset_1_prepended, + (Location::new(0, [Parachain(1000), PalletInstance(50), GeneralIndex(1)]), 10).into() + ); + + let asset_2: Asset = (Location::new(1, [PalletInstance(50), GeneralIndex(1)]), 10).into(); + let mut asset_2_prepended = asset_2.clone(); + assert!(asset_2_prepended.prepend_with(&prefix).is_ok()); + // changes parent + assert_eq!( + asset_2_prepended, + (Location::new(0, [PalletInstance(50), GeneralIndex(1)]), 10).into() + ); + + let asset_3: Asset = (Location::new(2, [PalletInstance(50), GeneralIndex(1)]), 10).into(); + let mut asset_3_prepended = asset_3.clone(); + assert!(asset_3_prepended.prepend_with(&prefix).is_ok()); + // changes parent + assert_eq!( + asset_3_prepended, + (Location::new(1, [PalletInstance(50), GeneralIndex(1)]), 10).into() + ); + + // `From` impl does sorting. 
+ let mut assets: Assets = vec![asset_1, asset_2, asset_3].into(); + // decoding respects limits and sorting + assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok()); + + // let's do `prepend_with` + assert!(assets.prepend_with(&prefix).is_ok()); + assert_eq!(assets.0, vec![asset_2_prepended, asset_1_prepended, asset_3_prepended]); + + // decoding respects limits and sorting + assert!(assets.using_encoded(|mut enc| Assets::decode(&mut enc).map(|_| ())).is_ok()); + } + + #[test] + fn decoding_respects_limit() { + use super::*; + + // Having lots of one asset will work since they are deduplicated + let lots_of_one_asset: Assets = + vec![(GeneralIndex(1), 1u128).into(); MAX_ITEMS_IN_ASSETS + 1].into(); + let encoded = lots_of_one_asset.encode(); + assert!(Assets::decode(&mut &encoded[..]).is_ok()); + + // Fewer assets than the limit works + let mut few_assets: Assets = Vec::new().into(); + for i in 0..MAX_ITEMS_IN_ASSETS { + few_assets.push((GeneralIndex(i as u128), 1u128).into()); + } + let encoded = few_assets.encode(); + assert!(Assets::decode(&mut &encoded[..]).is_ok()); + + // Having lots of different assets will not work + let mut too_many_different_assets: Assets = Vec::new().into(); + for i in 0..MAX_ITEMS_IN_ASSETS + 1 { + too_many_different_assets.push((GeneralIndex(i as u128), 1u128).into()); + } + let encoded = too_many_different_assets.encode(); + assert!(Assets::decode(&mut &encoded[..]).is_err()); + } +} diff --git a/polkadot/xcm/src/v5/junction.rs b/polkadot/xcm/src/v5/junction.rs new file mode 100644 index 000000000000..ed57de6f49bd --- /dev/null +++ b/polkadot/xcm/src/v5/junction.rs @@ -0,0 +1,315 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Support data structures for `Location`, primarily the `Junction` datatype. + +use super::Location; +pub use crate::v4::{BodyId, BodyPart}; +use crate::{ + v4::{Junction as OldJunction, NetworkId as OldNetworkId}, + VersionedLocation, +}; +use bounded_collections::{BoundedSlice, BoundedVec, ConstU32}; +use codec::{self, Decode, Encode, MaxEncodedLen}; +use scale_info::TypeInfo; +use serde::{Deserialize, Serialize}; + +/// A single item in a path to describe the relative location of a consensus system. +/// +/// Each item assumes a pre-existing location as its context and is defined in terms of it. +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + Debug, + TypeInfo, + MaxEncodedLen, + Serialize, + Deserialize, +)] +pub enum Junction { + /// An indexed parachain belonging to and operated by the context. + /// + /// Generally used when the context is a Polkadot Relay-chain. + Parachain(#[codec(compact)] u32), + /// A 32-byte identifier for an account of a specific network that is respected as a sovereign + /// endpoint within the context. + /// + /// Generally used when the context is a Substrate-based chain. 
+ AccountId32 { network: Option, id: [u8; 32] }, + /// An 8-byte index for an account of a specific network that is respected as a sovereign + /// endpoint within the context. + /// + /// May be used when the context is a Frame-based chain and includes e.g. an indices pallet. + AccountIndex64 { + network: Option, + #[codec(compact)] + index: u64, + }, + /// A 20-byte identifier for an account of a specific network that is respected as a sovereign + /// endpoint within the context. + /// + /// May be used when the context is an Ethereum or Bitcoin chain or smart-contract. + AccountKey20 { network: Option, key: [u8; 20] }, + /// An instanced, indexed pallet that forms a constituent part of the context. + /// + /// Generally used when the context is a Frame-based chain. + PalletInstance(u8), + /// A non-descript index within the context location. + /// + /// Usage will vary widely owing to its generality. + /// + /// NOTE: Try to avoid using this and instead use a more specific item. + GeneralIndex(#[codec(compact)] u128), + /// A nondescript array datum, 32 bytes, acting as a key within the context + /// location. + /// + /// Usage will vary widely owing to its generality. + /// + /// NOTE: Try to avoid using this and instead use a more specific item. + // Note this is implemented as an array with a length rather than using `BoundedVec` owing to + // the bound for `Copy`. + GeneralKey { length: u8, data: [u8; 32] }, + /// The unambiguous child. + /// + /// Not currently used except as a fallback when deriving context. + OnlyChild, + /// A pluralistic body existing within consensus. + /// + /// Typical to be used to represent a governance origin of a chain, but could in principle be + /// used to represent things such as multisigs also. + Plurality { id: BodyId, part: BodyPart }, + /// A global network capable of externalizing its own consensus. This is not generally + /// meaningful outside of the universal level. + GlobalConsensus(NetworkId), +} + +/// A global identifier of a data structure existing within consensus. +/// +/// Maintenance note: Networks with global consensus and which are practically bridgeable within the +/// Polkadot ecosystem are given preference over explicit naming in this enumeration. +#[derive( + Copy, + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + Debug, + TypeInfo, + MaxEncodedLen, + Serialize, + Deserialize, +)] +pub enum NetworkId { + /// Network specified by the first 32 bytes of its genesis block. + ByGenesis([u8; 32]), + /// Network defined by the first 32-bytes of the hash and number of some block it contains. + ByFork { block_number: u64, block_hash: [u8; 32] }, + /// The Polkadot mainnet Relay-chain. + Polkadot, + /// The Kusama canary-net Relay-chain. + Kusama, + /// The Westend testnet Relay-chain. + Westend, + /// The Rococo testnet Relay-chain. + Rococo, + /// The Wococo testnet Relay-chain. + Wococo, + /// An Ethereum network specified by its chain ID. + Ethereum { + /// The EIP-155 chain ID. + #[codec(compact)] + chain_id: u64, + }, + /// The Bitcoin network, including hard-forks supported by Bitcoin Core development team. + BitcoinCore, + /// The Bitcoin network, including hard-forks supported by Bitcoin Cash developers. + BitcoinCash, + /// The Polkadot Bulletin chain. 
+ PolkadotBulletin, +} + +impl From for Option { + fn from(old: OldNetworkId) -> Self { + Some(NetworkId::from(old)) + } +} + +impl From for NetworkId { + fn from(old: OldNetworkId) -> Self { + use OldNetworkId::*; + match old { + ByGenesis(hash) => Self::ByGenesis(hash), + ByFork { block_number, block_hash } => Self::ByFork { block_number, block_hash }, + Polkadot => Self::Polkadot, + Kusama => Self::Kusama, + Westend => Self::Westend, + Rococo => Self::Rococo, + Wococo => Self::Wococo, + Ethereum { chain_id } => Self::Ethereum { chain_id }, + BitcoinCore => Self::BitcoinCore, + BitcoinCash => Self::BitcoinCash, + PolkadotBulletin => Self::PolkadotBulletin, + } + } +} + +impl From for Junction { + fn from(n: NetworkId) -> Self { + Self::GlobalConsensus(n) + } +} + +impl From<[u8; 32]> for Junction { + fn from(id: [u8; 32]) -> Self { + Self::AccountId32 { network: None, id } + } +} + +impl From>> for Junction { + fn from(key: BoundedVec>) -> Self { + key.as_bounded_slice().into() + } +} + +impl<'a> From>> for Junction { + fn from(key: BoundedSlice<'a, u8, ConstU32<32>>) -> Self { + let mut data = [0u8; 32]; + data[..key.len()].copy_from_slice(&key[..]); + Self::GeneralKey { length: key.len() as u8, data } + } +} + +impl<'a> TryFrom<&'a Junction> for BoundedSlice<'a, u8, ConstU32<32>> { + type Error = (); + fn try_from(key: &'a Junction) -> Result { + match key { + Junction::GeneralKey { length, data } => + BoundedSlice::try_from(&data[..data.len().min(*length as usize)]).map_err(|_| ()), + _ => Err(()), + } + } +} + +impl From<[u8; 20]> for Junction { + fn from(key: [u8; 20]) -> Self { + Self::AccountKey20 { network: None, key } + } +} + +impl From for Junction { + fn from(index: u64) -> Self { + Self::AccountIndex64 { network: None, index } + } +} + +impl From for Junction { + fn from(id: u128) -> Self { + Self::GeneralIndex(id) + } +} + +impl TryFrom for Junction { + type Error = (); + fn try_from(value: OldJunction) -> Result { + use OldJunction::*; + Ok(match value { + Parachain(id) => Self::Parachain(id), + AccountId32 { network: maybe_network, id } => + Self::AccountId32 { network: maybe_network.map(|network| network.into()), id }, + AccountIndex64 { network: maybe_network, index } => + Self::AccountIndex64 { network: maybe_network.map(|network| network.into()), index }, + AccountKey20 { network: maybe_network, key } => + Self::AccountKey20 { network: maybe_network.map(|network| network.into()), key }, + PalletInstance(index) => Self::PalletInstance(index), + GeneralIndex(id) => Self::GeneralIndex(id), + GeneralKey { length, data } => Self::GeneralKey { length, data }, + OnlyChild => Self::OnlyChild, + Plurality { id, part } => Self::Plurality { id, part }, + GlobalConsensus(network) => Self::GlobalConsensus(network.into()), + }) + } +} + +impl Junction { + /// Convert `self` into a `Location` containing 0 parents. + /// + /// Similar to `Into::into`, except that this method can be used in a const evaluation context. + pub fn into_location(self) -> Location { + Location::new(0, [self]) + } + + /// Convert `self` into a `Location` containing `n` parents. + /// + /// Similar to `Self::into_location`, with the added ability to specify the number of parent + /// junctions. + pub fn into_exterior(self, n: u8) -> Location { + Location::new(n, [self]) + } + + /// Convert `self` into a `VersionedLocation` containing 0 parents. + /// + /// Similar to `Into::into`, except that this method can be used in a const evaluation context. 
+ pub fn into_versioned(self) -> VersionedLocation { + self.into_location().into_versioned() + } + + /// Remove the `NetworkId` value. + pub fn remove_network_id(&mut self) { + use Junction::*; + match self { + AccountId32 { ref mut network, .. } | + AccountIndex64 { ref mut network, .. } | + AccountKey20 { ref mut network, .. } => *network = None, + _ => {}, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloc::vec; + + #[test] + fn junction_round_trip_works() { + let j = Junction::GeneralKey { length: 32, data: [1u8; 32] }; + let k = Junction::try_from(OldJunction::try_from(j).unwrap()).unwrap(); + assert_eq!(j, k); + + let j = OldJunction::GeneralKey { length: 32, data: [1u8; 32] }; + let k = OldJunction::try_from(Junction::try_from(j).unwrap()).unwrap(); + assert_eq!(j, k); + + let j = Junction::from(BoundedVec::try_from(vec![1u8, 2, 3, 4]).unwrap()); + let k = Junction::try_from(OldJunction::try_from(j).unwrap()).unwrap(); + assert_eq!(j, k); + let s: BoundedSlice<_, _> = (&k).try_into().unwrap(); + assert_eq!(s, &[1u8, 2, 3, 4][..]); + + let j = OldJunction::GeneralKey { length: 32, data: [1u8; 32] }; + let k = OldJunction::try_from(Junction::try_from(j).unwrap()).unwrap(); + assert_eq!(j, k); + } +} diff --git a/polkadot/xcm/src/v5/junctions.rs b/polkadot/xcm/src/v5/junctions.rs new file mode 100644 index 000000000000..dc93c541d19d --- /dev/null +++ b/polkadot/xcm/src/v5/junctions.rs @@ -0,0 +1,723 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! XCM `Junctions`/`InteriorLocation` datatype. + +use super::{Junction, Location, NetworkId}; +use alloc::sync::Arc; +use codec::{Decode, Encode, MaxEncodedLen}; +use core::{mem, ops::Range, result}; +use scale_info::TypeInfo; + +/// Maximum number of `Junction`s that a `Junctions` can contain. +pub(crate) const MAX_JUNCTIONS: usize = 8; + +/// Non-parent junctions that can be constructed, up to the length of 8. This specific `Junctions` +/// implementation uses a Rust `enum` in order to make pattern matching easier. +/// +/// Parent junctions cannot be constructed with this type. Refer to `Location` for +/// instructions on constructing parent junctions. +#[derive( + Clone, + Eq, + PartialEq, + Ord, + PartialOrd, + Encode, + Decode, + Debug, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub enum Junctions { + /// The interpreting consensus system. + Here, + /// A relative path comprising 1 junction. + X1(Arc<[Junction; 1]>), + /// A relative path comprising 2 junctions. + X2(Arc<[Junction; 2]>), + /// A relative path comprising 3 junctions. + X3(Arc<[Junction; 3]>), + /// A relative path comprising 4 junctions. + X4(Arc<[Junction; 4]>), + /// A relative path comprising 5 junctions. + X5(Arc<[Junction; 5]>), + /// A relative path comprising 6 junctions. + X6(Arc<[Junction; 6]>), + /// A relative path comprising 7 junctions. 
+ X7(Arc<[Junction; 7]>), + /// A relative path comprising 8 junctions. + X8(Arc<[Junction; 8]>), +} + +macro_rules! impl_junctions { + ($count:expr, $variant:ident) => { + impl From<[Junction; $count]> for Junctions { + fn from(junctions: [Junction; $count]) -> Self { + Self::$variant(Arc::new(junctions)) + } + } + impl PartialEq<[Junction; $count]> for Junctions { + fn eq(&self, rhs: &[Junction; $count]) -> bool { + self.as_slice() == rhs + } + } + }; +} + +impl_junctions!(1, X1); +impl_junctions!(2, X2); +impl_junctions!(3, X3); +impl_junctions!(4, X4); +impl_junctions!(5, X5); +impl_junctions!(6, X6); +impl_junctions!(7, X7); +impl_junctions!(8, X8); + +pub struct JunctionsIterator { + junctions: Junctions, + range: Range, +} + +impl Iterator for JunctionsIterator { + type Item = Junction; + fn next(&mut self) -> Option { + self.junctions.at(self.range.next()?).cloned() + } +} + +impl DoubleEndedIterator for JunctionsIterator { + fn next_back(&mut self) -> Option { + self.junctions.at(self.range.next_back()?).cloned() + } +} + +pub struct JunctionsRefIterator<'a> { + junctions: &'a Junctions, + range: Range, +} + +impl<'a> Iterator for JunctionsRefIterator<'a> { + type Item = &'a Junction; + fn next(&mut self) -> Option<&'a Junction> { + self.junctions.at(self.range.next()?) + } +} + +impl<'a> DoubleEndedIterator for JunctionsRefIterator<'a> { + fn next_back(&mut self) -> Option<&'a Junction> { + self.junctions.at(self.range.next_back()?) + } +} +impl<'a> IntoIterator for &'a Junctions { + type Item = &'a Junction; + type IntoIter = JunctionsRefIterator<'a>; + fn into_iter(self) -> Self::IntoIter { + JunctionsRefIterator { junctions: self, range: 0..self.len() } + } +} + +impl IntoIterator for Junctions { + type Item = Junction; + type IntoIter = JunctionsIterator; + fn into_iter(self) -> Self::IntoIter { + JunctionsIterator { range: 0..self.len(), junctions: self } + } +} + +impl Junctions { + /// Convert `self` into a `Location` containing 0 parents. + /// + /// Similar to `Into::into`, except that this method can be used in a const evaluation context. + pub const fn into_location(self) -> Location { + Location { parents: 0, interior: self } + } + + /// Convert `self` into a `Location` containing `n` parents. + /// + /// Similar to `Self::into_location`, with the added ability to specify the number of parent + /// junctions. + pub const fn into_exterior(self, n: u8) -> Location { + Location { parents: n, interior: self } + } + + /// Casts `self` into a slice containing `Junction`s. + pub fn as_slice(&self) -> &[Junction] { + match self { + Junctions::Here => &[], + Junctions::X1(ref a) => &a[..], + Junctions::X2(ref a) => &a[..], + Junctions::X3(ref a) => &a[..], + Junctions::X4(ref a) => &a[..], + Junctions::X5(ref a) => &a[..], + Junctions::X6(ref a) => &a[..], + Junctions::X7(ref a) => &a[..], + Junctions::X8(ref a) => &a[..], + } + } + + /// Casts `self` into a mutable slice containing `Junction`s. 
+ pub fn as_slice_mut(&mut self) -> &mut [Junction] { + match self { + Junctions::Here => &mut [], + Junctions::X1(ref mut a) => &mut Arc::make_mut(a)[..], + Junctions::X2(ref mut a) => &mut Arc::make_mut(a)[..], + Junctions::X3(ref mut a) => &mut Arc::make_mut(a)[..], + Junctions::X4(ref mut a) => &mut Arc::make_mut(a)[..], + Junctions::X5(ref mut a) => &mut Arc::make_mut(a)[..], + Junctions::X6(ref mut a) => &mut Arc::make_mut(a)[..], + Junctions::X7(ref mut a) => &mut Arc::make_mut(a)[..], + Junctions::X8(ref mut a) => &mut Arc::make_mut(a)[..], + } + } + + /// Remove the `NetworkId` value in any `Junction`s. + pub fn remove_network_id(&mut self) { + self.for_each_mut(Junction::remove_network_id); + } + + /// Treating `self` as the universal context, return the location of the local consensus system + /// from the point of view of the given `target`. + pub fn invert_target(&self, target: &Location) -> Result { + let mut itself = self.clone(); + let mut junctions = Self::Here; + for _ in 0..target.parent_count() { + junctions = junctions + .pushed_front_with(itself.take_last().unwrap_or(Junction::OnlyChild)) + .map_err(|_| ())?; + } + let parents = target.interior().len() as u8; + Ok(Location::new(parents, junctions)) + } + + /// Execute a function `f` on every junction. We use this since we cannot implement a mutable + /// `Iterator` without unsafe code. + pub fn for_each_mut(&mut self, x: impl FnMut(&mut Junction)) { + self.as_slice_mut().iter_mut().for_each(x) + } + + /// Extract the network ID treating this value as a universal location. + /// + /// This will return an `Err` if the first item is not a `GlobalConsensus`, which would indicate + /// that this value is not a universal location. + pub fn global_consensus(&self) -> Result { + if let Some(Junction::GlobalConsensus(network)) = self.first() { + Ok(*network) + } else { + Err(()) + } + } + + /// Extract the network ID and the interior consensus location, treating this value as a + /// universal location. + /// + /// This will return an `Err` if the first item is not a `GlobalConsensus`, which would indicate + /// that this value is not a universal location. + pub fn split_global(self) -> Result<(NetworkId, Junctions), ()> { + match self.split_first() { + (location, Some(Junction::GlobalConsensus(network))) => Ok((network, location)), + _ => return Err(()), + } + } + + /// Treat `self` as a universal location and the context of `relative`, returning the universal + /// location of relative. + /// + /// This will return an error if `relative` has as many (or more) parents than there are + /// junctions in `self`, implying that relative refers into a different global consensus. + pub fn within_global(mut self, relative: Location) -> Result { + if self.len() <= relative.parent_count() as usize { + return Err(()) + } + for _ in 0..relative.parent_count() { + self.take_last(); + } + for j in relative.interior() { + self.push(*j).map_err(|_| ())?; + } + Ok(self) + } + + /// Consumes `self` and returns how `viewer` would address it locally. + pub fn relative_to(mut self, viewer: &Junctions) -> Location { + let mut i = 0; + while match (self.first(), viewer.at(i)) { + (Some(x), Some(y)) => x == y, + _ => false, + } { + self = self.split_first().0; + // NOTE: Cannot overflow as loop can only iterate at most `MAX_JUNCTIONS` times. + i += 1; + } + // AUDIT NOTES: + // - above loop ensures that `i <= viewer.len()`. + // - `viewer.len()` is at most `MAX_JUNCTIONS`, so won't overflow a `u8`. 
+ Location::new((viewer.len() - i) as u8, self) + } + + /// Returns first junction, or `None` if the location is empty. + pub fn first(&self) -> Option<&Junction> { + self.as_slice().first() + } + + /// Returns last junction, or `None` if the location is empty. + pub fn last(&self) -> Option<&Junction> { + self.as_slice().last() + } + + /// Splits off the first junction, returning the remaining suffix (first item in tuple) and the + /// first element (second item in tuple) or `None` if it was empty. + pub fn split_first(self) -> (Junctions, Option) { + match self { + Junctions::Here => (Junctions::Here, None), + Junctions::X1(xs) => { + let [a] = *xs; + (Junctions::Here, Some(a)) + }, + Junctions::X2(xs) => { + let [a, b] = *xs; + ([b].into(), Some(a)) + }, + Junctions::X3(xs) => { + let [a, b, c] = *xs; + ([b, c].into(), Some(a)) + }, + Junctions::X4(xs) => { + let [a, b, c, d] = *xs; + ([b, c, d].into(), Some(a)) + }, + Junctions::X5(xs) => { + let [a, b, c, d, e] = *xs; + ([b, c, d, e].into(), Some(a)) + }, + Junctions::X6(xs) => { + let [a, b, c, d, e, f] = *xs; + ([b, c, d, e, f].into(), Some(a)) + }, + Junctions::X7(xs) => { + let [a, b, c, d, e, f, g] = *xs; + ([b, c, d, e, f, g].into(), Some(a)) + }, + Junctions::X8(xs) => { + let [a, b, c, d, e, f, g, h] = *xs; + ([b, c, d, e, f, g, h].into(), Some(a)) + }, + } + } + + /// Splits off the last junction, returning the remaining prefix (first item in tuple) and the + /// last element (second item in tuple) or `None` if it was empty. + pub fn split_last(self) -> (Junctions, Option) { + match self { + Junctions::Here => (Junctions::Here, None), + Junctions::X1(xs) => { + let [a] = *xs; + (Junctions::Here, Some(a)) + }, + Junctions::X2(xs) => { + let [a, b] = *xs; + ([a].into(), Some(b)) + }, + Junctions::X3(xs) => { + let [a, b, c] = *xs; + ([a, b].into(), Some(c)) + }, + Junctions::X4(xs) => { + let [a, b, c, d] = *xs; + ([a, b, c].into(), Some(d)) + }, + Junctions::X5(xs) => { + let [a, b, c, d, e] = *xs; + ([a, b, c, d].into(), Some(e)) + }, + Junctions::X6(xs) => { + let [a, b, c, d, e, f] = *xs; + ([a, b, c, d, e].into(), Some(f)) + }, + Junctions::X7(xs) => { + let [a, b, c, d, e, f, g] = *xs; + ([a, b, c, d, e, f].into(), Some(g)) + }, + Junctions::X8(xs) => { + let [a, b, c, d, e, f, g, h] = *xs; + ([a, b, c, d, e, f, g].into(), Some(h)) + }, + } + } + + /// Removes the first element from `self`, returning it (or `None` if it was empty). + pub fn take_first(&mut self) -> Option { + let mut d = Junctions::Here; + mem::swap(&mut *self, &mut d); + let (tail, head) = d.split_first(); + *self = tail; + head + } + + /// Removes the last element from `self`, returning it (or `None` if it was empty). + pub fn take_last(&mut self) -> Option { + let mut d = Junctions::Here; + mem::swap(&mut *self, &mut d); + let (head, tail) = d.split_last(); + *self = head; + tail + } + + /// Mutates `self` to be appended with `new` or returns an `Err` with `new` if would overflow. + pub fn push(&mut self, new: impl Into) -> result::Result<(), Junction> { + let new = new.into(); + let mut dummy = Junctions::Here; + mem::swap(self, &mut dummy); + match dummy.pushed_with(new) { + Ok(s) => { + *self = s; + Ok(()) + }, + Err((s, j)) => { + *self = s; + Err(j) + }, + } + } + + /// Mutates `self` to be prepended with `new` or returns an `Err` with `new` if would overflow. 
+ pub fn push_front(&mut self, new: impl Into) -> result::Result<(), Junction> { + let new = new.into(); + let mut dummy = Junctions::Here; + mem::swap(self, &mut dummy); + match dummy.pushed_front_with(new) { + Ok(s) => { + *self = s; + Ok(()) + }, + Err((s, j)) => { + *self = s; + Err(j) + }, + } + } + + /// Consumes `self` and returns a `Junctions` suffixed with `new`, or an `Err` with the + /// original value of `self` and `new` in case of overflow. + pub fn pushed_with(self, new: impl Into) -> result::Result { + let new = new.into(); + Ok(match self { + Junctions::Here => [new].into(), + Junctions::X1(xs) => { + let [a] = *xs; + [a, new].into() + }, + Junctions::X2(xs) => { + let [a, b] = *xs; + [a, b, new].into() + }, + Junctions::X3(xs) => { + let [a, b, c] = *xs; + [a, b, c, new].into() + }, + Junctions::X4(xs) => { + let [a, b, c, d] = *xs; + [a, b, c, d, new].into() + }, + Junctions::X5(xs) => { + let [a, b, c, d, e] = *xs; + [a, b, c, d, e, new].into() + }, + Junctions::X6(xs) => { + let [a, b, c, d, e, f] = *xs; + [a, b, c, d, e, f, new].into() + }, + Junctions::X7(xs) => { + let [a, b, c, d, e, f, g] = *xs; + [a, b, c, d, e, f, g, new].into() + }, + s => Err((s, new))?, + }) + } + + /// Consumes `self` and returns a `Junctions` prefixed with `new`, or an `Err` with the + /// original value of `self` and `new` in case of overflow. + pub fn pushed_front_with( + self, + new: impl Into, + ) -> result::Result { + let new = new.into(); + Ok(match self { + Junctions::Here => [new].into(), + Junctions::X1(xs) => { + let [a] = *xs; + [new, a].into() + }, + Junctions::X2(xs) => { + let [a, b] = *xs; + [new, a, b].into() + }, + Junctions::X3(xs) => { + let [a, b, c] = *xs; + [new, a, b, c].into() + }, + Junctions::X4(xs) => { + let [a, b, c, d] = *xs; + [new, a, b, c, d].into() + }, + Junctions::X5(xs) => { + let [a, b, c, d, e] = *xs; + [new, a, b, c, d, e].into() + }, + Junctions::X6(xs) => { + let [a, b, c, d, e, f] = *xs; + [new, a, b, c, d, e, f].into() + }, + Junctions::X7(xs) => { + let [a, b, c, d, e, f, g] = *xs; + [new, a, b, c, d, e, f, g].into() + }, + s => Err((s, new))?, + }) + } + + /// Mutate `self` so that it is suffixed with `suffix`. + /// + /// Does not modify `self` and returns `Err` with `suffix` in case of overflow. + /// + /// # Example + /// ```rust + /// # use staging_xcm::v5::{Junctions, Junction::*, Location}; + /// # fn main() { + /// let mut m = Junctions::from([Parachain(21)]); + /// assert_eq!(m.append_with([PalletInstance(3)]), Ok(())); + /// assert_eq!(m, [Parachain(21), PalletInstance(3)]); + /// # } + /// ``` + pub fn append_with(&mut self, suffix: impl Into) -> Result<(), Junctions> { + let suffix = suffix.into(); + if self.len().saturating_add(suffix.len()) > MAX_JUNCTIONS { + return Err(suffix) + } + for j in suffix.into_iter() { + self.push(j).expect("Already checked the sum of the len()s; qed") + } + Ok(()) + } + + /// Returns the number of junctions in `self`. + pub fn len(&self) -> usize { + self.as_slice().len() + } + + /// Returns the junction at index `i`, or `None` if the location doesn't contain that many + /// elements. + pub fn at(&self, i: usize) -> Option<&Junction> { + self.as_slice().get(i) + } + + /// Returns a mutable reference to the junction at index `i`, or `None` if the location doesn't + /// contain that many elements. + pub fn at_mut(&mut self, i: usize) -> Option<&mut Junction> { + self.as_slice_mut().get_mut(i) + } + + /// Returns a reference iterator over the junctions. 
+ pub fn iter(&self) -> JunctionsRefIterator { + JunctionsRefIterator { junctions: self, range: 0..self.len() } + } + + /// Ensures that self begins with `prefix` and that it has a single `Junction` item following. + /// If so, returns a reference to this `Junction` item. + /// + /// # Example + /// ```rust + /// # use staging_xcm::v5::{Junctions, Junction::*}; + /// # fn main() { + /// let mut m = Junctions::from([Parachain(2), PalletInstance(3), OnlyChild]); + /// assert_eq!(m.match_and_split(&[Parachain(2), PalletInstance(3)].into()), Some(&OnlyChild)); + /// assert_eq!(m.match_and_split(&[Parachain(2)].into()), None); + /// # } + /// ``` + pub fn match_and_split(&self, prefix: &Junctions) -> Option<&Junction> { + if prefix.len() + 1 != self.len() { + return None + } + for i in 0..prefix.len() { + if prefix.at(i) != self.at(i) { + return None + } + } + return self.at(prefix.len()) + } + + pub fn starts_with(&self, prefix: &Junctions) -> bool { + prefix.len() <= self.len() && prefix.iter().zip(self.iter()).all(|(x, y)| x == y) + } +} + +impl TryFrom for Junctions { + type Error = Location; + fn try_from(x: Location) -> result::Result { + if x.parent_count() > 0 { + Err(x) + } else { + Ok(x.interior().clone()) + } + } +} + +impl> From for Junctions { + fn from(x: T) -> Self { + [x.into()].into() + } +} + +impl From<[Junction; 0]> for Junctions { + fn from(_: [Junction; 0]) -> Self { + Self::Here + } +} + +impl From<()> for Junctions { + fn from(_: ()) -> Self { + Self::Here + } +} + +xcm_procedural::impl_conversion_functions_for_junctions_v5!(); + +#[cfg(test)] +mod tests { + use super::{super::prelude::*, *}; + + #[test] + fn inverting_works() { + let context: InteriorLocation = (Parachain(1000), PalletInstance(42)).into(); + let target = (Parent, PalletInstance(69)).into(); + let expected = (Parent, PalletInstance(42)).into(); + let inverted = context.invert_target(&target).unwrap(); + assert_eq!(inverted, expected); + + let context: InteriorLocation = + (Parachain(1000), PalletInstance(42), GeneralIndex(1)).into(); + let target = (Parent, Parent, PalletInstance(69), GeneralIndex(2)).into(); + let expected = (Parent, Parent, PalletInstance(42), GeneralIndex(1)).into(); + let inverted = context.invert_target(&target).unwrap(); + assert_eq!(inverted, expected); + } + + #[test] + fn relative_to_works() { + use NetworkId::*; + assert_eq!( + Junctions::from([Polkadot.into()]).relative_to(&Junctions::from([Kusama.into()])), + (Parent, Polkadot).into() + ); + let base = Junctions::from([Kusama.into(), Parachain(1), PalletInstance(1)]); + + // Ancestors. + assert_eq!(Here.relative_to(&base), (Parent, Parent, Parent).into()); + assert_eq!(Junctions::from([Kusama.into()]).relative_to(&base), (Parent, Parent).into()); + assert_eq!( + Junctions::from([Kusama.into(), Parachain(1)]).relative_to(&base), + (Parent,).into() + ); + assert_eq!( + Junctions::from([Kusama.into(), Parachain(1), PalletInstance(1)]).relative_to(&base), + Here.into() + ); + + // Ancestors with one child. 
+ assert_eq!( + Junctions::from([Polkadot.into()]).relative_to(&base), + (Parent, Parent, Parent, Polkadot).into() + ); + assert_eq!( + Junctions::from([Kusama.into(), Parachain(2)]).relative_to(&base), + (Parent, Parent, Parachain(2)).into() + ); + assert_eq!( + Junctions::from([Kusama.into(), Parachain(1), PalletInstance(2)]).relative_to(&base), + (Parent, PalletInstance(2)).into() + ); + assert_eq!( + Junctions::from([Kusama.into(), Parachain(1), PalletInstance(1), [1u8; 32].into()]) + .relative_to(&base), + ([1u8; 32],).into() + ); + + // Ancestors with grandchildren. + assert_eq!( + Junctions::from([Polkadot.into(), Parachain(1)]).relative_to(&base), + (Parent, Parent, Parent, Polkadot, Parachain(1)).into() + ); + assert_eq!( + Junctions::from([Kusama.into(), Parachain(2), PalletInstance(1)]).relative_to(&base), + (Parent, Parent, Parachain(2), PalletInstance(1)).into() + ); + assert_eq!( + Junctions::from([Kusama.into(), Parachain(1), PalletInstance(2), [1u8; 32].into()]) + .relative_to(&base), + (Parent, PalletInstance(2), [1u8; 32]).into() + ); + assert_eq!( + Junctions::from([ + Kusama.into(), + Parachain(1), + PalletInstance(1), + [1u8; 32].into(), + 1u128.into() + ]) + .relative_to(&base), + ([1u8; 32], 1u128).into() + ); + } + + #[test] + fn global_consensus_works() { + use NetworkId::*; + assert_eq!(Junctions::from([Polkadot.into()]).global_consensus(), Ok(Polkadot)); + assert_eq!(Junctions::from([Kusama.into(), 1u64.into()]).global_consensus(), Ok(Kusama)); + assert_eq!(Here.global_consensus(), Err(())); + assert_eq!(Junctions::from([1u64.into()]).global_consensus(), Err(())); + assert_eq!(Junctions::from([1u64.into(), Kusama.into()]).global_consensus(), Err(())); + } + + #[test] + fn test_conversion() { + use super::{Junction::*, NetworkId::*}; + let x: Junctions = GlobalConsensus(Polkadot).into(); + assert_eq!(x, Junctions::from([GlobalConsensus(Polkadot)])); + let x: Junctions = Polkadot.into(); + assert_eq!(x, Junctions::from([GlobalConsensus(Polkadot)])); + let x: Junctions = (Polkadot, Kusama).into(); + assert_eq!(x, Junctions::from([GlobalConsensus(Polkadot), GlobalConsensus(Kusama)])); + } + + #[test] + fn encode_decode_junctions_works() { + let original = Junctions::from([ + Polkadot.into(), + Kusama.into(), + 1u64.into(), + GlobalConsensus(Polkadot), + Parachain(123), + PalletInstance(45), + ]); + let encoded = original.encode(); + assert_eq!(encoded, &[6, 9, 2, 9, 3, 2, 0, 4, 9, 2, 0, 237, 1, 4, 45]); + let decoded = Junctions::decode(&mut &encoded[..]).unwrap(); + assert_eq!(decoded, original); + } +} diff --git a/polkadot/xcm/src/v5/location.rs b/polkadot/xcm/src/v5/location.rs new file mode 100644 index 000000000000..adaa866a4cc8 --- /dev/null +++ b/polkadot/xcm/src/v5/location.rs @@ -0,0 +1,755 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! XCM `Location` datatype. 
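The file below defines the v5 `Location` type that pairs a parent count with the `Junctions` path introduced above. As a quick orientation, here is a minimal usage sketch; it is not part of the patch, only exercises APIs defined in this diff, and borrows the `staging_xcm` crate path and a standalone `main` from the rustdoc examples used elsewhere in these files.

```rust
// Illustrative sketch only; all APIs shown are introduced by this diff.
use staging_xcm::v5::{Junction::*, Junctions, Location, Parent};

fn main() {
	// A sibling parachain's pallet, as seen from another parachain:
	// one parent hop, then two interior junctions.
	let mut loc = Location::new(1, [Parachain(1000), PalletInstance(50)]);
	assert_eq!(loc.parent_count(), 1);

	// `unpack` exposes `(parents, junctions)` for pattern matching.
	match loc.unpack() {
		(1, [Parachain(id), ..]) => assert_eq!(*id, 1000),
		_ => panic!("unexpected shape"),
	}

	// Interior junctions may be appended while the total stays within 8.
	assert!(loc.append_with([GeneralIndex(42)]).is_ok());
	assert_eq!(
		loc,
		Location::new(1, [Parachain(1000), PalletInstance(50), GeneralIndex(42)])
	);

	// `Parent` converts directly into a one-parent, empty-interior `Location`.
	let parent: Location = Parent.into();
	assert_eq!(parent, Location::new(1, Junctions::Here));

	// Wrapping into the versioned container tags the value as v5.
	let versioned = parent.into_versioned();
	assert_eq!(versioned, staging_xcm::VersionedLocation::V5(Location::parent()));
}
```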
+ +use super::{traits::Reanchorable, Junction, Junctions}; +use crate::{v4::Location as OldLocation, VersionedLocation}; +use codec::{Decode, Encode, MaxEncodedLen}; +use core::result; +use scale_info::TypeInfo; + +/// A relative path between state-bearing consensus systems. +/// +/// A location in a consensus system is defined as an *isolatable state machine* held within global +/// consensus. The location in question need not have a sophisticated consensus algorithm of its +/// own; a single account within Ethereum, for example, could be considered a location. +/// +/// A very-much non-exhaustive list of types of location include: +/// - A (normal, layer-1) block chain, e.g. the Bitcoin mainnet or a parachain. +/// - A layer-0 super-chain, e.g. the Polkadot Relay chain. +/// - A layer-2 smart contract, e.g. an ERC-20 on Ethereum. +/// - A logical functional component of a chain, e.g. a single instance of a pallet on a Frame-based +/// Substrate chain. +/// - An account. +/// +/// A `Location` is a *relative identifier*, meaning that it can only be used to define the +/// relative path between two locations, and cannot generally be used to refer to a location +/// universally. It is comprised of an integer number of parents specifying the number of times to +/// "escape" upwards into the containing consensus system and then a number of *junctions*, each +/// diving down and specifying some interior portion of state (which may be considered a +/// "sub-consensus" system). +/// +/// This specific `Location` implementation uses a `Junctions` datatype which is a Rust `enum` +/// in order to make pattern matching easier. There are occasions where it is important to ensure +/// that a value is strictly an interior location, in those cases, `Junctions` may be used. +/// +/// The `Location` value of `Null` simply refers to the interpreting consensus system. +#[derive( + Clone, + Decode, + Encode, + Eq, + PartialEq, + Ord, + PartialOrd, + Debug, + TypeInfo, + MaxEncodedLen, + serde::Serialize, + serde::Deserialize, +)] +pub struct Location { + /// The number of parent junctions at the beginning of this `Location`. + pub parents: u8, + /// The interior (i.e. non-parent) junctions that this `Location` contains. + pub interior: Junctions, +} + +impl Default for Location { + fn default() -> Self { + Self { parents: 0, interior: Junctions::Here } + } +} + +/// A relative location which is constrained to be an interior location of the context. +/// +/// See also `Location`. +pub type InteriorLocation = Junctions; + +impl Location { + /// Creates a new `Location` with the given number of parents and interior junctions. + pub fn new(parents: u8, interior: impl Into) -> Location { + Location { parents, interior: interior.into() } + } + + /// Consume `self` and return the equivalent `VersionedLocation` value. + pub const fn into_versioned(self) -> VersionedLocation { + VersionedLocation::V5(self) + } + + /// Creates a new `Location` with 0 parents and a `Here` interior. + /// + /// The resulting `Location` can be interpreted as the "current consensus system". + pub const fn here() -> Location { + Location { parents: 0, interior: Junctions::Here } + } + + /// Creates a new `Location` which evaluates to the parent context. + pub const fn parent() -> Location { + Location { parents: 1, interior: Junctions::Here } + } + + /// Creates a new `Location` with `parents` and an empty (`Here`) interior. 
+ pub const fn ancestor(parents: u8) -> Location { + Location { parents, interior: Junctions::Here } + } + + /// Whether the `Location` has no parents and has a `Here` interior. + pub fn is_here(&self) -> bool { + self.parents == 0 && self.interior.len() == 0 + } + + /// Remove the `NetworkId` value in any interior `Junction`s. + pub fn remove_network_id(&mut self) { + self.interior.remove_network_id(); + } + + /// Return a reference to the interior field. + pub fn interior(&self) -> &Junctions { + &self.interior + } + + /// Return a mutable reference to the interior field. + pub fn interior_mut(&mut self) -> &mut Junctions { + &mut self.interior + } + + /// Returns the number of `Parent` junctions at the beginning of `self`. + pub const fn parent_count(&self) -> u8 { + self.parents + } + + /// Returns the parent count and the interior [`Junctions`] as a tuple. + /// + /// To be used when pattern matching, for example: + /// + /// ```rust + /// # use staging_xcm::v5::{Junctions::*, Junction::*, Location}; + /// fn get_parachain_id(loc: &Location) -> Option { + /// match loc.unpack() { + /// (0, [Parachain(id)]) => Some(*id), + /// _ => None + /// } + /// } + /// ``` + pub fn unpack(&self) -> (u8, &[Junction]) { + (self.parents, self.interior.as_slice()) + } + + /// Returns boolean indicating whether `self` contains only the specified amount of + /// parents and no interior junctions. + pub const fn contains_parents_only(&self, count: u8) -> bool { + matches!(self.interior, Junctions::Here) && self.parents == count + } + + /// Returns the number of parents and junctions in `self`. + pub fn len(&self) -> usize { + self.parent_count() as usize + self.interior.len() + } + + /// Returns the first interior junction, or `None` if the location is empty or contains only + /// parents. + pub fn first_interior(&self) -> Option<&Junction> { + self.interior.first() + } + + /// Returns last junction, or `None` if the location is empty or contains only parents. + pub fn last(&self) -> Option<&Junction> { + self.interior.last() + } + + /// Splits off the first interior junction, returning the remaining suffix (first item in tuple) + /// and the first element (second item in tuple) or `None` if it was empty. + pub fn split_first_interior(self) -> (Location, Option) { + let Location { parents, interior: junctions } = self; + let (suffix, first) = junctions.split_first(); + let location = Location { parents, interior: suffix }; + (location, first) + } + + /// Splits off the last interior junction, returning the remaining prefix (first item in tuple) + /// and the last element (second item in tuple) or `None` if it was empty or if `self` only + /// contains parents. + pub fn split_last_interior(self) -> (Location, Option) { + let Location { parents, interior: junctions } = self; + let (prefix, last) = junctions.split_last(); + let location = Location { parents, interior: prefix }; + (location, last) + } + + /// Mutates `self`, suffixing its interior junctions with `new`. Returns `Err` with `new` in + /// case of overflow. + pub fn push_interior(&mut self, new: impl Into) -> result::Result<(), Junction> { + self.interior.push(new) + } + + /// Mutates `self`, prefixing its interior junctions with `new`. Returns `Err` with `new` in + /// case of overflow. 
+ pub fn push_front_interior( + &mut self, + new: impl Into, + ) -> result::Result<(), Junction> { + self.interior.push_front(new) + } + + /// Consumes `self` and returns a `Location` suffixed with `new`, or an `Err` with + /// the original value of `self` in case of overflow. + pub fn pushed_with_interior( + self, + new: impl Into, + ) -> result::Result { + match self.interior.pushed_with(new) { + Ok(i) => Ok(Location { interior: i, parents: self.parents }), + Err((i, j)) => Err((Location { interior: i, parents: self.parents }, j)), + } + } + + /// Consumes `self` and returns a `Location` prefixed with `new`, or an `Err` with the + /// original value of `self` in case of overflow. + pub fn pushed_front_with_interior( + self, + new: impl Into, + ) -> result::Result { + match self.interior.pushed_front_with(new) { + Ok(i) => Ok(Location { interior: i, parents: self.parents }), + Err((i, j)) => Err((Location { interior: i, parents: self.parents }, j)), + } + } + + /// Returns the junction at index `i`, or `None` if the location is a parent or if the location + /// does not contain that many elements. + pub fn at(&self, i: usize) -> Option<&Junction> { + let num_parents = self.parents as usize; + if i < num_parents { + return None + } + self.interior.at(i - num_parents) + } + + /// Returns a mutable reference to the junction at index `i`, or `None` if the location is a + /// parent or if it doesn't contain that many elements. + pub fn at_mut(&mut self, i: usize) -> Option<&mut Junction> { + let num_parents = self.parents as usize; + if i < num_parents { + return None + } + self.interior.at_mut(i - num_parents) + } + + /// Decrements the parent count by 1. + pub fn dec_parent(&mut self) { + self.parents = self.parents.saturating_sub(1); + } + + /// Removes the first interior junction from `self`, returning it + /// (or `None` if it was empty or if `self` contains only parents). + pub fn take_first_interior(&mut self) -> Option { + self.interior.take_first() + } + + /// Removes the last element from `interior`, returning it (or `None` if it was empty or if + /// `self` only contains parents). + pub fn take_last(&mut self) -> Option { + self.interior.take_last() + } + + /// Ensures that `self` has the same number of parents as `prefix`, its junctions begins with + /// the junctions of `prefix` and that it has a single `Junction` item following. + /// If so, returns a reference to this `Junction` item. + /// + /// # Example + /// ```rust + /// # use staging_xcm::v5::{Junctions::*, Junction::*, Location}; + /// # fn main() { + /// let mut m = Location::new(1, [PalletInstance(3), OnlyChild]); + /// assert_eq!( + /// m.match_and_split(&Location::new(1, [PalletInstance(3)])), + /// Some(&OnlyChild), + /// ); + /// assert_eq!(m.match_and_split(&Location::new(1, Here)), None); + /// # } + /// ``` + pub fn match_and_split(&self, prefix: &Location) -> Option<&Junction> { + if self.parents != prefix.parents { + return None + } + self.interior.match_and_split(&prefix.interior) + } + + pub fn starts_with(&self, prefix: &Location) -> bool { + self.parents == prefix.parents && self.interior.starts_with(&prefix.interior) + } + + /// Mutate `self` so that it is suffixed with `suffix`. + /// + /// Does not modify `self` and returns `Err` with `suffix` in case of overflow. 
+ /// + /// # Example + /// ```rust + /// # use staging_xcm::v5::{Junctions::*, Junction::*, Location, Parent}; + /// # fn main() { + /// let mut m: Location = (Parent, Parachain(21), 69u64).into(); + /// assert_eq!(m.append_with((Parent, PalletInstance(3))), Ok(())); + /// assert_eq!(m, Location::new(1, [Parachain(21), PalletInstance(3)])); + /// # } + /// ``` + pub fn append_with(&mut self, suffix: impl Into) -> Result<(), Self> { + let prefix = core::mem::replace(self, suffix.into()); + match self.prepend_with(prefix) { + Ok(()) => Ok(()), + Err(prefix) => Err(core::mem::replace(self, prefix)), + } + } + + /// Consume `self` and return its value suffixed with `suffix`. + /// + /// Returns `Err` with the original value of `self` and `suffix` in case of overflow. + /// + /// # Example + /// ```rust + /// # use staging_xcm::v5::{Junctions::*, Junction::*, Location, Parent}; + /// # fn main() { + /// let mut m: Location = (Parent, Parachain(21), 69u64).into(); + /// let r = m.appended_with((Parent, PalletInstance(3))).unwrap(); + /// assert_eq!(r, Location::new(1, [Parachain(21), PalletInstance(3)])); + /// # } + /// ``` + pub fn appended_with(mut self, suffix: impl Into) -> Result { + match self.append_with(suffix) { + Ok(()) => Ok(self), + Err(suffix) => Err((self, suffix)), + } + } + + /// Mutate `self` so that it is prefixed with `prefix`. + /// + /// Does not modify `self` and returns `Err` with `prefix` in case of overflow. + /// + /// # Example + /// ```rust + /// # use staging_xcm::v5::{Junctions::*, Junction::*, Location, Parent}; + /// # fn main() { + /// let mut m: Location = (Parent, Parent, PalletInstance(3)).into(); + /// assert_eq!(m.prepend_with((Parent, Parachain(21), OnlyChild)), Ok(())); + /// assert_eq!(m, Location::new(1, [PalletInstance(3)])); + /// # } + /// ``` + pub fn prepend_with(&mut self, prefix: impl Into) -> Result<(), Self> { + // prefix self (suffix) + // P .. P I .. I p .. p i .. i + let mut prefix = prefix.into(); + let prepend_interior = prefix.interior.len().saturating_sub(self.parents as usize); + let final_interior = self.interior.len().saturating_add(prepend_interior); + if final_interior > super::junctions::MAX_JUNCTIONS { + return Err(prefix) + } + let suffix_parents = (self.parents as usize).saturating_sub(prefix.interior.len()); + let final_parents = (prefix.parents as usize).saturating_add(suffix_parents); + if final_parents > 255 { + return Err(prefix) + } + + // cancel out the final item on the prefix interior for one of the suffix's parents. + while self.parents > 0 && prefix.take_last().is_some() { + self.dec_parent(); + } + + // now we have either removed all suffix's parents or prefix interior. + // this means we can combine the prefix's and suffix's remaining parents/interior since + // we know that with at least one empty, the overall order will be respected: + // prefix self (suffix) + // P .. P (I) p .. p i .. i => P + p .. (no I) i + // -- or -- + // P .. P I .. I (p) i .. i => P (no p) .. I + i + + self.parents = self.parents.saturating_add(prefix.parents); + for j in prefix.interior.into_iter().rev() { + self.push_front_interior(j) + .expect("final_interior no greater than MAX_JUNCTIONS; qed"); + } + Ok(()) + } + + /// Consume `self` and return its value prefixed with `prefix`. + /// + /// Returns `Err` with the original value of `self` and `prefix` in case of overflow. 
+ /// + /// # Example + /// ```rust + /// # use staging_xcm::v5::{Junctions::*, Junction::*, Location, Parent}; + /// # fn main() { + /// let m: Location = (Parent, Parent, PalletInstance(3)).into(); + /// let r = m.prepended_with((Parent, Parachain(21), OnlyChild)).unwrap(); + /// assert_eq!(r, Location::new(1, [PalletInstance(3)])); + /// # } + /// ``` + pub fn prepended_with(mut self, prefix: impl Into) -> Result { + match self.prepend_with(prefix) { + Ok(()) => Ok(self), + Err(prefix) => Err((self, prefix)), + } + } + + /// Remove any unneeded parents/junctions in `self` based on the given context it will be + /// interpreted in. + pub fn simplify(&mut self, context: &Junctions) { + if context.len() < self.parents as usize { + // Not enough context + return + } + while self.parents > 0 { + let maybe = context.at(context.len() - (self.parents as usize)); + match (self.interior.first(), maybe) { + (Some(i), Some(j)) if i == j => { + self.interior.take_first(); + self.parents -= 1; + }, + _ => break, + } + } + } + + /// Return the Location subsection identifying the chain that `self` points to. + pub fn chain_location(&self) -> Location { + let mut clone = self.clone(); + // start popping junctions until we reach chain identifier + while let Some(j) = clone.last() { + if matches!(j, Junction::Parachain(_) | Junction::GlobalConsensus(_)) { + // return chain subsection + return clone + } else { + (clone, _) = clone.split_last_interior(); + } + } + Location::new(clone.parents, Junctions::Here) + } +} + +impl Reanchorable for Location { + type Error = Self; + + /// Mutate `self` so that it represents the same location from the point of view of `target`. + /// The context of `self` is provided as `context`. + /// + /// Does not modify `self` in case of overflow. + fn reanchor(&mut self, target: &Location, context: &InteriorLocation) -> Result<(), ()> { + // TODO: https://github.com/paritytech/polkadot/issues/4489 Optimize this. + + // 1. Use our `context` to figure out how the `target` would address us. + let inverted_target = context.invert_target(target)?; + + // 2. Prepend `inverted_target` to `self` to get self's location from the perspective of + // `target`. + self.prepend_with(inverted_target).map_err(|_| ())?; + + // 3. Given that we know some of `target` context, ensure that any parents in `self` are + // strictly needed. + self.simplify(target.interior()); + + Ok(()) + } + + /// Consume `self` and return a new value representing the same location from the point of view + /// of `target`. The context of `self` is provided as `context`. + /// + /// Returns the original `self` in case of overflow. + fn reanchored(mut self, target: &Location, context: &InteriorLocation) -> Result { + match self.reanchor(target, context) { + Ok(()) => Ok(self), + Err(()) => Err(self), + } + } +} + +impl TryFrom for Option { + type Error = (); + fn try_from(value: OldLocation) -> result::Result { + Ok(Some(Location::try_from(value)?)) + } +} + +impl TryFrom for Location { + type Error = (); + fn try_from(x: OldLocation) -> result::Result { + Ok(Location { parents: x.parents, interior: x.interior.try_into()? }) + } +} + +/// A unit struct which can be converted into a `Location` of `parents` value 1. 
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct Parent; +impl From for Location { + fn from(_: Parent) -> Self { + Location { parents: 1, interior: Junctions::Here } + } +} + +/// A tuple struct which can be converted into a `Location` of `parents` value 1 with the inner +/// interior. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct ParentThen(pub Junctions); +impl From for Location { + fn from(ParentThen(interior): ParentThen) -> Self { + Location { parents: 1, interior } + } +} + +/// A unit struct which can be converted into a `Location` of the inner `parents` value. +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct Ancestor(pub u8); +impl From for Location { + fn from(Ancestor(parents): Ancestor) -> Self { + Location { parents, interior: Junctions::Here } + } +} + +/// A unit struct which can be converted into a `Location` of the inner `parents` value and the +/// inner interior. +#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug)] +pub struct AncestorThen(pub u8, pub Interior); +impl> From> for Location { + fn from(AncestorThen(parents, interior): AncestorThen) -> Self { + Location { parents, interior: interior.into() } + } +} + +impl From<[u8; 32]> for Location { + fn from(bytes: [u8; 32]) -> Self { + let junction: Junction = bytes.into(); + junction.into() + } +} + +impl From for Location { + fn from(id: sp_runtime::AccountId32) -> Self { + Junction::AccountId32 { network: None, id: id.into() }.into() + } +} + +xcm_procedural::impl_conversion_functions_for_location_v5!(); + +#[cfg(test)] +mod tests { + use crate::v5::prelude::*; + use codec::{Decode, Encode}; + + #[test] + fn conversion_works() { + let x: Location = Parent.into(); + assert_eq!(x, Location { parents: 1, interior: Here }); + // let x: Location = (Parent,).into(); + // assert_eq!(x, Location { parents: 1, interior: Here }); + // let x: Location = (Parent, Parent).into(); + // assert_eq!(x, Location { parents: 2, interior: Here }); + let x: Location = (Parent, Parent, OnlyChild).into(); + assert_eq!(x, Location { parents: 2, interior: OnlyChild.into() }); + let x: Location = OnlyChild.into(); + assert_eq!(x, Location { parents: 0, interior: OnlyChild.into() }); + let x: Location = (OnlyChild,).into(); + assert_eq!(x, Location { parents: 0, interior: OnlyChild.into() }); + } + + #[test] + fn simplify_basic_works() { + let mut location: Location = + (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); + let context = [Parachain(1000), PalletInstance(42)].into(); + let expected = GeneralIndex(69).into(); + location.simplify(&context); + assert_eq!(location, expected); + + let mut location: Location = (Parent, PalletInstance(42), GeneralIndex(69)).into(); + let context = [PalletInstance(42)].into(); + let expected = GeneralIndex(69).into(); + location.simplify(&context); + assert_eq!(location, expected); + + let mut location: Location = (Parent, PalletInstance(42), GeneralIndex(69)).into(); + let context = [Parachain(1000), PalletInstance(42)].into(); + let expected = GeneralIndex(69).into(); + location.simplify(&context); + assert_eq!(location, expected); + + let mut location: Location = + (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); + let context = [OnlyChild, Parachain(1000), PalletInstance(42)].into(); + let expected = GeneralIndex(69).into(); + location.simplify(&context); + assert_eq!(location, expected); + } + + #[test] + fn simplify_incompatible_location_fails() { + let mut 
location: Location = + (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); + let context = [Parachain(1000), PalletInstance(42), GeneralIndex(42)].into(); + let expected = + (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); + location.simplify(&context); + assert_eq!(location, expected); + + let mut location: Location = + (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); + let context = [Parachain(1000)].into(); + let expected = + (Parent, Parent, Parachain(1000), PalletInstance(42), GeneralIndex(69)).into(); + location.simplify(&context); + assert_eq!(location, expected); + } + + #[test] + fn reanchor_works() { + let mut id: Location = (Parent, Parachain(1000), GeneralIndex(42)).into(); + let context = Parachain(2000).into(); + let target = (Parent, Parachain(1000)).into(); + let expected = GeneralIndex(42).into(); + id.reanchor(&target, &context).unwrap(); + assert_eq!(id, expected); + } + + #[test] + fn encode_and_decode_works() { + let m = Location { + parents: 1, + interior: [Parachain(42), AccountIndex64 { network: None, index: 23 }].into(), + }; + let encoded = m.encode(); + assert_eq!(encoded, [1, 2, 0, 168, 2, 0, 92].to_vec()); + let decoded = Location::decode(&mut &encoded[..]); + assert_eq!(decoded, Ok(m)); + } + + #[test] + fn match_and_split_works() { + let m = Location { + parents: 1, + interior: [Parachain(42), AccountIndex64 { network: None, index: 23 }].into(), + }; + assert_eq!(m.match_and_split(&Location { parents: 1, interior: Here }), None); + assert_eq!( + m.match_and_split(&Location { parents: 1, interior: [Parachain(42)].into() }), + Some(&AccountIndex64 { network: None, index: 23 }) + ); + assert_eq!(m.match_and_split(&m), None); + } + + #[test] + fn append_with_works() { + let acc = AccountIndex64 { network: None, index: 23 }; + let mut m = Location { parents: 1, interior: [Parachain(42)].into() }; + assert_eq!(m.append_with([PalletInstance(3), acc]), Ok(())); + assert_eq!( + m, + Location { parents: 1, interior: [Parachain(42), PalletInstance(3), acc].into() } + ); + + // cannot append to create overly long location + let acc = AccountIndex64 { network: None, index: 23 }; + let m = Location { + parents: 254, + interior: [Parachain(42), OnlyChild, OnlyChild, OnlyChild, OnlyChild].into(), + }; + let suffix: Location = (PalletInstance(3), acc, OnlyChild, OnlyChild).into(); + assert_eq!(m.clone().append_with(suffix.clone()), Err(suffix)); + } + + #[test] + fn prepend_with_works() { + let mut m = Location { + parents: 1, + interior: [Parachain(42), AccountIndex64 { network: None, index: 23 }].into(), + }; + assert_eq!(m.prepend_with(Location { parents: 1, interior: [OnlyChild].into() }), Ok(())); + assert_eq!( + m, + Location { + parents: 1, + interior: [Parachain(42), AccountIndex64 { network: None, index: 23 }].into() + } + ); + + // cannot prepend to create overly long location + let mut m = Location { parents: 254, interior: [Parachain(42)].into() }; + let prefix = Location { parents: 2, interior: Here }; + assert_eq!(m.prepend_with(prefix.clone()), Err(prefix)); + + let prefix = Location { parents: 1, interior: Here }; + assert_eq!(m.prepend_with(prefix.clone()), Ok(())); + assert_eq!(m, Location { parents: 255, interior: [Parachain(42)].into() }); + } + + #[test] + fn double_ended_ref_iteration_works() { + let m: Junctions = [Parachain(1000), Parachain(3), PalletInstance(5)].into(); + let mut iter = m.iter(); + + let first = iter.next().unwrap(); + assert_eq!(first, 
&Parachain(1000)); + let third = iter.next_back().unwrap(); + assert_eq!(third, &PalletInstance(5)); + let second = iter.next_back().unwrap(); + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + assert_eq!(second, &Parachain(3)); + + let res = Here + .pushed_with(*first) + .unwrap() + .pushed_with(*second) + .unwrap() + .pushed_with(*third) + .unwrap(); + assert_eq!(m, res); + + // make sure there's no funny business with the 0 indexing + let m = Here; + let mut iter = m.iter(); + + assert_eq!(iter.next(), None); + assert_eq!(iter.next_back(), None); + } + + #[test] + fn conversion_from_other_types_works() { + use crate::v4; + + fn takes_location>(_arg: Arg) {} + + takes_location(Parent); + takes_location(Here); + takes_location([Parachain(42)]); + takes_location((Ancestor(255), PalletInstance(8))); + takes_location((Ancestor(5), Parachain(1), PalletInstance(3))); + takes_location((Ancestor(2), Here)); + takes_location(AncestorThen( + 3, + [Parachain(43), AccountIndex64 { network: None, index: 155 }], + )); + takes_location((Parent, AccountId32 { network: None, id: [0; 32] })); + takes_location((Parent, Here)); + takes_location(ParentThen([Parachain(75)].into())); + takes_location([Parachain(100), PalletInstance(3)]); + + assert_eq!(v4::Location::from(v4::Junctions::Here).try_into(), Ok(Location::here())); + assert_eq!(v4::Location::from(v4::Parent).try_into(), Ok(Location::parent())); + assert_eq!( + v4::Location::from((v4::Parent, v4::Parent, v4::Junction::GeneralIndex(42u128),)) + .try_into(), + Ok(Location { parents: 2, interior: [GeneralIndex(42u128)].into() }), + ); + } +} diff --git a/polkadot/xcm/src/v5/mod.rs b/polkadot/xcm/src/v5/mod.rs new file mode 100644 index 000000000000..6b685384f26f --- /dev/null +++ b/polkadot/xcm/src/v5/mod.rs @@ -0,0 +1,1513 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Version 5 of the Cross-Consensus Message format data structures. 
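For orientation, the snippet below sketches how the `Location` sugar exercised in the tests above is typically used, together with a v4-to-v5 conversion. It is an illustrative example only, not part of this patch: the `staging_xcm` crate path is taken from the `SendXcm` doc example later in this diff, and `location_sugar_sketch` is a made-up helper name.

use staging_xcm::v5::prelude::*;

fn location_sugar_sketch() {
    // `ParentThen` prepends one parent hop to the given interior junctions.
    let sibling: Location = ParentThen([Parachain(1000)].into()).into();
    assert_eq!(sibling, Location::new(1, [Parachain(1000)]));

    // `Ancestor(n)` means `n` parent hops with an empty interior.
    let grandparent: Location = Ancestor(2).into();
    assert_eq!(grandparent, Location { parents: 2, interior: Here });

    // A v4 location converts into v5 via `TryInto`, as `conversion_from_other_types_works` shows.
    let old: staging_xcm::v4::Location = staging_xcm::v4::Parent.into();
    let new: Location = old.try_into().expect("v4 locations are representable in v5");
    assert_eq!(new, Location::parent());
}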
+ +pub use super::v3::GetWeight; +use super::v4::{ + Instruction as OldInstruction, PalletInfo as OldPalletInfo, + QueryResponseInfo as OldQueryResponseInfo, Response as OldResponse, Xcm as OldXcm, +}; +use crate::DoubleEncoded; +use alloc::{vec, vec::Vec}; +use bounded_collections::{parameter_types, BoundedVec}; +use codec::{ + self, decode_vec_with_len, Compact, Decode, Encode, Error as CodecError, Input as CodecInput, + MaxEncodedLen, +}; +use core::{fmt::Debug, result}; +use derivative::Derivative; +use scale_info::TypeInfo; + +mod asset; +mod junction; +pub(crate) mod junctions; +mod location; +mod traits; + +pub use asset::{ + Asset, AssetFilter, AssetId, AssetInstance, Assets, Fungibility, WildAsset, WildFungibility, + MAX_ITEMS_IN_ASSETS, +}; +pub use junction::{BodyId, BodyPart, Junction, NetworkId}; +pub use junctions::Junctions; +pub use location::{Ancestor, AncestorThen, InteriorLocation, Location, Parent, ParentThen}; +pub use traits::{ + send_xcm, validate_send, Error, ExecuteXcm, Outcome, PreparedMessage, Reanchorable, Result, + SendError, SendResult, SendXcm, Weight, XcmHash, +}; +// These parts of XCM v4 are unchanged in XCM v5, and are re-imported here. +pub use super::v4::{MaybeErrorCode, OriginKind, WeightLimit}; + +pub const VERSION: super::Version = 5; + +/// An identifier for a query. +pub type QueryId = u64; + +#[derive(Derivative, Default, Encode, TypeInfo)] +#[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] +#[codec(encode_bound())] +#[codec(decode_bound())] +#[scale_info(bounds(), skip_type_params(Call))] +pub struct Xcm(pub Vec>); + +pub const MAX_INSTRUCTIONS_TO_DECODE: u8 = 100; + +environmental::environmental!(instructions_count: u8); + +impl Decode for Xcm { + fn decode(input: &mut I) -> core::result::Result { + instructions_count::using_once(&mut 0, || { + let number_of_instructions: u32 = >::decode(input)?.into(); + instructions_count::with(|count| { + *count = count.saturating_add(number_of_instructions as u8); + if *count > MAX_INSTRUCTIONS_TO_DECODE { + return Err(CodecError::from("Max instructions exceeded")) + } + Ok(()) + }) + .expect("Called in `using` context and thus can not return `None`; qed")?; + let decoded_instructions = decode_vec_with_len(input, number_of_instructions as usize)?; + Ok(Self(decoded_instructions)) + }) + } +} + +impl Xcm { + /// Create an empty instance. + pub fn new() -> Self { + Self(vec![]) + } + + /// Return `true` if no instructions are held in `self`. + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + /// Return the number of instructions held in `self`. + pub fn len(&self) -> usize { + self.0.len() + } + + /// Return a reference to the inner value. + pub fn inner(&self) -> &[Instruction] { + &self.0 + } + + /// Return a mutable reference to the inner value. + pub fn inner_mut(&mut self) -> &mut Vec> { + &mut self.0 + } + + /// Consume and return the inner value. + pub fn into_inner(self) -> Vec> { + self.0 + } + + /// Return an iterator over references to the items. + pub fn iter(&self) -> impl Iterator> { + self.0.iter() + } + + /// Return an iterator over mutable references to the items. + pub fn iter_mut(&mut self) -> impl Iterator> { + self.0.iter_mut() + } + + /// Consume and return an iterator over the items. + pub fn into_iter(self) -> impl Iterator> { + self.0.into_iter() + } + + /// Consume and either return `self` if it contains some instructions, or if it's empty, then + /// instead return the result of `f`. 
+ pub fn or_else(self, f: impl FnOnce() -> Self) -> Self { + if self.0.is_empty() { + f() + } else { + self + } + } + + /// Return the first instruction, if any. + pub fn first(&self) -> Option<&Instruction> { + self.0.first() + } + + /// Return the last instruction, if any. + pub fn last(&self) -> Option<&Instruction> { + self.0.last() + } + + /// Return the only instruction, contained in `Self`, iff only one exists (`None` otherwise). + pub fn only(&self) -> Option<&Instruction> { + if self.0.len() == 1 { + self.0.first() + } else { + None + } + } + + /// Return the only instruction, contained in `Self`, iff only one exists (returns `self` + /// otherwise). + pub fn into_only(mut self) -> core::result::Result, Self> { + if self.0.len() == 1 { + self.0.pop().ok_or(self) + } else { + Err(self) + } + } +} + +impl From>> for Xcm { + fn from(c: Vec>) -> Self { + Self(c) + } +} + +impl From> for Vec> { + fn from(c: Xcm) -> Self { + c.0 + } +} + +/// A prelude for importing all types typically used when interacting with XCM messages. +pub mod prelude { + mod contents { + pub use super::super::{ + send_xcm, validate_send, Ancestor, AncestorThen, Asset, + AssetFilter::{self, *}, + AssetId, + AssetInstance::{self, *}, + Assets, BodyId, BodyPart, Error as XcmError, ExecuteXcm, + Fungibility::{self, *}, + Instruction::*, + InteriorLocation, + Junction::{self, *}, + Junctions::{self, Here}, + Location, MaybeErrorCode, + NetworkId::{self, *}, + OriginKind, Outcome, PalletInfo, Parent, ParentThen, PreparedMessage, QueryId, + QueryResponseInfo, Reanchorable, Response, Result as XcmResult, SendError, SendResult, + SendXcm, Weight, + WeightLimit::{self, *}, + WildAsset::{self, *}, + WildFungibility::{self, Fungible as WildFungible, NonFungible as WildNonFungible}, + XcmContext, XcmHash, XcmWeightInfo, VERSION as XCM_VERSION, + }; + } + pub use super::{Instruction, Xcm}; + pub use contents::*; + pub mod opaque { + pub use super::{ + super::opaque::{Instruction, Xcm}, + contents::*, + }; + } +} + +parameter_types! { + pub MaxPalletNameLen: u32 = 48; + /// Maximum size of the encoded error code coming from a `Dispatch` result, used for + /// `MaybeErrorCode`. This is not (yet) enforced, so it's just an indication of expectation. + pub MaxDispatchErrorLen: u32 = 128; + pub MaxPalletsInfo: u32 = 64; +} + +#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] +pub struct PalletInfo { + #[codec(compact)] + pub index: u32, + pub name: BoundedVec, + pub module_name: BoundedVec, + #[codec(compact)] + pub major: u32, + #[codec(compact)] + pub minor: u32, + #[codec(compact)] + pub patch: u32, +} + +impl TryInto for PalletInfo { + type Error = (); + + fn try_into(self) -> result::Result { + OldPalletInfo::new( + self.index, + self.name.into_inner(), + self.module_name.into_inner(), + self.major, + self.minor, + self.patch, + ) + .map_err(|_| ()) + } +} + +impl PalletInfo { + pub fn new( + index: u32, + name: Vec, + module_name: Vec, + major: u32, + minor: u32, + patch: u32, + ) -> result::Result { + let name = BoundedVec::try_from(name).map_err(|_| Error::Overflow)?; + let module_name = BoundedVec::try_from(module_name).map_err(|_| Error::Overflow)?; + + Ok(Self { index, name, module_name, major, minor, patch }) + } +} + +/// Response data to a query. +#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo, MaxEncodedLen)] +pub enum Response { + /// No response. Serves as a neutral default. + Null, + /// Some assets. + Assets(Assets), + /// The outcome of an XCM instruction. 
+ ExecutionResult(Option<(u32, Error)>), + /// An XCM version. + Version(super::Version), + /// The index, instance name, pallet name and version of some pallets. + PalletsInfo(BoundedVec), + /// The status of a dispatch attempt using `Transact`. + DispatchResult(MaybeErrorCode), +} + +impl Default for Response { + fn default() -> Self { + Self::Null + } +} + +impl TryFrom for Response { + type Error = (); + + fn try_from(old: OldResponse) -> result::Result { + use OldResponse::*; + Ok(match old { + Null => Self::Null, + Assets(assets) => Self::Assets(assets.try_into()?), + ExecutionResult(result) => + Self::ExecutionResult(result.map(|(num, old_error)| (num, old_error.into()))), + Version(version) => Self::Version(version), + PalletsInfo(pallet_info) => { + let inner = pallet_info + .into_iter() + .map(TryInto::try_into) + .collect::, _>>()?; + Self::PalletsInfo( + BoundedVec::::try_from(inner).map_err(|_| ())?, + ) + }, + DispatchResult(maybe_error) => Self::DispatchResult(maybe_error), + }) + } +} + +/// Information regarding the composition of a query response. +#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug, TypeInfo)] +pub struct QueryResponseInfo { + /// The destination to which the query response message should be send. + pub destination: Location, + /// The `query_id` field of the `QueryResponse` message. + #[codec(compact)] + pub query_id: QueryId, + /// The `max_weight` field of the `QueryResponse` message. + pub max_weight: Weight, +} + +impl TryFrom for QueryResponseInfo { + type Error = (); + + fn try_from(old: OldQueryResponseInfo) -> result::Result { + Ok(Self { + destination: old.destination.try_into()?, + query_id: old.query_id, + max_weight: old.max_weight, + }) + } +} + +/// Contextual data pertaining to a specific list of XCM instructions. +#[derive(Clone, Eq, PartialEq, Encode, Decode, Debug)] +pub struct XcmContext { + /// The current value of the Origin register of the `XCVM`. + pub origin: Option, + /// The identity of the XCM; this may be a hash of its versioned encoding but could also be + /// a high-level identity set by an appropriate barrier. + pub message_id: XcmHash, + /// The current value of the Topic register of the `XCVM`. + pub topic: Option<[u8; 32]>, +} + +impl XcmContext { + /// Constructor which sets the message ID to the supplied parameter and leaves the origin and + /// topic unset. + pub fn with_message_id(message_id: XcmHash) -> XcmContext { + XcmContext { origin: None, message_id, topic: None } + } +} + +/// Cross-Consensus Message: A message from one consensus system to another. +/// +/// Consensus systems that may send and receive messages include blockchains and smart contracts. +/// +/// All messages are delivered from a known *origin*, expressed as a `Location`. +/// +/// This is the inner XCM format and is version-sensitive. Messages are typically passed using the +/// outer XCM format, known as `VersionedXcm`. +#[derive( + Derivative, + Encode, + Decode, + TypeInfo, + xcm_procedural::XcmWeightInfoTrait, + xcm_procedural::Builder, +)] +#[derivative(Clone(bound = ""), Eq(bound = ""), PartialEq(bound = ""), Debug(bound = ""))] +#[codec(encode_bound())] +#[codec(decode_bound())] +#[scale_info(bounds(), skip_type_params(Call))] +pub enum Instruction { + /// Withdraw asset(s) (`assets`) from the ownership of `origin` and place them into the Holding + /// Register. + /// + /// - `assets`: The asset(s) to be withdrawn into holding. + /// + /// Kind: *Command*. 
+ /// + /// Errors: + #[builder(loads_holding)] + WithdrawAsset(Assets), + + /// Asset(s) (`assets`) have been received into the ownership of this system on the `origin` + /// system and equivalent derivatives should be placed into the Holding Register. + /// + /// - `assets`: The asset(s) that are minted into holding. + /// + /// Safety: `origin` must be trusted to have received and be storing `assets` such that they + /// may later be withdrawn should this system send a corresponding message. + /// + /// Kind: *Trusted Indication*. + /// + /// Errors: + #[builder(loads_holding)] + ReserveAssetDeposited(Assets), + + /// Asset(s) (`assets`) have been destroyed on the `origin` system and equivalent assets should + /// be created and placed into the Holding Register. + /// + /// - `assets`: The asset(s) that are minted into the Holding Register. + /// + /// Safety: `origin` must be trusted to have irrevocably destroyed the corresponding `assets` + /// prior as a consequence of sending this message. + /// + /// Kind: *Trusted Indication*. + /// + /// Errors: + #[builder(loads_holding)] + ReceiveTeleportedAsset(Assets), + + /// Respond with information that the local system is expecting. + /// + /// - `query_id`: The identifier of the query that resulted in this message being sent. + /// - `response`: The message content. + /// - `max_weight`: The maximum weight that handling this response should take. + /// - `querier`: The location responsible for the initiation of the response, if there is one. + /// In general this will tend to be the same location as the receiver of this message. NOTE: + /// As usual, this is interpreted from the perspective of the receiving consensus system. + /// + /// Safety: Since this is information only, there are no immediate concerns. However, it should + /// be remembered that even if the Origin behaves reasonably, it can always be asked to make + /// a response to a third-party chain who may or may not be expecting the response. Therefore + /// the `querier` should be checked to match the expected value. + /// + /// Kind: *Information*. + /// + /// Errors: + QueryResponse { + #[codec(compact)] + query_id: QueryId, + response: Response, + max_weight: Weight, + querier: Option, + }, + + /// Withdraw asset(s) (`assets`) from the ownership of `origin` and place equivalent assets + /// under the ownership of `beneficiary`. + /// + /// - `assets`: The asset(s) to be withdrawn. + /// - `beneficiary`: The new owner for the assets. + /// + /// Safety: No concerns. + /// + /// Kind: *Command*. + /// + /// Errors: + TransferAsset { assets: Assets, beneficiary: Location }, + + /// Withdraw asset(s) (`assets`) from the ownership of `origin` and place equivalent assets + /// under the ownership of `dest` within this consensus system (i.e. its sovereign account). + /// + /// Send an onward XCM message to `dest` of `ReserveAssetDeposited` with the given + /// `xcm`. + /// + /// - `assets`: The asset(s) to be withdrawn. + /// - `dest`: The location whose sovereign account will own the assets and thus the effective + /// beneficiary for the assets and the notification target for the reserve asset deposit + /// message. + /// - `xcm`: The instructions that should follow the `ReserveAssetDeposited` instruction, which + /// is sent onwards to `dest`. + /// + /// Safety: No concerns. + /// + /// Kind: *Command*. 
+ /// + /// Errors: + TransferReserveAsset { assets: Assets, dest: Location, xcm: Xcm<()> }, + + /// Apply the encoded transaction `call`, whose dispatch-origin should be `origin` as expressed + /// by the kind of origin `origin_kind`. + /// + /// The Transact Status Register is set according to the result of dispatching the call. + /// + /// - `origin_kind`: The means of expressing the message origin as a dispatch origin. + /// - `require_weight_at_most`: The weight of `call`; this should be at least the chain's + /// calculated weight and will be used in the weight determination arithmetic. + /// - `call`: The encoded transaction to be applied. + /// + /// Safety: No concerns. + /// + /// Kind: *Command*. + /// + /// Errors: + Transact { origin_kind: OriginKind, require_weight_at_most: Weight, call: DoubleEncoded }, + + /// A message to notify about a new incoming HRMP channel. This message is meant to be sent by + /// the relay-chain to a para. + /// + /// - `sender`: The sender in the to-be opened channel. Also, the initiator of the channel + /// opening. + /// - `max_message_size`: The maximum size of a message proposed by the sender. + /// - `max_capacity`: The maximum number of messages that can be queued in the channel. + /// + /// Safety: The message should originate directly from the relay-chain. + /// + /// Kind: *System Notification* + HrmpNewChannelOpenRequest { + #[codec(compact)] + sender: u32, + #[codec(compact)] + max_message_size: u32, + #[codec(compact)] + max_capacity: u32, + }, + + /// A message to notify about that a previously sent open channel request has been accepted by + /// the recipient. That means that the channel will be opened during the next relay-chain + /// session change. This message is meant to be sent by the relay-chain to a para. + /// + /// Safety: The message should originate directly from the relay-chain. + /// + /// Kind: *System Notification* + /// + /// Errors: + HrmpChannelAccepted { + // NOTE: We keep this as a structured item to a) keep it consistent with the other Hrmp + // items; and b) because the field's meaning is not obvious/mentioned from the item name. + #[codec(compact)] + recipient: u32, + }, + + /// A message to notify that the other party in an open channel decided to close it. In + /// particular, `initiator` is going to close the channel opened from `sender` to the + /// `recipient`. The close will be enacted at the next relay-chain session change. This message + /// is meant to be sent by the relay-chain to a para. + /// + /// Safety: The message should originate directly from the relay-chain. + /// + /// Kind: *System Notification* + /// + /// Errors: + HrmpChannelClosing { + #[codec(compact)] + initiator: u32, + #[codec(compact)] + sender: u32, + #[codec(compact)] + recipient: u32, + }, + + /// Clear the origin. + /// + /// This may be used by the XCM author to ensure that later instructions cannot command the + /// authority of the origin (e.g. if they are being relayed from an untrusted source, as often + /// the case with `ReserveAssetDeposited`). + /// + /// Safety: No concerns. + /// + /// Kind: *Command*. + /// + /// Errors: + ClearOrigin, + + /// Mutate the origin to some interior location. + /// + /// Kind: *Command* + /// + /// Errors: + DescendOrigin(InteriorLocation), + + /// Immediately report the contents of the Error Register to the given destination via XCM. + /// + /// A `QueryResponse` message of type `ExecutionOutcome` is sent to the described destination. 
+ /// + /// - `response_info`: Information for making the response. + /// + /// Kind: *Command* + /// + /// Errors: + ReportError(QueryResponseInfo), + + /// Remove the asset(s) (`assets`) from the Holding Register and place equivalent assets under + /// the ownership of `beneficiary` within this consensus system. + /// + /// - `assets`: The asset(s) to remove from holding. + /// - `beneficiary`: The new owner for the assets. + /// + /// Kind: *Command* + /// + /// Errors: + DepositAsset { assets: AssetFilter, beneficiary: Location }, + + /// Remove the asset(s) (`assets`) from the Holding Register and place equivalent assets under + /// the ownership of `dest` within this consensus system (i.e. deposit them into its sovereign + /// account). + /// + /// Send an onward XCM message to `dest` of `ReserveAssetDeposited` with the given `effects`. + /// + /// - `assets`: The asset(s) to remove from holding. + /// - `dest`: The location whose sovereign account will own the assets and thus the effective + /// beneficiary for the assets and the notification target for the reserve asset deposit + /// message. + /// - `xcm`: The orders that should follow the `ReserveAssetDeposited` instruction which is + /// sent onwards to `dest`. + /// + /// Kind: *Command* + /// + /// Errors: + DepositReserveAsset { assets: AssetFilter, dest: Location, xcm: Xcm<()> }, + + /// Remove the asset(s) (`want`) from the Holding Register and replace them with alternative + /// assets. + /// + /// The minimum amount of assets to be received into the Holding Register for the order not to + /// fail may be stated. + /// + /// - `give`: The maximum amount of assets to remove from holding. + /// - `want`: The minimum amount of assets which `give` should be exchanged for. + /// - `maximal`: If `true`, then prefer to give as much as possible up to the limit of `give` + /// and receive accordingly more. If `false`, then prefer to give as little as possible in + /// order to receive as little as possible while receiving at least `want`. + /// + /// Kind: *Command* + /// + /// Errors: + ExchangeAsset { give: AssetFilter, want: Assets, maximal: bool }, + + /// Remove the asset(s) (`assets`) from holding and send a `WithdrawAsset` XCM message to a + /// reserve location. + /// + /// - `assets`: The asset(s) to remove from holding. + /// - `reserve`: A valid location that acts as a reserve for all asset(s) in `assets`. The + /// sovereign account of this consensus system *on the reserve location* will have + /// appropriate assets withdrawn and `effects` will be executed on them. There will typically + /// be only one valid location on any given asset/chain combination. + /// - `xcm`: The instructions to execute on the assets once withdrawn *on the reserve + /// location*. + /// + /// Kind: *Command* + /// + /// Errors: + InitiateReserveWithdraw { assets: AssetFilter, reserve: Location, xcm: Xcm<()> }, + + /// Remove the asset(s) (`assets`) from holding and send a `ReceiveTeleportedAsset` XCM message + /// to a `dest` location. + /// + /// - `assets`: The asset(s) to remove from holding. + /// - `dest`: A valid location that respects teleports coming from this location. + /// - `xcm`: The instructions to execute on the assets once arrived *on the destination + /// location*. + /// + /// NOTE: The `dest` location *MUST* respect this origin as a valid teleportation origin for + /// all `assets`. If it does not, then the assets may be lost. 
+ /// + /// Kind: *Command* + /// + /// Errors: + InitiateTeleport { assets: AssetFilter, dest: Location, xcm: Xcm<()> }, + + /// Report to a given destination the contents of the Holding Register. + /// + /// A `QueryResponse` message of type `Assets` is sent to the described destination. + /// + /// - `response_info`: Information for making the response. + /// - `assets`: A filter for the assets that should be reported back. The assets reported back + /// will be, asset-wise, *the lesser of this value and the holding register*. No wildcards + /// will be used when reporting assets back. + /// + /// Kind: *Command* + /// + /// Errors: + ReportHolding { response_info: QueryResponseInfo, assets: AssetFilter }, + + /// Pay for the execution of some XCM `xcm` and `orders` with up to `weight` + /// picoseconds of execution time, paying for this with up to `fees` from the Holding Register. + /// + /// - `fees`: The asset(s) to remove from the Holding Register to pay for fees. + /// - `weight_limit`: The maximum amount of weight to purchase; this must be at least the + /// expected maximum weight of the total XCM to be executed for the + /// `AllowTopLevelPaidExecutionFrom` barrier to allow the XCM be executed. + /// + /// Kind: *Command* + /// + /// Errors: + #[builder(pays_fees)] + BuyExecution { fees: Asset, weight_limit: WeightLimit }, + + /// Refund any surplus weight previously bought with `BuyExecution`. + /// + /// Kind: *Command* + /// + /// Errors: None. + RefundSurplus, + + /// Set the Error Handler Register. This is code that should be called in the case of an error + /// happening. + /// + /// An error occurring within execution of this code will _NOT_ result in the error register + /// being set, nor will an error handler be called due to it. The error handler and appendix + /// may each still be set. + /// + /// The apparent weight of this instruction is inclusive of the inner `Xcm`; the executing + /// weight however includes only the difference between the previous handler and the new + /// handler, which can reasonably be negative, which would result in a surplus. + /// + /// Kind: *Command* + /// + /// Errors: None. + SetErrorHandler(Xcm), + + /// Set the Appendix Register. This is code that should be called after code execution + /// (including the error handler if any) is finished. This will be called regardless of whether + /// an error occurred. + /// + /// Any error occurring due to execution of this code will result in the error register being + /// set, and the error handler (if set) firing. + /// + /// The apparent weight of this instruction is inclusive of the inner `Xcm`; the executing + /// weight however includes only the difference between the previous appendix and the new + /// appendix, which can reasonably be negative, which would result in a surplus. + /// + /// Kind: *Command* + /// + /// Errors: None. + SetAppendix(Xcm), + + /// Clear the Error Register. + /// + /// Kind: *Command* + /// + /// Errors: None. + ClearError, + + /// Create some assets which are being held on behalf of the origin. + /// + /// - `assets`: The assets which are to be claimed. This must match exactly with the assets + /// claimable by the origin of the ticket. + /// - `ticket`: The ticket of the asset; this is an abstract identifier to help locate the + /// asset. + /// + /// Kind: *Command* + /// + /// Errors: + #[builder(loads_holding)] + ClaimAsset { assets: Assets, ticket: Location }, + + /// Always throws an error of type `Trap`. 
+ /// + /// Kind: *Command* + /// + /// Errors: + /// - `Trap`: All circumstances, whose inner value is the same as this item's inner value. + Trap(#[codec(compact)] u64), + + /// Ask the destination system to respond with the most recent version of XCM that they + /// support in a `QueryResponse` instruction. Any changes to this should also elicit similar + /// responses when they happen. + /// + /// - `query_id`: An identifier that will be replicated into the returned XCM message. + /// - `max_response_weight`: The maximum amount of weight that the `QueryResponse` item which + /// is sent as a reply may take to execute. NOTE: If this is unexpectedly large then the + /// response may not execute at all. + /// + /// Kind: *Command* + /// + /// Errors: *Fallible* + SubscribeVersion { + #[codec(compact)] + query_id: QueryId, + max_response_weight: Weight, + }, + + /// Cancel the effect of a previous `SubscribeVersion` instruction. + /// + /// Kind: *Command* + /// + /// Errors: *Fallible* + UnsubscribeVersion, + + /// Reduce Holding by up to the given assets. + /// + /// Holding is reduced by as much as possible up to the assets in the parameter. It is not an + /// error if the Holding does not contain the assets (to make this an error, use `ExpectAsset` + /// prior). + /// + /// Kind: *Command* + /// + /// Errors: *Infallible* + BurnAsset(Assets), + + /// Throw an error if Holding does not contain at least the given assets. + /// + /// Kind: *Command* + /// + /// Errors: + /// - `ExpectationFalse`: If Holding Register does not contain the assets in the parameter. + ExpectAsset(Assets), + + /// Ensure that the Origin Register equals some given value and throw an error if not. + /// + /// Kind: *Command* + /// + /// Errors: + /// - `ExpectationFalse`: If Origin Register is not equal to the parameter. + ExpectOrigin(Option), + + /// Ensure that the Error Register equals some given value and throw an error if not. + /// + /// Kind: *Command* + /// + /// Errors: + /// - `ExpectationFalse`: If the value of the Error Register is not equal to the parameter. + ExpectError(Option<(u32, Error)>), + + /// Ensure that the Transact Status Register equals some given value and throw an error if + /// not. + /// + /// Kind: *Command* + /// + /// Errors: + /// - `ExpectationFalse`: If the value of the Transact Status Register is not equal to the + /// parameter. + ExpectTransactStatus(MaybeErrorCode), + + /// Query the existence of a particular pallet type. + /// + /// - `module_name`: The module name of the pallet to query. + /// - `response_info`: Information for making the response. + /// + /// Sends a `QueryResponse` to Origin whose data field `PalletsInfo` containing the information + /// of all pallets on the local chain whose name is equal to `name`. This is empty in the case + /// that the local chain is not based on Substrate Frame. + /// + /// Safety: No concerns. + /// + /// Kind: *Command* + /// + /// Errors: *Fallible*. + QueryPallet { module_name: Vec, response_info: QueryResponseInfo }, + + /// Ensure that a particular pallet with a particular version exists. + /// + /// - `index: Compact`: The index which identifies the pallet. An error if no pallet exists at + /// this index. + /// - `name: Vec`: Name which must be equal to the name of the pallet. + /// - `module_name: Vec`: Module name which must be equal to the name of the module in + /// which the pallet exists. 
+ /// - `crate_major: Compact`: Version number which must be equal to the major version of the + /// crate which implements the pallet. + /// - `min_crate_minor: Compact`: Version number which must be at most the minor version of the + /// crate which implements the pallet. + /// + /// Safety: No concerns. + /// + /// Kind: *Command* + /// + /// Errors: + /// - `ExpectationFalse`: In case any of the expectations are broken. + ExpectPallet { + #[codec(compact)] + index: u32, + name: Vec, + module_name: Vec, + #[codec(compact)] + crate_major: u32, + #[codec(compact)] + min_crate_minor: u32, + }, + + /// Send a `QueryResponse` message containing the value of the Transact Status Register to some + /// destination. + /// + /// - `query_response_info`: The information needed for constructing and sending the + /// `QueryResponse` message. + /// + /// Safety: No concerns. + /// + /// Kind: *Command* + /// + /// Errors: *Fallible*. + ReportTransactStatus(QueryResponseInfo), + + /// Set the Transact Status Register to its default, cleared, value. + /// + /// Safety: No concerns. + /// + /// Kind: *Command* + /// + /// Errors: *Infallible*. + ClearTransactStatus, + + /// Set the Origin Register to be some child of the Universal Ancestor. + /// + /// Safety: Should only be usable if the Origin is trusted to represent the Universal Ancestor + /// child in general. In general, no Origin should be able to represent the Universal Ancestor + /// child which is the root of the local consensus system since it would by extension + /// allow it to act as any location within the local consensus. + /// + /// The `Junction` parameter should generally be a `GlobalConsensus` variant since it is only + /// these which are children of the Universal Ancestor. + /// + /// Kind: *Command* + /// + /// Errors: *Fallible*. + UniversalOrigin(Junction), + + /// Send a message on to Non-Local Consensus system. + /// + /// This will tend to utilize some extra-consensus mechanism, the obvious one being a bridge. + /// A fee may be charged; this may be determined based on the contents of `xcm`. It will be + /// taken from the Holding register. + /// + /// - `network`: The remote consensus system to which the message should be exported. + /// - `destination`: The location relative to the remote consensus system to which the message + /// should be sent on arrival. + /// - `xcm`: The message to be exported. + /// + /// As an example, to export a message for execution on Statemine (parachain #1000 in the + /// Kusama network), you would call with `network: NetworkId::Kusama` and + /// `destination: [Parachain(1000)].into()`. Alternatively, to export a message for execution + /// on Polkadot, you would call with `network: NetworkId:: Polkadot` and `destination: Here`. + /// + /// Kind: *Command* + /// + /// Errors: *Fallible*. + ExportMessage { network: NetworkId, destination: InteriorLocation, xcm: Xcm<()> }, + + /// Lock the locally held asset and prevent further transfer or withdrawal. + /// + /// This restriction may be removed by the `UnlockAsset` instruction being called with an + /// Origin of `unlocker` and a `target` equal to the current `Origin`. + /// + /// If the locking is successful, then a `NoteUnlockable` instruction is sent to `unlocker`. + /// + /// - `asset`: The asset(s) which should be locked. + /// - `unlocker`: The value which the Origin must be for a corresponding `UnlockAsset` + /// instruction to work. + /// + /// Kind: *Command*. 
+ /// + /// Errors: + LockAsset { asset: Asset, unlocker: Location }, + + /// Remove the lock over `asset` on this chain and (if nothing else is preventing it) allow the + /// asset to be transferred. + /// + /// - `asset`: The asset to be unlocked. + /// - `target`: The owner of the asset on the local chain. + /// + /// Safety: No concerns. + /// + /// Kind: *Command*. + /// + /// Errors: + UnlockAsset { asset: Asset, target: Location }, + + /// Asset (`asset`) has been locked on the `origin` system and may not be transferred. It may + /// only be unlocked with the receipt of the `UnlockAsset` instruction from this chain. + /// + /// - `asset`: The asset(s) which are now unlockable from this origin. + /// - `owner`: The owner of the asset on the chain in which it was locked. This may be a + /// location specific to the origin network. + /// + /// Safety: `origin` must be trusted to have locked the corresponding `asset` + /// prior as a consequence of sending this message. + /// + /// Kind: *Trusted Indication*. + /// + /// Errors: + NoteUnlockable { asset: Asset, owner: Location }, + + /// Send an `UnlockAsset` instruction to the `locker` for the given `asset`. + /// + /// This may fail if the local system is making use of the fact that the asset is locked or, + /// of course, if there is no record that the asset actually is locked. + /// + /// - `asset`: The asset(s) to be unlocked. + /// - `locker`: The location from which a previous `NoteUnlockable` was sent and to which an + /// `UnlockAsset` should be sent. + /// + /// Kind: *Command*. + /// + /// Errors: + RequestUnlock { asset: Asset, locker: Location }, + + /// Sets the Fees Mode Register. + /// + /// - `jit_withdraw`: The fees mode item; if set to `true` then fees for any instructions are + /// withdrawn as needed using the same mechanism as `WithdrawAssets`. + /// + /// Kind: *Command*. + /// + /// Errors: + SetFeesMode { jit_withdraw: bool }, + + /// Set the Topic Register. + /// + /// The 32-byte array identifier in the parameter is not guaranteed to be + /// unique; if such a property is desired, it is up to the code author to + /// enforce uniqueness. + /// + /// Safety: No concerns. + /// + /// Kind: *Command* + /// + /// Errors: + SetTopic([u8; 32]), + + /// Clear the Topic Register. + /// + /// Kind: *Command* + /// + /// Errors: None. + ClearTopic, + + /// Alter the current Origin to another given origin. + /// + /// Kind: *Command* + /// + /// Errors: If the existing state would not allow such a change. + AliasOrigin(Location), + + /// A directive to indicate that the origin expects free execution of the message. + /// + /// At execution time, this instruction just does a check on the Origin register. + /// However, at the barrier stage messages starting with this instruction can be disregarded if + /// the origin is not acceptable for free execution or the `weight_limit` is `Limited` and + /// insufficient. + /// + /// Kind: *Indication* + /// + /// Errors: If the given origin is `Some` and not equal to the current Origin register. + UnpaidExecution { weight_limit: WeightLimit, check_origin: Option }, + + /// Pay Fees. + /// + /// Successor to `BuyExecution`. + /// Defined in fellowship RFC 105. 
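+ /// - `asset`: The asset to be used to pay for fees.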
+ #[builder(pays_fees)] + PayFees { asset: Asset }, +} + +impl Xcm { + pub fn into(self) -> Xcm { + Xcm::from(self) + } + pub fn from(xcm: Xcm) -> Self { + Self(xcm.0.into_iter().map(Instruction::::from).collect()) + } +} + +impl Instruction { + pub fn into(self) -> Instruction { + Instruction::from(self) + } + pub fn from(xcm: Instruction) -> Self { + use Instruction::*; + match xcm { + WithdrawAsset(assets) => WithdrawAsset(assets), + ReserveAssetDeposited(assets) => ReserveAssetDeposited(assets), + ReceiveTeleportedAsset(assets) => ReceiveTeleportedAsset(assets), + QueryResponse { query_id, response, max_weight, querier } => + QueryResponse { query_id, response, max_weight, querier }, + TransferAsset { assets, beneficiary } => TransferAsset { assets, beneficiary }, + TransferReserveAsset { assets, dest, xcm } => + TransferReserveAsset { assets, dest, xcm }, + HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity } => + HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity }, + HrmpChannelAccepted { recipient } => HrmpChannelAccepted { recipient }, + HrmpChannelClosing { initiator, sender, recipient } => + HrmpChannelClosing { initiator, sender, recipient }, + Transact { origin_kind, require_weight_at_most, call } => + Transact { origin_kind, require_weight_at_most, call: call.into() }, + ReportError(response_info) => ReportError(response_info), + DepositAsset { assets, beneficiary } => DepositAsset { assets, beneficiary }, + DepositReserveAsset { assets, dest, xcm } => DepositReserveAsset { assets, dest, xcm }, + ExchangeAsset { give, want, maximal } => ExchangeAsset { give, want, maximal }, + InitiateReserveWithdraw { assets, reserve, xcm } => + InitiateReserveWithdraw { assets, reserve, xcm }, + InitiateTeleport { assets, dest, xcm } => InitiateTeleport { assets, dest, xcm }, + ReportHolding { response_info, assets } => ReportHolding { response_info, assets }, + BuyExecution { fees, weight_limit } => BuyExecution { fees, weight_limit }, + ClearOrigin => ClearOrigin, + DescendOrigin(who) => DescendOrigin(who), + RefundSurplus => RefundSurplus, + SetErrorHandler(xcm) => SetErrorHandler(xcm.into()), + SetAppendix(xcm) => SetAppendix(xcm.into()), + ClearError => ClearError, + ClaimAsset { assets, ticket } => ClaimAsset { assets, ticket }, + Trap(code) => Trap(code), + SubscribeVersion { query_id, max_response_weight } => + SubscribeVersion { query_id, max_response_weight }, + UnsubscribeVersion => UnsubscribeVersion, + BurnAsset(assets) => BurnAsset(assets), + ExpectAsset(assets) => ExpectAsset(assets), + ExpectOrigin(origin) => ExpectOrigin(origin), + ExpectError(error) => ExpectError(error), + ExpectTransactStatus(transact_status) => ExpectTransactStatus(transact_status), + QueryPallet { module_name, response_info } => + QueryPallet { module_name, response_info }, + ExpectPallet { index, name, module_name, crate_major, min_crate_minor } => + ExpectPallet { index, name, module_name, crate_major, min_crate_minor }, + ReportTransactStatus(response_info) => ReportTransactStatus(response_info), + ClearTransactStatus => ClearTransactStatus, + UniversalOrigin(j) => UniversalOrigin(j), + ExportMessage { network, destination, xcm } => + ExportMessage { network, destination, xcm }, + LockAsset { asset, unlocker } => LockAsset { asset, unlocker }, + UnlockAsset { asset, target } => UnlockAsset { asset, target }, + NoteUnlockable { asset, owner } => NoteUnlockable { asset, owner }, + RequestUnlock { asset, locker } => RequestUnlock { asset, locker }, + SetFeesMode { 
jit_withdraw } => SetFeesMode { jit_withdraw }, + SetTopic(topic) => SetTopic(topic), + ClearTopic => ClearTopic, + AliasOrigin(location) => AliasOrigin(location), + UnpaidExecution { weight_limit, check_origin } => + UnpaidExecution { weight_limit, check_origin }, + PayFees { asset } => PayFees { asset }, + } + } +} + +// TODO: Automate Generation +impl> GetWeight for Instruction { + fn weight(&self) -> Weight { + use Instruction::*; + match self { + WithdrawAsset(assets) => W::withdraw_asset(assets), + ReserveAssetDeposited(assets) => W::reserve_asset_deposited(assets), + ReceiveTeleportedAsset(assets) => W::receive_teleported_asset(assets), + QueryResponse { query_id, response, max_weight, querier } => + W::query_response(query_id, response, max_weight, querier), + TransferAsset { assets, beneficiary } => W::transfer_asset(assets, beneficiary), + TransferReserveAsset { assets, dest, xcm } => + W::transfer_reserve_asset(&assets, dest, xcm), + Transact { origin_kind, require_weight_at_most, call } => + W::transact(origin_kind, require_weight_at_most, call), + HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity } => + W::hrmp_new_channel_open_request(sender, max_message_size, max_capacity), + HrmpChannelAccepted { recipient } => W::hrmp_channel_accepted(recipient), + HrmpChannelClosing { initiator, sender, recipient } => + W::hrmp_channel_closing(initiator, sender, recipient), + ClearOrigin => W::clear_origin(), + DescendOrigin(who) => W::descend_origin(who), + ReportError(response_info) => W::report_error(&response_info), + DepositAsset { assets, beneficiary } => W::deposit_asset(assets, beneficiary), + DepositReserveAsset { assets, dest, xcm } => + W::deposit_reserve_asset(assets, dest, xcm), + ExchangeAsset { give, want, maximal } => W::exchange_asset(give, want, maximal), + InitiateReserveWithdraw { assets, reserve, xcm } => + W::initiate_reserve_withdraw(assets, reserve, xcm), + InitiateTeleport { assets, dest, xcm } => W::initiate_teleport(assets, dest, xcm), + ReportHolding { response_info, assets } => W::report_holding(&response_info, &assets), + BuyExecution { fees, weight_limit } => W::buy_execution(fees, weight_limit), + RefundSurplus => W::refund_surplus(), + SetErrorHandler(xcm) => W::set_error_handler(xcm), + SetAppendix(xcm) => W::set_appendix(xcm), + ClearError => W::clear_error(), + ClaimAsset { assets, ticket } => W::claim_asset(assets, ticket), + Trap(code) => W::trap(code), + SubscribeVersion { query_id, max_response_weight } => + W::subscribe_version(query_id, max_response_weight), + UnsubscribeVersion => W::unsubscribe_version(), + BurnAsset(assets) => W::burn_asset(assets), + ExpectAsset(assets) => W::expect_asset(assets), + ExpectOrigin(origin) => W::expect_origin(origin), + ExpectError(error) => W::expect_error(error), + ExpectTransactStatus(transact_status) => W::expect_transact_status(transact_status), + QueryPallet { module_name, response_info } => + W::query_pallet(module_name, response_info), + ExpectPallet { index, name, module_name, crate_major, min_crate_minor } => + W::expect_pallet(index, name, module_name, crate_major, min_crate_minor), + ReportTransactStatus(response_info) => W::report_transact_status(response_info), + ClearTransactStatus => W::clear_transact_status(), + UniversalOrigin(j) => W::universal_origin(j), + ExportMessage { network, destination, xcm } => + W::export_message(network, destination, xcm), + LockAsset { asset, unlocker } => W::lock_asset(asset, unlocker), + UnlockAsset { asset, target } => W::unlock_asset(asset, 
target), + NoteUnlockable { asset, owner } => W::note_unlockable(asset, owner), + RequestUnlock { asset, locker } => W::request_unlock(asset, locker), + SetFeesMode { jit_withdraw } => W::set_fees_mode(jit_withdraw), + SetTopic(topic) => W::set_topic(topic), + ClearTopic => W::clear_topic(), + AliasOrigin(location) => W::alias_origin(location), + UnpaidExecution { weight_limit, check_origin } => + W::unpaid_execution(weight_limit, check_origin), + PayFees { asset } => W::pay_fees(asset), + } + } +} + +pub mod opaque { + /// The basic concrete type of `Xcm`, which doesn't make any assumptions about the + /// format of a call other than it is pre-encoded. + pub type Xcm = super::Xcm<()>; + + /// The basic concrete type of `Instruction`, which doesn't make any assumptions about the + /// format of a call other than it is pre-encoded. + pub type Instruction = super::Instruction<()>; +} + +// Convert from a v4 XCM to a v5 XCM +impl TryFrom> for Xcm { + type Error = (); + fn try_from(old_xcm: OldXcm) -> result::Result { + Ok(Xcm(old_xcm.0.into_iter().map(TryInto::try_into).collect::>()?)) + } +} + +// Convert from a v4 instruction to a v5 instruction +impl TryFrom> for Instruction { + type Error = (); + fn try_from(old_instruction: OldInstruction) -> result::Result { + use OldInstruction::*; + Ok(match old_instruction { + WithdrawAsset(assets) => Self::WithdrawAsset(assets.try_into()?), + ReserveAssetDeposited(assets) => Self::ReserveAssetDeposited(assets.try_into()?), + ReceiveTeleportedAsset(assets) => Self::ReceiveTeleportedAsset(assets.try_into()?), + QueryResponse { query_id, response, max_weight, querier: Some(querier) } => + Self::QueryResponse { + query_id, + querier: querier.try_into()?, + response: response.try_into()?, + max_weight, + }, + QueryResponse { query_id, response, max_weight, querier: None } => + Self::QueryResponse { + query_id, + querier: None, + response: response.try_into()?, + max_weight, + }, + TransferAsset { assets, beneficiary } => Self::TransferAsset { + assets: assets.try_into()?, + beneficiary: beneficiary.try_into()?, + }, + TransferReserveAsset { assets, dest, xcm } => Self::TransferReserveAsset { + assets: assets.try_into()?, + dest: dest.try_into()?, + xcm: xcm.try_into()?, + }, + HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity } => + Self::HrmpNewChannelOpenRequest { sender, max_message_size, max_capacity }, + HrmpChannelAccepted { recipient } => Self::HrmpChannelAccepted { recipient }, + HrmpChannelClosing { initiator, sender, recipient } => + Self::HrmpChannelClosing { initiator, sender, recipient }, + Transact { origin_kind, require_weight_at_most, call } => + Self::Transact { origin_kind, require_weight_at_most, call: call.into() }, + ReportError(response_info) => Self::ReportError(QueryResponseInfo { + query_id: response_info.query_id, + destination: response_info.destination.try_into().map_err(|_| ())?, + max_weight: response_info.max_weight, + }), + DepositAsset { assets, beneficiary } => { + let beneficiary = beneficiary.try_into()?; + let assets = assets.try_into()?; + Self::DepositAsset { assets, beneficiary } + }, + DepositReserveAsset { assets, dest, xcm } => { + let dest = dest.try_into()?; + let xcm = xcm.try_into()?; + let assets = assets.try_into()?; + Self::DepositReserveAsset { assets, dest, xcm } + }, + ExchangeAsset { give, want, maximal } => { + let give = give.try_into()?; + let want = want.try_into()?; + Self::ExchangeAsset { give, want, maximal } + }, + InitiateReserveWithdraw { assets, reserve, xcm } => { + let 
assets = assets.try_into()?; + let reserve = reserve.try_into()?; + let xcm = xcm.try_into()?; + Self::InitiateReserveWithdraw { assets, reserve, xcm } + }, + InitiateTeleport { assets, dest, xcm } => { + let assets = assets.try_into()?; + let dest = dest.try_into()?; + let xcm = xcm.try_into()?; + Self::InitiateTeleport { assets, dest, xcm } + }, + ReportHolding { response_info, assets } => { + let response_info = QueryResponseInfo { + destination: response_info.destination.try_into().map_err(|_| ())?, + query_id: response_info.query_id, + max_weight: response_info.max_weight, + }; + Self::ReportHolding { response_info, assets: assets.try_into()? } + }, + BuyExecution { fees, weight_limit } => { + let fees = fees.try_into()?; + let weight_limit = weight_limit.into(); + Self::BuyExecution { fees, weight_limit } + }, + ClearOrigin => Self::ClearOrigin, + DescendOrigin(who) => Self::DescendOrigin(who.try_into()?), + RefundSurplus => Self::RefundSurplus, + SetErrorHandler(xcm) => Self::SetErrorHandler(xcm.try_into()?), + SetAppendix(xcm) => Self::SetAppendix(xcm.try_into()?), + ClearError => Self::ClearError, + ClaimAsset { assets, ticket } => { + let assets = assets.try_into()?; + let ticket = ticket.try_into()?; + Self::ClaimAsset { assets, ticket } + }, + Trap(code) => Self::Trap(code), + SubscribeVersion { query_id, max_response_weight } => + Self::SubscribeVersion { query_id, max_response_weight }, + UnsubscribeVersion => Self::UnsubscribeVersion, + BurnAsset(assets) => Self::BurnAsset(assets.try_into()?), + ExpectAsset(assets) => Self::ExpectAsset(assets.try_into()?), + ExpectOrigin(maybe_location) => Self::ExpectOrigin( + maybe_location.map(|location| location.try_into()).transpose().map_err(|_| ())?, + ), + ExpectError(maybe_error) => Self::ExpectError( + maybe_error.map(|error| error.try_into()).transpose().map_err(|_| ())?, + ), + ExpectTransactStatus(maybe_error_code) => Self::ExpectTransactStatus(maybe_error_code), + QueryPallet { module_name, response_info } => Self::QueryPallet { + module_name, + response_info: response_info.try_into().map_err(|_| ())?, + }, + ExpectPallet { index, name, module_name, crate_major, min_crate_minor } => + Self::ExpectPallet { index, name, module_name, crate_major, min_crate_minor }, + ReportTransactStatus(response_info) => + Self::ReportTransactStatus(response_info.try_into().map_err(|_| ())?), + ClearTransactStatus => Self::ClearTransactStatus, + UniversalOrigin(junction) => + Self::UniversalOrigin(junction.try_into().map_err(|_| ())?), + ExportMessage { network, destination, xcm } => Self::ExportMessage { + network: network.into(), + destination: destination.try_into().map_err(|_| ())?, + xcm: xcm.try_into().map_err(|_| ())?, + }, + LockAsset { asset, unlocker } => Self::LockAsset { + asset: asset.try_into().map_err(|_| ())?, + unlocker: unlocker.try_into().map_err(|_| ())?, + }, + UnlockAsset { asset, target } => Self::UnlockAsset { + asset: asset.try_into().map_err(|_| ())?, + target: target.try_into().map_err(|_| ())?, + }, + NoteUnlockable { asset, owner } => Self::NoteUnlockable { + asset: asset.try_into().map_err(|_| ())?, + owner: owner.try_into().map_err(|_| ())?, + }, + RequestUnlock { asset, locker } => Self::RequestUnlock { + asset: asset.try_into().map_err(|_| ())?, + locker: locker.try_into().map_err(|_| ())?, + }, + SetFeesMode { jit_withdraw } => Self::SetFeesMode { jit_withdraw }, + SetTopic(topic) => Self::SetTopic(topic), + ClearTopic => Self::ClearTopic, + AliasOrigin(location) => 
Self::AliasOrigin(location.try_into().map_err(|_| ())?), + UnpaidExecution { weight_limit, check_origin } => Self::UnpaidExecution { + weight_limit, + check_origin: check_origin + .map(|location| location.try_into()) + .transpose() + .map_err(|_| ())?, + }, + }) + } +} + +#[cfg(test)] +mod tests { + use super::{prelude::*, *}; + use crate::v4::{ + AssetFilter as OldAssetFilter, Junctions::Here as OldHere, WildAsset as OldWildAsset, + }; + + #[test] + fn basic_roundtrip_works() { + let xcm = Xcm::<()>(vec![TransferAsset { + assets: (Here, 1u128).into(), + beneficiary: Here.into(), + }]); + let old_xcm = OldXcm::<()>(vec![OldInstruction::TransferAsset { + assets: (OldHere, 1u128).into(), + beneficiary: OldHere.into(), + }]); + assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); + let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); + assert_eq!(new_xcm, xcm); + } + + #[test] + fn teleport_roundtrip_works() { + let xcm = Xcm::<()>(vec![ + ReceiveTeleportedAsset((Here, 1u128).into()), + ClearOrigin, + DepositAsset { assets: Wild(AllCounted(1)), beneficiary: Here.into() }, + ]); + let old_xcm: OldXcm<()> = OldXcm::<()>(vec![ + OldInstruction::ReceiveTeleportedAsset((OldHere, 1u128).into()), + OldInstruction::ClearOrigin, + OldInstruction::DepositAsset { + assets: crate::v4::AssetFilter::Wild(crate::v4::WildAsset::AllCounted(1)), + beneficiary: OldHere.into(), + }, + ]); + assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); + let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); + assert_eq!(new_xcm, xcm); + } + + #[test] + fn reserve_deposit_roundtrip_works() { + let xcm = Xcm::<()>(vec![ + ReserveAssetDeposited((Here, 1u128).into()), + ClearOrigin, + BuyExecution { + fees: (Here, 1u128).into(), + weight_limit: Some(Weight::from_parts(1, 1)).into(), + }, + DepositAsset { assets: Wild(AllCounted(1)), beneficiary: Here.into() }, + ]); + let old_xcm = OldXcm::<()>(vec![ + OldInstruction::ReserveAssetDeposited((OldHere, 1u128).into()), + OldInstruction::ClearOrigin, + OldInstruction::BuyExecution { + fees: (OldHere, 1u128).into(), + weight_limit: WeightLimit::Limited(Weight::from_parts(1, 1)), + }, + OldInstruction::DepositAsset { + assets: crate::v4::AssetFilter::Wild(crate::v4::WildAsset::AllCounted(1)), + beneficiary: OldHere.into(), + }, + ]); + assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); + let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); + assert_eq!(new_xcm, xcm); + } + + #[test] + fn deposit_asset_roundtrip_works() { + let xcm = Xcm::<()>(vec![ + WithdrawAsset((Here, 1u128).into()), + DepositAsset { assets: Wild(AllCounted(1)), beneficiary: Here.into() }, + ]); + let old_xcm = OldXcm::<()>(vec![ + OldInstruction::WithdrawAsset((OldHere, 1u128).into()), + OldInstruction::DepositAsset { + assets: OldAssetFilter::Wild(OldWildAsset::AllCounted(1)), + beneficiary: OldHere.into(), + }, + ]); + assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); + let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); + assert_eq!(new_xcm, xcm); + } + + #[test] + fn deposit_reserve_asset_roundtrip_works() { + let xcm = Xcm::<()>(vec![ + WithdrawAsset((Here, 1u128).into()), + DepositReserveAsset { + assets: Wild(AllCounted(1)), + dest: Here.into(), + xcm: Xcm::<()>(vec![]), + }, + ]); + let old_xcm = OldXcm::<()>(vec![ + OldInstruction::WithdrawAsset((OldHere, 1u128).into()), + OldInstruction::DepositReserveAsset { + assets: OldAssetFilter::Wild(OldWildAsset::AllCounted(1)), + dest: OldHere.into(), + xcm: OldXcm::<()>(vec![]), + }, + ]); + 
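+ // Converting the v5 message down to v4 and back again should be lossless.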
assert_eq!(old_xcm, OldXcm::<()>::try_from(xcm.clone()).unwrap()); + let new_xcm: Xcm<()> = old_xcm.try_into().unwrap(); + assert_eq!(new_xcm, xcm); + } + + #[test] + fn decoding_respects_limit() { + let max_xcm = Xcm::<()>(vec![ClearOrigin; MAX_INSTRUCTIONS_TO_DECODE as usize]); + let encoded = max_xcm.encode(); + assert!(Xcm::<()>::decode(&mut &encoded[..]).is_ok()); + + let big_xcm = Xcm::<()>(vec![ClearOrigin; MAX_INSTRUCTIONS_TO_DECODE as usize + 1]); + let encoded = big_xcm.encode(); + assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); + + let nested_xcm = Xcm::<()>(vec![ + DepositReserveAsset { + assets: All.into(), + dest: Here.into(), + xcm: max_xcm, + }; + (MAX_INSTRUCTIONS_TO_DECODE / 2) as usize + ]); + let encoded = nested_xcm.encode(); + assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); + + let even_more_nested_xcm = Xcm::<()>(vec![SetAppendix(nested_xcm); 64]); + let encoded = even_more_nested_xcm.encode(); + assert_eq!(encoded.len(), 342530); + // This should not decode since the limit is 100 + assert_eq!(MAX_INSTRUCTIONS_TO_DECODE, 100, "precondition"); + assert!(Xcm::<()>::decode(&mut &encoded[..]).is_err()); + } +} diff --git a/polkadot/xcm/src/v5/traits.rs b/polkadot/xcm/src/v5/traits.rs new file mode 100644 index 000000000000..d21e77b8d7ab --- /dev/null +++ b/polkadot/xcm/src/v5/traits.rs @@ -0,0 +1,312 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Cross-Consensus Message format data structures. + +pub use crate::v3::{Error, Result, SendError, XcmHash}; +use codec::{Decode, Encode}; +use core::result; +use scale_info::TypeInfo; + +pub use sp_weights::Weight; + +use super::*; + +/// Outcome of an XCM execution. +#[derive(Clone, Encode, Decode, Eq, PartialEq, Debug, TypeInfo)] +pub enum Outcome { + /// Execution completed successfully; given weight was used. + Complete { used: Weight }, + /// Execution started, but did not complete successfully due to the given error; given weight + /// was used. + Incomplete { used: Weight, error: Error }, + /// Execution did not start due to the given error. + Error { error: Error }, +} + +impl Outcome { + pub fn ensure_complete(self) -> Result { + match self { + Outcome::Complete { .. } => Ok(()), + Outcome::Incomplete { error, .. } => Err(error), + Outcome::Error { error, .. } => Err(error), + } + } + pub fn ensure_execution(self) -> result::Result { + match self { + Outcome::Complete { used, .. } => Ok(used), + Outcome::Incomplete { used, .. } => Ok(used), + Outcome::Error { error, .. } => Err(error), + } + } + /// How much weight was used by the XCM execution attempt. + pub fn weight_used(&self) -> Weight { + match self { + Outcome::Complete { used, .. } => *used, + Outcome::Incomplete { used, .. } => *used, + Outcome::Error { .. 
} => Weight::zero(), + } + } +} + +impl From for Outcome { + fn from(error: Error) -> Self { + Self::Error { error } + } +} + +pub trait PreparedMessage { + fn weight_of(&self) -> Weight; +} + +/// Type of XCM message executor. +pub trait ExecuteXcm { + type Prepared: PreparedMessage; + fn prepare(message: Xcm) -> result::Result>; + fn execute( + origin: impl Into, + pre: Self::Prepared, + id: &mut XcmHash, + weight_credit: Weight, + ) -> Outcome; + fn prepare_and_execute( + origin: impl Into, + message: Xcm, + id: &mut XcmHash, + weight_limit: Weight, + weight_credit: Weight, + ) -> Outcome { + let pre = match Self::prepare(message) { + Ok(x) => x, + Err(_) => return Outcome::Error { error: Error::WeightNotComputable }, + }; + let xcm_weight = pre.weight_of(); + if xcm_weight.any_gt(weight_limit) { + return Outcome::Error { error: Error::WeightLimitReached(xcm_weight) } + } + Self::execute(origin, pre, id, weight_credit) + } + + /// Deduct some `fees` to the sovereign account of the given `location` and place them as per + /// the convention for fees. + fn charge_fees(location: impl Into, fees: Assets) -> Result; +} + +pub enum Weightless {} +impl PreparedMessage for Weightless { + fn weight_of(&self) -> Weight { + unreachable!() + } +} + +impl ExecuteXcm for () { + type Prepared = Weightless; + fn prepare(message: Xcm) -> result::Result> { + Err(message) + } + fn execute(_: impl Into, _: Self::Prepared, _: &mut XcmHash, _: Weight) -> Outcome { + unreachable!() + } + fn charge_fees(_location: impl Into, _fees: Assets) -> Result { + Err(Error::Unimplemented) + } +} + +pub trait Reanchorable: Sized { + /// Type to return in case of an error. + type Error: Debug; + + /// Mutate `self` so that it represents the same location from the point of view of `target`. + /// The context of `self` is provided as `context`. + /// + /// Does not modify `self` in case of overflow. + fn reanchor( + &mut self, + target: &Location, + context: &InteriorLocation, + ) -> core::result::Result<(), ()>; + + /// Consume `self` and return a new value representing the same location from the point of view + /// of `target`. The context of `self` is provided as `context`. + /// + /// Returns the original `self` in case of overflow. + fn reanchored( + self, + target: &Location, + context: &InteriorLocation, + ) -> core::result::Result; +} + +/// Result value when attempting to send an XCM message. +pub type SendResult = result::Result<(T, Assets), SendError>; + +/// Utility for sending an XCM message to a given location. +/// +/// These can be amalgamated in tuples to form sophisticated routing systems. In tuple format, each +/// router might return `NotApplicable` to pass the execution to the next sender item. Note that +/// each `NotApplicable` might alter the destination and the XCM message for to the next router. +/// +/// # Example +/// ```rust +/// # use codec::Encode; +/// # use staging_xcm::v5::{prelude::*, Weight}; +/// # use staging_xcm::VersionedXcm; +/// # use std::convert::Infallible; +/// +/// /// A sender that only passes the message through and does nothing. +/// struct Sender1; +/// impl SendXcm for Sender1 { +/// type Ticket = Infallible; +/// fn validate(_: &mut Option, _: &mut Option>) -> SendResult { +/// Err(SendError::NotApplicable) +/// } +/// fn deliver(_: Infallible) -> Result { +/// unreachable!() +/// } +/// } +/// +/// /// A sender that accepts a message that has two junctions, otherwise stops the routing. 
+/// struct Sender2; +/// impl SendXcm for Sender2 { +/// type Ticket = (); +/// fn validate(destination: &mut Option, message: &mut Option>) -> SendResult<()> { +/// match destination.as_ref().ok_or(SendError::MissingArgument)?.unpack() { +/// (0, [j1, j2]) => Ok(((), Assets::new())), +/// _ => Err(SendError::Unroutable), +/// } +/// } +/// fn deliver(_: ()) -> Result { +/// Ok([0; 32]) +/// } +/// } +/// +/// /// A sender that accepts a message from a parent, passing through otherwise. +/// struct Sender3; +/// impl SendXcm for Sender3 { +/// type Ticket = (); +/// fn validate(destination: &mut Option, message: &mut Option>) -> SendResult<()> { +/// match destination.as_ref().ok_or(SendError::MissingArgument)?.unpack() { +/// (1, []) => Ok(((), Assets::new())), +/// _ => Err(SendError::NotApplicable), +/// } +/// } +/// fn deliver(_: ()) -> Result { +/// Ok([0; 32]) +/// } +/// } +/// +/// // A call to send via XCM. We don't really care about this. +/// # fn main() { +/// let call: Vec = ().encode(); +/// let message = Xcm(vec![Instruction::Transact { +/// origin_kind: OriginKind::Superuser, +/// require_weight_at_most: Weight::zero(), +/// call: call.into(), +/// }]); +/// let message_hash = message.using_encoded(sp_io::hashing::blake2_256); +/// +/// // Sender2 will block this. +/// assert!(send_xcm::<(Sender1, Sender2, Sender3)>(Parent.into(), message.clone()).is_err()); +/// +/// // Sender3 will catch this. +/// assert!(send_xcm::<(Sender1, Sender3)>(Parent.into(), message.clone()).is_ok()); +/// # } +/// ``` +pub trait SendXcm { + /// Intermediate value which connects the two phases of the send operation. + type Ticket; + + /// Check whether the given `_message` is deliverable to the given `_destination` and if so + /// determine the cost which will be paid by this chain to do so, returning a `Validated` token + /// which can be used to enact delivery. + /// + /// The `destination` and `message` must be `Some` (or else an error will be returned) and they + /// may only be consumed if the `Err` is not `NotApplicable`. + /// + /// If it is not a destination which can be reached with this type but possibly could by others, + /// then this *MUST* return `NotApplicable`. Any other error will cause the tuple + /// implementation to exit early without trying other type fields. + fn validate( + destination: &mut Option, + message: &mut Option>, + ) -> SendResult; + + /// Actually carry out the delivery operation for a previously validated message sending. + fn deliver(ticket: Self::Ticket) -> result::Result; +} + +#[impl_trait_for_tuples::impl_for_tuples(30)] +impl SendXcm for Tuple { + for_tuples! { type Ticket = (#( Option ),* ); } + + fn validate( + destination: &mut Option, + message: &mut Option>, + ) -> SendResult { + let mut maybe_cost: Option = None; + let one_ticket: Self::Ticket = (for_tuples! { #( + if maybe_cost.is_some() { + None + } else { + match Tuple::validate(destination, message) { + Err(SendError::NotApplicable) => None, + Err(e) => { return Err(e) }, + Ok((v, c)) => { + maybe_cost = Some(c); + Some(v) + }, + } + } + ),* }); + if let Some(cost) = maybe_cost { + Ok((one_ticket, cost)) + } else { + Err(SendError::NotApplicable) + } + } + + fn deliver(one_ticket: Self::Ticket) -> result::Result { + for_tuples!( #( + if let Some(validated) = one_ticket.Tuple { + return Tuple::deliver(validated); + } + )* ); + Err(SendError::Unroutable) + } +} + +/// Convenience function for using a `SendXcm` implementation. 
Just interprets the `dest` and wraps +/// both in `Some` before passing them as mutable references into `T::send_xcm`. +pub fn validate_send<T: SendXcm>(dest: Location, msg: Xcm<()>) -> SendResult<T::Ticket> { + T::validate(&mut Some(dest), &mut Some(msg)) +} + +/// Convenience function for using a `SendXcm` implementation. Just interprets the `dest` and wraps +/// both in `Some` before passing them as mutable references into `T::send_xcm`. +/// +/// Returns either `Ok` with the price of the delivery, or `Err` with the reason why the message +/// could not be sent. +/// +/// Generally you'll want to validate and get the price first to ensure that the sender can pay it +/// before actually doing the delivery. +pub fn send_xcm<T: SendXcm>( + dest: Location, + msg: Xcm<()>, +) -> result::Result<(XcmHash, Assets), SendError> { + let (ticket, price) = T::validate(&mut Some(dest), &mut Some(msg))?; + let hash = T::deliver(ticket)?; + Ok((hash, price)) +} diff --git a/polkadot/xcm/xcm-builder/src/barriers.rs b/polkadot/xcm/xcm-builder/src/barriers.rs index c995361ea8a3..56a8493ef0ab 100644 --- a/polkadot/xcm/xcm-builder/src/barriers.rs +++ b/polkadot/xcm/xcm-builder/src/barriers.rs @@ -108,6 +108,7 @@ impl<T: Contains<Location>> ShouldExecute for AllowTopLevelPaidExecutionFrom<T> *weight_limit = Limited(max_weight); Ok(()) }, + PayFees { .. } => Ok(()), _ => Err(ProcessMessageError::Overweight(max_weight)), })?; Ok(()) diff --git a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs index 2e6f8c5fb566..ce78c68917a9 100644 --- a/polkadot/xcm/xcm-builder/src/process_xcm_message.rs +++ b/polkadot/xcm/xcm-builder/src/process_xcm_message.rs @@ -125,7 +125,7 @@ mod tests { traits::{ProcessMessageError, ProcessMessageError::*}, }; use polkadot_test_runtime::*; - use xcm::{v3, v4, VersionedXcm}; + use xcm::{v3, v4, v5, VersionedXcm}; const ORIGIN: Junction = Junction::OnlyChild; /// The processor to use for tests. @@ -137,13 +137,15 @@ mod tests { // ClearOrigin works. assert!(process(v3_xcm(true)).unwrap()); assert!(process(v4_xcm(true)).unwrap()); + assert!(process(v5_xcm(true)).unwrap()); } #[test] fn process_message_trivial_fails() { // Trap makes it fail.
assert!(!process(v3_xcm(false)).unwrap()); - assert!(!process(v3_xcm(false)).unwrap()); + assert!(!process(v4_xcm(false)).unwrap()); + assert!(!process(v5_xcm(false)).unwrap()); } #[test] @@ -179,7 +181,7 @@ mod tests { type Processor = ProcessXcmMessage; - let xcm = VersionedXcm::V4(xcm::latest::Xcm::<()>(vec![ + let xcm = VersionedXcm::from(xcm::latest::Xcm::<()>(vec![ xcm::latest::Instruction::<()>::ClearOrigin, ])); assert_err!( @@ -235,6 +237,15 @@ mod tests { VersionedXcm::V4(v4::Xcm::(vec![instr])) } + fn v5_xcm(success: bool) -> VersionedXcm { + let instr = if success { + v5::Instruction::::ClearOrigin + } else { + v5::Instruction::::Trap(1) + }; + VersionedXcm::V5(v5::Xcm::(vec![instr])) + } + fn process(msg: VersionedXcm) -> Result { process_raw(msg.encode().as_slice()) } diff --git a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs index e95473c5407e..56a1d1305c56 100644 --- a/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs +++ b/polkadot/xcm/xcm-executor/integration-tests/src/lib.rs @@ -241,7 +241,7 @@ fn query_response_fires() { assert_eq!( polkadot_test_runtime::Xcm::query(query_id), Some(QueryStatus::Ready { - response: VersionedResponse::V4(Response::ExecutionResult(None)), + response: VersionedResponse::from(Response::ExecutionResult(None)), at: 2u32.into() }), ) diff --git a/polkadot/xcm/xcm-executor/src/lib.rs b/polkadot/xcm/xcm-executor/src/lib.rs index e3addfa3e794..5df0d0df7c64 100644 --- a/polkadot/xcm/xcm-executor/src/lib.rs +++ b/polkadot/xcm/xcm-executor/src/lib.rs @@ -47,6 +47,9 @@ pub use assets::AssetsInHolding; mod config; pub use config::Config; +#[cfg(test)] +mod tests; + /// A struct to specify how fees are being paid. #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub struct FeesMode { @@ -83,13 +86,16 @@ pub struct XcmExecutor { appendix_weight: Weight, transact_status: MaybeErrorCode, fees_mode: FeesMode, + fees: AssetsInHolding, /// Asset provided in last `BuyExecution` instruction (if any) in current XCM program. Same /// asset type will be used for paying any potential delivery fees incurred by the program. - asset_used_for_fees: Option, + asset_used_in_buy_execution: Option, + /// Stores the current message's weight. + message_weight: Weight, _config: PhantomData, } -#[cfg(feature = "runtime-benchmarks")] +#[cfg(any(test, feature = "runtime-benchmarks"))] impl XcmExecutor { pub fn holding(&self) -> &AssetsInHolding { &self.holding @@ -175,6 +181,12 @@ impl XcmExecutor { pub fn set_fees_mode(&mut self, v: FeesMode) { self.fees_mode = v } + pub fn fees(&self) -> &AssetsInHolding { + &self.fees + } + pub fn set_fees(&mut self, value: AssetsInHolding) { + self.fees = value; + } pub fn topic(&self) -> &Option<[u8; 32]> { &self.context.topic } @@ -249,6 +261,7 @@ impl ExecuteXcm for XcmExecutor XcmExecutor { appendix_weight: Weight::zero(), transact_status: Default::default(), fees_mode: FeesMode { jit_withdraw: false }, - asset_used_for_fees: None, + fees: AssetsInHolding::new(), + asset_used_in_buy_execution: None, + message_weight: Weight::zero(), _config: PhantomData, } } @@ -466,6 +481,11 @@ impl XcmExecutor { self.holding.subsume_assets(w.into()); } } + // If there are any leftover `fees`, merge them with `holding`. 
+ if !self.fees.is_empty() { + let leftover_fees = self.fees.saturating_take(Wild(All)); + self.holding.subsume_assets(leftover_fees); + } tracing::trace!( target: "xcm::refund_surplus", total_refunded = ?self.total_refunded, @@ -490,7 +510,7 @@ impl<Config: config::Config> XcmExecutor<Config> { Some(fee) => fee, None => return Ok(()), // No delivery fees need to be paid. }; - // If `BuyExecution` was called, we use that asset for delivery fees as well. + // If `BuyExecution` or `PayFees` was called, we use that asset for delivery fees as well. let asset_to_pay_for_fees = self.calculate_asset_for_delivery_fees(asset_needed_for_fees.clone()); tracing::trace!(target: "xcm::fees", ?asset_to_pay_for_fees); @@ -505,15 +525,31 @@ impl<Config: config::Config> XcmExecutor<Config> { tracing::trace!(target: "xcm::fees", ?asset_needed_for_fees); asset_to_pay_for_fees.clone().into() } else { - let assets_taken_from_holding_to_pay_delivery_fees = self - .holding - .try_take(asset_to_pay_for_fees.clone().into()) - .map_err(|e| { - tracing::error!(target: "xcm::fees", ?e, ?asset_to_pay_for_fees, "Failed to take asset_to_pay_for_fees from holding"); - XcmError::NotHoldingFees - })?; - tracing::trace!(target: "xcm::fees", ?assets_taken_from_holding_to_pay_delivery_fees); - let mut iter = assets_taken_from_holding_to_pay_delivery_fees.fungible_assets_iter(); + // This condition exists to support `BuyExecution` while the ecosystem + // transitions to `PayFees`. + let assets_to_pay_delivery_fees: AssetsInHolding = if self.fees.is_empty() { + // Means `BuyExecution` was used, we'll find the fees in the `holding` register. + self.holding + .try_take(asset_to_pay_for_fees.clone().into()) + .map_err(|e| { + tracing::error!(target: "xcm::fees", ?e, ?asset_to_pay_for_fees, + "Holding doesn't hold enough for fees"); + XcmError::NotHoldingFees + })? + .into() + } else { + // Means `PayFees` was used, we'll find the fees in the `fees` register. + self.fees + .try_take(asset_to_pay_for_fees.clone().into()) + .map_err(|e| { + tracing::error!(target: "xcm::fees", ?e, ?asset_to_pay_for_fees, + "Fees register doesn't hold enough for fees"); + XcmError::NotHoldingFees + })? + .into() + }; + tracing::trace!(target: "xcm::fees", ?assets_to_pay_delivery_fees); + let mut iter = assets_to_pay_delivery_fees.fungible_assets_iter(); + let asset = iter.next().ok_or(XcmError::NotHoldingFees)?; asset.into() }; @@ -544,41 +580,45 @@ impl<Config: config::Config> XcmExecutor<Config> { Ok(()) } - /// Calculates the amount of `self.asset_used_for_fees` required to swap for - /// `asset_needed_for_fees`. + /// Calculates the amount of asset used in `PayFees` or `BuyExecution` that would be + /// charged for swapping to `asset_needed_for_fees`. /// /// The calculation is done by `Config::AssetExchanger`. - /// If `self.asset_used_for_fees` is not set, it will just return `asset_needed_for_fees`. + /// If neither `PayFees` nor `BuyExecution` was used, or no swap is required, + /// it will just return `asset_needed_for_fees`. fn calculate_asset_for_delivery_fees(&self, asset_needed_for_fees: Asset) -> Asset { - if let Some(asset_wanted_for_fees) = &self.asset_used_for_fees { - if *asset_wanted_for_fees != asset_needed_for_fees.id { - match Config::AssetExchanger::quote_exchange_price( - &(asset_wanted_for_fees.clone(), Fungible(0)).into(), - &asset_needed_for_fees.clone().into(), - false, // Minimal. - ) { - Some(necessary_assets) => - // We only use the first asset for fees. - // If this is not enough to swap for the fee asset then it will error later down - // the line.
- necessary_assets.get(0).unwrap_or(&asset_needed_for_fees.clone()).clone(), - // If we can't convert, then we return the original asset. - // It will error later in any case. - None => { - tracing::trace!( - target: "xcm::calculate_asset_for_delivery_fees", - ?asset_wanted_for_fees, - "Could not convert fees", - ); - asset_needed_for_fees.clone() - }, - } - } else { - asset_needed_for_fees - } - } else { + let Some(asset_wanted_for_fees) = + // we try to swap first asset in the fees register (should only ever be one), + self.fees.fungible.first_key_value().map(|(id, _)| id).or_else(|| { + // or the one used in BuyExecution + self.asset_used_in_buy_execution.as_ref() + }) + // if it is different from what we need + .filter(|&id| asset_needed_for_fees.id.ne(id)) + else { + // either nothing to swap or we're already holding the right asset + return asset_needed_for_fees + }; + Config::AssetExchanger::quote_exchange_price( + &(asset_wanted_for_fees.clone(), Fungible(0)).into(), + &asset_needed_for_fees.clone().into(), + false, // Minimal. + ) + .and_then(|necessary_assets| { + // We only use the first asset for fees. + // If this is not enough to swap for the fee asset then it will error later down + // the line. + necessary_assets.into_inner().into_iter().next() + }) + .unwrap_or_else(|| { + // If we can't convert, then we return the original asset. + // It will error later in any case. + tracing::trace!( + target: "xcm::calculate_asset_for_delivery_fees", + ?asset_wanted_for_fees, "Could not convert fees", + ); asset_needed_for_fees - } + }) } /// Calculates what `local_querier` would be from the perspective of `destination`. @@ -638,11 +678,16 @@ impl XcmExecutor { assets.into_assets_iter().collect::>().into() } - #[cfg(feature = "runtime-benchmarks")] + #[cfg(any(test, feature = "runtime-benchmarks"))] pub fn bench_process(&mut self, xcm: Xcm) -> Result<(), ExecutorError> { self.process(xcm) } + #[cfg(any(test, feature = "runtime-benchmarks"))] + pub fn bench_post_process(self, xcm_weight: Weight) -> Outcome { + self.post_process(xcm_weight) + } + fn process(&mut self, xcm: Xcm) -> Result<(), ExecutorError> { tracing::trace!( target: "xcm::process", @@ -960,30 +1005,11 @@ impl XcmExecutor { DepositReserveAsset { assets, dest, xcm } => { let old_holding = self.holding.clone(); let result = Config::TransactionalProcessor::process(|| { - // we need to do this take/put cycle to solve wildcards and get exact assets to - // be weighed - let to_weigh = self.holding.saturating_take(assets.clone()); - self.holding.subsume_assets(to_weigh.clone()); - let to_weigh_reanchored = Self::reanchored(to_weigh, &dest, None); - let mut message_to_weigh = - vec![ReserveAssetDeposited(to_weigh_reanchored), ClearOrigin]; - message_to_weigh.extend(xcm.0.clone().into_iter()); - let (_, fee) = - validate_send::(dest.clone(), Xcm(message_to_weigh))?; - let maybe_delivery_fee = fee.get(0).map(|asset_needed_for_fees| { - tracing::trace!( - target: "xcm::DepositReserveAsset", - "Asset provided to pay for fees {:?}, asset required for delivery fees: {:?}", - self.asset_used_for_fees, asset_needed_for_fees, - ); - let asset_to_pay_for_fees = - self.calculate_asset_for_delivery_fees(asset_needed_for_fees.clone()); - // set aside fee to be charged by XcmSender - let delivery_fee = - self.holding.saturating_take(asset_to_pay_for_fees.into()); - tracing::trace!(target: "xcm::DepositReserveAsset", ?delivery_fee); - delivery_fee - }); + let maybe_delivery_fee_from_holding = if self.fees.is_empty() { + 
self.get_delivery_fee_from_holding(&assets, &dest, &xcm)? + } else { + None + }; // now take assets to deposit (after having taken delivery fees) let deposited = self.holding.saturating_take(assets); tracing::trace!(target: "xcm::DepositReserveAsset", ?deposited, "Assets except delivery fee"); @@ -994,8 +1020,8 @@ impl XcmExecutor { let assets = Self::reanchored(deposited, &dest, None); let mut message = vec![ReserveAssetDeposited(assets), ClearOrigin]; message.extend(xcm.0.into_iter()); - // put back delivery_fee in holding register to be charged by XcmSender - if let Some(delivery_fee) = maybe_delivery_fee { + if let Some(delivery_fee) = maybe_delivery_fee_from_holding { + // Put back delivery_fee in holding register to be charged by XcmSender. self.holding.subsume_assets(delivery_fee); } self.send(dest, Xcm(message), FeeReason::DepositReserveAsset)?; @@ -1090,8 +1116,11 @@ impl XcmExecutor { let old_holding = self.holding.clone(); // Save the asset being used for execution fees, so we later know what should be // used for delivery fees. - self.asset_used_for_fees = Some(fees.id.clone()); - tracing::trace!(target: "xcm::executor::BuyExecution", asset_used_for_fees = ?self.asset_used_for_fees); + self.asset_used_in_buy_execution = Some(fees.id.clone()); + tracing::trace!( + target: "xcm::executor::BuyExecution", + asset_used_in_buy_execution = ?self.asset_used_in_buy_execution + ); // pay for `weight` using up to `fees` of the holding register. let max_fee = self.holding.try_take(fees.clone().into()).map_err(|e| { @@ -1108,6 +1137,26 @@ impl XcmExecutor { } result }, + PayFees { asset } => { + // Record old holding in case we need to rollback. + let old_holding = self.holding.clone(); + // The max we're willing to pay for fees is decided by the `asset` operand. + let max_fee = + self.holding.try_take(asset.into()).map_err(|_| XcmError::NotHoldingFees)?; + // Pay for execution fees. + let result = Config::TransactionalProcessor::process(|| { + let unspent = + self.trader.buy_weight(self.message_weight, max_fee, &self.context)?; + // Move unspent to the `fees` register. + self.fees.subsume_assets(unspent); + Ok(()) + }); + if Config::TransactionalProcessor::IS_TRANSACTIONAL && result.is_err() { + // Rollback. + self.holding = old_holding; + } + result + }, RefundSurplus => self.refund_surplus(), SetErrorHandler(mut handler) => { let handler_weight = Config::Weigher::weight(&mut handler) @@ -1445,4 +1494,39 @@ impl XcmExecutor { } Ok(()) } + + /// Gets the necessary delivery fee to send a reserve transfer message to `destination` from + /// holding. + /// + /// Will be removed once the transition from `BuyExecution` to `PayFees` is complete. 
+ fn get_delivery_fee_from_holding( + &mut self, + assets: &AssetFilter, + destination: &Location, + xcm: &Xcm<()>, + ) -> Result, XcmError> { + // we need to do this take/put cycle to solve wildcards and get exact assets to + // be weighed + let to_weigh = self.holding.saturating_take(assets.clone()); + self.holding.subsume_assets(to_weigh.clone()); + let to_weigh_reanchored = Self::reanchored(to_weigh, &destination, None); + let mut message_to_weigh = vec![ReserveAssetDeposited(to_weigh_reanchored), ClearOrigin]; + message_to_weigh.extend(xcm.0.clone().into_iter()); + let (_, fee) = + validate_send::(destination.clone(), Xcm(message_to_weigh))?; + let maybe_delivery_fee = fee.get(0).map(|asset_needed_for_fees| { + tracing::trace!( + target: "xcm::DepositReserveAsset", + "Asset provided to pay for fees {:?}, asset required for delivery fees: {:?}", + self.asset_used_in_buy_execution, asset_needed_for_fees, + ); + let asset_to_pay_for_fees = + self.calculate_asset_for_delivery_fees(asset_needed_for_fees.clone()); + // set aside fee to be charged by XcmSender + let delivery_fee = self.holding.saturating_take(asset_to_pay_for_fees.into()); + tracing::trace!(target: "xcm::DepositReserveAsset", ?delivery_fee); + delivery_fee + }); + Ok(maybe_delivery_fee) + } } diff --git a/polkadot/xcm/xcm-executor/src/tests/mock.rs b/polkadot/xcm/xcm-executor/src/tests/mock.rs new file mode 100644 index 000000000000..cd612879d205 --- /dev/null +++ b/polkadot/xcm/xcm-executor/src/tests/mock.rs @@ -0,0 +1,279 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Mock types and XcmConfig for all executor unit tests. + +use alloc::collections::btree_map::BTreeMap; +use codec::{Decode, Encode}; +use core::cell::RefCell; +use frame_support::{ + dispatch::{DispatchInfo, DispatchResultWithPostInfo, GetDispatchInfo, PostDispatchInfo}, + parameter_types, + traits::{Everything, Nothing, ProcessMessageError}, + weights::Weight, +}; +use sp_runtime::traits::Dispatchable; +use xcm::prelude::*; + +use crate::{ + traits::{DropAssets, Properties, ShouldExecute, TransactAsset, WeightBounds, WeightTrader}, + AssetsInHolding, Config, XcmExecutor, +}; + +/// We create an XCVM instance instead of calling `XcmExecutor::<_>::prepare_and_execute` so we +/// can inspect its fields. +pub fn instantiate_executor( + origin: impl Into, + message: Xcm<::RuntimeCall>, +) -> (XcmExecutor, Weight) { + let mut vm = + XcmExecutor::::new(origin, message.using_encoded(sp_io::hashing::blake2_256)); + let weight = XcmExecutor::::prepare(message.clone()).unwrap().weight_of(); + vm.message_weight = weight; + (vm, weight) +} + +parameter_types! { + pub const MaxAssetsIntoHolding: u32 = 10; + pub const BaseXcmWeight: Weight = Weight::from_parts(1, 1); + pub const MaxInstructions: u32 = 10; + pub UniversalLocation: InteriorLocation = GlobalConsensus(ByGenesis([0; 32])).into(); +} + +/// Test origin. 
+#[derive(Debug)] +pub struct TestOrigin; + +/// Test call. +/// +/// Doesn't dispatch anything, has an empty implementation of [`Dispatchable`] that +/// just returns `Ok` with an empty [`PostDispatchInfo`]. +#[derive(Debug, Encode, Decode, Eq, PartialEq, Clone, Copy, scale_info::TypeInfo)] +pub struct TestCall; +impl Dispatchable for TestCall { + type RuntimeOrigin = TestOrigin; + type Config = (); + type Info = (); + type PostInfo = PostDispatchInfo; + + fn dispatch(self, _origin: Self::RuntimeOrigin) -> DispatchResultWithPostInfo { + Ok(PostDispatchInfo::default()) + } +} +impl GetDispatchInfo for TestCall { + fn get_dispatch_info(&self) -> DispatchInfo { + DispatchInfo::default() + } +} + +/// Test weigher that just returns a fixed weight for every program. +pub struct TestWeigher; +impl WeightBounds for TestWeigher { + fn weight(_message: &mut Xcm) -> Result { + Ok(Weight::from_parts(2, 2)) + } + + fn instr_weight(_instruction: &Instruction) -> Result { + Ok(Weight::from_parts(2, 2)) + } +} + +thread_local! { + pub static ASSETS: RefCell> = RefCell::new(BTreeMap::new()); + pub static SENT_XCM: RefCell)>> = RefCell::new(Vec::new()); +} + +pub fn add_asset(who: impl Into, what: impl Into) { + ASSETS.with(|a| { + a.borrow_mut() + .entry(who.into()) + .or_insert(AssetsInHolding::new()) + .subsume(what.into()) + }); +} + +pub fn asset_list(who: impl Into) -> Vec { + Assets::from(assets(who)).into_inner() +} + +pub fn assets(who: impl Into) -> AssetsInHolding { + ASSETS.with(|a| a.borrow().get(&who.into()).cloned()).unwrap_or_default() +} + +pub fn get_first_fungible(assets: &AssetsInHolding) -> Option { + assets.fungible_assets_iter().next() +} + +/// Test asset transactor that withdraws from and deposits to a thread local assets storage. +pub struct TestAssetTransactor; +impl TransactAsset for TestAssetTransactor { + fn deposit_asset( + what: &Asset, + who: &Location, + _context: Option<&XcmContext>, + ) -> Result<(), XcmError> { + add_asset(who.clone(), what.clone()); + Ok(()) + } + + fn withdraw_asset( + what: &Asset, + who: &Location, + _context: Option<&XcmContext>, + ) -> Result { + ASSETS.with(|a| { + a.borrow_mut() + .get_mut(who) + .ok_or(XcmError::NotWithdrawable)? + .try_take(what.clone().into()) + .map_err(|_| XcmError::NotWithdrawable) + }) + } +} + +/// Test barrier that just lets everything through. +pub struct TestBarrier; +impl ShouldExecute for TestBarrier { + fn should_execute( + _origin: &Location, + _instructions: &mut [Instruction], + _max_weight: Weight, + _properties: &mut Properties, + ) -> Result<(), ProcessMessageError> { + Ok(()) + } +} + +/// Test weight to fee that just multiplies `Weight.ref_time` and `Weight.proof_size`. +pub struct WeightToFee; +impl WeightToFee { + pub fn weight_to_fee(weight: &Weight) -> u128 { + weight.ref_time() as u128 * weight.proof_size() as u128 + } +} + +/// Test weight trader that just buys weight with the native asset (`Here`) and +/// uses the test `WeightToFee`. 
+pub struct TestTrader { + weight_bought_so_far: Weight, +} +impl WeightTrader for TestTrader { + fn new() -> Self { + Self { weight_bought_so_far: Weight::zero() } + } + + fn buy_weight( + &mut self, + weight: Weight, + payment: AssetsInHolding, + _context: &XcmContext, + ) -> Result { + let amount = WeightToFee::weight_to_fee(&weight); + let required: Asset = (Here, amount).into(); + let unused = payment.checked_sub(required).map_err(|_| XcmError::TooExpensive)?; + self.weight_bought_so_far.saturating_add(weight); + Ok(unused) + } + + fn refund_weight(&mut self, weight: Weight, _context: &XcmContext) -> Option { + let weight = weight.min(self.weight_bought_so_far); + let amount = WeightToFee::weight_to_fee(&weight); + self.weight_bought_so_far -= weight; + if amount > 0 { + Some((Here, amount).into()) + } else { + None + } + } +} + +/// Account where all dropped assets are deposited. +pub const TRAPPED_ASSETS: [u8; 32] = [255; 32]; + +/// Test asset trap that moves all dropped assets to the `TRAPPED_ASSETS` account. +pub struct TestAssetTrap; +impl DropAssets for TestAssetTrap { + fn drop_assets(_origin: &Location, assets: AssetsInHolding, _context: &XcmContext) -> Weight { + ASSETS.with(|a| { + a.borrow_mut() + .entry(TRAPPED_ASSETS.into()) + .or_insert(AssetsInHolding::new()) + .subsume_assets(assets) + }); + Weight::zero() + } +} + +/// Test sender that always succeeds and puts messages in a dummy queue. +/// +/// It charges `1` for the delivery fee. +pub struct TestSender; +impl SendXcm for TestSender { + type Ticket = (Location, Xcm<()>); + + fn validate( + destination: &mut Option, + message: &mut Option>, + ) -> SendResult { + let ticket = (destination.take().unwrap(), message.take().unwrap()); + let delivery_fee: Asset = (Here, 1u128).into(); + Ok((ticket, delivery_fee.into())) + } + + fn deliver(ticket: Self::Ticket) -> Result { + SENT_XCM.with(|q| q.borrow_mut().push(ticket)); + Ok([0; 32]) + } +} + +/// Gets queued test messages. +pub fn sent_xcm() -> Vec<(Location, Xcm<()>)> { + SENT_XCM.with(|q| (*q.borrow()).clone()) +} + +/// Test XcmConfig that uses all the test implementations in this file. +pub struct XcmConfig; +impl Config for XcmConfig { + type RuntimeCall = TestCall; + type XcmSender = TestSender; + type AssetTransactor = TestAssetTransactor; + type OriginConverter = (); + type IsReserve = (); + type IsTeleporter = (); + type UniversalLocation = UniversalLocation; + type Barrier = TestBarrier; + type Weigher = TestWeigher; + type Trader = TestTrader; + type ResponseHandler = (); + type AssetTrap = TestAssetTrap; + type AssetLocker = (); + type AssetExchanger = (); + type AssetClaims = (); + type SubscriptionService = (); + type PalletInstancesInfo = (); + type MaxAssetsIntoHolding = MaxAssetsIntoHolding; + type FeeManager = (); + type MessageExporter = (); + type UniversalAliases = Nothing; + type CallDispatcher = Self::RuntimeCall; + type SafeCallFilter = Everything; + type Aliasers = Nothing; + type TransactionalProcessor = (); + type HrmpNewChannelOpenRequestHandler = (); + type HrmpChannelAcceptedHandler = (); + type HrmpChannelClosingHandler = (); + type XcmRecorder = (); +} diff --git a/polkadot/xcm/xcm-executor/src/tests/mod.rs b/polkadot/xcm/xcm-executor/src/tests/mod.rs new file mode 100644 index 000000000000..9892a0277127 --- /dev/null +++ b/polkadot/xcm/xcm-executor/src/tests/mod.rs @@ -0,0 +1,24 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Unit tests for the XCM executor. +//! +//! These exclude any cross-chain functionality. For those, look at the +//! `xcm-emulator` based tests in the cumulus folder. +//! These tests deal with internal state changes of the XCVM. + +mod mock; +mod pay_fees; diff --git a/polkadot/xcm/xcm-executor/src/tests/pay_fees.rs b/polkadot/xcm/xcm-executor/src/tests/pay_fees.rs new file mode 100644 index 000000000000..192cc319c287 --- /dev/null +++ b/polkadot/xcm/xcm-executor/src/tests/pay_fees.rs @@ -0,0 +1,260 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Unit tests related to the `fees` register and `PayFees` instruction. +//! +//! See [Fellowship RFC 105](https://github.com/polkadot-fellows/rfCs/pull/105) +//! and the [specification](https://github.com/polkadot-fellows/xcm-format) for more information. + +use xcm::prelude::*; + +use super::mock::*; + +// The sender and recipient we use across these tests. +const SENDER: [u8; 32] = [0; 32]; +const RECIPIENT: [u8; 32] = [1; 32]; + +// ===== Happy path ===== + +// This is a sort of backwards compatibility test. +// Since `PayFees` is a replacement for `BuyExecution`, we need to make sure it at least +// manages to do the same thing, paying for execution fees. +#[test] +fn works_for_execution_fees() { + // Make sure the sender has enough funds to withdraw. + add_asset(SENDER, (Here, 100u128)); + + // Build xcm. + let xcm = Xcm::::builder() + .withdraw_asset((Here, 100u128)) + .pay_fees((Here, 10u128)) // 10% destined for fees, not more. + .deposit_asset(All, RECIPIENT) + .build(); + + let (mut vm, weight) = instantiate_executor(SENDER, xcm.clone()); + + // Program runs successfully. + assert!(vm.bench_process(xcm).is_ok()); + + // Nothing is left in the `holding` register. + assert_eq!(get_first_fungible(vm.holding()), None); + // Execution fees were 4, so we still have 6 left in the `fees` register. + assert_eq!(get_first_fungible(vm.fees()).unwrap(), (Here, 6u128).into()); + + // The recipient received all the assets in the holding register, so `100` that + // were withdrawn, minus the `10` that were destinated for fee payment. + assert_eq!(asset_list(RECIPIENT), [(Here, 90u128).into()]); + + // Leftover fees get trapped. 
+ assert!(vm.bench_post_process(weight).ensure_complete().is_ok()); + assert_eq!(asset_list(TRAPPED_ASSETS), [(Here, 6u128).into()]) +} + +// This tests the new functionality provided by `PayFees`, being able to pay for +// delivery fees from the `fees` register. +#[test] +fn works_for_delivery_fees() { + // Make sure the sender has enough funds to withdraw. + add_asset(SENDER, (Here, 100u128)); + + // Information to send messages. + // We don't care about the specifics since we're not actually sending them. + let query_response_info = + QueryResponseInfo { destination: Parent.into(), query_id: 0, max_weight: Weight::zero() }; + + // Build xcm. + let xcm = Xcm::::builder() + .withdraw_asset((Here, 100u128)) + .pay_fees((Here, 10u128)) + // Send a bunch of messages, each charging delivery fees. + .report_error(query_response_info.clone()) + .report_error(query_response_info.clone()) + .report_error(query_response_info) + .deposit_asset(All, RECIPIENT) + .build(); + + let (mut vm, _) = instantiate_executor(SENDER, xcm.clone()); + + // Program runs successfully. + assert!(vm.bench_process(xcm).is_ok()); + + // Nothing is left in the `holding` register. + assert_eq!(get_first_fungible(vm.holding()), None); + // Execution fees were 4, delivery were 3, so we are left with only 3 in the `fees` register. + assert_eq!(get_first_fungible(vm.fees()).unwrap(), (Here, 3u128).into()); + + // The recipient received all the assets in the holding register, so `100` that + // were withdrawn, minus the `10` that were destinated for fee payment. + assert_eq!(asset_list(RECIPIENT), [(Here, 90u128).into()]); + + let querier: Location = ( + UniversalLocation::get().take_first().unwrap(), + AccountId32 { id: SENDER.into(), network: None }, + ) + .into(); + let sent_message = Xcm(vec![QueryResponse { + query_id: 0, + response: Response::ExecutionResult(None), + max_weight: Weight::zero(), + querier: Some(querier), + }]); + + // The messages were "sent" successfully. + assert_eq!( + sent_xcm(), + vec![ + (Parent.into(), sent_message.clone()), + (Parent.into(), sent_message.clone()), + (Parent.into(), sent_message.clone()) + ] + ); +} + +// Tests the support for `BuyExecution` while the ecosystem transitions to `PayFees`. +#[test] +fn buy_execution_works_as_before() { + // Make sure the sender has enough funds to withdraw. + add_asset(SENDER, (Here, 100u128)); + + // Build xcm. + let xcm = Xcm::::builder() + .withdraw_asset((Here, 100u128)) + // We can put everything here, since excess will be returned to holding. + // We have to specify `Limited` here to actually work, it's normally + // set in the `AllowTopLevelPaidExecutionFrom` barrier. + .buy_execution((Here, 100u128), Limited(Weight::from_parts(2, 2))) + .deposit_asset(All, RECIPIENT) + .build(); + + let (mut vm, _) = instantiate_executor(SENDER, xcm.clone()); + + // Program runs successfully. + assert!(vm.bench_process(xcm).is_ok()); + + // Nothing is left in the `holding` register. + assert_eq!(get_first_fungible(vm.holding()), None); + // `BuyExecution` does not interact with the `fees` register. + assert_eq!(get_first_fungible(vm.fees()), None); + + // The recipient received all the assets in the holding register, so `100` that + // were withdrawn, minus the `4` from paying the execution fees. + assert_eq!(asset_list(RECIPIENT), [(Here, 96u128).into()]); +} + +// Tests the interaction between `PayFees` and `RefundSurplus`. +#[test] +fn fees_can_be_refunded() { + // Make sure the sender has enough funds to withdraw. 
+ add_asset(SENDER, (Here, 100u128)); + + // Build xcm. + let xcm = Xcm::::builder() + .withdraw_asset((Here, 100u128)) + .pay_fees((Here, 10u128)) // 10% destined for fees, not more. + .deposit_asset(All, RECIPIENT) + .refund_surplus() + .deposit_asset(All, SENDER) + .build(); + + let (mut vm, _) = instantiate_executor(SENDER, xcm.clone()); + + // Program runs successfully. + assert!(vm.bench_process(xcm).is_ok()); + + // Nothing is left in the `holding` register. + assert_eq!(get_first_fungible(vm.holding()), None); + // Nothing was left in the `fees` register since it was refunded. + assert_eq!(get_first_fungible(vm.fees()), None); + + // The recipient received all the assets in the holding register, so `100` that + // were withdrawn, minus the `10` that were destinated for fee payment. + assert_eq!(asset_list(RECIPIENT), [(Here, 90u128).into()]); + + // The sender got back `6` from unused assets. + assert_eq!(asset_list(SENDER), [(Here, 6u128).into()]); +} + +// ===== Unhappy path ===== + +#[test] +fn putting_all_assets_in_pay_fees() { + // Make sure the sender has enough funds to withdraw. + add_asset(SENDER, (Here, 100u128)); + + // Build xcm. + let xcm = Xcm::::builder() + .withdraw_asset((Here, 100u128)) + .pay_fees((Here, 100u128)) // 100% destined for fees, this is not going to end well... + .deposit_asset(All, RECIPIENT) + .build(); + + let (mut vm, _) = instantiate_executor(SENDER, xcm.clone()); + + // Program runs successfully. + assert!(vm.bench_process(xcm).is_ok()); + + // Nothing is left in the `holding` register. + assert_eq!(get_first_fungible(vm.holding()), None); + // We destined `100` for fee payment, after `4` for execution fees, we are left with `96`. + assert_eq!(get_first_fungible(vm.fees()).unwrap(), (Here, 96u128).into()); + + // The recipient received no assets since they were all destined for fee payment. + assert_eq!(asset_list(RECIPIENT), []); +} + +#[test] +fn refunding_too_early() { + // Make sure the sender has enough funds to withdraw. + add_asset(SENDER, (Here, 100u128)); + + // Information to send messages. + // We don't care about the specifics since we're not actually sending them. + let query_response_info = + QueryResponseInfo { destination: Parent.into(), query_id: 0, max_weight: Weight::zero() }; + + // Build xcm. + let xcm = Xcm::::builder() + .withdraw_asset((Here, 100u128)) + .pay_fees((Here, 10u128)) // 10% destined for fees, not more. + .deposit_asset(All, RECIPIENT) + .refund_surplus() + .deposit_asset(All, SENDER) + // `refund_surplus` cleared the `fees` register. + // `holding` is used as a fallback, but we also cleared that. + // The instruction will error and the message won't be sent :(. + .report_error(query_response_info) + .build(); + + let (mut vm, _) = instantiate_executor(SENDER, xcm.clone()); + + // Program fails to run. + assert!(vm.bench_process(xcm).is_err()); + + // Nothing is left in the `holding` register. + assert_eq!(get_first_fungible(vm.holding()), None); + // Nothing was left in the `fees` register since it was refunded. + assert_eq!(get_first_fungible(vm.fees()), None); + + // The recipient received all the assets in the holding register, so `100` that + // were withdrawn, minus the `10` that were destinated for fee payment. + assert_eq!(asset_list(RECIPIENT), [(Here, 90u128).into()]); + + // The sender got back `6` from unused assets. + assert_eq!(asset_list(SENDER), [(Here, 6u128).into()]); + + // No messages were "sent". 
+ assert_eq!(sent_xcm(), Vec::new()); +} diff --git a/prdoc/pr_5420.prdoc b/prdoc/pr_5420.prdoc new file mode 100644 index 000000000000..bf8a34569077 --- /dev/null +++ b/prdoc/pr_5420.prdoc @@ -0,0 +1,62 @@ +# Schema: Polkadot SDK PRDoc Schema (prdoc) v1.0.0 +# See doc at https://raw.githubusercontent.com/paritytech/polkadot-sdk/master/prdoc/schema_user.json + +title: XCMv5 - Better fee mechanism + +doc: + - audience: + - Runtime User + - Runtime Dev + description: | + In XCMv5, there's a new instruction, `PayFees`, which is meant to be a replacement for `BuyExecution`. + This instruction takes only one parameter, the `asset` that you are willing to use for fee payment. + There's no parameter for limiting the weight; the amount of the `asset` you put in is the limit of + how much you're willing to pay. + This instruction works much better with delivery fees. + `BuyExecution` will still be around to ensure backwards compatibility; however, the benefits of the new + instruction are a good incentive to switch. + The proposed workflow is to estimate fees using the `XcmPaymentApi` and `DryRunApi`, then to put those + values in `PayFees` and watch your message go, knowing you covered all the necessary fees. + You can add a little extra just in case. + `RefundSurplus` now gets back all of the assets that were destined for fee payment so you can deposit + them somewhere. + BEWARE: make sure you're not sending any other message after you call `RefundSurplus`; if you do, it will + error. + +crates: + - name: staging-xcm-executor + bump: minor + - name: staging-xcm-builder + bump: minor + - name: staging-xcm + bump: major + - name: rococo-runtime + bump: minor + - name: westend-runtime + bump: minor + - name: xcm-emulator + bump: major + - name: people-westend-runtime + bump: minor + - name: people-rococo-runtime + bump: minor + - name: coretime-rococo-runtime + bump: minor + - name: coretime-westend-runtime + bump: minor + - name: bridge-hub-westend-runtime + bump: minor + - name: bridge-hub-rococo-runtime + bump: minor + - name: asset-hub-westend-runtime + bump: minor + - name: asset-hub-rococo-runtime + bump: minor + - name: emulated-integration-tests-common + bump: minor + - name: xcm-procedural + bump: minor + - name: pallet-xcm-benchmarks + bump: minor + - name: snowbridge-pallet-system + bump: patch
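The prdoc description above lays out the proposed workflow only in prose. The sketch below illustrates it with the same `Xcm::builder()` API exercised by the `pay_fees` tests in this PR; the helper name, its parameters and the idea of padding the estimate are illustrative assumptions, with the actual fee value expected to come from `XcmPaymentApi`/`DryRunApi` as the description suggests.

use xcm::prelude::*;

/// Hypothetical helper: build a program that pays fees via `PayFees` and recovers any
/// unspent fees with `RefundSurplus`, mirroring the `fees_can_be_refunded` test above.
fn transfer_with_pay_fees(amount: u128, estimated_fees: u128, beneficiary: Location) -> Xcm<()> {
    Xcm::<()>::builder()
        // Load the holding register with everything being moved, fees included.
        .withdraw_asset((Here, amount))
        // Set aside only the estimated execution and delivery fees (buffer already included).
        .pay_fees((Here, estimated_fees))
        // The actual work of the program.
        .deposit_asset(All, beneficiary.clone())
        // Move whatever is left in the `fees` register back into holding...
        .refund_surplus()
        // ...and deposit it so it isn't trapped. Nothing that charges fees should follow this.
        .deposit_asset(All, beneficiary)
        .build()
}

Because `RefundSurplus` empties the `fees` register, any later instruction that needs to charge delivery fees would fail, which is exactly what the `refunding_too_early` test demonstrates.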