diff --git a/Cargo.lock b/Cargo.lock index 3b6987d36c..a7cf7827bb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10712,7 +10712,6 @@ dependencies = [ name = "subxt-metadata" version = "0.37.0" dependencies = [ - "assert_matches", "bitvec", "criterion", "frame-decode", diff --git a/artifacts/regressions/1659.scale b/artifacts/regressions/1659.scale new file mode 100644 index 0000000000..7f19753064 Binary files /dev/null and b/artifacts/regressions/1659.scale differ diff --git a/metadata/Cargo.toml b/metadata/Cargo.toml index 92bff27b5b..fb34cfa2fd 100644 --- a/metadata/Cargo.toml +++ b/metadata/Cargo.toml @@ -29,7 +29,6 @@ hashbrown = { workspace = true } bitvec = { workspace = true, features = ["alloc"] } criterion = { workspace = true } scale-info = { workspace = true, features = ["bit-vec"] } -assert_matches = { workspace = true } [lib] # Without this, libtest cli opts interfere with criteron benches: diff --git a/metadata/src/lib.rs b/metadata/src/lib.rs index 8e0900ea7d..1d81efe200 100644 --- a/metadata/src/lib.rs +++ b/metadata/src/lib.rs @@ -214,7 +214,10 @@ impl Metadata { MetadataHasher::new(self) } - /// Filter out any pallets that we don't want to keep, retaining only those that we do. + /// Filter out any pallets and/or runtime_apis that we don't want to keep, retaining only those that we do. + /// Note: + /// only filtering by `pallet`s will not lead to a significant metadata size reduction, because the return types are kept to ensure that those can still be decoded. + /// pub fn retain(&mut self, pallet_filter: F, api_filter: G) where F: FnMut(&str) -> bool, diff --git a/metadata/src/utils/retain.rs b/metadata/src/utils/retain.rs index a160609597..4e13982a69 100644 --- a/metadata/src/utils/retain.rs +++ b/metadata/src/utils/retain.rs @@ -5,187 +5,139 @@ //! Utility functions to generate a subset of the metadata. 
use crate::{ - ExtrinsicMetadata, Metadata, OuterEnumsMetadata, PalletMetadataInner, RuntimeApiMetadataInner, - StorageEntryType, + ExtrinsicMetadata, Metadata, PalletMetadataInner, RuntimeApiMetadataInner, StorageEntryType, }; -use alloc::collections::BTreeMap; -use hashbrown::HashSet; -use scale_info::TypeDef; - -/// Collect all type IDs needed to represent the provided pallet. -fn collect_pallet_types(pallet: &PalletMetadataInner, type_ids: &mut HashSet) { - if let Some(storage) = &pallet.storage { - for entry in storage.entries() { - match entry.entry_type { - StorageEntryType::Plain(ty) => { - type_ids.insert(ty); - } - StorageEntryType::Map { - key_ty, value_ty, .. - } => { - type_ids.insert(key_ty); - type_ids.insert(value_ty); - } - } - } - } +use alloc::collections::BTreeSet; +use alloc::vec::Vec; +use scale_info::{ + PortableType, TypeDef, TypeDefArray, TypeDefBitSequence, TypeDefCompact, TypeDefComposite, + TypeDefSequence, TypeDefTuple, TypeDefVariant, +}; + +#[derive(Clone)] +struct TypeSet { + seen_ids: BTreeSet, + pub work_set: Vec, +} - if let Some(ty) = pallet.call_ty { - type_ids.insert(ty); +impl TypeSet { + fn new() -> Self { + Self { + seen_ids: BTreeSet::new(), + // Average work set size is around 30-50 elements, depending on the metadata size + work_set: Vec::with_capacity(32), + } } - if let Some(ty) = pallet.event_ty { - type_ids.insert(ty); + fn insert(&mut self, id: u32) -> bool { + self.seen_ids.insert(id) } - for constant in pallet.constants.values() { - type_ids.insert(constant.ty); + fn contains(&mut self, id: u32) -> bool { + self.seen_ids.contains(&id) } - if let Some(ty) = pallet.error_ty { - type_ids.insert(ty); + fn push_to_workset(&mut self, id: u32) { + // Check if we hit a type we've already inserted; avoid infinite loops and stop. + if self.insert(id) { + self.work_set.push(id); + } } -} -/// Update all type IDs of the provided pallet using the new type IDs from the portable registry. 
-fn update_pallet_types(pallet: &mut PalletMetadataInner, map_ids: &BTreeMap) { - if let Some(storage) = &mut pallet.storage { - for entry in storage.entries.values_mut() { - match &mut entry.entry_type { - StorageEntryType::Plain(ty) => { - update_type(ty, map_ids); + /// This function will deeply traverse the initial type and its dependencies to collect the relevant type_ids + fn collect_types(&mut self, metadata: &Metadata, id: u32) { + self.push_to_workset(id); + while let Some(typ) = self.work_set.pop() { + let typ = resolve_typ(metadata, typ); + match &typ.ty.type_def { + TypeDef::Composite(TypeDefComposite { fields }) => { + for field in fields { + self.push_to_workset(field.ty.id); + } + } + TypeDef::Variant(TypeDefVariant { variants }) => { + for variant in variants { + for field in &variant.fields { + self.push_to_workset(field.ty.id); + } + } } - StorageEntryType::Map { - key_ty, value_ty, .. - } => { - update_type(key_ty, map_ids); - update_type(value_ty, map_ids); + TypeDef::Array(TypeDefArray { len: _, type_param }) + | TypeDef::Sequence(TypeDefSequence { type_param }) + | TypeDef::Compact(TypeDefCompact { type_param }) => { + self.push_to_workset(type_param.id); + } + TypeDef::Tuple(TypeDefTuple { fields }) => { + for field in fields { + self.push_to_workset(field.id); + } + } + TypeDef::Primitive(_) => (), + TypeDef::BitSequence(TypeDefBitSequence { + bit_store_type, + bit_order_type, + }) => { + for typ in [bit_order_type, bit_store_type] { + self.push_to_workset(typ.id); + } + } } } } - if let Some(ty) = &mut pallet.call_ty { - update_type(ty, map_ids); - } - - if let Some(ty) = &mut pallet.event_ty { - update_type(ty, map_ids); - } - - if let Some(ty) = &mut pallet.error_ty { - update_type(ty, map_ids); - } - - for constant in pallet.constants.values_mut() { - update_type(&mut constant.ty, map_ids); - } -} - -/// Collect all type IDs needed to represent the extrinsic metadata. 
-fn collect_extrinsic_types(extrinsic: &ExtrinsicMetadata, type_ids: &mut HashSet) { - type_ids.insert(extrinsic.address_ty); - type_ids.insert(extrinsic.call_ty); - type_ids.insert(extrinsic.signature_ty); - type_ids.insert(extrinsic.extra_ty); - - for signed in &extrinsic.signed_extensions { - type_ids.insert(signed.extra_ty); - type_ids.insert(signed.additional_ty); - } -} - -/// Update all type IDs of the provided extrinsic metadata using the new type IDs from the portable registry. -fn update_extrinsic_types(extrinsic: &mut ExtrinsicMetadata, map_ids: &BTreeMap) { - update_type(&mut extrinsic.address_ty, map_ids); - update_type(&mut extrinsic.call_ty, map_ids); - update_type(&mut extrinsic.signature_ty, map_ids); - update_type(&mut extrinsic.extra_ty, map_ids); + fn collect_extrinsic_types(&mut self, extrinsic: &ExtrinsicMetadata) { + for ty in [ + extrinsic.address_ty, + extrinsic.call_ty, + extrinsic.signature_ty, + extrinsic.extra_ty, + ] { + self.insert(ty); + } - for signed in &mut extrinsic.signed_extensions { - update_type(&mut signed.extra_ty, map_ids); - update_type(&mut signed.additional_ty, map_ids); + for signed in &extrinsic.signed_extensions { + self.insert(signed.extra_ty); + self.insert(signed.additional_ty); + } } -} -/// Collect all type IDs needed to represent the runtime APIs. -fn collect_runtime_api_types(api: &RuntimeApiMetadataInner, type_ids: &mut HashSet) { - for method in api.methods.values() { - for input in &method.inputs { - type_ids.insert(input.ty); + /// Collect all type IDs needed to represent the runtime APIs. + fn collect_runtime_api_types(&mut self, metadata: &Metadata, api: &RuntimeApiMetadataInner) { + for method in api.methods.values() { + self.collect_types(metadata, method.output_ty); } - type_ids.insert(method.output_ty); } -} -/// Update all type IDs of the provided runtime APIs metadata using the new type IDs from the portable registry. 
-fn update_runtime_api_types(apis: &mut [RuntimeApiMetadataInner], map_ids: &BTreeMap) { - for api in apis { - for method in api.methods.values_mut() { - for input in &mut method.inputs { - update_type(&mut input.ty, map_ids); + /// Collect all type IDs needed to represent the provided pallet. + fn collect_pallet_types(&mut self, pallet: &PalletMetadataInner, metadata: &Metadata) { + if let Some(storage) = &pallet.storage { + for entry in storage.entries() { + match entry.entry_type { + StorageEntryType::Plain(ty) => { + self.collect_types(metadata, ty); + } + StorageEntryType::Map { + key_ty, value_ty, .. + } => { + self.collect_types(metadata, key_ty); + self.collect_types(metadata, value_ty); + } + } } - update_type(&mut method.output_ty, map_ids); } - } -} - -/// Collect the outer enums type IDs. -fn collect_outer_enums(enums: &OuterEnumsMetadata, type_ids: &mut HashSet) { - type_ids.insert(enums.call_enum_ty); - type_ids.insert(enums.event_enum_ty); - type_ids.insert(enums.error_enum_ty); -} -/// Update all the type IDs for outer enums. -fn update_outer_enums(enums: &mut OuterEnumsMetadata, map_ids: &BTreeMap) { - update_type(&mut enums.call_enum_ty, map_ids); - update_type(&mut enums.event_enum_ty, map_ids); - update_type(&mut enums.error_enum_ty, map_ids); -} - -/// Update the given type using the new type ID from the portable registry. -/// -/// # Panics -/// -/// Panics if the [`scale_info::PortableRegistry`] did not retain all needed types. -fn update_type(ty: &mut u32, map_ids: &BTreeMap) { - let old_id = *ty; - let new_id = map_ids - .get(&old_id) - .copied() - .unwrap_or_else(|| panic!("PortableRegistry did not retain type id {old_id}. This is a bug. Please open an issue.")); - *ty = new_id; + for constant in pallet.constants.values() { + self.collect_types(metadata, constant.ty); + } + } } -/// Retain the enum type identified by ID and keep only the variants that -/// match the provided filter. 
-fn retain_variants_in_enum_type(metadata: &mut Metadata, id: u32, mut filter: F) -where - F: FnMut(&str) -> bool, -{ - let ty = metadata +fn resolve_typ(metadata: &Metadata, typ: u32) -> &PortableType { + metadata .types .types - .get_mut(id as usize) - .expect("Metadata should contain enum type in registry"); - - let TypeDef::Variant(variant) = &mut ty.ty.type_def else { - panic!("Metadata type is expected to be a variant type"); - }; - - // Remove all variants from the type that aren't the pallet(s) we want to keep. - variant.variants.retain(|v| filter(&v.name)); -} - -/// Strip any pallets out of the outer enum types that aren't the ones we want to keep. -fn retain_pallets_in_runtime_outer_types(metadata: &mut Metadata, mut filter: F) -where - F: FnMut(&str) -> bool, -{ - retain_variants_in_enum_type(metadata, metadata.outer_enums.call_enum_ty, &mut filter); - retain_variants_in_enum_type(metadata, metadata.outer_enums.event_enum_ty, &mut filter); - retain_variants_in_enum_type(metadata, metadata.outer_enums.error_enum_ty, &mut filter); + .get(typ as usize) + .expect("Metadata should contain enum type in registry") } /// Generate a subset of the metadata that contains only the @@ -208,27 +160,10 @@ pub fn retain_metadata( F: FnMut(&str) -> bool, G: FnMut(&str) -> bool, { - let mut type_ids = HashSet::new(); - - // There are special outer enum types that point to all pallets types (call, error, event) by default. - // This brings in a significant chunk of types. We trim this down to only include variants - // for the pallets we're retaining, to avoid this. - retain_pallets_in_runtime_outer_types(metadata, &mut pallets_filter); - - // Collect the stripped outer enums. - collect_outer_enums(&metadata.outer_enums, &mut type_ids); - - // Filter our pallet list to only those pallets we want to keep. Keep hold of all - // type IDs in the pallets we're keeping. Retain all, if no filter specified. 
- metadata.pallets.retain(|pallet| { - let should_retain = pallets_filter(&pallet.name); - if should_retain { - collect_pallet_types(pallet, &mut type_ids); - } - should_retain - }); - - // We index pallets by their u8 index for easy access. Rebuild this index. + // 1. Delete pallets we don't want to keep. + metadata + .pallets + .retain(|pallet| pallets_filter(&pallet.name)); metadata.pallets_by_index = metadata .pallets .values() @@ -237,58 +172,188 @@ pub fn retain_metadata( .map(|(pos, p)| (p.index, pos)) .collect(); - // Keep the extrinsic stuff referenced in our metadata. - collect_extrinsic_types(&metadata.extrinsic, &mut type_ids); - - // Keep the "runtime" type ID, since it's referenced in our metadata. - type_ids.insert(metadata.runtime_ty); - - // Keep only the runtime API types that the filter allows for. Keep hold of all - // type IDs in the runtime apis we're keeping. Retain all, if no filter specified. - metadata.apis.retain(|api| { - let should_retain = runtime_apis_filter(&api.name); - if should_retain { - collect_runtime_api_types(api, &mut type_ids); + // 2. Delete runtime APIs we don't want to keep. + metadata.apis.retain(|api| runtime_apis_filter(&api.name)); + + // 3. For each outer enum type, strip it if possible, ie if it is not returned by any + // of the things we're keeping (because if it is, we need to keep all of it so that we + // can still decode values into it). + let outer_enums = metadata.outer_enums(); + let mut find_type_id = keep_outer_enum(metadata, &mut pallets_filter, &mut runtime_apis_filter); + for outer_enum_ty_id in [ + outer_enums.call_enum_ty(), + outer_enums.error_enum_ty(), + outer_enums.event_enum_ty(), + ] { + if !find_type_id(outer_enum_ty_id) { + strip_variants_in_enum_type(metadata, &mut pallets_filter, outer_enum_ty_id); } - should_retain - }); + } + + // 4. Collect all of the type IDs we still want to keep after deleting. 
+ let mut keep_these_type_ids: BTreeSet = + iterate_metadata_types(metadata).map(|x| *x).collect(); - // Additionally, subxt depends on the `DispatchError` type existing; we use the same - // logic here that is used when building our `Metadata`. + // 5. Additionally, subxt depends on the `DispatchError` type existing; we use the same + // logic here that is used when building our `Metadata` to ensure we keep it too. let dispatch_error_ty = metadata .types .types .iter() .find(|ty| ty.ty.path.segments == ["sp_runtime", "DispatchError"]) .expect("Metadata must contain sp_runtime::DispatchError"); - type_ids.insert(dispatch_error_ty.id); - // Now, keep the type IDs we've asked for. This recursively keeps any types referenced from these. - // This will return a map from old to new type ID, because IDs may change. - let map_ids = metadata.types.retain(|id| type_ids.contains(&id)); + keep_these_type_ids.insert(dispatch_error_ty.id); - // And finally, we can go and update all of our type IDs in the metadata as a result of this: - update_outer_enums(&mut metadata.outer_enums, &map_ids); - for pallets in metadata.pallets.values_mut() { - update_pallet_types(pallets, &map_ids); + // 6. Strip all of the type IDs we no longer need, based on the above set. + let map_ids = metadata + .types + .retain(|id| keep_these_type_ids.contains(&id)); + + // 7. Now, update the type IDs referenced in our metadata to reflect this. + for id in iterate_metadata_types(metadata) { + if let Some(new_id) = map_ids.get(id) { + *id = *new_id; + } else { + panic!("Type id {id} was not retained. 
This is a bug"); + } } - update_extrinsic_types(&mut metadata.extrinsic, &map_ids); - update_type(&mut metadata.runtime_ty, &map_ids); - update_runtime_api_types(metadata.apis.values_mut(), &map_ids); +} + +fn strip_variants_in_enum_type(metadata: &mut Metadata, mut pallets_filter: F, id: u32) +where + F: FnMut(&str) -> bool, +{ + let ty = { + metadata + .types + .types + .get_mut(id as usize) + .expect("Metadata should contain enum type in registry") + }; + + let TypeDef::Variant(variant) = &mut ty.ty.type_def else { + panic!("Metadata type is expected to be a variant type"); + }; + + variant.variants.retain(|v| pallets_filter(&v.name)); +} + +/// Returns an iterator that allows modifying each type ID seen in the metadata (not recursively). +/// This will iterate over every type referenced in the metadata outside of `metadata.types`. +fn iterate_metadata_types(metadata: &mut Metadata) -> impl Iterator { + let mut types = alloc::vec::Vec::new(); + + // collect outer_enum top-level types + let outer_enum = &mut metadata.outer_enums; + types.push(&mut outer_enum.call_enum_ty); + types.push(&mut outer_enum.event_enum_ty); + types.push(&mut outer_enum.error_enum_ty); + + // collect pallet top-level type ids + for pallet in metadata.pallets.values_mut() { + if let Some(storage) = &mut pallet.storage { + for entry in storage.entries.values_mut() { + match &mut entry.entry_type { + StorageEntryType::Plain(ty) => { + types.push(ty); + } + StorageEntryType::Map { + key_ty, value_ty, .. 
+ } => { + types.push(key_ty); + types.push(value_ty); + } + } + } + }; + if let Some(ty) = &mut pallet.call_ty { + types.push(ty); + } + + if let Some(ty) = &mut pallet.event_ty { + types.push(ty); + } + + if let Some(ty) = &mut pallet.error_ty { + types.push(ty); + } + + for constant in pallet.constants.values_mut() { + types.push(&mut constant.ty); + } + } + + // collect extrinsic type_ids + for ty in [ + &mut metadata.extrinsic.extra_ty, + &mut metadata.extrinsic.address_ty, + &mut metadata.extrinsic.signature_ty, + &mut metadata.extrinsic.call_ty, + ] { + types.push(ty); + } + + for signed in &mut metadata.extrinsic.signed_extensions { + types.push(&mut signed.extra_ty); + types.push(&mut signed.additional_ty); + } + + types.push(&mut metadata.runtime_ty); + + // collect runtime_api_types + for api in metadata.apis.values_mut() { + for method in api.methods.values_mut() { + for input in &mut method.inputs.iter_mut() { + types.push(&mut input.ty); + } + types.push(&mut method.output_ty); + } + } + + types.into_iter() +} + +/// Look for a type ID anywhere that we can be given back, ie in constants, storage, extrinsics or runtime API return types. +/// This will recurse deeply into those type IDs to find them. 
+pub fn keep_outer_enum( + metadata: &Metadata, + pallets_filter: &mut F, + runtime_apis_filter: &mut G, +) -> impl FnMut(u32) -> bool +where + F: FnMut(&str) -> bool, + G: FnMut(&str) -> bool, +{ + let mut type_set = TypeSet::new(); + for pallet in metadata.pallets.values() { + if pallets_filter(&pallet.name) { + type_set.collect_pallet_types(pallet, metadata); + } + } + for api in metadata.apis.values() { + if runtime_apis_filter(&api.name) { + type_set.collect_runtime_api_types(metadata, api); + } + } + type_set.collect_extrinsic_types(&metadata.extrinsic); + move |type_id| type_set.contains(type_id) } #[cfg(test)] mod tests { use super::*; use crate::Metadata; - use assert_matches::assert_matches; use codec::Decode; use frame_metadata::{RuntimeMetadata, RuntimeMetadataPrefixed}; use std::{fs, path::Path}; fn load_metadata() -> Metadata { - let bytes = fs::read(Path::new("../artifacts/polkadot_metadata_full.scale")) - .expect("Cannot read metadata blob"); + load_metadata_custom("../artifacts/polkadot_metadata_full.scale") + } + + fn load_metadata_custom(path: impl AsRef) -> Metadata { + let bytes = fs::read(path).expect("Cannot read metadata blob"); let meta: RuntimeMetadataPrefixed = Decode::decode(&mut &*bytes).expect("Cannot decode scale metadata"); @@ -305,8 +370,8 @@ mod tests { // Retain one pallet at a time ensuring the test does not panic. 
for pallet in metadata_cache.pallets() { + let original_meta = metadata_cache.clone(); let mut metadata = metadata_cache.clone(); - retain_metadata( &mut metadata, |pallet_name| pallet_name == pallet.name(), @@ -319,20 +384,11 @@ mod tests { pallet.name() ); - let id = metadata.outer_enums().call_enum_ty; - let ty = metadata.types.resolve(id).unwrap(); - let num_variants = if pallet.call_ty_id().is_some() { 1 } else { 0 }; - assert_matches!(&ty.type_def, TypeDef::Variant(variant) if variant.variants.len() == num_variants); - - let id = metadata.outer_enums().error_enum_ty; - let ty = metadata.types.resolve(id).unwrap(); - let num_variants = if pallet.error_ty_id().is_some() { 1 } else { 0 }; - assert_matches!(&ty.type_def, TypeDef::Variant(variant) if variant.variants.len() == num_variants); - - let id = metadata.outer_enums().event_enum_ty; - let ty = metadata.types.resolve(id).unwrap(); - let num_variants = if pallet.event_ty_id().is_some() { 1 } else { 0 }; - assert_matches!(&ty.type_def, TypeDef::Variant(variant) if variant.variants.len() == num_variants); + assert!( + metadata.types.types.len() < original_meta.types.types.len(), + "Stripped metadata must have less retained types than the non-stripped one: stripped amount {}, original amount {}", + metadata.types.types.len(), original_meta.types.types.len() + ); } } @@ -356,4 +412,33 @@ mod tests { ); } } + + #[test] + fn issue_1659() { + let full_metadata = load_metadata_custom("../artifacts/regressions/1659.scale"); + // Strip metadata to the pallets as described in the issue. 
+ let mut stripped_metadata = full_metadata.clone(); + retain_metadata( + &mut stripped_metadata, + { + let set = "Balances,Timestamp,Contracts,ContractsEvm,System" + .split(",") + .collect::>(); + move |s| set.contains(&s) + }, + |_| true, + ); + + // check that call_enum did not change as it is referenced inside runtime_api + assert_eq!( + stripped_metadata.type_hash(stripped_metadata.outer_enums.call_enum_ty), + full_metadata.type_hash(full_metadata.outer_enums.call_enum_ty) + ); + + // check that event_num did not change as it is referenced inside runtime_api + assert_eq!( + stripped_metadata.type_hash(stripped_metadata.outer_enums.event_enum_ty), + full_metadata.type_hash(full_metadata.outer_enums.event_enum_ty) + ); + } } diff --git a/metadata/src/utils/validation.rs b/metadata/src/utils/validation.rs index dd76a359ea..11c51ad4d2 100644 --- a/metadata/src/utils/validation.rs +++ b/metadata/src/utils/validation.rs @@ -589,7 +589,11 @@ impl<'a> MetadataHasher<'a> { // Get the hashes of outer enums, considering only `specific_pallets` (if any are set). // If any of the typed that represent outer enums are encountered later, hashes from `top_level_enum_hashes` can be substituted. - let outer_enum_hashes = OuterEnumHashes::new(metadata, self.specific_pallets.as_deref()); + let outer_enum_hashes = OuterEnumHashes::new( + metadata, + self.specific_pallets.as_deref(), + self.specific_runtime_apis.as_deref(), + ); let pallet_hash = metadata.pallets().fold([0u8; HASH_LEN], |bytes, pallet| { // If specific pallets are given, only include this pallet if it is in the specific pallets. 
diff --git a/metadata/src/utils/validation/outer_enum_hashes.rs b/metadata/src/utils/validation/outer_enum_hashes.rs index 5e50fcb40e..9144c6b6a6 100644 --- a/metadata/src/utils/validation/outer_enum_hashes.rs +++ b/metadata/src/utils/validation/outer_enum_hashes.rs @@ -5,7 +5,10 @@ use hashbrown::HashMap; use scale_info::{PortableRegistry, TypeDef}; use crate::{ - utils::validation::{get_type_def_variant_hash, get_type_hash}, + utils::{ + retain, + validation::{get_type_def_variant_hash, get_type_hash}, + }, Metadata, }; @@ -20,7 +23,28 @@ pub struct OuterEnumHashes { impl OuterEnumHashes { /// Constructs new `OuterEnumHashes` from metadata. If `only_these_variants` is set, the enums are stripped down to only these variants, before their hashes are calculated. - pub fn new(metadata: &Metadata, only_these_variants: Option<&[&str]>) -> Self { + pub fn new( + metadata: &Metadata, + specific_pallets: Option<&[&str]>, + specific_runtimes: Option<&[&str]>, + ) -> Self { + let filter = |names: Option<&[&str]>, name: &str| match names { + Some(names) => names.contains(&name), + None => true, + }; + let mut check_enum_type_id = retain::keep_outer_enum( + metadata, + &mut |name| filter(specific_pallets, name), + &mut |name| filter(specific_runtimes, name), + ); + + let variants = |filter: bool| { + if !filter { + specific_pallets + } else { + None + } + }; fn get_enum_hash( registry: &PortableRegistry, id: u32, @@ -46,9 +70,12 @@ impl OuterEnumHashes { } let enums = &metadata.outer_enums; - let call_hash = get_enum_hash(metadata.types(), enums.call_enum_ty, only_these_variants); - let event_hash = get_enum_hash(metadata.types(), enums.event_enum_ty, only_these_variants); - let error_hash = get_enum_hash(metadata.types(), enums.error_enum_ty, only_these_variants); + let call_variants = variants(check_enum_type_id(enums.call_enum_ty)); + let call_hash = get_enum_hash(metadata.types(), enums.call_enum_ty, call_variants); + let event_variants = 
variants(check_enum_type_id(enums.event_enum_ty)); + let event_hash = get_enum_hash(metadata.types(), enums.event_enum_ty, event_variants); + let error_variants = variants(check_enum_type_id(enums.error_enum_ty)); + let error_hash = get_enum_hash(metadata.types(), enums.error_enum_ty, error_variants); Self { call_hash: (enums.call_enum_ty, call_hash),