diff --git a/node/src/chain_spec/mod.rs b/node/src/chain_spec/mod.rs index 720170e88f..014fb47bfb 100644 --- a/node/src/chain_spec/mod.rs +++ b/node/src/chain_spec/mod.rs @@ -33,10 +33,10 @@ use sp_runtime::Perbill; use node_runtime::{ membership, wasm_binary_unwrap, AuthorityDiscoveryConfig, BabeConfig, Balance, BalancesConfig, ContentDirectoryConfig, ContentDirectoryWorkingGroupConfig, ContentWorkingGroupConfig, - CouncilConfig, CouncilElectionConfig, ElectionParameters, ForumConfig, GrandpaConfig, - ImOnlineConfig, MembersConfig, Moment, ProposalsCodexConfig, SessionConfig, SessionKeys, - Signature, StakerStatus, StakingConfig, StorageWorkingGroupConfig, SudoConfig, SystemConfig, - VersionedStoreConfig, VersionedStorePermissionsConfig, DAYS, + CouncilConfig, CouncilElectionConfig, DistributionWorkingGroupConfig, ElectionParameters, + ForumConfig, GrandpaConfig, ImOnlineConfig, MembersConfig, Moment, ProposalsCodexConfig, + SessionConfig, SessionKeys, Signature, StakerStatus, StakingConfig, StorageWorkingGroupConfig, + SudoConfig, SystemConfig, VersionedStoreConfig, VersionedStorePermissionsConfig, DAYS, }; // Exported to be used by chain-spec-builder @@ -319,6 +319,13 @@ pub fn testnet_genesis( worker_application_human_readable_text_constraint: default_text_constraint, worker_exit_rationale_text_constraint: default_text_constraint, }), + working_group_Instance4: Some(DistributionWorkingGroupConfig { + phantom: Default::default(), + working_group_mint_capacity: 0, + opening_human_readable_text_constraint: default_text_constraint, + worker_application_human_readable_text_constraint: default_text_constraint, + worker_exit_rationale_text_constraint: default_text_constraint, + }), content_directory: Some({ ContentDirectoryConfig { class_by_id: vec![], diff --git a/runtime-modules/common/src/working_group.rs b/runtime-modules/common/src/working_group.rs index 37470366f6..35ef146ac0 100644 --- a/runtime-modules/common/src/working_group.rs +++ 
b/runtime-modules/common/src/working_group.rs @@ -20,4 +20,7 @@ pub enum WorkingGroup { /// Storage working group: working_group::Instance3. Content = 3isize, + + /// Distribution working group: working_group::Instance4. + Distribution = 4isize, } diff --git a/runtime-modules/storage/src/distribution_bucket_picker.rs b/runtime-modules/storage/src/distribution_bucket_picker.rs new file mode 100644 index 0000000000..400847346d --- /dev/null +++ b/runtime-modules/storage/src/distribution_bucket_picker.rs @@ -0,0 +1,94 @@ +#![warn(missing_docs)] + +use frame_support::traits::Randomness; +use sp_arithmetic::traits::Zero; +use sp_runtime::SaturatedConversion; +use sp_std::cell::RefCell; +use sp_std::collections::btree_set::BTreeSet; +use sp_std::marker::PhantomData; +use sp_std::rc::Rc; +use sp_std::vec::Vec; + +use crate::{DynamicBagType, Module, Trait}; + +// Generates distribution bucket IDs to assign to a new dynamic bag. +pub(crate) struct DistributionBucketPicker { + trait_marker: PhantomData, +} + +impl DistributionBucketPicker { + // Get random distribution buckets from distribution bucket families using the dynamic bag + // creation policy. + pub(crate) fn pick_distribution_buckets( + bag_type: DynamicBagType, + ) -> BTreeSet { + let creation_policy = Module::::get_dynamic_bag_creation_policy(bag_type); + + if creation_policy.no_distribution_buckets_required() { + return BTreeSet::new(); + } + + // Randomness for all bucket family. 
+ // let random_seed = RefCell::new(Module::::get_initial_random_seed()); + let random_seed = Rc::new(RefCell::new(Module::::get_initial_random_seed())); + + creation_policy + .families + .iter() + .filter_map(|(family_id, bucket_num)| { + Module::::ensure_distribution_bucket_family_exists(family_id) + .ok() + .map(|fam| (fam, bucket_num)) + }) + .map(|(family, bucket_num)| { + let filtered_ids = family + .distribution_buckets + .iter() + .filter_map(|(id, bucket)| bucket.accepting_new_bags.then(|| *id)) + .collect::>(); + + (filtered_ids, bucket_num) + }) + .map(|(bucket_ids, bucket_num)| { + Self::get_random_distribution_buckets(bucket_ids, *bucket_num, random_seed.clone()) + }) + .flatten() + .collect::>() + } + + // Get random bucket IDs from the ID collection. + pub fn get_random_distribution_buckets( + ids: Vec, + bucket_number: u32, + seed: Rc>, // seed: RefCell + ) -> BTreeSet { + let mut working_ids = ids; + let mut result_ids = BTreeSet::default(); + + for _ in 0..bucket_number { + if working_ids.is_empty() { + break; + } + + let current_seed = Self::advance_random_seed(seed.clone()); + + let upper_bound = working_ids.len() as u64 - 1; + let index = + Module::::random_index(current_seed.as_ref(), upper_bound).saturated_into(); + result_ids.insert(working_ids.remove(index)); + } + + result_ids + } + + // Changes the internal seed value of the container and returns new random seed. + fn advance_random_seed(seed: Rc>) -> T::Hash { + // Cannot create randomness in the initial block (Substrate error). + if >::block_number() == Zero::zero() { + return Module::::get_initial_random_seed(); + } + + let current_seed = *seed.borrow(); + seed.replace(T::Randomness::random(current_seed.as_ref())) + } +} diff --git a/runtime-modules/storage/src/lib.rs b/runtime-modules/storage/src/lib.rs index bf0018c150..3e22b18e79 100644 --- a/runtime-modules/storage/src/lib.rs +++ b/runtime-modules/storage/src/lib.rs @@ -5,12 +5,14 @@ //! 
- [spec](https://github.com/Joystream/joystream/issues/2224) //! - [utilization model](https://github.com/Joystream/joystream/issues/2359) //! -//! Pallet functionality could be split in three distinct groups: -//! - extrinsics for the working group leader +//! Pallet functionality could be split in five distinct groups: +//! - extrinsics for the storage working group leader +//! - extrinsics for the distribution group leader //! - extrinsics for the storage provider +//! - extrinsics for the distribution provider //! - public methods for the pallet integration //! -//! #### Working group leader extrinsics +//! #### Storage working group leader extrinsics //! - [create_storage_bucket](./struct.Module.html#method.create_storage_bucket) - creates storage //! bucket. //! - [update_storage_buckets_for_bag](./struct.Module.html#method.update_storage_buckets_for_bag) - @@ -24,7 +26,7 @@ //! - [remove_storage_bucket_operator](./struct.Module.html#method.remove_storage_bucket_operator) - //! removes storage bucket operator. //! - [update_uploading_blocked_status](./struct.Module.html#method.update_uploading_blocked_status) - -//! updates whether uploading is globally blocked. +//! updates global uploading status. //! - [update_data_size_fee](./struct.Module.html#method.update_data_size_fee) - updates size-based //! pricing of new objects uploaded. //! - [update_storage_buckets_per_bag_limit](./struct.Module.html#method.update_storage_buckets_per_bag_limit) - @@ -49,6 +51,38 @@ //! - [accept_pending_data_objects](./struct.Module.html#method.accept_pending_data_objects) - a //! storage provider signals that the data object was successfully uploaded to its storage. //! +//! #### Distribution working group leader extrinsics +//! - [create_distribution_bucket_family](./struct.Module.html#method.create_distribution_bucket_family) - +//! creates distribution bucket family. +//! 
- [delete_distribution_bucket_family](./struct.Module.html#method.delete_distribution_bucket_family) - +//! deletes distribution bucket family. +//! - [create_distribution_bucket](./struct.Module.html#method.create_distribution_bucket) - +//! creates distribution bucket. +//! - [delete_distribution_bucket](./struct.Module.html#method.delete_distribution_bucket) - +//! deletes distribution bucket. +//! - [update_distribution_bucket_status](./struct.Module.html#method.update_distribution_bucket_status) - +//! updates distribution bucket status (accepting new bags). +//! - [update_distribution_buckets_for_bag](./struct.Module.html#method.update_distribution_buckets_for_bag) - +//! updates distribution buckets for a bag. +//! - [distribution_buckets_per_bag_limit](./struct.Module.html#method.distribution_buckets_per_bag_limit) - +//! updates "Distribution buckets per bag" number limit. +//! - [update_distribution_bucket_mode](./struct.Module.html#method.distribution_buckets_per_bag_limit) - +//! updates "distributing" flag for a distribution bucket. +//! - [update_families_in_dynamic_bag_creation_policy](./struct.Module.html#method.update_families_in_dynamic_bag_creation_policy) - +//! updates distribution bucket families used in given dynamic bag creation policy. +//! - [invite_distribution_bucket_operator](./struct.Module.html#method.invite_distribution_bucket_operator) - +//! invites a distribution bucket operator. +//! - [cancel_distribution_bucket_operator_invite](./struct.Module.html#method.cancel_distribution_bucket_operator_invite) - +//! Cancels pending invite for a distribution bucket. +//! - [remove_distribution_bucket_operator](./struct.Module.html#method.remove_distribution_bucket_operator) - +//! Removes a distribution bucket operator. +//! +//! #### Distribution provider extrinsics +//! - [accept_distribution_bucket_invitation](./struct.Module.html#method.accept_distribution_bucket_invitation) - +//! Accepts pending invite for a distribution bucket. +//! 
- [set_distribution_operator_metadata](./struct.Module.html#method.set_distribution_operator_metadata) - +//! Set distribution operator metadata for the distribution bucket. +//! //! #### Public methods //! Public integration methods are exposed via the [DataObjectStorage](./trait.DataObjectStorage.html) //! - can_upload_data_objects @@ -67,10 +101,15 @@ //! - DataObjectDeletionPrize //! - BlacklistSizeLimit //! - StorageBucketsPerBagValueConstraint -//! - DefaultMemberDynamicBagCreationPolicy -//! - DefaultChannelDynamicBagCreationPolicy -//! - +//! - DefaultMemberDynamicBagNumberOfStorageBuckets +//! - DefaultChannelDynamicBagNumberOfStorageBuckets +//! - MaxDistributionBucketFamilyNumber +//! - MaxDistributionBucketNumberPerFamily +//! - DistributionBucketsPerBagValueConstraint +//! - MaxNumberOfPendingInvitationsPerDistributionBucket + +// Compiler demand. +#![recursion_limit = "256"] // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] // #![warn(missing_docs)] // Cannot be enabled by default because of the Substrate issue. @@ -81,6 +120,7 @@ mod tests; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; +pub(crate) mod distribution_bucket_picker; pub(crate) mod storage_bucket_picker; use codec::{Codec, Decode, Encode}; @@ -102,6 +142,7 @@ use common::constraints::BoundedValueConstraint; use common::origin::ActorOriginValidator; use common::working_group::WorkingGroup; +use distribution_bucket_picker::DistributionBucketPicker; use storage_bucket_picker::StorageBucketPicker; /// Public interface for the storage module. @@ -199,6 +240,16 @@ pub trait Trait: frame_system::Trait + balances::Trait + membership::Trait { + MaybeSerialize + PartialEq; + /// Distribution bucket family ID type. + type DistributionBucketFamilyId: Parameter + + Member + + BaseArithmetic + + Codec + + Default + + Copy + + MaybeSerialize + + PartialEq; + /// Channel ID type (part of the dynamic bag ID). 
type ChannelId: Parameter + Member @@ -209,6 +260,17 @@ pub trait Trait: frame_system::Trait + balances::Trait + membership::Trait { + MaybeSerialize + PartialEq; + /// Distribution bucket operator ID type (relationship between distribution bucket and + /// distribution operator). + type DistributionBucketOperatorId: Parameter + + Member + + BaseArithmetic + + Codec + + Default + + Copy + + MaybeSerialize + + PartialEq; + /// Defines max number of data objects per bag. type MaxNumberOfDataObjectsPerBag: Get; @@ -227,11 +289,14 @@ pub trait Trait: frame_system::Trait + balances::Trait + membership::Trait { /// "Storage buckets per bag" value constraint. type StorageBucketsPerBagValueConstraint: Get; - /// Defines the default dynamic bag creation policy for members. - type DefaultMemberDynamicBagCreationPolicy: Get; + /// "Distribution buckets per bag" value constraint. + type DistributionBucketsPerBagValueConstraint: Get; - /// Defines the default dynamic bag creation policy for channels. - type DefaultChannelDynamicBagCreationPolicy: Get; + /// Defines the default dynamic bag creation policy for members (storage bucket number). + type DefaultMemberDynamicBagNumberOfStorageBuckets: Get; + + /// Defines the default dynamic bag creation policy for channels (storage bucket number). + type DefaultChannelDynamicBagNumberOfStorageBuckets: Get; /// Defines max random iteration number (eg.: when picking the storage buckets). type MaxRandomIterationNumber: Get; @@ -239,17 +304,44 @@ pub trait Trait: frame_system::Trait + balances::Trait + membership::Trait { /// Something that provides randomness in the runtime. type Randomness: Randomness; - /// Demand the working group leader authorization. + /// Defines max allowed distribution bucket family number. + type MaxDistributionBucketFamilyNumber: Get; + + /// Defines max allowed distribution bucket number per family. 
+ type MaxDistributionBucketNumberPerFamily: Get; + + /// Max number of pending invitations per distribution bucket. + type MaxNumberOfPendingInvitationsPerDistributionBucket: Get; + + /// Demand the storage working group leader authorization. /// TODO: Refactor after merging with the Olympia release. - fn ensure_working_group_leader_origin(origin: Self::Origin) -> DispatchResult; + fn ensure_storage_working_group_leader_origin(origin: Self::Origin) -> DispatchResult; - /// Validate origin for the worker. + /// Validate origin for the storage worker. /// TODO: Refactor after merging with the Olympia release. - fn ensure_worker_origin(origin: Self::Origin, worker_id: WorkerId) -> DispatchResult; + fn ensure_storage_worker_origin( + origin: Self::Origin, + worker_id: WorkerId, + ) -> DispatchResult; + + /// Validate storage worker existence. + /// TODO: Refactor after merging with the Olympia release. + fn ensure_storage_worker_exists(worker_id: &WorkerId) -> DispatchResult; + + /// Demand the distribution group leader authorization. + /// TODO: Refactor after merging with the Olympia release. + fn ensure_distribution_working_group_leader_origin(origin: Self::Origin) -> DispatchResult; + + /// Validate origin for the distribution worker. + /// TODO: Refactor after merging with the Olympia release. + fn ensure_distribution_worker_origin( + origin: Self::Origin, + worker_id: WorkerId, + ) -> DispatchResult; - /// Validate worker existence. + /// Validate distribution worker existence. /// TODO: Refactor after merging with the Olympia release. - fn ensure_worker_exists(worker_id: &WorkerId) -> DispatchResult; + fn ensure_distribution_worker_exists(worker_id: &WorkerId) -> DispatchResult; } /// Operations with local pallet account. @@ -307,21 +399,34 @@ impl> ModuleAccount for ModuleAccoun /// It describes how many storage buckets should store the bag. 
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)] -pub struct DynamicBagCreationPolicy { +pub struct DynamicBagCreationPolicy { /// The number of storage buckets which should replicate the new bag. pub number_of_storage_buckets: u64, + + /// The set of distribution bucket families which should be sampled + /// to distribute bag, and for each the number of buckets in that family + /// which should be used. + pub families: BTreeMap, } -impl DynamicBagCreationPolicy { +impl DynamicBagCreationPolicy { // Verifies non-zero number of storage buckets. pub(crate) fn no_storage_buckets_required(&self) -> bool { self.number_of_storage_buckets == 0 } + + // Verifies non-zero number of required distribution buckets. + pub(crate) fn no_distribution_buckets_required(&self) -> bool { + self.families.iter().map(|(_, num)| num).sum::() == 0 + } } /// "Storage buckets per bag" value constraint type. pub type StorageBucketsPerBagValueConstraint = BoundedValueConstraint; +/// "Distribution buckets per bag" value constraint type. +pub type DistributionBucketsPerBagValueConstraint = BoundedValueConstraint; + /// Local module account handler. pub type StorageTreasury = ModuleAccountHandler::ModuleId>; @@ -358,14 +463,14 @@ pub struct DataObject { pub size: u64, } -/// Type alias for the BagObject. +/// Type alias for the BagRecord. pub type Bag = - BagObject<::StorageBucketId, ::DistributionBucketId, BalanceOf>; + BagRecord<::StorageBucketId, ::DistributionBucketId, BalanceOf>; /// Bag container. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)] -pub struct BagObject { +pub struct BagRecord { /// Associated storage buckets. pub stored_by: BTreeSet, @@ -383,10 +488,10 @@ pub struct BagObject { } impl - BagObject + BagRecord { // Add and/or remove storage buckets. 
- fn update_buckets( + fn update_storage_buckets( &mut self, add_buckets: &mut BTreeSet, remove_buckets: &BTreeSet, @@ -401,6 +506,23 @@ impl } } } + + // Add and/or remove distribution buckets. + fn update_distribution_buckets( + &mut self, + add_buckets: &mut BTreeSet, + remove_buckets: &BTreeSet, + ) { + if !add_buckets.is_empty() { + self.distributed_by.append(add_buckets); + } + + if !remove_buckets.is_empty() { + for bucket_id in remove_buckets.iter() { + self.distributed_by.remove(bucket_id); + } + } + } } /// Parameters for the data object creation. @@ -512,8 +634,8 @@ impl Into for DynamicBagIdType = UploadParametersObject< +/// Alias for the UploadParametersRecord +pub type UploadParameters = UploadParametersRecord< MemberId, ::ChannelId, ::AccountId, @@ -523,7 +645,7 @@ pub type UploadParameters = UploadParametersObject< /// Data wrapper structure. Helps passing the parameters to the `upload` extrinsic. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)] -pub struct UploadParametersObject { +pub struct UploadParametersRecord { /// Public key used authentication in upload to liaison. pub authentication_key: Vec, @@ -540,14 +662,14 @@ pub struct UploadParametersObject { pub expected_data_size_fee: Balance, } -/// Alias for the DynamicBagDeletionPrizeObject +/// Alias for the DynamicBagDeletionPrizeRecord pub type DynamicBagDeletionPrize = - DynamicBagDeletionPrizeObject<::AccountId, BalanceOf>; + DynamicBagDeletionPrizeRecord<::AccountId, BalanceOf>; /// Deletion prize data for the dynamic bag. Requires on the dynamic bag creation. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)] -pub struct DynamicBagDeletionPrizeObject { +pub struct DynamicBagDeletionPrizeRecord { /// Account ID to withdraw the deletion prize. 
pub account_id: AccountId, @@ -685,6 +807,89 @@ impl BagUpdate { } } +/// Type alias for the DistributionBucketFamilyRecord. +pub type DistributionBucketFamily = + DistributionBucketFamilyRecord<::DistributionBucketId, WorkerId>; + +/// Distribution bucket family. +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)] +pub struct DistributionBucketFamilyRecord { + /// Distribution bucket map. + pub distribution_buckets: BTreeMap>, +} + +impl + DistributionBucketFamilyRecord +{ + // Add and/or remove distribution buckets assignments to bags. + fn change_bag_assignments( + &mut self, + add_buckets: &BTreeSet, + remove_buckets: &BTreeSet, + ) { + for bucket_id in add_buckets.iter() { + if let Some(bucket) = self.distribution_buckets.get_mut(bucket_id) { + bucket.register_bag_assignment(); + } + } + + for bucket_id in remove_buckets.iter() { + if let Some(bucket) = self.distribution_buckets.get_mut(bucket_id) { + bucket.unregister_bag_assignment(); + } + } + } + + // Checks inner buckets for bag assignment number. Returns true only if all 'assigned_bags' are + // zero. + fn no_bags_assigned(&self) -> bool { + self.distribution_buckets + .values() + .all(|b| b.no_bags_assigned()) + } +} + +/// Type alias for the DistributionBucketRecord. +pub type DistributionBucket = DistributionBucketRecord>; + +/// Distribution bucket. +#[cfg_attr(feature = "std", derive(Serialize, Deserialize))] +#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, Debug)] +pub struct DistributionBucketRecord { + /// Distribution bucket accepts new bags. + pub accepting_new_bags: bool, + + /// Distribution bucket serves objects. + pub distributing: bool, + + /// Pending invitations for workers to distribute the bucket. + pub pending_invitations: BTreeSet, + + /// Active operators to distribute the bucket. + pub operators: BTreeSet, + + /// Number of assigned bags. 
+ pub assigned_bags: u64, +} + +impl DistributionBucketRecord { + // Increment the assigned bags number. + fn register_bag_assignment(&mut self) { + self.assigned_bags = self.assigned_bags.saturating_add(1); + } + + // Decrement the assigned bags number. + fn unregister_bag_assignment(&mut self) { + self.assigned_bags = self.assigned_bags.saturating_sub(1); + } + + // Checks the bag assignment number. Returns true if it equals zero. + fn no_bags_assigned(&self) -> bool { + self.assigned_bags == 0 + } +} + decl_storage! { trait Store for Module as Storage { /// Defines whether all new uploads blocked @@ -723,12 +928,31 @@ decl_storage! { /// DynamicBagCreationPolicy by bag type storage map. pub DynamicBagCreationPolicies get (fn dynamic_bag_creation_policy): - map hasher(blake2_128_concat) DynamicBagType => DynamicBagCreationPolicy; + map hasher(blake2_128_concat) DynamicBagType => + DynamicBagCreationPolicy; /// 'Data objects for bags' storage double map. pub DataObjectsById get (fn data_object_by_id): double_map hasher(blake2_128_concat) BagId, hasher(blake2_128_concat) T::DataObjectId => DataObject>; + + /// Distribution bucket family id counter. Starts at zero. + pub NextDistributionBucketFamilyId get(fn next_distribution_bucket_family_id): + T::DistributionBucketFamilyId; + + /// Distribution bucket families. + pub DistributionBucketFamilyById get (fn distribution_bucket_family_by_id): + map hasher(blake2_128_concat) T::DistributionBucketFamilyId => + DistributionBucketFamily; + + /// Total number of distribution bucket families in the system. + pub DistributionBucketFamilyNumber get(fn distribution_bucket_family_number): u64; + + /// Distribution bucket id counter. Starts at zero. + pub NextDistributionBucketId get(fn next_distribution_bucket_id): T::DistributionBucketId; + + /// "Distribution buckets per bag" number limit. + pub DistributionBucketsPerBagLimit get (fn distribution_buckets_per_bag_limit): u64; } } @@ -744,6 +968,8 @@ decl_event! 
{ DynamicBagId = DynamicBagId, ::AccountId, Balance = BalanceOf, + ::DistributionBucketFamilyId, + ::DistributionBucketId, { /// Emits on creating the storage bucket. /// Params @@ -868,7 +1094,7 @@ decl_event! { /// Params /// - dynamic bag ID /// - optional DynamicBagDeletionPrize instance - DynamicBagCreated(DynamicBagId, Option>), + DynamicBagCreated(DynamicBagId, Option>), /// Emits on changing the voucher for a storage bucket. /// Params @@ -893,6 +1119,126 @@ decl_event! { /// - new total objects size /// - new total objects number BagObjectsChanged(BagId, u64, u64), + + /// Emits on creating distribution bucket family. + /// Params + /// - distribution family bucket ID + DistributionBucketFamilyCreated(DistributionBucketFamilyId), + + /// Emits on deleting distribution bucket family. + /// Params + /// - distribution family bucket ID + DistributionBucketFamilyDeleted(DistributionBucketFamilyId), + + /// Emits on creating distribution bucket. + /// Params + /// - distribution bucket family ID + /// - accepting new bags + /// - distribution bucket ID + DistributionBucketCreated(DistributionBucketFamilyId, bool, DistributionBucketId), + + /// Emits on storage bucket status update (accepting new bags). + /// Params + /// - distribution bucket family ID + /// - distribution bucket ID + /// - new status (accepting new bags) + DistributionBucketStatusUpdated(DistributionBucketFamilyId, DistributionBucketId, bool), + + /// Emits on deleting distribution bucket. + /// Params + /// - distribution bucket family ID + /// - distribution bucket ID + DistributionBucketDeleted(DistributionBucketFamilyId, DistributionBucketId), + + /// Emits on updating distribution buckets for bag. 
+ /// Params + /// - bag ID + /// - storage buckets to add ID collection + /// - storage buckets to remove ID collection + DistributionBucketsUpdatedForBag( + BagId, + DistributionBucketFamilyId, + BTreeSet, + BTreeSet + ), + + /// Emits on changing the "Distribution buckets per bag" number limit. + /// Params + /// - new limit + DistributionBucketsPerBagLimitUpdated(u64), + + /// Emits on storage bucket mode update (distributing flag). + /// Params + /// - distribution bucket family ID + /// - distribution bucket ID + /// - distributing + DistributionBucketModeUpdated(DistributionBucketFamilyId, DistributionBucketId, bool), + + /// Emits on dynamic bag creation policy update (distribution bucket families). + /// Params + /// - dynamic bag type + /// - families and bucket numbers + FamiliesInDynamicBagCreationPolicyUpdated( + DynamicBagType, + BTreeMap + ), + + /// Emits on creating a distribution bucket invitation for the operator. + /// Params + /// - distribution bucket family ID + /// - distribution bucket ID + /// - worker ID + DistributionBucketOperatorInvited( + DistributionBucketFamilyId, + DistributionBucketId, + WorkerId, + ), + + /// Emits on canceling a distribution bucket invitation for the operator. + /// Params + /// - distribution bucket family ID + /// - distribution bucket ID + /// - operator worker ID + DistributionBucketInvitationCancelled( + DistributionBucketFamilyId, + DistributionBucketId, + WorkerId, + ), + + /// Emits on accepting a distribution bucket invitation for the operator. + /// Params + /// - worker ID + /// - distribution bucket family ID + /// - distribution bucket ID + DistributionBucketInvitationAccepted( + WorkerId, + DistributionBucketFamilyId, + DistributionBucketId, + ), + + /// Emits on setting the metadata by a distribution bucket operator. 
+ /// Params + /// - worker ID + /// - distribution bucket family ID + /// - distribution bucket ID + /// - metadata + DistributionBucketMetadataSet( + WorkerId, + DistributionBucketFamilyId, + DistributionBucketId, + Vec + ), + + /// Emits on the distribution bucket operator removal. + /// Params + /// - distribution bucket family ID + /// - distribution bucket ID + /// - distribution bucket operator ID + DistributionBucketOperatorRemoved( + DistributionBucketFamilyId, + DistributionBucketId, + WorkerId + ), } } @@ -1012,6 +1358,60 @@ decl_error! { /// Cannot delete non empty dynamic bag. CannotDeleteNonEmptyDynamicBag, + + /// Max distribution bucket family number limit exceeded. + MaxDistributionBucketFamilyNumberLimitExceeded, + + /// Distribution bucket family doesn't exist. + DistributionBucketFamilyDoesntExist, + + /// Max distribution bucket number per family limit exceeded. + MaxDistributionBucketNumberPerFamilyLimitExceeded, + + /// Distribution bucket doesn't exist. + DistributionBucketDoesntExist, + + /// Distribution bucket id collections are empty. + DistributionBucketIdCollectionsAreEmpty, + + /// Distribution bucket doesn't accept new bags. + DistributionBucketDoesntAcceptNewBags, + + /// Max distribution bucket number per bag limit exceeded. + MaxDistributionBucketNumberPerBagLimitExceeded, + + /// Distribution bucket is not bound to a bag. + DistributionBucketIsNotBoundToBag, + + /// Distribution bucket is bound to a bag. + DistributionBucketIsBoundToBag, + + /// The new `DistributionBucketsPerBagLimit` number is too low. + DistributionBucketsPerBagLimitTooLow, + + /// The new `DistributionBucketsPerBagLimit` number is too high. + DistributionBucketsPerBagLimitTooHigh, + + /// Distribution provider operator doesn't exist. + DistributionProviderOperatorDoesntExist, + + /// Distribution provider operator already invited. + DistributionProviderOperatorAlreadyInvited, + + /// Distribution provider operator already set. 
+ DistributionProviderOperatorSet, + + /// No distribution bucket invitation. + NoDistributionBucketInvitation, + + /// Invalid operations: must be a distribution provider operator for a bucket. + MustBeDistributionProviderOperatorForBucket, + + /// Max number of pending invitations limit for a distribution bucket reached. + MaxNumberOfPendingInvitationsLimitForDistributionBucketReached, + + /// Distribution family bound to a bag creation policy. + DistributionFamilyBoundToBagCreationPolicy, } } @@ -1037,13 +1437,30 @@ decl_module! { const StorageBucketsPerBagValueConstraint: StorageBucketsPerBagValueConstraint = T::StorageBucketsPerBagValueConstraint::get(); - /// Exports const - the default dynamic bag creation policy for members. - const DefaultMemberDynamicBagCreationPolicy: DynamicBagCreationPolicy = - T::DefaultMemberDynamicBagCreationPolicy::get(); + /// Exports const - the default dynamic bag creation policy for members (storage bucket + /// number). + const DefaultMemberDynamicBagNumberOfStorageBuckets: u64 = + T::DefaultMemberDynamicBagNumberOfStorageBuckets::get(); - /// Exports const - the default dynamic bag creation policy for channels. - const DefaultChannelDynamicBagCreationPolicy: DynamicBagCreationPolicy = - T::DefaultChannelDynamicBagCreationPolicy::get(); + /// Exports const - the default dynamic bag creation policy for channels (storage bucket + /// number). + const DefaultChannelDynamicBagNumberOfStorageBuckets: u64 = + T::DefaultChannelDynamicBagNumberOfStorageBuckets::get(); + + /// Exports const - max allowed distribution bucket family number. + const MaxDistributionBucketFamilyNumber: u64 = T::MaxDistributionBucketFamilyNumber::get(); + + /// Exports const - max allowed distribution bucket number per family. + const MaxDistributionBucketNumberPerFamily: u64 = + T::MaxDistributionBucketNumberPerFamily::get(); + + /// Exports const - "Distribution buckets per bag" value constraint. 
+ const DistributionBucketsPerBagValueConstraint: StorageBucketsPerBagValueConstraint = + T::DistributionBucketsPerBagValueConstraint::get(); + + /// Exports const - max number of pending invitations per distribution bucket. + const MaxNumberOfPendingInvitationsPerDistributionBucket: u64 = + T::MaxNumberOfPendingInvitationsPerDistributionBucket::get(); // ===== Storage Lead actions ===== @@ -1053,7 +1470,7 @@ decl_module! { origin, storage_bucket_id: T::StorageBucketId, ){ - T::ensure_working_group_leader_origin(origin)?; + T::ensure_storage_working_group_leader_origin(origin)?; let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?; @@ -1073,10 +1490,10 @@ decl_module! { ); } - /// Update whether uploading is globally blocked. + /// Updates global uploading flag. #[weight = 10_000_000] // TODO: adjust weight pub fn update_uploading_blocked_status(origin, new_status: bool) { - T::ensure_working_group_leader_origin(origin)?; + T::ensure_storage_working_group_leader_origin(origin)?; // // == MUTATION SAFE == @@ -1090,7 +1507,7 @@ decl_module! { /// Updates size-based pricing of new objects uploaded. #[weight = 10_000_000] // TODO: adjust weight pub fn update_data_size_fee(origin, new_data_size_fee: BalanceOf) { - T::ensure_working_group_leader_origin(origin)?; + T::ensure_storage_working_group_leader_origin(origin)?; // // == MUTATION SAFE == @@ -1104,7 +1521,7 @@ decl_module! { /// Updates "Storage buckets per bag" number limit. #[weight = 10_000_000] // TODO: adjust weight pub fn update_storage_buckets_per_bag_limit(origin, new_limit: u64) { - T::ensure_working_group_leader_origin(origin)?; + T::ensure_storage_working_group_leader_origin(origin)?; T::StorageBucketsPerBagValueConstraint::get().ensure_valid( new_limit, @@ -1128,7 +1545,7 @@ decl_module! 
{ new_objects_size: u64, new_objects_number: u64, ) { - T::ensure_working_group_leader_origin(origin)?; + T::ensure_storage_working_group_leader_origin(origin)?; // // == MUTATION SAFE == @@ -1149,7 +1566,7 @@ decl_module! { dynamic_bag_type: DynamicBagType, number_of_storage_buckets: u64, ) { - T::ensure_working_group_leader_origin(origin)?; + T::ensure_storage_working_group_leader_origin(origin)?; // // == MUTATION SAFE == @@ -1159,7 +1576,7 @@ decl_module! { creation_policy.number_of_storage_buckets = number_of_storage_buckets; - DynamicBagCreationPolicies::insert(dynamic_bag_type, creation_policy); + DynamicBagCreationPolicies::::insert(dynamic_bag_type, creation_policy); Self::deposit_event( RawEvent::NumberOfStorageBucketsInDynamicBagCreationPolicyUpdated( @@ -1176,7 +1593,7 @@ decl_module! { remove_hashes: BTreeSet, add_hashes: BTreeSet ){ - T::ensure_working_group_leader_origin(origin)?; + T::ensure_storage_working_group_leader_origin(origin)?; // Get only hashes that exist in the blacklist. let verified_remove_hashes = Self::get_existing_hashes(&remove_hashes); @@ -1219,7 +1636,7 @@ decl_module! { size_limit: u64, objects_limit: u64, ) { - T::ensure_working_group_leader_origin(origin)?; + T::ensure_storage_working_group_leader_origin(origin)?; let voucher = Voucher { size_limit, @@ -1268,7 +1685,7 @@ decl_module! { add_buckets: BTreeSet, remove_buckets: BTreeSet, ) { - T::ensure_working_group_leader_origin(origin)?; + T::ensure_storage_working_group_leader_origin(origin)?; Self::ensure_bag_exists(&bag_id)?; @@ -1299,7 +1716,7 @@ decl_module! { } Bags::::mutate(&bag_id, |bag| { - bag.update_buckets(&mut add_buckets.clone(), &remove_buckets); + bag.update_storage_buckets(&mut add_buckets.clone(), &remove_buckets); }); Self::deposit_event( @@ -1310,7 +1727,7 @@ decl_module! { /// Cancel pending storage bucket invite. An invitation must be pending. 
#[weight = 10_000_000] // TODO: adjust weight pub fn cancel_storage_bucket_operator_invite(origin, storage_bucket_id: T::StorageBucketId){ - T::ensure_working_group_leader_origin(origin)?; + T::ensure_storage_working_group_leader_origin(origin)?; let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?; @@ -1336,7 +1753,7 @@ decl_module! { storage_bucket_id: T::StorageBucketId, operator_id: WorkerId, ){ - T::ensure_working_group_leader_origin(origin)?; + T::ensure_storage_working_group_leader_origin(origin)?; let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?; @@ -1358,13 +1775,13 @@ decl_module! { ); } - /// Removes storage bucket operator. Must be invited. + /// Removes storage bucket operator. #[weight = 10_000_000] // TODO: adjust weight pub fn remove_storage_bucket_operator( origin, storage_bucket_id: T::StorageBucketId, ){ - T::ensure_working_group_leader_origin(origin)?; + T::ensure_storage_working_group_leader_origin(origin)?; let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?; @@ -1391,7 +1808,7 @@ decl_module! { storage_bucket_id: T::StorageBucketId, accepting_new_bags: bool ) { - T::ensure_working_group_leader_origin(origin)?; + T::ensure_storage_working_group_leader_origin(origin)?; Self::ensure_storage_bucket_exists(&storage_bucket_id)?; @@ -1416,7 +1833,7 @@ decl_module! { new_objects_size_limit: u64, new_objects_number_limit: u64, ) { - T::ensure_working_group_leader_origin(origin)?; + T::ensure_storage_working_group_leader_origin(origin)?; Self::ensure_storage_bucket_exists(&storage_bucket_id)?; @@ -1460,7 +1877,7 @@ decl_module! { worker_id: WorkerId, storage_bucket_id: T::StorageBucketId ) { - T::ensure_worker_origin(origin, worker_id)?; + T::ensure_storage_worker_origin(origin, worker_id)?; let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?; @@ -1487,7 +1904,7 @@ decl_module! 
{ storage_bucket_id: T::StorageBucketId, metadata: Vec ) { - T::ensure_worker_origin(origin, worker_id)?; + T::ensure_storage_worker_origin(origin, worker_id)?; let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?; @@ -1511,7 +1928,7 @@ decl_module! { bag_id: BagId, data_objects: BTreeSet, ) { - T::ensure_worker_origin(origin, worker_id)?; + T::ensure_storage_worker_origin(origin, worker_id)?; let bucket = Self::ensure_storage_bucket_exists(&storage_bucket_id)?; @@ -1545,6 +1962,494 @@ decl_module! { ) ); } + + // ===== Distribution Lead actions ===== + + /// Create a distribution bucket family. + #[weight = 10_000_000] // TODO: adjust weight + pub fn create_distribution_bucket_family(origin) { + T::ensure_distribution_working_group_leader_origin(origin)?; + + ensure!( + Self::distribution_bucket_family_number() < + T::MaxDistributionBucketFamilyNumber::get(), + Error::::MaxDistributionBucketFamilyNumberLimitExceeded + ); + + // + // == MUTATION SAFE == + // + + Self::increment_distribution_family_number(); + + let family = DistributionBucketFamily::::default(); + + let family_id = Self::next_distribution_bucket_family_id(); + + >::put(family_id + One::one()); + + >::insert(family_id, family); + + Self::deposit_event(RawEvent::DistributionBucketFamilyCreated(family_id)); + } + + /// Deletes a distribution bucket family. + #[weight = 10_000_000] // TODO: adjust weight + pub fn delete_distribution_bucket_family(origin, family_id: T::DistributionBucketFamilyId) { + T::ensure_distribution_working_group_leader_origin(origin)?; + + let family = Self::ensure_distribution_bucket_family_exists(&family_id)?; + + // Check that no assigned bags left. 
+ ensure!(family.no_bags_assigned(), Error::::DistributionBucketIsBoundToBag); + + Self::check_dynamic_bag_creation_policy_for_dependencies( + &family_id, + DynamicBagType::Member + )?; + Self::check_dynamic_bag_creation_policy_for_dependencies( + &family_id, + DynamicBagType::Channel + )?; + + // + // == MUTATION SAFE == + // + + Self::decrement_distribution_family_number(); + + >::remove(family_id); + + Self::deposit_event(RawEvent::DistributionBucketFamilyDeleted(family_id)); + } + + /// Create a distribution bucket. + #[weight = 10_000_000] // TODO: adjust weight + pub fn create_distribution_bucket( + origin, + family_id: T::DistributionBucketFamilyId, + accepting_new_bags: bool, + ) { + T::ensure_distribution_working_group_leader_origin(origin)?; + + let family = Self::ensure_distribution_bucket_family_exists(&family_id)?; + + ensure!( + family.distribution_buckets.len().saturated_into::() < + T::MaxDistributionBucketNumberPerFamily::get(), + Error::::MaxDistributionBucketNumberPerFamilyLimitExceeded + ); + + // + // == MUTATION SAFE == + // + + let bucket = DistributionBucket:: { + accepting_new_bags, + distributing: true, + pending_invitations: BTreeSet::new(), + operators: BTreeSet::new(), + assigned_bags: 0, + }; + + let bucket_id = Self::next_distribution_bucket_id(); + + >::mutate(family_id, |family|{ + family.distribution_buckets.insert(bucket_id, bucket); + }); + + >::put(bucket_id + One::one()); + + Self::deposit_event( + RawEvent::DistributionBucketCreated(family_id, accepting_new_bags, bucket_id) + ); + } + + /// Updates a distribution bucket 'accepts new bags' flag. 
+ #[weight = 10_000_000] // TODO: adjust weight + pub fn update_distribution_bucket_status( + origin, + family_id: T::DistributionBucketFamilyId, + distribution_bucket_id: T::DistributionBucketId, + accepting_new_bags: bool + ) { + T::ensure_distribution_working_group_leader_origin(origin)?; + + let family = Self::ensure_distribution_bucket_family_exists(&family_id)?; + Self::ensure_distribution_bucket_exists( + &family, + &distribution_bucket_id + )?; + + // + // == MUTATION SAFE == + // + + >::mutate(family_id, |family| { + if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) { + bucket.accepting_new_bags = accepting_new_bags; + } + }); + + Self::deposit_event( + RawEvent::DistributionBucketStatusUpdated( + family_id, + distribution_bucket_id, + accepting_new_bags + ) + ); + } + + /// Delete distribution bucket. Must be empty. + #[weight = 10_000_000] // TODO: adjust weight + pub fn delete_distribution_bucket( + origin, + family_id: T::DistributionBucketFamilyId, + distribution_bucket_id: T::DistributionBucketId, + ){ + T::ensure_distribution_working_group_leader_origin(origin)?; + + let family = Self::ensure_distribution_bucket_family_exists(&family_id)?; + let bucket = Self::ensure_distribution_bucket_exists(&family, &distribution_bucket_id)?; + + // Check that no assigned bags left. + ensure!(bucket.no_bags_assigned(), Error::::DistributionBucketIsBoundToBag); + + // Check that all operators were removed. + ensure!(bucket.operators.is_empty(), Error::::DistributionProviderOperatorSet); + + // + // == MUTATION SAFE == + // + + >::mutate(family_id, |family| { + family.distribution_buckets.remove(&distribution_bucket_id); + }); + + Self::deposit_event( + RawEvent::DistributionBucketDeleted(family_id, distribution_bucket_id) + ); + } + + /// Updates distribution buckets for a bag. 
+ #[weight = 10_000_000] // TODO: adjust weight + pub fn update_distribution_buckets_for_bag( + origin, + bag_id: BagId, + family_id: T::DistributionBucketFamilyId, + add_buckets: BTreeSet, + remove_buckets: BTreeSet, + ) { + T::ensure_distribution_working_group_leader_origin(origin)?; + + Self::validate_update_distribution_buckets_for_bag_params( + &bag_id, + &family_id, + &add_buckets, + &remove_buckets, + )?; + + // + // == MUTATION SAFE == + // + + Bags::::mutate(&bag_id, |bag| { + bag.update_distribution_buckets(&mut add_buckets.clone(), &remove_buckets); + }); + + >::mutate(family_id, |family| { + family.change_bag_assignments(&add_buckets, &remove_buckets); + }); + + Self::deposit_event( + RawEvent::DistributionBucketsUpdatedForBag( + bag_id, + family_id, + add_buckets, + remove_buckets + ) + ); + } + + /// Updates "Distribution buckets per bag" number limit. + #[weight = 10_000_000] // TODO: adjust weight + pub fn update_distribution_buckets_per_bag_limit(origin, new_limit: u64) { + T::ensure_distribution_working_group_leader_origin(origin)?; + + T::DistributionBucketsPerBagValueConstraint::get().ensure_valid( + new_limit, + Error::::DistributionBucketsPerBagLimitTooLow, + Error::::DistributionBucketsPerBagLimitTooHigh, + )?; + + // + // == MUTATION SAFE == + // + + DistributionBucketsPerBagLimit::put(new_limit); + + Self::deposit_event(RawEvent::DistributionBucketsPerBagLimitUpdated(new_limit)); + } + + /// Updates the 'distributing' flag for the distribution bucket. 
+ #[weight = 10_000_000] // TODO: adjust weight + pub fn update_distribution_bucket_mode( + origin, + family_id: T::DistributionBucketFamilyId, + distribution_bucket_id: T::DistributionBucketId, + distributing: bool + ) { + T::ensure_distribution_working_group_leader_origin(origin)?; + + let family = Self::ensure_distribution_bucket_family_exists(&family_id)?; + Self::ensure_distribution_bucket_exists( + &family, + &distribution_bucket_id + )?; + + // + // == MUTATION SAFE == + // + + >::mutate(family_id, |family| { + if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) { + bucket.distributing = distributing; + } + }); + + Self::deposit_event( + RawEvent::DistributionBucketModeUpdated( + family_id, + distribution_bucket_id, + distributing + ) + ); + } + + /// Update number of distributed buckets used in given dynamic bag creation policy. + #[weight = 10_000_000] // TODO: adjust weight + pub fn update_families_in_dynamic_bag_creation_policy( + origin, + dynamic_bag_type: DynamicBagType, + families: BTreeMap + ) { + T::ensure_distribution_working_group_leader_origin(origin)?; + + Self::validate_update_families_in_dynamic_bag_creation_policy_params(&families)?; + + // + // == MUTATION SAFE == + // + + DynamicBagCreationPolicies::::mutate(dynamic_bag_type, |creation_policy| { + creation_policy.families = families.clone(); + }); + + Self::deposit_event( + RawEvent::FamiliesInDynamicBagCreationPolicyUpdated( + dynamic_bag_type, + families + ) + ); + } + + /// Invite an operator. Must be missing. 
+ #[weight = 10_000_000] // TODO: adjust weight + pub fn invite_distribution_bucket_operator( + origin, + family_id: T::DistributionBucketFamilyId, + distribution_bucket_id: T::DistributionBucketId, + operator_worker_id: WorkerId + ) { + T::ensure_distribution_working_group_leader_origin(origin)?; + + let family = Self::ensure_distribution_bucket_family_exists(&family_id)?; + let bucket = Self::ensure_distribution_bucket_exists( + &family, + &distribution_bucket_id + )?; + + Self::ensure_distribution_provider_can_be_invited(&bucket, &operator_worker_id)?; + + // + // == MUTATION SAFE == + // + + >::mutate(family_id, |family| { + if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) { + bucket.pending_invitations.insert(operator_worker_id); + } + }); + + Self::deposit_event( + RawEvent::DistributionBucketOperatorInvited( + family_id, + distribution_bucket_id, + operator_worker_id, + ) + ); + } + + /// Cancel pending invite. Must be pending. + #[weight = 10_000_000] // TODO: adjust weight + pub fn cancel_distribution_bucket_operator_invite( + origin, + family_id: T::DistributionBucketFamilyId, + distribution_bucket_id: T::DistributionBucketId, + operator_worker_id: WorkerId + ) { + T::ensure_distribution_working_group_leader_origin(origin)?; + + let family = Self::ensure_distribution_bucket_family_exists(&family_id)?; + let bucket = Self::ensure_distribution_bucket_exists( + &family, + &distribution_bucket_id + )?; + + ensure!( + bucket.pending_invitations.contains(&operator_worker_id), + Error::::NoDistributionBucketInvitation + ); + + // + // == MUTATION SAFE == + // + + >::mutate(family_id, |family| { + if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) { + bucket.pending_invitations.remove(&operator_worker_id); + } + }); + + Self::deposit_event( + RawEvent::DistributionBucketInvitationCancelled( + family_id, + distribution_bucket_id, + operator_worker_id + ) + ); + } + + /// Removes distribution bucket 
operator. + #[weight = 10_000_000] // TODO: adjust weight + pub fn remove_distribution_bucket_operator( + origin, + family_id: T::DistributionBucketFamilyId, + distribution_bucket_id: T::DistributionBucketId, + operator_worker_id: WorkerId, + ){ + T::ensure_distribution_working_group_leader_origin(origin)?; + + let family = Self::ensure_distribution_bucket_family_exists(&family_id)?; + let bucket = Self::ensure_distribution_bucket_exists( + &family, + &distribution_bucket_id + )?; + + ensure!( + bucket.operators.contains(&operator_worker_id), + Error::::MustBeDistributionProviderOperatorForBucket + ); + + + // + // == MUTATION SAFE == + // + + >::mutate(family_id, |family| { + if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) { + bucket.operators.remove(&operator_worker_id); + } + }); + + Self::deposit_event( + RawEvent::DistributionBucketOperatorRemoved( + family_id, + distribution_bucket_id, + operator_worker_id + ) + ); + } + + // ===== Distribution Operator actions ===== + + /// Accept pending invite. 
+ #[weight = 10_000_000] // TODO: adjust weight + pub fn accept_distribution_bucket_invitation( + origin, + worker_id: WorkerId, + family_id: T::DistributionBucketFamilyId, + distribution_bucket_id: T::DistributionBucketId, + + ) { + T::ensure_distribution_worker_origin(origin, worker_id)?; + + let family = Self::ensure_distribution_bucket_family_exists(&family_id)?; + let bucket = Self::ensure_distribution_bucket_exists( + &family, + &distribution_bucket_id + )?; + + ensure!( + bucket.pending_invitations.contains(&worker_id), + Error::::NoDistributionBucketInvitation + ); + + // + // == MUTATION SAFE == + // + + >::mutate(family_id, |family| { + if let Some(bucket) = family.distribution_buckets.get_mut(&distribution_bucket_id) { + bucket.pending_invitations.remove(&worker_id); + bucket.operators.insert(worker_id); + } + }); + + Self::deposit_event( + RawEvent::DistributionBucketInvitationAccepted( + worker_id, + family_id, + distribution_bucket_id, + ) + ); + } + + /// Set distribution operator metadata for the distribution bucket. 
+ #[weight = 10_000_000] // TODO: adjust weight + pub fn set_distribution_operator_metadata( + origin, + worker_id: WorkerId, + family_id: T::DistributionBucketFamilyId, + distribution_bucket_id: T::DistributionBucketId, + metadata: Vec, + ) { + T::ensure_distribution_worker_origin(origin, worker_id)?; + + let family = Self::ensure_distribution_bucket_family_exists(&family_id)?; + let bucket = Self::ensure_distribution_bucket_exists( + &family, + &distribution_bucket_id + )?; + + ensure!( + bucket.operators.contains(&worker_id), + Error::::MustBeDistributionProviderOperatorForBucket + ); + + // + // == MUTATION SAFE == + // + + Self::deposit_event( + RawEvent::DistributionBucketMetadataSet( + worker_id, + family_id, + distribution_bucket_id, + metadata + ) + ); + } } } @@ -1727,8 +2632,6 @@ impl DataObjectStorage for Module { ) -> DispatchResult { Self::validate_create_dynamic_bag_params(&dynamic_bag_id, &deletion_prize)?; - let bag_id: BagId = dynamic_bag_id.clone().into(); - // // == MUTATION SAFE == // @@ -1737,15 +2640,20 @@ impl DataObjectStorage for Module { >::deposit(&deletion_prize.account_id, deletion_prize.prize)?; } - let storage_buckets = - Self::pick_storage_buckets_for_dynamic_bag(dynamic_bag_id.clone().into()); + let bag_type: DynamicBagType = dynamic_bag_id.clone().into(); + + let storage_buckets = Self::pick_storage_buckets_for_dynamic_bag(bag_type); + let distribution_buckets = Self::pick_distribution_buckets_for_dynamic_bag(bag_type); let bag = Bag:: { stored_by: storage_buckets, deletion_prize: deletion_prize.clone().map(|dp| dp.prize), + distributed_by: distribution_buckets, ..Default::default() }; + let bag_id: BagId = dynamic_bag_id.clone().into(); + >::insert(&bag_id, bag); Self::deposit_event(RawEvent::DynamicBagCreated(dynamic_bag_id, deletion_prize)); @@ -1762,6 +2670,18 @@ impl DataObjectStorage for Module { } impl Module { + // Increment distribution family number in the storage. 
+ fn increment_distribution_family_number() { + DistributionBucketFamilyNumber::put(Self::distribution_bucket_family_number() + 1); + } + + // Decrement distribution family number in the storage. No effect on zero number. + fn decrement_distribution_family_number() { + if Self::distribution_bucket_family_number() > 0 { + DistributionBucketFamilyNumber::put(Self::distribution_bucket_family_number() - 1); + } + } + // Validates dynamic bag creation params and conditions. fn validate_create_dynamic_bag_params( dynamic_bag_id: &DynamicBagId, @@ -2324,38 +3244,50 @@ impl Module { } } - // Selects storage bucket ID sets to assign to the storage bucket. + // Selects storage bucket ID sets to assign to the dynamic bag. pub(crate) fn pick_storage_buckets_for_dynamic_bag( bag_type: DynamicBagType, ) -> BTreeSet { StorageBucketPicker::::pick_storage_buckets(bag_type) } + // Selects distributed bucket ID sets to assign to the dynamic bag. + pub(crate) fn pick_distribution_buckets_for_dynamic_bag( + bag_type: DynamicBagType, + ) -> BTreeSet { + DistributionBucketPicker::::pick_distribution_buckets(bag_type) + } + // Get default dynamic bag policy by bag type. fn get_default_dynamic_bag_creation_policy( bag_type: DynamicBagType, - ) -> DynamicBagCreationPolicy { - match bag_type { - DynamicBagType::Member => T::DefaultMemberDynamicBagCreationPolicy::get(), - DynamicBagType::Channel => T::DefaultChannelDynamicBagCreationPolicy::get(), + ) -> DynamicBagCreationPolicy { + let number_of_storage_buckets = match bag_type { + DynamicBagType::Member => T::DefaultMemberDynamicBagNumberOfStorageBuckets::get(), + DynamicBagType::Channel => T::DefaultChannelDynamicBagNumberOfStorageBuckets::get(), + }; + + DynamicBagCreationPolicy:: { + number_of_storage_buckets, + ..Default::default() } } // Loads dynamic bag creation policy or use default values. 
pub(crate) fn get_dynamic_bag_creation_policy( bag_type: DynamicBagType, - ) -> DynamicBagCreationPolicy { - if DynamicBagCreationPolicies::contains_key(bag_type) { + ) -> DynamicBagCreationPolicy { + if DynamicBagCreationPolicies::::contains_key(bag_type) { return Self::dynamic_bag_creation_policy(bag_type); } Self::get_default_dynamic_bag_creation_policy(bag_type) } - // Verifies storage provider operator existence. + // Verifies storage operator existence. fn ensure_storage_provider_operator_exists(operator_id: &WorkerId) -> DispatchResult { ensure!( - T::ensure_worker_exists(operator_id).is_ok(), + T::ensure_storage_worker_exists(operator_id).is_ok(), Error::::StorageProviderOperatorDoesntExist ); @@ -2423,4 +3355,163 @@ impl Module { Ok(Self::data_object_by_id(bag_id, data_object_id)) } + + // Ensures the existence of the distribution bucket family. + // Returns the DistributionBucketFamily object or error. + fn ensure_distribution_bucket_family_exists( + family_id: &T::DistributionBucketFamilyId, + ) -> Result, Error> { + ensure!( + >::contains_key(family_id), + Error::::DistributionBucketFamilyDoesntExist + ); + + Ok(Self::distribution_bucket_family_by_id(family_id)) + } + + // Ensures the existence of the distribution bucket. + // Returns the DistributionBucket object or error. 
+ fn ensure_distribution_bucket_exists( + family: &DistributionBucketFamily, + distribution_bucket_id: &T::DistributionBucketId, + ) -> Result, Error> { + family + .distribution_buckets + .get(distribution_bucket_id) + .cloned() + .ok_or(Error::::DistributionBucketDoesntExist) + } + + // Ensures validity of the `update_distribution_buckets_for_bag` extrinsic parameters + fn validate_update_distribution_buckets_for_bag_params( + bag_id: &BagId, + family_id: &T::DistributionBucketFamilyId, + add_buckets: &BTreeSet, + remove_buckets: &BTreeSet, + ) -> DispatchResult { + ensure!( + !add_buckets.is_empty() || !remove_buckets.is_empty(), + Error::::DistributionBucketIdCollectionsAreEmpty + ); + + let bag = Self::ensure_bag_exists(bag_id)?; + + let family = Self::ensure_distribution_bucket_family_exists(family_id)?; + + let new_bucket_number = bag + .distributed_by + .len() + .saturating_add(add_buckets.len()) + .saturating_sub(remove_buckets.len()) + .saturated_into::(); + + ensure!( + new_bucket_number <= Self::distribution_buckets_per_bag_limit(), + Error::::MaxDistributionBucketNumberPerBagLimitExceeded + ); + + for bucket_id in remove_buckets.iter() { + Self::ensure_distribution_bucket_exists(&family, bucket_id)?; + + ensure!( + bag.distributed_by.contains(&bucket_id), + Error::::DistributionBucketIsNotBoundToBag + ); + } + + for bucket_id in add_buckets.iter() { + let bucket = Self::ensure_distribution_bucket_exists(&family, bucket_id)?; + + ensure!( + bucket.accepting_new_bags, + Error::::DistributionBucketDoesntAcceptNewBags + ); + + ensure!( + !bag.distributed_by.contains(&bucket_id), + Error::::DistributionBucketIsBoundToBag + ); + } + + Ok(()) + } + + // Ensures validity of the `update_families_in_dynamic_bag_creation_policy` extrinsic parameters + fn validate_update_families_in_dynamic_bag_creation_policy_params( + families: &BTreeMap, + ) -> DispatchResult { + for (family_id, _) in families.iter() { + 
Self::ensure_distribution_bucket_family_exists(family_id)?; + } + + Ok(()) + } + + // Generate random number from zero to upper_bound (excluding). + pub(crate) fn random_index(seed: &[u8], upper_bound: u64) -> u64 { + if upper_bound == 0 { + return upper_bound; + } + + let mut rand: u64 = 0; + for (offset, byte) in seed.iter().enumerate().take(8) { + rand += (*byte as u64) << offset; + } + rand % upper_bound + } + + // Get initial random seed. It handles the error on the initial block. + pub(crate) fn get_initial_random_seed() -> T::Hash { + // Cannot create randomness in the initial block (Substrate error). + if >::block_number() == Zero::zero() { + Default::default() + } else { + T::Randomness::random_seed() + } + } + + // Verify parameters for the `invite_distribution_bucket_operator` extrinsic. + fn ensure_distribution_provider_can_be_invited( + bucket: &DistributionBucket, + worker_id: &WorkerId, + ) -> DispatchResult { + ensure!( + T::ensure_distribution_worker_exists(worker_id).is_ok(), + Error::::DistributionProviderOperatorDoesntExist + ); + + ensure!( + !bucket.pending_invitations.contains(worker_id), + Error::::DistributionProviderOperatorAlreadyInvited + ); + + ensure!( + !bucket.operators.contains(worker_id), + Error::::DistributionProviderOperatorSet + ); + + ensure!( + bucket.pending_invitations.len().saturated_into::() + < T::MaxNumberOfPendingInvitationsPerDistributionBucket::get(), + Error::::MaxNumberOfPendingInvitationsLimitForDistributionBucketReached + ); + + Ok(()) + } + + // Verify that dynamic bag creation policies has no dependencies on given distribution bucket + // family for all bag types. 
+ fn check_dynamic_bag_creation_policy_for_dependencies( + family_id: &T::DistributionBucketFamilyId, + dynamic_bag_type: DynamicBagType, + ) -> DispatchResult { + let creation_policy = Self::get_dynamic_bag_creation_policy(dynamic_bag_type); + + ensure!( + !creation_policy.families.contains_key(family_id), + Error::::DistributionFamilyBoundToBagCreationPolicy + ); + + Ok(()) + } } diff --git a/runtime-modules/storage/src/storage_bucket_picker.rs b/runtime-modules/storage/src/storage_bucket_picker.rs index 8aa10055dc..0494d41ad9 100644 --- a/runtime-modules/storage/src/storage_bucket_picker.rs +++ b/runtime-modules/storage/src/storage_bucket_picker.rs @@ -30,7 +30,7 @@ impl StorageBucketPicker { let required_bucket_num = creation_policy.number_of_storage_buckets as usize; - // Storage IDs accumulator. + // Storage bucket IDs accumulator. let bucket_ids_cell = RefCell::new(BTreeSet::new()); RandomStorageBucketIdIterator::::new() @@ -109,34 +109,18 @@ impl RandomStorageBucketIdIterator { fn random_storage_bucket_id(&self) -> T::StorageBucketId { let total_buckets_number = Module::::next_storage_bucket_id(); - let random_bucket_id: T::StorageBucketId = self - .random_index(total_buckets_number.saturated_into()) - .saturated_into(); + let random_bucket_id: T::StorageBucketId = Module::::random_index( + self.current_seed.as_ref(), + total_buckets_number.saturated_into(), + ) + .saturated_into(); random_bucket_id } - // Generate random number from zero to upper_bound (excluding). - fn random_index(&self, upper_bound: u64) -> u64 { - if upper_bound == 0 { - return upper_bound; - } - - let mut rand: u64 = 0; - for offset in 0..8 { - rand += (self.current_seed.as_ref()[offset] as u64) << offset; - } - rand % upper_bound - } - // Creates new iterator. pub(crate) fn new() -> Self { - // Cannot create randomness in the initial block (Substrate error). 
- let seed = if >::block_number() == Zero::zero() { - Default::default() - } else { - T::Randomness::random_seed() - }; + let seed = Module::::get_initial_random_seed(); Self { current_iteration: 0, diff --git a/runtime-modules/storage/src/tests/fixtures.rs b/runtime-modules/storage/src/tests/fixtures.rs index 9755d8dea4..e0e72a22f8 100644 --- a/runtime-modules/storage/src/tests/fixtures.rs +++ b/runtime-modules/storage/src/tests/fixtures.rs @@ -2,17 +2,21 @@ use frame_support::dispatch::DispatchResult; use frame_support::storage::StorageMap; use frame_support::traits::{Currency, OnFinalize, OnInitialize}; use frame_system::{EventRecord, Phase, RawOrigin}; +use sp_std::collections::btree_map::BTreeMap; use sp_std::collections::btree_set::BTreeSet; use super::mocks::{ Balances, CollectiveFlip, Storage, System, Test, TestEvent, DEFAULT_MEMBER_ACCOUNT_ID, - DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID, WG_LEADER_ACCOUNT_ID, + DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID, STORAGE_WG_LEADER_ACCOUNT_ID, }; +use crate::tests::mocks::{ + DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID, DISTRIBUTION_WG_LEADER_ACCOUNT_ID, +}; use crate::{ - BagId, ContentId, DataObjectCreationParameters, DataObjectStorage, DynamicBagDeletionPrize, - DynamicBagId, DynamicBagType, RawEvent, StaticBagId, StorageBucketOperatorStatus, - UploadParameters, + BagId, ContentId, DataObjectCreationParameters, DataObjectStorage, DistributionBucketFamily, + DynamicBagDeletionPrize, DynamicBagId, DynamicBagType, RawEvent, StaticBagId, + StorageBucketOperatorStatus, UploadParameters, }; // Recommendation from Parity on testing on_finalize @@ -45,6 +49,8 @@ impl EventFixture { DynamicBagId, u64, u64, + u64, + u64, >, ) { let converted_event = TestEvent::storage(expected_raw_event); @@ -62,6 +68,8 @@ impl EventFixture { DynamicBagId, u64, u64, + u64, + u64, >, ) { let converted_event = TestEvent::storage(expected_raw_event); @@ -444,7 +452,7 @@ pub struct CancelStorageBucketInvitationFixture { impl 
CancelStorageBucketInvitationFixture { pub fn default() -> Self { Self { - origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID), + origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID), storage_bucket_id: Default::default(), } } @@ -546,7 +554,7 @@ pub struct UpdateUploadingBlockedStatusFixture { impl UpdateUploadingBlockedStatusFixture { pub fn default() -> Self { Self { - origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID), + origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID), new_status: false, } } @@ -719,7 +727,7 @@ pub struct UpdateBlacklistFixture { impl UpdateBlacklistFixture { pub fn default() -> Self { Self { - origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID), + origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID), remove_hashes: BTreeSet::new(), add_hashes: BTreeSet::new(), } @@ -829,7 +837,7 @@ pub struct RemoveStorageBucketOperatorFixture { impl RemoveStorageBucketOperatorFixture { pub fn default() -> Self { Self { - origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID), + origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID), storage_bucket_id: Default::default(), } } @@ -875,7 +883,7 @@ pub struct UpdateDataObjectPerMegabyteFeeFixture { impl UpdateDataObjectPerMegabyteFeeFixture { pub fn default() -> Self { Self { - origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID), + origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID), new_fee: 0, } } @@ -911,7 +919,7 @@ pub struct UpdateStorageBucketsPerBagLimitFixture { impl UpdateStorageBucketsPerBagLimitFixture { pub fn default() -> Self { Self { - origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID), + origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID), new_limit: 0, } } @@ -1014,7 +1022,7 @@ pub struct UpdateStorageBucketsVoucherMaxLimitsFixture { impl UpdateStorageBucketsVoucherMaxLimitsFixture { pub fn default() -> Self { Self { - origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID), + origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID), new_objects_size_limit: 0, new_objects_number_limit: 0, } @@ -1115,7 
+1123,7 @@ pub struct UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture { impl UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture { pub fn default() -> Self { Self { - origin: RawOrigin::Signed(WG_LEADER_ACCOUNT_ID), + origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID), new_storage_buckets_number: 0, dynamic_bag_type: Default::default(), } @@ -1162,3 +1170,735 @@ impl UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture { } } } + +pub struct CreateDistributionBucketFamilyFixture { + origin: RawOrigin, +} + +impl CreateDistributionBucketFamilyFixture { + pub fn default() -> Self { + Self { + origin: RawOrigin::Signed(DEFAULT_ACCOUNT_ID), + } + } + + pub fn with_origin(self, origin: RawOrigin) -> Self { + Self { origin, ..self } + } + + pub fn call_and_assert(&self, expected_result: DispatchResult) -> Option { + let next_family_id = Storage::next_distribution_bucket_family_id(); + let family_number = Storage::distribution_bucket_family_number(); + let actual_result = Storage::create_distribution_bucket_family(self.origin.clone().into()); + + assert_eq!(actual_result, expected_result); + + if actual_result.is_ok() { + assert_eq!( + next_family_id + 1, + Storage::next_distribution_bucket_family_id() + ); + assert_eq!( + family_number + 1, + Storage::distribution_bucket_family_number() + ); + assert!(>::contains_key( + next_family_id + )); + + Some(next_family_id) + } else { + assert_eq!( + next_family_id, + Storage::next_distribution_bucket_family_id() + ); + assert_eq!(family_number, Storage::distribution_bucket_family_number()); + assert!(!>::contains_key( + next_family_id + )); + + None + } + } +} + +pub struct DeleteDistributionBucketFamilyFixture { + origin: RawOrigin, + family_id: u64, +} + +impl DeleteDistributionBucketFamilyFixture { + pub fn default() -> Self { + Self { + origin: RawOrigin::Signed(DEFAULT_ACCOUNT_ID), + family_id: Default::default(), + } + } + + pub fn with_family_id(self, family_id: u64) -> Self { + Self { 
family_id, ..self } + } + + pub fn with_origin(self, origin: RawOrigin) -> Self { + Self { origin, ..self } + } + + pub fn call_and_assert(&self, expected_result: DispatchResult) { + let family_number = Storage::distribution_bucket_family_number(); + let actual_result = + Storage::delete_distribution_bucket_family(self.origin.clone().into(), self.family_id); + + assert_eq!(actual_result, expected_result); + + if actual_result.is_ok() { + assert_eq!( + family_number - 1, + Storage::distribution_bucket_family_number() + ); + assert!(!>::contains_key( + self.family_id + )); + } else { + assert_eq!(family_number, Storage::distribution_bucket_family_number()); + } + } +} + +pub struct CreateDistributionBucketFixture { + origin: RawOrigin, + family_id: u64, + accept_new_bags: bool, +} + +impl CreateDistributionBucketFixture { + pub fn default() -> Self { + Self { + origin: RawOrigin::Signed(DEFAULT_ACCOUNT_ID), + family_id: Default::default(), + accept_new_bags: false, + } + } + + pub fn with_family_id(self, family_id: u64) -> Self { + Self { family_id, ..self } + } + + pub fn with_origin(self, origin: RawOrigin) -> Self { + Self { origin, ..self } + } + + pub fn with_accept_new_bags(self, accept_new_bags: bool) -> Self { + Self { + accept_new_bags, + ..self + } + } + + pub fn call_and_assert(&self, expected_result: DispatchResult) -> Option { + let next_bucket_id = Storage::next_distribution_bucket_id(); + let actual_result = Storage::create_distribution_bucket( + self.origin.clone().into(), + self.family_id, + self.accept_new_bags, + ); + + assert_eq!(actual_result, expected_result); + + if actual_result.is_ok() { + assert_eq!(next_bucket_id + 1, Storage::next_distribution_bucket_id()); + + let family: DistributionBucketFamily = + Storage::distribution_bucket_family_by_id(self.family_id); + + assert!(family.distribution_buckets.contains_key(&next_bucket_id)); + assert_eq!( + family + .distribution_buckets + .get(&next_bucket_id) + .unwrap() + .accepting_new_bags, + 
self.accept_new_bags + ); + + Some(next_bucket_id) + } else { + assert_eq!(next_bucket_id, Storage::next_distribution_bucket_id()); + + None + } + } +} + +pub struct UpdateDistributionBucketStatusFixture { + origin: RawOrigin, + family_id: u64, + distribution_bucket_id: u64, + new_status: bool, +} + +impl UpdateDistributionBucketStatusFixture { + pub fn default() -> Self { + Self { + origin: RawOrigin::Signed(DEFAULT_MEMBER_ACCOUNT_ID), + family_id: Default::default(), + distribution_bucket_id: Default::default(), + new_status: false, + } + } + pub fn with_bucket_id(self, bucket_id: u64) -> Self { + Self { + distribution_bucket_id: bucket_id, + ..self + } + } + + pub fn with_family_id(self, family_id: u64) -> Self { + Self { family_id, ..self } + } + + pub fn with_origin(self, origin: RawOrigin) -> Self { + Self { origin, ..self } + } + + pub fn with_new_status(self, new_status: bool) -> Self { + Self { new_status, ..self } + } + + pub fn call_and_assert(&self, expected_result: DispatchResult) { + let actual_result = Storage::update_distribution_bucket_status( + self.origin.clone().into(), + self.family_id, + self.distribution_bucket_id, + self.new_status, + ); + + assert_eq!(actual_result, expected_result); + } +} + +pub struct DeleteDistributionBucketFixture { + origin: RawOrigin, + family_id: u64, + distribution_bucket_id: u64, +} + +impl DeleteDistributionBucketFixture { + pub fn default() -> Self { + Self { + origin: RawOrigin::Signed(DEFAULT_MEMBER_ACCOUNT_ID), + family_id: Default::default(), + distribution_bucket_id: Default::default(), + } + } + + pub fn with_bucket_id(self, bucket_id: u64) -> Self { + Self { + distribution_bucket_id: bucket_id, + ..self + } + } + + pub fn with_family_id(self, family_id: u64) -> Self { + Self { family_id, ..self } + } + + pub fn with_origin(self, origin: RawOrigin) -> Self { + Self { origin, ..self } + } + + pub fn call_and_assert(&self, expected_result: DispatchResult) { + let actual_result = 
Storage::delete_distribution_bucket( + self.origin.clone().into(), + self.family_id, + self.distribution_bucket_id, + ); + + assert_eq!(actual_result, expected_result); + } +} + +pub struct UpdateDistributionBucketForBagsFixture { + origin: RawOrigin, + bag_id: BagId, + family_id: u64, + add_bucket_ids: BTreeSet, + remove_bucket_ids: BTreeSet, +} + +impl UpdateDistributionBucketForBagsFixture { + pub fn default() -> Self { + Self { + origin: RawOrigin::Signed(DEFAULT_ACCOUNT_ID), + bag_id: Default::default(), + family_id: Default::default(), + add_bucket_ids: Default::default(), + remove_bucket_ids: Default::default(), + } + } + + pub fn with_origin(self, origin: RawOrigin) -> Self { + Self { origin, ..self } + } + + pub fn with_add_bucket_ids(self, add_bucket_ids: BTreeSet) -> Self { + Self { + add_bucket_ids, + ..self + } + } + + pub fn with_remove_bucket_ids(self, remove_bucket_ids: BTreeSet) -> Self { + Self { + remove_bucket_ids, + ..self + } + } + + pub fn with_bag_id(self, bag_id: BagId) -> Self { + Self { bag_id, ..self } + } + + pub fn with_family_id(self, family_id: u64) -> Self { + Self { family_id, ..self } + } + + pub fn call_and_assert(&self, expected_result: DispatchResult) { + let actual_result = Storage::update_distribution_buckets_for_bag( + self.origin.clone().into(), + self.bag_id.clone(), + self.family_id, + self.add_bucket_ids.clone(), + self.remove_bucket_ids.clone(), + ); + + assert_eq!(actual_result, expected_result); + } +} + +pub struct UpdateDistributionBucketsPerBagLimitFixture { + origin: RawOrigin, + new_limit: u64, +} + +impl UpdateDistributionBucketsPerBagLimitFixture { + pub fn default() -> Self { + Self { + origin: RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID), + new_limit: 0, + } + } + + pub fn with_origin(self, origin: RawOrigin) -> Self { + Self { origin, ..self } + } + + pub fn with_new_limit(self, new_limit: u64) -> Self { + Self { new_limit, ..self } + } + + pub fn call_and_assert(&self, expected_result: 
DispatchResult) { + let old_limit = Storage::distribution_buckets_per_bag_limit(); + + let actual_result = Storage::update_distribution_buckets_per_bag_limit( + self.origin.clone().into(), + self.new_limit, + ); + + assert_eq!(actual_result, expected_result); + + if actual_result.is_ok() { + assert_eq!( + Storage::distribution_buckets_per_bag_limit(), + self.new_limit + ); + } else { + assert_eq!(old_limit, Storage::distribution_buckets_per_bag_limit()); + } + } +} + +pub struct UpdateDistributionBucketModeFixture { + origin: RawOrigin, + family_id: u64, + distribution_bucket_id: u64, + distributing: bool, +} + +impl UpdateDistributionBucketModeFixture { + pub fn default() -> Self { + Self { + origin: RawOrigin::Signed(DEFAULT_MEMBER_ACCOUNT_ID), + family_id: Default::default(), + distribution_bucket_id: Default::default(), + distributing: true, + } + } + pub fn with_bucket_id(self, bucket_id: u64) -> Self { + Self { + distribution_bucket_id: bucket_id, + ..self + } + } + + pub fn with_family_id(self, family_id: u64) -> Self { + Self { family_id, ..self } + } + + pub fn with_origin(self, origin: RawOrigin) -> Self { + Self { origin, ..self } + } + + pub fn with_distributing(self, distributing: bool) -> Self { + Self { + distributing, + ..self + } + } + + pub fn call_and_assert(&self, expected_result: DispatchResult) { + let actual_result = Storage::update_distribution_bucket_mode( + self.origin.clone().into(), + self.family_id, + self.distribution_bucket_id, + self.distributing, + ); + + assert_eq!(actual_result, expected_result); + } +} + +pub struct UpdateFamiliesInDynamicBagCreationPolicyFixture { + origin: RawOrigin, + dynamic_bag_type: DynamicBagType, + families: BTreeMap, +} + +impl UpdateFamiliesInDynamicBagCreationPolicyFixture { + pub fn default() -> Self { + Self { + origin: RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID), + dynamic_bag_type: Default::default(), + families: Default::default(), + } + } + + pub fn with_origin(self, origin: RawOrigin) -> 
Self { + Self { origin, ..self } + } + + pub fn with_families(self, families: BTreeMap) -> Self { + Self { families, ..self } + } + + pub fn with_dynamic_bag_type(self, dynamic_bag_type: DynamicBagType) -> Self { + Self { + dynamic_bag_type, + ..self + } + } + + pub fn call_and_assert(&self, expected_result: DispatchResult) { + let old_policy = Storage::get_dynamic_bag_creation_policy(self.dynamic_bag_type); + + let actual_result = Storage::update_families_in_dynamic_bag_creation_policy( + self.origin.clone().into(), + self.dynamic_bag_type, + self.families.clone(), + ); + + assert_eq!(actual_result, expected_result); + + let new_policy = Storage::get_dynamic_bag_creation_policy(self.dynamic_bag_type); + if actual_result.is_ok() { + assert_eq!(new_policy.families, self.families); + } else { + assert_eq!(old_policy, new_policy); + } + } +} + +pub struct InviteDistributionBucketOperatorFixture { + origin: RawOrigin, + operator_worker_id: u64, + family_id: u64, + bucket_id: u64, +} + +impl InviteDistributionBucketOperatorFixture { + pub fn default() -> Self { + Self { + origin: RawOrigin::Signed(DEFAULT_ACCOUNT_ID), + operator_worker_id: DEFAULT_WORKER_ID, + bucket_id: Default::default(), + family_id: Default::default(), + } + } + + pub fn with_origin(self, origin: RawOrigin) -> Self { + Self { origin, ..self } + } + + pub fn with_operator_worker_id(self, operator_worker_id: u64) -> Self { + Self { + operator_worker_id, + ..self + } + } + + pub fn with_bucket_id(self, bucket_id: u64) -> Self { + Self { bucket_id, ..self } + } + + pub fn with_family_id(self, family_id: u64) -> Self { + Self { family_id, ..self } + } + + pub fn call_and_assert(&self, expected_result: DispatchResult) { + let actual_result = Storage::invite_distribution_bucket_operator( + self.origin.clone().into(), + self.family_id, + self.bucket_id, + self.operator_worker_id, + ); + + assert_eq!(actual_result, expected_result); + + if actual_result.is_ok() { + let new_family = 
Storage::distribution_bucket_family_by_id(self.family_id); + let new_bucket = new_family + .distribution_buckets + .get(&self.bucket_id) + .unwrap(); + + assert!(new_bucket + .pending_invitations + .contains(&self.operator_worker_id),); + } + } +} + +pub struct CancelDistributionBucketInvitationFixture { + origin: RawOrigin, + bucket_id: u64, + family_id: u64, + operator_worker_id: u64, +} + +impl CancelDistributionBucketInvitationFixture { + pub fn default() -> Self { + Self { + origin: RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID), + bucket_id: Default::default(), + family_id: Default::default(), + operator_worker_id: Default::default(), + } + } + + pub fn with_origin(self, origin: RawOrigin) -> Self { + Self { origin, ..self } + } + + pub fn with_bucket_id(self, bucket_id: u64) -> Self { + Self { bucket_id, ..self } + } + + pub fn with_family_id(self, family_id: u64) -> Self { + Self { family_id, ..self } + } + + pub fn with_operator_worker_id(self, operator_worker_id: u64) -> Self { + Self { + operator_worker_id, + ..self + } + } + + pub fn call_and_assert(&self, expected_result: DispatchResult) { + let actual_result = Storage::cancel_distribution_bucket_operator_invite( + self.origin.clone().into(), + self.family_id, + self.bucket_id, + self.operator_worker_id, + ); + + assert_eq!(actual_result, expected_result); + + if actual_result.is_ok() { + let new_family = Storage::distribution_bucket_family_by_id(self.family_id); + let new_bucket = new_family + .distribution_buckets + .get(&self.bucket_id) + .unwrap(); + + assert!(!new_bucket + .pending_invitations + .contains(&self.operator_worker_id)); + } + } +} + +pub struct AcceptDistributionBucketInvitationFixture { + origin: RawOrigin, + bucket_id: u64, + family_id: u64, + worker_id: u64, +} + +impl AcceptDistributionBucketInvitationFixture { + pub fn default() -> Self { + Self { + origin: RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID), + bucket_id: Default::default(), + family_id: 
Default::default(), + worker_id: Default::default(), + } + } + + pub fn with_origin(self, origin: RawOrigin) -> Self { + Self { origin, ..self } + } + + pub fn with_bucket_id(self, bucket_id: u64) -> Self { + Self { bucket_id, ..self } + } + + pub fn with_family_id(self, family_id: u64) -> Self { + Self { family_id, ..self } + } + + pub fn with_worker_id(self, worker_id: u64) -> Self { + Self { worker_id, ..self } + } + + pub fn call_and_assert(&self, expected_result: DispatchResult) { + let actual_result = Storage::accept_distribution_bucket_invitation( + self.origin.clone().into(), + self.worker_id, + self.family_id, + self.bucket_id, + ); + + assert_eq!(actual_result, expected_result); + + if actual_result.is_ok() { + let new_family = Storage::distribution_bucket_family_by_id(self.family_id); + let new_bucket = new_family + .distribution_buckets + .get(&self.bucket_id) + .unwrap(); + + assert!(!new_bucket.pending_invitations.contains(&self.worker_id)); + + assert!(new_bucket.operators.contains(&self.worker_id)); + } + } +} + +pub struct SetDistributionBucketMetadataFixture { + origin: RawOrigin, + bucket_id: u64, + family_id: u64, + worker_id: u64, + metadata: Vec, +} + +impl SetDistributionBucketMetadataFixture { + pub fn default() -> Self { + Self { + origin: RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID), + bucket_id: Default::default(), + family_id: Default::default(), + worker_id: Default::default(), + metadata: Default::default(), + } + } + + pub fn with_metadata(self, metadata: Vec) -> Self { + Self { metadata, ..self } + } + + pub fn with_origin(self, origin: RawOrigin) -> Self { + Self { origin, ..self } + } + + pub fn with_bucket_id(self, bucket_id: u64) -> Self { + Self { bucket_id, ..self } + } + + pub fn with_family_id(self, family_id: u64) -> Self { + Self { family_id, ..self } + } + + pub fn with_worker_id(self, worker_id: u64) -> Self { + Self { worker_id, ..self } + } + + pub fn call_and_assert(&self, expected_result: 
DispatchResult) { + let actual_result = Storage::set_distribution_operator_metadata( + self.origin.clone().into(), + self.worker_id, + self.family_id, + self.bucket_id, + self.metadata.clone(), + ); + + assert_eq!(actual_result, expected_result); + } +} + +pub struct RemoveDistributionBucketOperatorFixture { + origin: RawOrigin, + bucket_id: u64, + family_id: u64, + operator_worker_id: u64, +} + +impl RemoveDistributionBucketOperatorFixture { + pub fn default() -> Self { + Self { + origin: RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID), + bucket_id: Default::default(), + family_id: Default::default(), + operator_worker_id: Default::default(), + } + } + + pub fn with_origin(self, origin: RawOrigin) -> Self { + Self { origin, ..self } + } + + pub fn with_bucket_id(self, bucket_id: u64) -> Self { + Self { bucket_id, ..self } + } + + pub fn with_family_id(self, family_id: u64) -> Self { + Self { family_id, ..self } + } + + pub fn with_operator_worker_id(self, operator_worker_id: u64) -> Self { + Self { + operator_worker_id, + ..self + } + } + + pub fn call_and_assert(&self, expected_result: DispatchResult) { + let actual_result = Storage::remove_distribution_bucket_operator( + self.origin.clone().into(), + self.family_id, + self.bucket_id, + self.operator_worker_id, + ); + + assert_eq!(actual_result, expected_result); + if actual_result.is_ok() { + let new_family = Storage::distribution_bucket_family_by_id(self.family_id); + let new_bucket = new_family + .distribution_buckets + .get(&self.bucket_id) + .unwrap(); + + assert!(!new_bucket.operators.contains(&self.operator_worker_id)); + } + } +} diff --git a/runtime-modules/storage/src/tests/mocks.rs b/runtime-modules/storage/src/tests/mocks.rs index db8d6bd1c8..6052c8363e 100644 --- a/runtime-modules/storage/src/tests/mocks.rs +++ b/runtime-modules/storage/src/tests/mocks.rs @@ -10,8 +10,6 @@ use sp_runtime::{ ModuleId, Perbill, }; -use crate::DynamicBagCreationPolicy; - // Workaround for 
https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. #[derive(Clone, PartialEq, Eq, Debug)] pub struct Test; @@ -53,31 +51,38 @@ impl balances::Trait for Test { parameter_types! { pub const MaxNumberOfDataObjectsPerBag: u64 = 4; + pub const MaxDistributionBucketFamilyNumber: u64 = 4; + pub const MaxDistributionBucketNumberPerFamily: u64 = 10; pub const DataObjectDeletionPrize: u64 = 10; pub const StorageModuleId: ModuleId = ModuleId(*b"mstorage"); // module storage pub const BlacklistSizeLimit: u64 = 1; + pub const MaxNumberOfPendingInvitationsPerDistributionBucket: u64 = 1; pub const StorageBucketsPerBagValueConstraint: crate::StorageBucketsPerBagValueConstraint = crate::StorageBucketsPerBagValueConstraint {min: 3, max_min_diff: 7}; pub const InitialStorageBucketsNumberForDynamicBag: u64 = 3; pub const MaxRandomIterationNumber: u64 = 3; - pub const DefaultMemberDynamicBagCreationPolicy: DynamicBagCreationPolicy = DynamicBagCreationPolicy{ - number_of_storage_buckets: 3 - }; - pub const DefaultChannelDynamicBagCreationPolicy: DynamicBagCreationPolicy = DynamicBagCreationPolicy{ - number_of_storage_buckets: 4 - }; + pub const DefaultMemberDynamicBagNumberOfStorageBuckets: u64 = 3; + pub const DefaultChannelDynamicBagNumberOfStorageBuckets: u64 = 4; + pub const DistributionBucketsPerBagValueConstraint: crate::DistributionBucketsPerBagValueConstraint = + crate::StorageBucketsPerBagValueConstraint {min: 3, max_min_diff: 7}; } -pub const WG_LEADER_ACCOUNT_ID: u64 = 100001; +pub const STORAGE_WG_LEADER_ACCOUNT_ID: u64 = 100001; pub const DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID: u64 = 100002; +pub const DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID: u64 = 100003; +pub const DISTRIBUTION_WG_LEADER_ACCOUNT_ID: u64 = 100004; pub const DEFAULT_STORAGE_PROVIDER_ID: u64 = 10; pub const ANOTHER_STORAGE_PROVIDER_ID: u64 = 11; +pub const DEFAULT_DISTRIBUTION_PROVIDER_ID: u64 = 12; +pub const ANOTHER_DISTRIBUTION_PROVIDER_ID: u64 = 13; impl crate::Trait for Test { type Event 
= TestEvent; type DataObjectId = u64; type StorageBucketId = u64; type DistributionBucketId = u64; + type DistributionBucketFamilyId = u64; + type DistributionBucketOperatorId = u64; type ChannelId = u64; type MaxNumberOfDataObjectsPerBag = MaxNumberOfDataObjectsPerBag; type DataObjectDeletionPrize = DataObjectDeletionPrize; @@ -85,22 +90,29 @@ impl crate::Trait for Test { type ModuleId = StorageModuleId; type MemberOriginValidator = (); type StorageBucketsPerBagValueConstraint = StorageBucketsPerBagValueConstraint; - type DefaultMemberDynamicBagCreationPolicy = DefaultMemberDynamicBagCreationPolicy; - type DefaultChannelDynamicBagCreationPolicy = DefaultChannelDynamicBagCreationPolicy; + type DefaultMemberDynamicBagNumberOfStorageBuckets = + DefaultMemberDynamicBagNumberOfStorageBuckets; + type DefaultChannelDynamicBagNumberOfStorageBuckets = + DefaultChannelDynamicBagNumberOfStorageBuckets; type Randomness = CollectiveFlip; type MaxRandomIterationNumber = MaxRandomIterationNumber; + type MaxDistributionBucketFamilyNumber = MaxDistributionBucketFamilyNumber; + type MaxDistributionBucketNumberPerFamily = MaxDistributionBucketNumberPerFamily; + type DistributionBucketsPerBagValueConstraint = DistributionBucketsPerBagValueConstraint; + type MaxNumberOfPendingInvitationsPerDistributionBucket = + MaxNumberOfPendingInvitationsPerDistributionBucket; - fn ensure_working_group_leader_origin(origin: Self::Origin) -> DispatchResult { + fn ensure_storage_working_group_leader_origin(origin: Self::Origin) -> DispatchResult { let account_id = ensure_signed(origin)?; - if account_id != WG_LEADER_ACCOUNT_ID { + if account_id != STORAGE_WG_LEADER_ACCOUNT_ID { Err(DispatchError::BadOrigin) } else { Ok(()) } } - fn ensure_worker_origin(origin: Self::Origin, _: u64) -> DispatchResult { + fn ensure_storage_worker_origin(origin: Self::Origin, _: u64) -> DispatchResult { let account_id = ensure_signed(origin)?; if account_id != DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID { @@ -110,7 +122,7 @@ 
impl crate::Trait for Test { } } - fn ensure_worker_exists(worker_id: &u64) -> DispatchResult { + fn ensure_storage_worker_exists(worker_id: &u64) -> DispatchResult { let allowed_storage_providers = vec![DEFAULT_STORAGE_PROVIDER_ID, ANOTHER_STORAGE_PROVIDER_ID]; @@ -120,6 +132,39 @@ impl crate::Trait for Test { Ok(()) } } + + fn ensure_distribution_working_group_leader_origin(origin: Self::Origin) -> DispatchResult { + let account_id = ensure_signed(origin)?; + + if account_id != DISTRIBUTION_WG_LEADER_ACCOUNT_ID { + Err(DispatchError::BadOrigin) + } else { + Ok(()) + } + } + + fn ensure_distribution_worker_origin(origin: Self::Origin, _: u64) -> DispatchResult { + let account_id = ensure_signed(origin)?; + + if account_id != DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID { + Err(DispatchError::BadOrigin) + } else { + Ok(()) + } + } + + fn ensure_distribution_worker_exists(worker_id: &u64) -> DispatchResult { + let allowed_providers = vec![ + DEFAULT_DISTRIBUTION_PROVIDER_ID, + ANOTHER_DISTRIBUTION_PROVIDER_ID, + ]; + + if !allowed_providers.contains(worker_id) { + Err(DispatchError::Other("Invalid worker")) + } else { + Ok(()) + } + } } pub const DEFAULT_MEMBER_ID: u64 = 100; diff --git a/runtime-modules/storage/src/tests/mod.rs b/runtime-modules/storage/src/tests/mod.rs index fda2d63e21..321794e097 100644 --- a/runtime-modules/storage/src/tests/mod.rs +++ b/runtime-modules/storage/src/tests/mod.rs @@ -5,27 +5,31 @@ pub(crate) mod mocks; use frame_support::dispatch::DispatchError; use frame_support::traits::Currency; -use frame_support::{StorageDoubleMap, StorageMap}; +use frame_support::{StorageDoubleMap, StorageMap, StorageValue}; use frame_system::RawOrigin; use sp_runtime::SaturatedConversion; +use sp_std::collections::btree_map::BTreeMap; use sp_std::collections::btree_set::BTreeSet; -use sp_std::iter::FromIterator; +use sp_std::iter::{repeat, FromIterator}; use common::working_group::WorkingGroup; use crate::{ - BagId, DataObject, DataObjectCreationParameters, 
DataObjectStorage, DynamicBagCreationPolicy, - DynamicBagDeletionPrize, DynamicBagId, DynamicBagType, Error, ModuleAccount, RawEvent, - StaticBagId, StorageBucketOperatorStatus, StorageTreasury, UploadParameters, Voucher, + BagId, DataObject, DataObjectCreationParameters, DataObjectStorage, DistributionBucketFamily, + DynamicBagCreationPolicy, DynamicBagDeletionPrize, DynamicBagId, DynamicBagType, Error, + ModuleAccount, RawEvent, StaticBagId, StorageBucketOperatorStatus, StorageTreasury, + UploadParameters, Voucher, }; use mocks::{ build_test_externalities, Balances, DataObjectDeletionPrize, - DefaultChannelDynamicBagCreationPolicy, DefaultMemberDynamicBagCreationPolicy, - InitialStorageBucketsNumberForDynamicBag, MaxNumberOfDataObjectsPerBag, - MaxRandomIterationNumber, Storage, Test, ANOTHER_STORAGE_PROVIDER_ID, + DefaultChannelDynamicBagNumberOfStorageBuckets, DefaultMemberDynamicBagNumberOfStorageBuckets, + InitialStorageBucketsNumberForDynamicBag, MaxDistributionBucketFamilyNumber, + MaxDistributionBucketNumberPerFamily, MaxNumberOfDataObjectsPerBag, MaxRandomIterationNumber, + Storage, Test, ANOTHER_DISTRIBUTION_PROVIDER_ID, ANOTHER_STORAGE_PROVIDER_ID, + DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID, DEFAULT_DISTRIBUTION_PROVIDER_ID, DEFAULT_MEMBER_ACCOUNT_ID, DEFAULT_MEMBER_ID, DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID, - DEFAULT_STORAGE_PROVIDER_ID, WG_LEADER_ACCOUNT_ID, + DEFAULT_STORAGE_PROVIDER_ID, DISTRIBUTION_WG_LEADER_ACCOUNT_ID, STORAGE_WG_LEADER_ACCOUNT_ID, }; use fixtures::*; @@ -45,7 +49,7 @@ fn create_storage_bucket_succeeded() { let invite_worker = None; let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_accepting_new_bags(accepting_new_bags) .with_invite_worker(invite_worker) .with_size_limit(size_limit) @@ -77,12 +81,12 @@ fn create_storage_bucket_fails_with_invalid_voucher_params() { let objects_limit = 10; 
CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_size_limit(size_limit) .call_and_assert(Err(Error::::VoucherMaxObjectSizeLimitExceeded.into())); CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_objects_limit(objects_limit) .call_and_assert(Err( Error::::VoucherMaxObjectNumberLimitExceeded.into() @@ -98,7 +102,7 @@ fn create_storage_bucket_succeeded_with_invited_member() { let invite_worker = Some(invited_worker_id); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_accepting_new_bags(accepting_new_bags) .with_invite_worker(invite_worker) .call_and_assert(Ok(())) @@ -139,7 +143,7 @@ fn create_storage_bucket_fails_with_invalid_storage_provider_id() { let invalid_storage_provider_id = 155; CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(Some(invalid_storage_provider_id)) .call_and_assert(Err(Error::::StorageProviderOperatorDoesntExist.into())); }); @@ -155,7 +159,7 @@ fn accept_storage_bucket_invitation_succeeded() { let invite_worker = Some(storage_provider_id); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(invite_worker) .call_and_assert(Ok(())) .unwrap(); @@ -197,7 +201,7 @@ fn accept_storage_bucket_invitation_fails_with_non_existing_storage_bucket() { fn accept_storage_bucket_invitation_fails_with_non_invited_storage_provider() { build_test_externalities().execute_with(|| { let bucket_id = CreateStorageBucketFixture::default() - 
.with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(None) .call_and_assert(Ok(())) .unwrap(); @@ -215,7 +219,7 @@ fn accept_storage_bucket_invitation_fails_with_different_invited_storage_provide let different_storage_provider_id = ANOTHER_STORAGE_PROVIDER_ID; let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(Some(different_storage_provider_id)) .call_and_assert(Ok(())) .unwrap(); @@ -233,7 +237,7 @@ fn accept_storage_bucket_invitation_fails_with_already_set_storage_provider() { let storage_provider_id = DEFAULT_STORAGE_PROVIDER_ID; let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(Some(storage_provider_id)) .call_and_assert(Ok(())) .unwrap(); @@ -267,7 +271,7 @@ fn update_storage_buckets_for_bags_succeeded() { let invite_worker = Some(storage_provider_id); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(invite_worker) .call_and_assert(Ok(())) .unwrap(); @@ -275,7 +279,7 @@ fn update_storage_buckets_for_bags_succeeded() { let add_buckets = BTreeSet::from_iter(vec![bucket_id]); UpdateStorageBucketForBagsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_bag_id(bag_id.clone()) .with_add_bucket_ids(add_buckets.clone()) .call_and_assert(Ok(())); @@ -301,7 +305,7 @@ fn update_storage_buckets_for_bags_fails_with_non_existing_dynamic_bag() { let invite_worker = Some(storage_provider_id); let bucket_id = CreateStorageBucketFixture::default() - 
.with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(invite_worker) .call_and_assert(Ok(())) .unwrap(); @@ -309,7 +313,7 @@ fn update_storage_buckets_for_bags_fails_with_non_existing_dynamic_bag() { let add_buckets = BTreeSet::from_iter(vec![bucket_id]); UpdateStorageBucketForBagsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_bag_id(bag_id.clone()) .with_add_bucket_ids(add_buckets.clone()) .call_and_assert(Err(Error::::DynamicBagDoesntExist.into())); @@ -325,7 +329,7 @@ fn update_storage_buckets_for_bags_fails_with_non_accepting_new_bags_bucket() { set_default_update_storage_buckets_per_bag_limit(); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(None) .with_accepting_new_bags(false) .call_and_assert(Ok(())) @@ -334,7 +338,7 @@ fn update_storage_buckets_for_bags_fails_with_non_accepting_new_bags_bucket() { let add_buckets = BTreeSet::from_iter(vec![bucket_id]); UpdateStorageBucketForBagsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_bag_id(bag_id.clone()) .with_add_bucket_ids(add_buckets.clone()) .call_and_assert(Err(Error::::StorageBucketDoesntAcceptNewBags.into())); @@ -370,7 +374,7 @@ fn update_storage_buckets_for_bags_succeeded_with_voucher_usage() { let size_limit = 100; let new_bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_objects_limit(objects_limit) .with_size_limit(size_limit) .call_and_assert(Ok(())) @@ -383,7 +387,7 @@ fn update_storage_buckets_for_bags_succeeded_with_voucher_usage() { 
assert_eq!(bag.stored_by, old_buckets); UpdateStorageBucketForBagsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_bag_id(bag_id.clone()) .with_add_bucket_ids(new_buckets.clone()) .with_remove_bucket_ids(old_buckets.clone()) @@ -433,7 +437,7 @@ fn update_storage_buckets_for_bags_fails_with_exceeding_the_voucher_objects_numb let new_bucket_objects_limit = 0; let new_bucket_size_limit = 100; let new_bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_objects_limit(new_bucket_objects_limit) .with_size_limit(new_bucket_size_limit) .call_and_assert(Ok(())) @@ -442,7 +446,7 @@ fn update_storage_buckets_for_bags_fails_with_exceeding_the_voucher_objects_numb let new_buckets = BTreeSet::from_iter(vec![new_bucket_id]); UpdateStorageBucketForBagsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_bag_id(bag_id.clone()) .with_add_bucket_ids(new_buckets.clone()) .call_and_assert(Err( @@ -479,7 +483,7 @@ fn update_storage_buckets_for_bags_fails_with_exceeding_the_voucher_objects_tota let new_bucket_objects_limit = 1; let new_bucket_size_limit = 5; let new_bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_objects_limit(new_bucket_objects_limit) .with_size_limit(new_bucket_size_limit) .call_and_assert(Ok(())) @@ -488,7 +492,7 @@ fn update_storage_buckets_for_bags_fails_with_exceeding_the_voucher_objects_tota let new_buckets = BTreeSet::from_iter(vec![new_bucket_id]); UpdateStorageBucketForBagsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) 
.with_bag_id(bag_id.clone()) .with_add_bucket_ids(new_buckets.clone()) .call_and_assert(Err( @@ -506,7 +510,7 @@ fn update_storage_buckets_for_working_group_static_bags_succeeded() { let invite_worker = Some(storage_provider_id); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(invite_worker) .call_and_assert(Ok(())) .unwrap(); @@ -517,7 +521,7 @@ fn update_storage_buckets_for_working_group_static_bags_succeeded() { let bag_id = BagId::::Static(static_bag_id.clone()); UpdateStorageBucketForBagsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_bag_id(bag_id.clone()) .with_add_bucket_ids(buckets.clone()) .call_and_assert(Ok(())); @@ -536,7 +540,7 @@ fn update_storage_buckets_for_dynamic_bags_succeeded() { let invite_worker = Some(storage_provider_id); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(invite_worker) .call_and_assert(Ok(())) .unwrap(); @@ -549,7 +553,7 @@ fn update_storage_buckets_for_dynamic_bags_succeeded() { create_dynamic_bag(&dynamic_bag_id); UpdateStorageBucketForBagsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_bag_id(bag_id.clone()) .with_remove_bucket_ids(buckets.clone()) .call_and_assert(Ok(())); @@ -574,7 +578,7 @@ fn update_storage_buckets_for_bags_fails_with_non_leader_origin() { fn update_storage_buckets_for_bags_fails_with_empty_params() { build_test_externalities().execute_with(|| { UpdateStorageBucketForBagsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) 
.call_and_assert(Err(Error::::StorageBucketIdCollectionsAreEmpty.into())); }); } @@ -590,14 +594,14 @@ fn update_storage_buckets_for_bags_fails_with_non_existing_storage_buckets() { // Invalid added bucket ID. UpdateStorageBucketForBagsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_bag_id(bag_id.clone()) .with_add_bucket_ids(buckets.clone()) .call_and_assert(Err(Error::::StorageBucketDoesntExist.into())); // Invalid removed bucket ID. UpdateStorageBucketForBagsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_bag_id(bag_id.clone()) .with_remove_bucket_ids(buckets.clone()) .call_and_assert(Err(Error::::StorageBucketDoesntExist.into())); @@ -613,7 +617,7 @@ fn update_storage_buckets_for_bags_fails_with_going_beyond_the_buckets_per_bag_l let bag_id = BagId::::Static(StaticBagId::Council); UpdateStorageBucketForBagsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_bag_id(bag_id.clone()) .with_add_bucket_ids(buckets.clone()) .call_and_assert(Err(Error::::StorageBucketPerBagLimitExceeded.into())); @@ -630,7 +634,7 @@ fn update_storage_buckets_succeeds_with_add_remove_within_limits() { let _bucket3 = create_default_storage_bucket_and_assign_to_bag(bag_id.clone()); let bucket4 = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .call_and_assert(Ok(())) .unwrap(); @@ -640,7 +644,7 @@ fn update_storage_buckets_succeeds_with_add_remove_within_limits() { let add_buckets = BTreeSet::from_iter(vec![bucket4]); UpdateStorageBucketForBagsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) 
.with_bag_id(bag_id.clone()) .with_add_bucket_ids(add_buckets.clone()) .call_and_assert(Err(Error::::StorageBucketPerBagLimitExceeded.into())); @@ -648,7 +652,7 @@ fn update_storage_buckets_succeeds_with_add_remove_within_limits() { let remove_buckets = BTreeSet::from_iter(vec![bucket1]); UpdateStorageBucketForBagsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_bag_id(bag_id.clone()) .with_add_bucket_ids(add_buckets) .with_remove_bucket_ids(remove_buckets) @@ -722,7 +726,7 @@ fn upload_succeeded_with_data_size_fee() { let data_size_fee = 100; UpdateDataObjectPerMegabyteFeeFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_new_fee(data_size_fee) .call_and_assert(Ok(())); @@ -1051,7 +1055,7 @@ fn upload_fails_with_insufficient_balance_for_data_size_fee() { let data_size_fee = 1000; UpdateDataObjectPerMegabyteFeeFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_new_fee(data_size_fee) .call_and_assert(Ok(())); @@ -1083,7 +1087,7 @@ fn upload_fails_with_data_size_fee_changed() { let data_size_fee = 1000; UpdateDataObjectPerMegabyteFeeFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_new_fee(data_size_fee) .call_and_assert(Ok(())); @@ -1109,7 +1113,7 @@ fn upload_failed_with_blocked_uploading() { let new_blocking_status = true; UpdateUploadingBlockedStatusFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_new_status(new_blocking_status) .call_and_assert(Ok(())); @@ -1130,7 +1134,7 @@ fn upload_failed_with_blacklisted_data_object() { let add_hashes = BTreeSet::from_iter(vec![hash]); 
UpdateBlacklistFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_add_hashes(add_hashes) .call_and_assert(Ok(())); @@ -1158,7 +1162,7 @@ fn set_storage_operator_metadata_succeeded() { let invite_worker = Some(storage_provider_id); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(invite_worker) .call_and_assert(Ok(())) .unwrap(); @@ -1212,7 +1216,7 @@ fn set_storage_operator_metadata_fails_with_invalid_storage_association() { // Missing invitation let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .call_and_assert(Ok(())) .unwrap(); @@ -1224,7 +1228,7 @@ fn set_storage_operator_metadata_fails_with_invalid_storage_association() { // Not accepted invitation let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(invite_worker) .call_and_assert(Ok(())) .unwrap(); @@ -1260,36 +1264,19 @@ fn accept_pending_data_objects_succeeded() { set_max_voucher_limits(); set_default_update_storage_buckets_per_bag_limit(); - let objects_limit = 1; - let size_limit = 100; - let static_bag_id = StaticBagId::Council; - let bag_id = BagId::::Static(static_bag_id.clone()); + let bag_id: BagId = static_bag_id.into(); let storage_provider_id = DEFAULT_STORAGE_PROVIDER_ID; - let invite_worker = Some(storage_provider_id); - - let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) - .with_invite_worker(invite_worker) - .with_size_limit(size_limit) - .with_objects_limit(objects_limit) - .call_and_assert(Ok(())) - .unwrap(); - - 
AcceptStorageBucketInvitationFixture::default() - .with_origin(RawOrigin::Signed(DEFAULT_STORAGE_PROVIDER_ACCOUNT_ID)) - .with_storage_bucket_id(bucket_id) - .with_worker_id(storage_provider_id) - .call_and_assert(Ok(())); - - let buckets = BTreeSet::from_iter(vec![bucket_id]); + let objects_limit = 1; + let size_limit = 100; - UpdateStorageBucketForBagsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) - .with_bag_id(bag_id.clone()) - .with_add_bucket_ids(buckets.clone()) - .call_and_assert(Ok(())); + let bucket_id = create_storage_bucket_and_assign_to_bag( + bag_id.clone(), + Some(storage_provider_id), + objects_limit, + size_limit, + ); let initial_balance = 1000; increase_account_balance(&DEFAULT_MEMBER_ACCOUNT_ID, initial_balance); @@ -1310,7 +1297,6 @@ fn accept_pending_data_objects_succeeded() { let data_object_ids = BTreeSet::from_iter(vec![data_object_id]); - let bag_id = static_bag_id.into(); let data_object = Storage::ensure_data_object_exists(&bag_id, &data_object_id).unwrap(); // Check `accepted` flag for the fist data object in the bag. 
assert_eq!(data_object.accepted, false); @@ -1346,7 +1332,7 @@ fn accept_pending_data_objects_fails_with_unrelated_storage_bucket() { let bag_id = BagId::::Static(static_bag_id); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(invite_worker) .call_and_assert(Ok(())) .unwrap(); @@ -1393,7 +1379,7 @@ fn accept_pending_data_objects_fails_with_non_existing_dynamic_bag() { let invite_worker = Some(storage_provider_id); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(invite_worker) .call_and_assert(Ok(())) .unwrap(); @@ -1435,7 +1421,7 @@ fn accept_pending_data_objects_succeeded_with_dynamic_bag() { let size_limit = 100; let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(invite_worker) .with_objects_limit(objects_limit) .with_size_limit(size_limit) @@ -1580,13 +1566,13 @@ fn cancel_storage_bucket_operator_invite_succeeded() { let invite_worker = Some(storage_provider_id); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(invite_worker) .call_and_assert(Ok(())) .unwrap(); CancelStorageBucketInvitationFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_storage_bucket_id(bucket_id) .call_and_assert(Ok(())); @@ -1611,7 +1597,7 @@ fn cancel_storage_bucket_operator_invite_fails_with_non_leader_origin() { fn cancel_storage_bucket_operator_invite_fails_with_non_existing_storage_bucket() { 
build_test_externalities().execute_with(|| { CancelStorageBucketInvitationFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .call_and_assert(Err(Error::::StorageBucketDoesntExist.into())); }); } @@ -1620,13 +1606,13 @@ fn cancel_storage_bucket_operator_invite_fails_with_non_existing_storage_bucket( fn cancel_storage_bucket_operator_invite_fails_with_non_invited_storage_provider() { build_test_externalities().execute_with(|| { let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(None) .call_and_assert(Ok(())) .unwrap(); CancelStorageBucketInvitationFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_storage_bucket_id(bucket_id) .call_and_assert(Err(Error::::NoStorageBucketInvitation.into())); }); @@ -1638,7 +1624,7 @@ fn cancel_storage_bucket_operator_invite_fails_with_already_set_storage_provider let storage_provider_id = DEFAULT_STORAGE_PROVIDER_ID; let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(Some(storage_provider_id)) .call_and_assert(Ok(())) .unwrap(); @@ -1650,7 +1636,7 @@ fn cancel_storage_bucket_operator_invite_fails_with_already_set_storage_provider .call_and_assert(Ok(())); CancelStorageBucketInvitationFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_storage_bucket_id(bucket_id) .call_and_assert(Err(Error::::StorageProviderAlreadySet.into())); }); @@ -1665,12 +1651,12 @@ fn invite_storage_bucket_operator_succeeded() { let storage_provider_id = DEFAULT_STORAGE_PROVIDER_ID; let bucket_id = 
CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .call_and_assert(Ok(())) .unwrap(); InviteStorageBucketOperatorFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_storage_bucket_id(bucket_id) .with_operator_worker_id(storage_provider_id) .call_and_assert(Ok(())); @@ -1697,7 +1683,7 @@ fn invite_storage_bucket_operator_fails_with_non_leader_origin() { fn invite_storage_bucket_operator_fails_with_non_existing_storage_bucket() { build_test_externalities().execute_with(|| { InviteStorageBucketOperatorFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .call_and_assert(Err(Error::::StorageBucketDoesntExist.into())); }); } @@ -1708,13 +1694,13 @@ fn invite_storage_bucket_operator_fails_with_non_missing_invitation() { let invited_worker_id = DEFAULT_STORAGE_PROVIDER_ID; let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(Some(invited_worker_id)) .call_and_assert(Ok(())) .unwrap(); InviteStorageBucketOperatorFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_storage_bucket_id(bucket_id) .call_and_assert(Err(Error::::InvitedStorageProvider.into())); }); @@ -1726,12 +1712,12 @@ fn invite_storage_bucket_operator_fails_with_invalid_storage_provider_id() { let invalid_storage_provider_id = 155; let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .call_and_assert(Ok(())) .unwrap(); InviteStorageBucketOperatorFixture::default() - 
.with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_storage_bucket_id(bucket_id) .with_operator_worker_id(invalid_storage_provider_id) .call_and_assert(Err(Error::::StorageProviderOperatorDoesntExist.into())); @@ -1747,7 +1733,7 @@ fn update_uploading_blocked_status_succeeded() { let new_blocking_status = true; UpdateUploadingBlockedStatusFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_new_status(new_blocking_status) .call_and_assert(Ok(())); @@ -2189,7 +2175,7 @@ fn delete_data_objects_fails_with_invalid_treasury_balance() { let invite_worker = Some(storage_provider_id); CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(invite_worker) .call_and_assert(Ok(())) .unwrap(); @@ -2321,13 +2307,13 @@ fn update_storage_bucket_status_succeeded() { run_to_block(starting_block); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .call_and_assert(Ok(())) .unwrap(); let new_status = true; UpdateStorageBucketStatusFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_storage_bucket_id(bucket_id) .with_new_status(new_status) .call_and_assert(Ok(())); @@ -2351,7 +2337,7 @@ fn update_storage_bucket_status_fails_with_invalid_origin() { fn update_storage_bucket_status_fails_with_invalid_storage_bucket() { build_test_externalities().execute_with(|| { UpdateStorageBucketStatusFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .call_and_assert(Err(Error::::StorageBucketDoesntExist.into())); }); } 
@@ -2367,7 +2353,7 @@ fn update_blacklist_succeeded() { let add_hashes = BTreeSet::from_iter(vec![cid1.clone()]); UpdateBlacklistFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_add_hashes(add_hashes.clone()) .call_and_assert(Ok(())); @@ -2378,7 +2364,7 @@ fn update_blacklist_succeeded() { let add_hashes = BTreeSet::from_iter(vec![cid2.clone()]); UpdateBlacklistFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_add_hashes(add_hashes.clone()) .with_remove_hashes(remove_hashes.clone()) .call_and_assert(Ok(())); @@ -2401,7 +2387,7 @@ fn update_blacklist_failed_with_exceeding_size_limit() { let add_hashes = BTreeSet::from_iter(vec![cid1.clone()]); UpdateBlacklistFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_add_hashes(add_hashes.clone()) .call_and_assert(Ok(())); @@ -2409,7 +2395,7 @@ fn update_blacklist_failed_with_exceeding_size_limit() { let add_hashes = BTreeSet::from_iter(vec![cid2.clone(), cid3.clone()]); UpdateBlacklistFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_add_hashes(add_hashes.clone()) .with_remove_hashes(remove_hashes.clone()) .call_and_assert(Err(Error::::BlacklistSizeLimitExceeded.into())); @@ -2430,7 +2416,7 @@ fn update_blacklist_failed_with_exceeding_size_limit_with_non_existent_remove_ha let add_hashes = BTreeSet::from_iter(vec![cid1.clone()]); UpdateBlacklistFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_add_hashes(add_hashes.clone()) .call_and_assert(Ok(())); @@ -2438,7 +2424,7 @@ fn update_blacklist_failed_with_exceeding_size_limit_with_non_existent_remove_ha let 
add_hashes = BTreeSet::from_iter(vec![cid2.clone()]); UpdateBlacklistFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_add_hashes(add_hashes.clone()) .with_remove_hashes(remove_hashes.clone()) .call_and_assert(Err(Error::::BlacklistSizeLimitExceeded.into())); @@ -2457,12 +2443,12 @@ fn update_blacklist_succeeds_with_existent_remove_hashes() { let add_hashes = BTreeSet::from_iter(vec![cid1.clone()]); UpdateBlacklistFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_add_hashes(add_hashes.clone()) .call_and_assert(Ok(())); UpdateBlacklistFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_add_hashes(add_hashes.clone()) .call_and_assert(Ok(())); @@ -2499,7 +2485,7 @@ fn create_storage_bucket_and_assign_to_bag( set_default_update_storage_buckets_per_bag_limit(); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(storage_provider_id) .with_objects_limit(objects_limit) .with_size_limit(size_limit) @@ -2509,7 +2495,7 @@ fn create_storage_bucket_and_assign_to_bag( let buckets = BTreeSet::from_iter(vec![bucket_id]); UpdateStorageBucketForBagsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_bag_id(bag_id.clone()) .with_add_bucket_ids(buckets.clone()) .call_and_assert(Ok(())); @@ -2624,12 +2610,12 @@ fn delete_storage_bucket_succeeded() { run_to_block(starting_block); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .call_and_assert(Ok(())) 
.unwrap(); DeleteStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_storage_bucket_id(bucket_id) .call_and_assert(Ok(())); @@ -2652,7 +2638,7 @@ fn delete_storage_bucket_fails_with_non_leader_origin() { fn delete_storage_bucket_fails_with_non_existing_storage_bucket() { build_test_externalities().execute_with(|| { DeleteStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .call_and_assert(Err(Error::::StorageBucketDoesntExist.into())); }); } @@ -2682,7 +2668,7 @@ fn delete_storage_bucket_fails_with_non_empty_bucket() { .call_and_assert(Ok(())); DeleteStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_storage_bucket_id(bucket_id) .call_and_assert(Err(Error::::CannotDeleteNonEmptyStorageBucket.into())); }); @@ -2698,7 +2684,7 @@ fn remove_storage_bucket_operator_succeeded() { let invite_worker = Some(storage_provider_id); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(invite_worker) .call_and_assert(Ok(())) .unwrap(); @@ -2710,7 +2696,7 @@ fn remove_storage_bucket_operator_succeeded() { .call_and_assert(Ok(())); RemoveStorageBucketOperatorFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_storage_bucket_id(bucket_id) .call_and_assert(Ok(())); @@ -2733,7 +2719,7 @@ fn remove_storage_bucket_operator_fails_with_non_leader_origin() { fn remove_storage_bucket_operator_fails_with_non_existing_storage_bucket() { build_test_externalities().execute_with(|| { RemoveStorageBucketOperatorFixture::default() - 
.with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .call_and_assert(Err(Error::::StorageBucketDoesntExist.into())); }); } @@ -2745,13 +2731,13 @@ fn remove_storage_bucket_operator_fails_with_non_accepted_storage_provider() { let invite_worker = Some(storage_provider_id); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(invite_worker) .call_and_assert(Ok(())) .unwrap(); RemoveStorageBucketOperatorFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_storage_bucket_id(bucket_id) .call_and_assert(Err(Error::::StorageProviderMustBeSet.into())); }); @@ -2761,13 +2747,13 @@ fn remove_storage_bucket_operator_fails_with_non_accepted_storage_provider() { fn remove_storage_bucket_operator_fails_with_missing_storage_provider() { build_test_externalities().execute_with(|| { let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(None) .call_and_assert(Ok(())) .unwrap(); RemoveStorageBucketOperatorFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_storage_bucket_id(bucket_id) .call_and_assert(Err(Error::::StorageProviderMustBeSet.into())); }); @@ -2782,7 +2768,7 @@ fn update_data_size_fee_succeeded() { let new_fee = 1000; UpdateDataObjectPerMegabyteFeeFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_new_fee(new_fee) .call_and_assert(Ok(())); @@ -2812,7 +2798,7 @@ fn data_size_fee_calculation_works_properly() { let data_size_fee = 1000; 
UpdateDataObjectPerMegabyteFeeFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_new_fee(data_size_fee) .call_and_assert(Ok(())); @@ -2889,7 +2875,7 @@ fn update_storage_buckets_per_bag_limit_succeeded() { let new_limit = 4; UpdateStorageBucketsPerBagLimitFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_new_limit(new_limit) .call_and_assert(Ok(())); @@ -2916,14 +2902,14 @@ fn update_storage_buckets_per_bag_limit_fails_with_incorrect_value() { let new_limit = 0; UpdateStorageBucketsPerBagLimitFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_new_limit(new_limit) .call_and_assert(Err(Error::::StorageBucketsPerBagLimitTooLow.into())); let new_limit = 100; UpdateStorageBucketsPerBagLimitFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_new_limit(new_limit) .call_and_assert(Err(Error::::StorageBucketsPerBagLimitTooHigh.into())); }); @@ -2931,7 +2917,7 @@ fn update_storage_buckets_per_bag_limit_fails_with_incorrect_value() { fn set_update_storage_buckets_per_bag_limit(new_limit: u64) { UpdateStorageBucketsPerBagLimitFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_new_limit(new_limit) .call_and_assert(Ok(())) } @@ -2954,7 +2940,7 @@ fn set_storage_bucket_voucher_limits_succeeded() { let invite_worker = Some(storage_provider_id); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(invite_worker) .call_and_assert(Ok(())) .unwrap(); @@ -2969,7 +2955,7 @@ fn 
set_storage_bucket_voucher_limits_succeeded() { let new_objects_number_limit = 1; SetStorageBucketVoucherLimitsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_storage_bucket_id(bucket_id) .with_new_objects_number_limit(new_objects_number_limit) .with_new_objects_size_limit(new_objects_size_limit) @@ -2990,7 +2976,7 @@ fn set_storage_bucket_voucher_limits_fails_with_invalid_values() { let invite_worker = Some(storage_provider_id); let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(invite_worker) .call_and_assert(Ok(())) .unwrap(); @@ -3005,13 +2991,13 @@ fn set_storage_bucket_voucher_limits_fails_with_invalid_values() { let invalid_objects_number_limit = 1000; SetStorageBucketVoucherLimitsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_storage_bucket_id(bucket_id) .with_new_objects_size_limit(invalid_objects_size_limit) .call_and_assert(Err(Error::::VoucherMaxObjectSizeLimitExceeded.into())); SetStorageBucketVoucherLimitsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_storage_bucket_id(bucket_id) .with_new_objects_number_limit(invalid_objects_number_limit) .call_and_assert(Err( @@ -3033,7 +3019,7 @@ fn set_storage_bucket_voucher_limits_fails_with_invalid_origin() { fn set_storage_bucket_voucher_limits_fails_with_invalid_storage_bucket() { build_test_externalities().execute_with(|| { SetStorageBucketVoucherLimitsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .call_and_assert(Err(Error::::StorageBucketDoesntExist.into())); }); } @@ -3058,7 +3044,7 @@ 
fn update_storage_buckets_voucher_max_limits_succeeded() { let new_number_limit = 4; UpdateStorageBucketsVoucherMaxLimitsFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_new_objects_number_limit(new_number_limit) .with_new_objects_size_limit(new_size_limit) .call_and_assert(Ok(())); @@ -3242,7 +3228,7 @@ fn test_storage_bucket_picking_for_bag_non_random() { assert_eq!(bucket_ids, expected_ids); // No storage buckets required - crate::DynamicBagCreationPolicies::insert( + crate::DynamicBagCreationPolicies::::insert( DynamicBagType::Member, DynamicBagCreationPolicy::default(), ); @@ -3324,7 +3310,7 @@ fn test_storage_bucket_picking_for_bag_with_randomness() { assert!(!bucket_ids.contains(removed_bucket_id)); // No storage buckets required - crate::DynamicBagCreationPolicies::insert( + crate::DynamicBagCreationPolicies::::insert( DynamicBagType::Member, DynamicBagCreationPolicy::default(), ); @@ -3380,7 +3366,7 @@ fn create_storage_buckets(buckets_number: u64) -> BTreeSet { for _ in 0..buckets_number { let bucket_id = CreateStorageBucketFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_invite_worker(None) .with_objects_limit(objects_limit) .with_size_limit(size_limit) @@ -3403,7 +3389,7 @@ fn update_number_of_storage_buckets_in_dynamic_bag_creation_policy_succeeded() { let new_bucket_number = 40; UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_new_storage_buckets_number(new_bucket_number) .with_dynamic_bag_type(dynamic_bag_type) .call_and_assert(Ok(())); @@ -3436,10 +3422,13 @@ fn dynamic_bag_creation_policy_defaults_and_updates_succeeded() { // Change member dynamic bag creation policy. 
let dynamic_bag_type = DynamicBagType::Member; let policy = Storage::get_dynamic_bag_creation_policy(dynamic_bag_type); - assert_eq!(policy, DefaultMemberDynamicBagCreationPolicy::get()); + assert_eq!( + policy.number_of_storage_buckets, + DefaultMemberDynamicBagNumberOfStorageBuckets::get() + ); UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_new_storage_buckets_number(new_bucket_number) .with_dynamic_bag_type(dynamic_bag_type) .call_and_assert(Ok(())); @@ -3450,10 +3439,13 @@ fn dynamic_bag_creation_policy_defaults_and_updates_succeeded() { // Change channel dynamic bag creation policy. let dynamic_bag_type = DynamicBagType::Channel; let policy = Storage::get_dynamic_bag_creation_policy(dynamic_bag_type); - assert_eq!(policy, DefaultChannelDynamicBagCreationPolicy::get()); + assert_eq!( + policy.number_of_storage_buckets, + DefaultChannelDynamicBagNumberOfStorageBuckets::get() + ); UpdateNumberOfStorageBucketsInDynamicBagCreationPolicyFixture::default() - .with_origin(RawOrigin::Signed(WG_LEADER_ACCOUNT_ID)) + .with_origin(RawOrigin::Signed(STORAGE_WG_LEADER_ACCOUNT_ID)) .with_new_storage_buckets_number(new_bucket_number) .with_dynamic_bag_type(dynamic_bag_type) .call_and_assert(Ok(())); @@ -3462,3 +3454,1595 @@ fn dynamic_bag_creation_policy_defaults_and_updates_succeeded() { assert_eq!(policy.number_of_storage_buckets, new_bucket_number); }); } + +#[test] +fn create_distribution_bucket_family_succeeded() { + build_test_externalities().execute_with(|| { + let starting_block = 1; + run_to_block(starting_block); + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_family = Storage::distribution_bucket_family_by_id(family_id); + + assert_eq!(bucket_family, 
DistributionBucketFamily::::default()); + + EventFixture::assert_last_crate_event(RawEvent::DistributionBucketFamilyCreated(family_id)); + }); +} + +#[test] +fn create_distribution_bucket_family_fails_with_non_signed_origin() { + build_test_externalities().execute_with(|| { + CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::None) + .call_and_assert(Err(DispatchError::BadOrigin)); + }); +} + +#[test] +fn create_distribution_bucket_family_fails_with_exceeding_family_number_limit() { + build_test_externalities().execute_with(|| { + for _ in 0..MaxDistributionBucketFamilyNumber::get() { + CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())); + } + + CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err( + Error::::MaxDistributionBucketFamilyNumberLimitExceeded.into(), + )); + }); +} + +#[test] +fn delete_distribution_bucket_family_succeeded() { + build_test_externalities().execute_with(|| { + let starting_block = 1; + run_to_block(starting_block); + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + DeleteDistributionBucketFamilyFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())); + + EventFixture::assert_last_crate_event(RawEvent::DistributionBucketFamilyDeleted(family_id)); + }); +} + +#[test] +fn delete_distribution_bucket_family_fails_with_assgined_bags() { + build_test_externalities().execute_with(|| { + set_default_distribution_buckets_per_bag_limit(); + + let static_bag_id = StaticBagId::Council; + let bag_id: BagId = static_bag_id.into(); + + let family_id = CreateDistributionBucketFamilyFixture::default() + 
.with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_accept_new_bags(true) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let add_buckets = BTreeSet::from_iter(vec![bucket_id]); + + UpdateDistributionBucketForBagsFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bag_id(bag_id.clone()) + .with_family_id(family_id) + .with_add_bucket_ids(add_buckets.clone()) + .call_and_assert(Ok(())); + + let bag = Storage::bag(&bag_id); + assert_eq!(bag.distributed_by, add_buckets); + + DeleteDistributionBucketFamilyFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err(Error::::DistributionBucketIsBoundToBag.into())); + }); +} + +#[test] +fn delete_distribution_bucket_family_fails_with_bound_member_dynamic_bag_creation_policy() { + build_test_externalities().execute_with(|| { + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let new_bucket_number = 10; + let families = BTreeMap::from_iter(vec![(family_id, new_bucket_number)]); + let dynamic_bag_type = DynamicBagType::Member; + + UpdateFamiliesInDynamicBagCreationPolicyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_families(families.clone()) + .with_dynamic_bag_type(dynamic_bag_type) + .call_and_assert(Ok(())); + + DeleteDistributionBucketFamilyFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err( + Error::::DistributionFamilyBoundToBagCreationPolicy.into(), + )); + }); +} + +#[test] +fn 
delete_distribution_bucket_family_fails_with_bound_channel_dynamic_bag_creation_policy() { + build_test_externalities().execute_with(|| { + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let new_bucket_number = 10; + let families = BTreeMap::from_iter(vec![(family_id, new_bucket_number)]); + let dynamic_bag_type = DynamicBagType::Channel; + + UpdateFamiliesInDynamicBagCreationPolicyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_families(families.clone()) + .with_dynamic_bag_type(dynamic_bag_type) + .call_and_assert(Ok(())); + + DeleteDistributionBucketFamilyFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err( + Error::::DistributionFamilyBoundToBagCreationPolicy.into(), + )); + }); +} + +#[test] +fn delete_distribution_bucket_family_fails_with_non_signed_origin() { + build_test_externalities().execute_with(|| { + DeleteDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::None) + .call_and_assert(Err(DispatchError::BadOrigin)); + }); +} + +#[test] +fn delete_distribution_bucket_family_fails_with_non_existing_family() { + build_test_externalities().execute_with(|| { + DeleteDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err( + Error::::DistributionBucketFamilyDoesntExist.into() + )); + }); +} + +#[test] +fn create_distribution_bucket_succeeded() { + build_test_externalities().execute_with(|| { + let starting_block = 1; + run_to_block(starting_block); + + let accept_new_bags = false; + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = 
CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_accept_new_bags(accept_new_bags) + .call_and_assert(Ok(())) + .unwrap(); + + assert!(Storage::distribution_bucket_family_by_id(family_id) + .distribution_buckets + .contains_key(&bucket_id)); + + EventFixture::assert_last_crate_event(RawEvent::DistributionBucketCreated( + family_id, + accept_new_bags, + bucket_id, + )); + }); +} + +#[test] +fn create_distribution_bucket_fails_with_non_signed_origin() { + build_test_externalities().execute_with(|| { + CreateDistributionBucketFixture::default() + .with_origin(RawOrigin::None) + .call_and_assert(Err(DispatchError::BadOrigin)); + }); +} + +#[test] +fn create_distribution_bucket_fails_with_non_existing_family() { + build_test_externalities().execute_with(|| { + CreateDistributionBucketFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err( + Error::::DistributionBucketFamilyDoesntExist.into() + )); + }); +} + +#[test] +fn create_distribution_bucket_fails_with_exceeding_max_bucket_number() { + build_test_externalities().execute_with(|| { + let (family_id, _) = create_distribution_bucket_family_with_buckets( + MaxDistributionBucketNumberPerFamily::get(), + ); + + CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err( + Error::::MaxDistributionBucketNumberPerFamilyLimitExceeded.into(), + )); + }); +} + +#[test] +fn update_distribution_bucket_status_succeeded() { + build_test_externalities().execute_with(|| { + let starting_block = 1; + run_to_block(starting_block); + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + 
.with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let new_status = true; + UpdateDistributionBucketStatusFixture::default() + .with_family_id(family_id) + .with_bucket_id(bucket_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_new_status(new_status) + .call_and_assert(Ok(())); + + assert_eq!( + Storage::distribution_bucket_family_by_id(family_id) + .distribution_buckets + .get(&bucket_id) + .unwrap() + .accepting_new_bags, + new_status + ); + + EventFixture::assert_last_crate_event(RawEvent::DistributionBucketStatusUpdated( + family_id, bucket_id, new_status, + )); + }); +} + +#[test] +fn update_distribution_bucket_status_fails_with_invalid_origin() { + build_test_externalities().execute_with(|| { + UpdateDistributionBucketStatusFixture::default() + .with_origin(RawOrigin::Root) + .call_and_assert(Err(DispatchError::BadOrigin)); + }); +} + +#[test] +fn update_distribution_bucket_status_fails_with_invalid_distribution_bucket() { + build_test_externalities().execute_with(|| { + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + UpdateDistributionBucketStatusFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err(Error::::DistributionBucketDoesntExist.into())); + }); +} + +#[test] +fn update_distribution_bucket_status_fails_with_invalid_distribution_bucket_family() { + build_test_externalities().execute_with(|| { + UpdateDistributionBucketStatusFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err( + Error::::DistributionBucketFamilyDoesntExist.into() + )); + }); +} + +#[test] +fn delete_distribution_bucket_succeeded() { + build_test_externalities().execute_with(|| { + let 
starting_block = 1; + run_to_block(starting_block); + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + DeleteDistributionBucketFixture::default() + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())); + + EventFixture::assert_last_crate_event(RawEvent::DistributionBucketDeleted( + family_id, bucket_id, + )); + }); +} + +#[test] +fn delete_distribution_bucket_fails_with_assgined_bags() { + build_test_externalities().execute_with(|| { + set_default_distribution_buckets_per_bag_limit(); + + let static_bag_id = StaticBagId::Council; + let bag_id: BagId = static_bag_id.into(); + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_accept_new_bags(true) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let add_buckets = BTreeSet::from_iter(vec![bucket_id]); + + UpdateDistributionBucketForBagsFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bag_id(bag_id.clone()) + .with_family_id(family_id) + .with_add_bucket_ids(add_buckets.clone()) + .call_and_assert(Ok(())); + + let bag = Storage::bag(&bag_id); + assert_eq!(bag.distributed_by, add_buckets); + + DeleteDistributionBucketFixture::default() + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + 
.call_and_assert(Err(Error::::DistributionBucketIsBoundToBag.into())); + }); +} + +#[test] +fn delete_distribution_bucket_failed_with_existing_operators() { + build_test_externalities().execute_with(|| { + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + InviteDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_operator_worker_id(DEFAULT_DISTRIBUTION_PROVIDER_ID) + .call_and_assert(Ok(())); + + AcceptDistributionBucketInvitationFixture::default() + .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID)) + .with_family_id(family_id) + .with_bucket_id(bucket_id) + .with_worker_id(DEFAULT_DISTRIBUTION_PROVIDER_ID) + .call_and_assert(Ok(())); + + DeleteDistributionBucketFixture::default() + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err(Error::::DistributionProviderOperatorSet.into())); + }); +} + +#[test] +fn delete_distribution_bucket_fails_with_non_leader_origin() { + build_test_externalities().execute_with(|| { + let non_leader_id = 1111; + + DeleteDistributionBucketFixture::default() + .with_origin(RawOrigin::Signed(non_leader_id)) + .call_and_assert(Err(DispatchError::BadOrigin)); + }); +} + +#[test] +fn delete_distribution_bucket_fails_with_non_existing_distribution_bucket() { + build_test_externalities().execute_with(|| { + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); 
+ + DeleteDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err(Error::::DistributionBucketDoesntExist.into())); + }); +} + +#[test] +fn delete_distribution_bucket_fails_with_non_existing_distribution_bucket_family() { + build_test_externalities().execute_with(|| { + DeleteDistributionBucketFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err( + Error::::DistributionBucketFamilyDoesntExist.into() + )); + }); +} + +#[test] +fn update_distribution_buckets_for_bags_succeeded() { + build_test_externalities().execute_with(|| { + let starting_block = 1; + run_to_block(starting_block); + + set_default_distribution_buckets_per_bag_limit(); + + let static_bag_id = StaticBagId::Council; + let bag_id: BagId = static_bag_id.into(); + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_accept_new_bags(true) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let add_buckets = BTreeSet::from_iter(vec![bucket_id]); + + UpdateDistributionBucketForBagsFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bag_id(bag_id.clone()) + .with_family_id(family_id) + .with_add_bucket_ids(add_buckets.clone()) + .call_and_assert(Ok(())); + + let bag = Storage::bag(&bag_id); + assert_eq!(bag.distributed_by, add_buckets); + + EventFixture::assert_last_crate_event(RawEvent::DistributionBucketsUpdatedForBag( + bag_id, + family_id, + add_buckets, + BTreeSet::new(), + )); + }); +} + +#[test] +fn update_distribution_buckets_for_bags_succeeded_with_additioonal_checks_on_adding_and_removing() { + 
build_test_externalities().execute_with(|| { + set_default_distribution_buckets_per_bag_limit(); + + let static_bag_id = StaticBagId::Council; + let bag_id: BagId = static_bag_id.into(); + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_accept_new_bags(true) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let add_buckets = BTreeSet::from_iter(vec![bucket_id]); + + UpdateDistributionBucketForBagsFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bag_id(bag_id.clone()) + .with_family_id(family_id) + .with_add_bucket_ids(add_buckets.clone()) + .call_and_assert(Ok(())); + + // Add check + let bag = Storage::bag(&bag_id); + assert_eq!(bag.distributed_by, add_buckets); + + let family = Storage::distribution_bucket_family_by_id(family_id); + let bucket = family.distribution_buckets.get(&bucket_id).unwrap(); + assert_eq!(bucket.assigned_bags, 1); + + // ****** + + UpdateDistributionBucketForBagsFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bag_id(bag_id.clone()) + .with_family_id(family_id) + .with_remove_bucket_ids(add_buckets.clone()) + .call_and_assert(Ok(())); + + let bag = Storage::bag(&bag_id); + assert_eq!(bag.distributed_by.len(), 0); + + let family = Storage::distribution_bucket_family_by_id(family_id); + let bucket = family.distribution_buckets.get(&bucket_id).unwrap(); + assert_eq!(bucket.assigned_bags, 0); + }); +} + +#[test] +fn update_distribution_buckets_for_bags_fails_with_non_existing_dynamic_bag() { + build_test_externalities().execute_with(|| { + let dynamic_bag_id = DynamicBagId::::Member(DEFAULT_MEMBER_ID); + let bag_id: BagId = dynamic_bag_id.into(); + + let family_id = 
CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let add_buckets = BTreeSet::from_iter(vec![bucket_id]); + + UpdateDistributionBucketForBagsFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_family_id(family_id) + .with_bag_id(bag_id.clone()) + .with_add_bucket_ids(add_buckets.clone()) + .call_and_assert(Err(Error::::DynamicBagDoesntExist.into())); + }); +} + +#[test] +fn update_distribution_buckets_for_bags_fails_with_non_accepting_new_bags_bucket() { + build_test_externalities().execute_with(|| { + set_default_distribution_buckets_per_bag_limit(); + + let static_bag_id = StaticBagId::Council; + let bag_id: BagId = static_bag_id.into(); + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_accept_new_bags(false) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let add_buckets = BTreeSet::from_iter(vec![bucket_id]); + + UpdateDistributionBucketForBagsFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_family_id(family_id) + .with_bag_id(bag_id.clone()) + .with_add_bucket_ids(add_buckets.clone()) + .call_and_assert(Err( + Error::::DistributionBucketDoesntAcceptNewBags.into() + )); + }); +} + +#[test] +fn update_distribution_buckets_for_bags_fails_with_non_leader_origin() { + build_test_externalities().execute_with(|| { + let non_leader_id = 1; + + 
UpdateDistributionBucketForBagsFixture::default() + .with_origin(RawOrigin::Signed(non_leader_id)) + .call_and_assert(Err(DispatchError::BadOrigin)); + }); +} + +#[test] +fn update_distribution_buckets_for_bags_fails_with_empty_params() { + build_test_externalities().execute_with(|| { + UpdateDistributionBucketForBagsFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err( + Error::::DistributionBucketIdCollectionsAreEmpty.into() + )); + }); +} + +#[test] +fn update_distribution_buckets_for_bags_fails_with_non_existing_distribution_buckets() { + build_test_externalities().execute_with(|| { + set_default_distribution_buckets_per_bag_limit(); + + let invalid_bucket_id = 11000; + let buckets = BTreeSet::from_iter(vec![invalid_bucket_id]); + let bag_id: BagId = StaticBagId::Council.into(); + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + // Invalid added bucket ID. + UpdateDistributionBucketForBagsFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bag_id(bag_id.clone()) + .with_family_id(family_id) + .with_add_bucket_ids(buckets.clone()) + .call_and_assert(Err(Error::::DistributionBucketDoesntExist.into())); + + // Invalid removed bucket ID. 
+ UpdateDistributionBucketForBagsFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bag_id(bag_id.clone()) + .with_family_id(family_id) + .with_remove_bucket_ids(buckets.clone()) + .call_and_assert(Err(Error::::DistributionBucketDoesntExist.into())); + }); +} + +fn set_default_distribution_buckets_per_bag_limit() { + crate::DistributionBucketsPerBagLimit::put(5); +} + +#[test] +fn update_distribution_buckets_per_bag_limit_succeeded() { + build_test_externalities().execute_with(|| { + let starting_block = 1; + run_to_block(starting_block); + + let new_limit = 4; + + UpdateDistributionBucketsPerBagLimitFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_new_limit(new_limit) + .call_and_assert(Ok(())); + + EventFixture::assert_last_crate_event(RawEvent::DistributionBucketsPerBagLimitUpdated( + new_limit, + )); + }); +} + +#[test] +fn update_distribution_buckets_per_bag_limit_origin() { + build_test_externalities().execute_with(|| { + let non_leader_id = 1; + + UpdateDistributionBucketsPerBagLimitFixture::default() + .with_origin(RawOrigin::Signed(non_leader_id)) + .call_and_assert(Err(DispatchError::BadOrigin)); + }); +} + +#[test] +fn update_distribution_buckets_per_bag_limit_fails_with_incorrect_value() { + build_test_externalities().execute_with(|| { + let new_limit = 0; + + UpdateDistributionBucketsPerBagLimitFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_new_limit(new_limit) + .call_and_assert(Err( + Error::::DistributionBucketsPerBagLimitTooLow.into() + )); + + let new_limit = 100; + + UpdateDistributionBucketsPerBagLimitFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_new_limit(new_limit) + .call_and_assert(Err( + Error::::DistributionBucketsPerBagLimitTooHigh.into() + )); + }); +} + +#[test] +fn update_distribution_bucket_mode_succeeded() { + 
build_test_externalities().execute_with(|| { + let starting_block = 1; + run_to_block(starting_block); + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let distributing = false; + UpdateDistributionBucketModeFixture::default() + .with_family_id(family_id) + .with_bucket_id(bucket_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_distributing(distributing) + .call_and_assert(Ok(())); + + assert_eq!( + Storage::distribution_bucket_family_by_id(family_id) + .distribution_buckets + .get(&bucket_id) + .unwrap() + .accepting_new_bags, + distributing + ); + + EventFixture::assert_last_crate_event(RawEvent::DistributionBucketModeUpdated( + family_id, + bucket_id, + distributing, + )); + }); +} + +#[test] +fn update_distribution_bucket_mode_fails_with_invalid_origin() { + build_test_externalities().execute_with(|| { + UpdateDistributionBucketModeFixture::default() + .with_origin(RawOrigin::Root) + .call_and_assert(Err(DispatchError::BadOrigin)); + }); +} + +#[test] +fn update_distribution_bucket_mode_fails_with_invalid_distribution_bucket() { + build_test_externalities().execute_with(|| { + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + UpdateDistributionBucketModeFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err(Error::::DistributionBucketDoesntExist.into())); + }); +} + +#[test] +fn update_distribution_bucket_mode_fails_with_invalid_distribution_bucket_family() { + 
build_test_externalities().execute_with(|| { + UpdateDistributionBucketModeFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err( + Error::::DistributionBucketFamilyDoesntExist.into() + )); + }); +} + +#[test] +fn update_families_in_dynamic_bag_creation_policy_succeeded() { + build_test_externalities().execute_with(|| { + let starting_block = 1; + run_to_block(starting_block); + + let dynamic_bag_type = DynamicBagType::Channel; + let new_bucket_number = 40; + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let families = BTreeMap::from_iter(vec![(family_id, new_bucket_number)]); + + UpdateFamiliesInDynamicBagCreationPolicyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_families(families.clone()) + .with_dynamic_bag_type(dynamic_bag_type) + .call_and_assert(Ok(())); + + EventFixture::assert_last_crate_event(RawEvent::FamiliesInDynamicBagCreationPolicyUpdated( + dynamic_bag_type, + families, + )); + }); +} + +#[test] +fn update_families_in_dynamic_bag_creation_policy_fails_with_bad_origin() { + build_test_externalities().execute_with(|| { + let non_leader_id = 1; + + UpdateFamiliesInDynamicBagCreationPolicyFixture::default() + .with_origin(RawOrigin::Signed(non_leader_id)) + .call_and_assert(Err(DispatchError::BadOrigin)); + }); +} + +#[test] +fn update_families_in_dynamic_bag_creation_policy_fails_with_invalid_family_id() { + build_test_externalities().execute_with(|| { + let dynamic_bag_type = DynamicBagType::Channel; + let new_bucket_number = 40; + let invalid_family_id = 111; + + let families = BTreeMap::from_iter(vec![(invalid_family_id, new_bucket_number)]); + + UpdateFamiliesInDynamicBagCreationPolicyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_families(families.clone()) + 
.with_dynamic_bag_type(dynamic_bag_type) + .call_and_assert(Err( + Error::::DistributionBucketFamilyDoesntExist.into() + )); + }); +} + +fn create_distribution_bucket_family_with_buckets(bucket_number: u64) -> (u64, Vec) { + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_ids = repeat(family_id) + .take(bucket_number as usize) + .map(|fam_id| { + CreateDistributionBucketFixture::default() + .with_family_id(fam_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_accept_new_bags(true) + .call_and_assert(Ok(())) + .unwrap() + }) + .collect::>(); + + (family_id, bucket_ids) +} + +#[test] +fn distribution_bucket_family_pick_during_dynamic_bag_creation_succeeded() { + build_test_externalities().execute_with(|| { + // Enable randomness (disabled at the initial block). + let starting_block = 6; + run_to_block(starting_block); + + let dynamic_bag_type = DynamicBagType::Channel; + let new_bucket_number = 5; + + let (family_id1, bucket_ids1) = create_distribution_bucket_family_with_buckets( + MaxDistributionBucketNumberPerFamily::get(), + ); + let (family_id2, bucket_ids2) = create_distribution_bucket_family_with_buckets( + MaxDistributionBucketNumberPerFamily::get(), + ); + let (family_id3, _) = create_distribution_bucket_family_with_buckets( + MaxDistributionBucketNumberPerFamily::get(), + ); + let (family_id4, _) = create_distribution_bucket_family_with_buckets(0); + + let families = BTreeMap::from_iter(vec![ + (family_id1, new_bucket_number), + (family_id2, new_bucket_number), + (family_id3, 0), + (family_id4, new_bucket_number), + ]); + + UpdateFamiliesInDynamicBagCreationPolicyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_families(families) + .with_dynamic_bag_type(dynamic_bag_type) + .call_and_assert(Ok(())); + + let picked_bucket_ids = + 
Storage::pick_distribution_buckets_for_dynamic_bag(dynamic_bag_type); + + assert_eq!(picked_bucket_ids.len(), (new_bucket_number * 2) as usize); // buckets from two families + + let total_ids1 = BTreeSet::from_iter( + bucket_ids1 + .iter() + .cloned() + .chain(bucket_ids2.iter().cloned()), + ); + let total_ids2 = BTreeSet::from_iter( + total_ids1 + .iter() + .cloned() + .chain(picked_bucket_ids.iter().cloned()), + ); + + assert_eq!(total_ids1, total_ids2); // picked IDS are from total ID set. + }); +} + +#[test] +fn invite_distribution_bucket_operator_succeeded() { + build_test_externalities().execute_with(|| { + let starting_block = 1; + run_to_block(starting_block); + + let provider_id = DEFAULT_DISTRIBUTION_PROVIDER_ID; + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + InviteDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_operator_worker_id(provider_id) + .call_and_assert(Ok(())); + + EventFixture::assert_last_crate_event(RawEvent::DistributionBucketOperatorInvited( + family_id, + bucket_id, + provider_id, + )); + }); +} + +#[test] +fn invite_distribution_bucket_operator_fails_with_non_leader_origin() { + build_test_externalities().execute_with(|| { + let non_leader_id = 1; + + InviteDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(non_leader_id)) + .call_and_assert(Err(DispatchError::BadOrigin)); + }); +} + +#[test] +fn invite_distribution_bucket_operator_fails_with_non_existing_distribution_bucket() { + build_test_externalities().execute_with(|| { + let family_id = 
CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + InviteDistributionBucketOperatorFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err(Error::::DistributionBucketDoesntExist.into())); + }); +} + +#[test] +fn invite_distribution_bucket_operator_fails_with_non_missing_invitation() { + build_test_externalities().execute_with(|| { + let invited_worker_id = DEFAULT_DISTRIBUTION_PROVIDER_ID; + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + InviteDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_operator_worker_id(invited_worker_id) + .call_and_assert(Ok(())); + + InviteDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_operator_worker_id(invited_worker_id) + .call_and_assert(Err( + Error::::DistributionProviderOperatorAlreadyInvited.into(), + )); + }); +} + +#[test] +fn invite_distribution_bucket_operator_fails_with_exceeding_the_limit_of_pending_invitations() { + build_test_externalities().execute_with(|| { + let invited_worker_id = DEFAULT_DISTRIBUTION_PROVIDER_ID; + let another_worker_id = ANOTHER_DISTRIBUTION_PROVIDER_ID; + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + 
.unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + InviteDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_operator_worker_id(invited_worker_id) + .call_and_assert(Ok(())); + + InviteDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_operator_worker_id(another_worker_id) + .call_and_assert(Err( + Error::::MaxNumberOfPendingInvitationsLimitForDistributionBucketReached + .into(), + )); + }); +} + +#[test] +fn invite_distribution_bucket_operator_fails_with_already_set_operator() { + build_test_externalities().execute_with(|| { + let invited_worker_id = DEFAULT_DISTRIBUTION_PROVIDER_ID; + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + InviteDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_operator_worker_id(invited_worker_id) + .call_and_assert(Ok(())); + + AcceptDistributionBucketInvitationFixture::default() + .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID)) + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_worker_id(invited_worker_id) + .call_and_assert(Ok(())); + + InviteDistributionBucketOperatorFixture::default() + 
.with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_operator_worker_id(invited_worker_id) + .call_and_assert(Err(Error::::DistributionProviderOperatorSet.into())); + }); +} + +#[test] +fn invite_distribution_bucket_operator_fails_with_invalid_distribution_provider_id() { + build_test_externalities().execute_with(|| { + let invalid_provider_id = 155; + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + InviteDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_operator_worker_id(invalid_provider_id) + .call_and_assert(Err( + Error::::DistributionProviderOperatorDoesntExist.into() + )); + }); +} + +#[test] +fn invite_distribution_bucket_operator_fails_with_invalid_distribution_bucket_family() { + build_test_externalities().execute_with(|| { + CancelDistributionBucketInvitationFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err( + Error::::DistributionBucketFamilyDoesntExist.into() + )); + }); +} + +#[test] +fn cancel_distribution_bucket_operator_invite_succeeded() { + build_test_externalities().execute_with(|| { + let starting_block = 1; + run_to_block(starting_block); + + let provider_id = DEFAULT_DISTRIBUTION_PROVIDER_ID; + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + 
.with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + InviteDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_operator_worker_id(provider_id) + .call_and_assert(Ok(())); + + CancelDistributionBucketInvitationFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_family_id(family_id) + .with_bucket_id(bucket_id) + .with_operator_worker_id(provider_id) + .call_and_assert(Ok(())); + + EventFixture::assert_last_crate_event(RawEvent::DistributionBucketInvitationCancelled( + family_id, + bucket_id, + provider_id, + )); + }); +} + +#[test] +fn cancel_distribution_bucket_operator_invite_fails_with_non_leader_origin() { + build_test_externalities().execute_with(|| { + let non_leader_account_id = 11111; + + CancelDistributionBucketInvitationFixture::default() + .with_origin(RawOrigin::Signed(non_leader_account_id)) + .call_and_assert(Err(DispatchError::BadOrigin)); + }); +} + +#[test] +fn cancel_distribution_bucket_operator_invite_fails_with_non_existing_distribution_bucket() { + build_test_externalities().execute_with(|| { + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + CancelDistributionBucketInvitationFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err(Error::::DistributionBucketDoesntExist.into())); + }); +} + +#[test] +fn cancel_distribution_bucket_operator_invite_fails_with_non_invited_distribution_provider() { + build_test_externalities().execute_with(|| { + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + 
.call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + CancelDistributionBucketInvitationFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_family_id(family_id) + .with_bucket_id(bucket_id) + .call_and_assert(Err(Error::::NoDistributionBucketInvitation.into())); + }); +} + +#[test] +fn cancel_distribution_bucket_operator_invite_fails_with_invalid_distribution_bucket_family() { + build_test_externalities().execute_with(|| { + CancelDistributionBucketInvitationFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err( + Error::::DistributionBucketFamilyDoesntExist.into() + )); + }); +} + +#[test] +fn accept_distribution_bucket_operator_invite_succeeded() { + build_test_externalities().execute_with(|| { + let starting_block = 1; + run_to_block(starting_block); + + let provider_id = DEFAULT_DISTRIBUTION_PROVIDER_ID; + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + InviteDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_operator_worker_id(provider_id) + .call_and_assert(Ok(())); + + AcceptDistributionBucketInvitationFixture::default() + .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID)) + .with_family_id(family_id) + .with_bucket_id(bucket_id) + .with_worker_id(provider_id) + .call_and_assert(Ok(())); + + 
EventFixture::assert_last_crate_event(RawEvent::DistributionBucketInvitationAccepted( + provider_id, + family_id, + bucket_id, + )); + }); +} + +#[test] +fn accept_distribution_bucket_operator_invite_fails_with_non_leader_origin() { + build_test_externalities().execute_with(|| { + let invalid_account_id = 11111; + + AcceptDistributionBucketInvitationFixture::default() + .with_origin(RawOrigin::Signed(invalid_account_id)) + .call_and_assert(Err(DispatchError::BadOrigin)); + }); +} + +#[test] +fn accept_distribution_bucket_operator_invite_fails_with_non_existing_distribution_bucket() { + build_test_externalities().execute_with(|| { + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + AcceptDistributionBucketInvitationFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID)) + .call_and_assert(Err(Error::::DistributionBucketDoesntExist.into())); + }); +} + +#[test] +fn accept_distribution_bucket_operator_invite_fails_with_non_invited_distribution_provider() { + build_test_externalities().execute_with(|| { + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + AcceptDistributionBucketInvitationFixture::default() + .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID)) + .with_family_id(family_id) + .with_bucket_id(bucket_id) + .call_and_assert(Err(Error::::NoDistributionBucketInvitation.into())); + }); +} + +#[test] +fn accept_distribution_bucket_operator_invite_fails_with_invalid_distribution_bucket_family() { + 
build_test_externalities().execute_with(|| { + AcceptDistributionBucketInvitationFixture::default() + .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID)) + .call_and_assert(Err( + Error::::DistributionBucketFamilyDoesntExist.into() + )); + }); +} + +#[test] +fn set_distribution_operator_metadata_invite_succeeded() { + build_test_externalities().execute_with(|| { + let starting_block = 1; + run_to_block(starting_block); + + let provider_id = DEFAULT_DISTRIBUTION_PROVIDER_ID; + let metadata = b"Metadata".to_vec(); + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + InviteDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_operator_worker_id(provider_id) + .call_and_assert(Ok(())); + + AcceptDistributionBucketInvitationFixture::default() + .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID)) + .with_family_id(family_id) + .with_bucket_id(bucket_id) + .with_worker_id(provider_id) + .call_and_assert(Ok(())); + + SetDistributionBucketMetadataFixture::default() + .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID)) + .with_family_id(family_id) + .with_bucket_id(bucket_id) + .with_worker_id(provider_id) + .with_metadata(metadata.clone()) + .call_and_assert(Ok(())); + + EventFixture::assert_last_crate_event(RawEvent::DistributionBucketMetadataSet( + provider_id, + family_id, + bucket_id, + metadata, + )); + }); +} + +#[test] +fn set_distribution_operator_metadata_fails_with_non_leader_origin() { + build_test_externalities().execute_with(|| { + let 
invalid_account_id = 11111; + + SetDistributionBucketMetadataFixture::default() + .with_origin(RawOrigin::Signed(invalid_account_id)) + .call_and_assert(Err(DispatchError::BadOrigin)); + }); +} + +#[test] +fn set_distribution_operator_metadata_fails_with_non_existing_distribution_bucket() { + build_test_externalities().execute_with(|| { + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + SetDistributionBucketMetadataFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID)) + .call_and_assert(Err(Error::::DistributionBucketDoesntExist.into())); + }); +} + +#[test] +fn set_distribution_operator_metadata_fails_with_non_distribution_provider() { + build_test_externalities().execute_with(|| { + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + SetDistributionBucketMetadataFixture::default() + .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID)) + .with_family_id(family_id) + .with_bucket_id(bucket_id) + .call_and_assert(Err( + Error::::MustBeDistributionProviderOperatorForBucket.into(), + )); + }); +} + +#[test] +fn set_distribution_operator_metadata_fails_with_invalid_distribution_bucket_family() { + build_test_externalities().execute_with(|| { + SetDistributionBucketMetadataFixture::default() + .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID)) + .call_and_assert(Err( + Error::::DistributionBucketFamilyDoesntExist.into() + )); + }); +} + +#[test] +fn remove_distribution_bucket_operator_succeeded() { + 
build_test_externalities().execute_with(|| { + let starting_block = 1; + run_to_block(starting_block); + + let operator_id = DEFAULT_DISTRIBUTION_PROVIDER_ID; + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + InviteDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_operator_worker_id(operator_id) + .call_and_assert(Ok(())); + + AcceptDistributionBucketInvitationFixture::default() + .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID)) + .with_family_id(family_id) + .with_bucket_id(bucket_id) + .with_worker_id(operator_id) + .call_and_assert(Ok(())); + + RemoveDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_family_id(family_id) + .with_bucket_id(bucket_id) + .with_operator_worker_id(operator_id) + .call_and_assert(Ok(())); + + EventFixture::assert_last_crate_event(RawEvent::DistributionBucketOperatorRemoved( + family_id, + bucket_id, + operator_id, + )); + }); +} + +#[test] +fn remove_distribution_bucket_operator_fails_with_non_leader_origin() { + build_test_externalities().execute_with(|| { + RemoveDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DEFAULT_DISTRIBUTION_PROVIDER_ACCOUNT_ID)) + .call_and_assert(Err(DispatchError::BadOrigin)); + }); +} + +#[test] +fn remove_distribution_bucket_operator_fails_with_non_existing_distribution_bucket_family() { + build_test_externalities().execute_with(|| { + RemoveDistributionBucketOperatorFixture::default() + 
.with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Err( + Error::::DistributionBucketFamilyDoesntExist.into() + )); + }); +} + +#[test] +fn remove_distribution_bucket_operator_fails_with_non_existing_distribution_bucket() { + build_test_externalities().execute_with(|| { + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + RemoveDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_family_id(family_id) + .call_and_assert(Err(Error::::DistributionBucketDoesntExist.into())); + }); +} + +#[test] +fn remove_distribution_bucket_operator_fails_with_non_accepted_distribution_provider() { + build_test_externalities().execute_with(|| { + let operator_id = DEFAULT_DISTRIBUTION_PROVIDER_ID; + + let family_id = CreateDistributionBucketFamilyFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + let bucket_id = CreateDistributionBucketFixture::default() + .with_family_id(family_id) + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .call_and_assert(Ok(())) + .unwrap(); + + RemoveDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_family_id(family_id) + .with_bucket_id(bucket_id) + .with_operator_worker_id(operator_id) + .call_and_assert(Err( + Error::::MustBeDistributionProviderOperatorForBucket.into(), + )); + + InviteDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + .with_bucket_id(bucket_id) + .with_family_id(family_id) + .with_operator_worker_id(operator_id) + .call_and_assert(Ok(())); + + RemoveDistributionBucketOperatorFixture::default() + .with_origin(RawOrigin::Signed(DISTRIBUTION_WG_LEADER_ACCOUNT_ID)) + 
.with_family_id(family_id) + .with_bucket_id(bucket_id) + .with_operator_worker_id(operator_id) + .call_and_assert(Err( + Error::::MustBeDistributionProviderOperatorForBucket.into(), + )); + }); +} diff --git a/runtime/src/integration/proposals/proposal_encoder.rs b/runtime/src/integration/proposals/proposal_encoder.rs index d0c4d357e0..8345615d3a 100644 --- a/runtime/src/integration/proposals/proposal_encoder.rs +++ b/runtime/src/integration/proposals/proposal_encoder.rs @@ -22,6 +22,9 @@ macro_rules! wrap_working_group_call { Call::ContentDirectoryWorkingGroup($working_group_instance_call) } WorkingGroup::Storage => Call::StorageWorkingGroup($working_group_instance_call), + WorkingGroup::Distribution => { + Call::DistributionWorkingGroup($working_group_instance_call) + } } }}; } diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index becc18e4c4..62b7182187 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -63,7 +63,6 @@ pub use runtime_api::*; use integration::proposals::{CouncilManager, ExtrinsicProposalEncoder, MembershipOriginValidator}; use governance::{council, election}; -use storage::DynamicBagCreationPolicy; // Node dependencies pub use common; @@ -565,6 +564,9 @@ pub type StorageWorkingGroupInstance = working_group::Instance2; // The content directory working group instance alias. pub type ContentDirectoryWorkingGroupInstance = working_group::Instance3; +// The distribution working group instance alias. +pub type DistributionWorkingGroupInstance = working_group::Instance4; + parameter_types! { pub const MaxWorkerNumberLimit: u32 = 100; } @@ -579,6 +581,11 @@ impl working_group::Trait for Runtime { type MaxWorkerNumberLimit = MaxWorkerNumberLimit; } +impl working_group::Trait for Runtime { + type Event = Event; + type MaxWorkerNumberLimit = MaxWorkerNumberLimit; +} + parameter_types! { pub const ProposalCancellationFee: u64 = 10000; pub const ProposalRejectionFee: u64 = 5000; @@ -645,19 +652,20 @@ parameter_types! { } parameter_types! 
{ + pub const MaxDistributionBucketNumberPerFamily: u64 = 20; //TODO: adjust value + pub const MaxDistributionBucketFamilyNumber: u64 = 20; //TODO: adjust value pub const MaxNumberOfDataObjectsPerBag: u64 = 1000; //TODO: adjust value pub const DataObjectDeletionPrize: Balance = 10; //TODO: adjust value pub const BlacklistSizeLimit: u64 = 10000; //TODO: adjust value pub const MaxRandomIterationNumber: u64 = 30; //TODO: adjust value + pub const MaxNumberOfPendingInvitationsPerDistributionBucket: u64 = 30; //TODO: adjust value pub const StorageModuleId: ModuleId = ModuleId(*b"mstorage"); // module storage pub const StorageBucketsPerBagValueConstraint: storage::StorageBucketsPerBagValueConstraint = storage::StorageBucketsPerBagValueConstraint {min: 3, max_min_diff: 7}; //TODO: adjust value - pub const DefaultMemberDynamicBagCreationPolicy: DynamicBagCreationPolicy = DynamicBagCreationPolicy{ - number_of_storage_buckets: 4 - }; //TODO: adjust value - pub const DefaultChannelDynamicBagCreationPolicy: DynamicBagCreationPolicy = DynamicBagCreationPolicy{ - number_of_storage_buckets: 4 - }; //TODO: adjust value + pub const DefaultMemberDynamicBagNumberOfStorageBuckets: u64 = 4; //TODO: adjust value + pub const DefaultChannelDynamicBagNumberOfStorageBuckets: u64 = 4; //TODO: adjust value + pub const DistributionBucketsPerBagValueConstraint: storage::DistributionBucketsPerBagValueConstraint = + storage::DistributionBucketsPerBagValueConstraint {min: 3, max_min_diff: 7}; //TODO: adjust value } impl storage::Trait for Runtime { @@ -665,6 +673,7 @@ impl storage::Trait for Runtime { type DataObjectId = DataObjectId; type StorageBucketId = StorageBucketId; type DistributionBucketId = DistributionBucketId; + type DistributionBucketFamilyId = DistributionBucketFamilyId; type ChannelId = ChannelId; type MaxNumberOfDataObjectsPerBag = MaxNumberOfDataObjectsPerBag; type DataObjectDeletionPrize = DataObjectDeletionPrize; @@ -672,24 +681,49 @@ impl storage::Trait for Runtime { type 
ModuleId = StorageModuleId; type MemberOriginValidator = MembershipOriginValidator; type StorageBucketsPerBagValueConstraint = StorageBucketsPerBagValueConstraint; - type DefaultMemberDynamicBagCreationPolicy = DefaultMemberDynamicBagCreationPolicy; - type DefaultChannelDynamicBagCreationPolicy = DefaultChannelDynamicBagCreationPolicy; + type DefaultMemberDynamicBagNumberOfStorageBuckets = + DefaultMemberDynamicBagNumberOfStorageBuckets; + type DefaultChannelDynamicBagNumberOfStorageBuckets = + DefaultChannelDynamicBagNumberOfStorageBuckets; type Randomness = RandomnessCollectiveFlip; type MaxRandomIterationNumber = MaxRandomIterationNumber; - - fn ensure_working_group_leader_origin(origin: Self::Origin) -> DispatchResult { + type MaxDistributionBucketFamilyNumber = MaxDistributionBucketFamilyNumber; + type MaxDistributionBucketNumberPerFamily = MaxDistributionBucketNumberPerFamily; + type DistributionBucketsPerBagValueConstraint = DistributionBucketsPerBagValueConstraint; + type DistributionBucketOperatorId = DistributionBucketOperatorId; + type MaxNumberOfPendingInvitationsPerDistributionBucket = + MaxNumberOfPendingInvitationsPerDistributionBucket; + + fn ensure_storage_working_group_leader_origin(origin: Self::Origin) -> DispatchResult { StorageWorkingGroup::ensure_origin_is_active_leader(origin) } - fn ensure_worker_origin(origin: Self::Origin, worker_id: ActorId) -> DispatchResult { + fn ensure_storage_worker_origin(origin: Self::Origin, worker_id: ActorId) -> DispatchResult { StorageWorkingGroup::ensure_worker_signed(origin, &worker_id).map(|_| ()) } - fn ensure_worker_exists(worker_id: &ActorId) -> DispatchResult { + fn ensure_storage_worker_exists(worker_id: &ActorId) -> DispatchResult { StorageWorkingGroup::ensure_worker_exists(&worker_id) .map(|_| ()) .map_err(|err| err.into()) } + + fn ensure_distribution_working_group_leader_origin(origin: Self::Origin) -> DispatchResult { + DistributionWorkingGroup::ensure_origin_is_active_leader(origin) + } + + fn 
ensure_distribution_worker_origin( + origin: Self::Origin, + worker_id: ActorId, + ) -> DispatchResult { + DistributionWorkingGroup::ensure_worker_signed(origin, &worker_id).map(|_| ()) + } + + fn ensure_distribution_worker_exists(worker_id: &ActorId) -> DispatchResult { + DistributionWorkingGroup::ensure_worker_exists(&worker_id) + .map(|_| ()) + .map_err(|err| err.into()) + } } /// Opaque types. These are used by the CLI to instantiate machinery that don't need to know @@ -755,6 +789,7 @@ construct_runtime!( // reserved for the future use: ForumWorkingGroup: working_group::::{Module, Call, Storage, Event}, StorageWorkingGroup: working_group::::{Module, Call, Storage, Config, Event}, ContentDirectoryWorkingGroup: working_group::::{Module, Call, Storage, Config, Event}, + DistributionWorkingGroup: working_group::::{Module, Call, Storage, Config, Event}, // Storage: storage::{Module, Call, Storage, Event}, } diff --git a/runtime/src/primitives.rs b/runtime/src/primitives.rs index b326f4fb75..254ef0f7eb 100644 --- a/runtime/src/primitives.rs +++ b/runtime/src/primitives.rs @@ -74,9 +74,15 @@ pub type StorageBucketId = u64; /// Represent a distribution bucket from the storage pallet. pub type DistributionBucketId = u64; +/// Represent a distribution bucket family from the storage pallet. +pub type DistributionBucketFamilyId = u64; + /// Represent a media channel. pub type ChannelId = u64; +/// Represent relationships between distribution buckets and distribution working group workers. +pub type DistributionBucketOperatorId = u64; + /// App-specific crypto used for reporting equivocation/misbehavior in BABE and /// GRANDPA. Any rewards for misbehavior reporting will be paid out to this /// account. 
diff --git a/runtime/src/tests/proposals_integration/working_group_proposals.rs b/runtime/src/tests/proposals_integration/working_group_proposals.rs index b6c3866526..4c9e53155c 100644 --- a/runtime/src/tests/proposals_integration/working_group_proposals.rs +++ b/runtime/src/tests/proposals_integration/working_group_proposals.rs @@ -12,7 +12,8 @@ use working_group::{OpeningPolicyCommitment, RewardPolicy}; use crate::{ Balance, BlockNumber, ContentDirectoryWorkingGroup, ContentDirectoryWorkingGroupInstance, - StorageWorkingGroup, StorageWorkingGroupInstance, + DistributionWorkingGroup, DistributionWorkingGroupInstance, StorageWorkingGroup, + StorageWorkingGroupInstance, }; use sp_std::collections::btree_set::BTreeSet; @@ -52,6 +53,14 @@ fn add_opening( >>::contains_key(opening_id)); opening_id } + WorkingGroup::Distribution => { + let opening_id = DistributionWorkingGroup::next_opening_id(); + assert!(!>::contains_key(opening_id)); + opening_id + } }; let codex_extrinsic_test_fixture = CodexProposalTestFixture::default_for_call(|| { @@ -330,6 +339,12 @@ fn create_add_working_group_leader_opening_proposal_execution_succeeds() { StorageWorkingGroupInstance, >(group); } + WorkingGroup::Distribution => { + run_create_add_working_group_leader_opening_proposal_execution_succeeds::< + Runtime, + DistributionWorkingGroupInstance, + >(group); + } } } } @@ -388,6 +403,12 @@ fn create_begin_review_working_group_leader_applications_proposal_execution_succ StorageWorkingGroupInstance, >(group); } + WorkingGroup::Distribution => { + run_create_begin_review_working_group_leader_applications_proposal_execution_succeeds::< + Runtime, + DistributionWorkingGroupInstance, + >(group); + } } } } @@ -468,6 +489,12 @@ fn create_fill_working_group_leader_opening_proposal_execution_succeeds() { StorageWorkingGroupInstance, >(group); } + WorkingGroup::Distribution => { + run_create_fill_working_group_leader_opening_proposal_execution_succeeds::< + Runtime, + DistributionWorkingGroupInstance, 
+ >(group); + } } } @@ -545,6 +572,12 @@ fn create_fill_working_group_leader_opening_proposal_execution_succeeds() { StorageWorkingGroupInstance, >(group); } + WorkingGroup::Distribution => { + run_create_decrease_group_leader_stake_proposal_execution_succeeds::< + Runtime, + DistributionWorkingGroupInstance, + >(group); + } } } } @@ -662,6 +695,12 @@ fn create_fill_working_group_leader_opening_proposal_execution_succeeds() { StorageWorkingGroupInstance, >(group) } + WorkingGroup::Distribution => { + run_create_slash_group_leader_stake_proposal_execution_succeeds::< + Runtime, + DistributionWorkingGroupInstance, + >(group) + } } } } @@ -780,6 +819,12 @@ fn create_fill_working_group_leader_opening_proposal_execution_succeeds() { StorageWorkingGroupInstance, >(group); } + WorkingGroup::Distribution => { + run_create_set_working_group_mint_capacity_proposal_execution_succeeds::< + Runtime, + DistributionWorkingGroupInstance, + >(group); + } } } @@ -836,6 +881,12 @@ fn create_fill_working_group_leader_opening_proposal_execution_succeeds() { StorageWorkingGroupInstance, >(group); } + WorkingGroup::Distribution => { + run_create_set_working_group_mint_capacity_proposal_execution_succeeds::< + Runtime, + DistributionWorkingGroupInstance, + >(group); + } } } } @@ -959,6 +1010,12 @@ fn create_fill_working_group_leader_opening_proposal_execution_succeeds() { StorageWorkingGroupInstance, >(group); } + WorkingGroup::Distribution => { + run_create_terminate_group_leader_role_proposal_execution_succeeds::< + Runtime, + DistributionWorkingGroupInstance, + >(group); + } } } } @@ -1078,6 +1135,9 @@ fn create_fill_working_group_leader_opening_proposal_execution_succeeds() { WorkingGroup::Storage => { run_create_terminate_group_leader_role_proposal_with_slashing_execution_succeeds::(group); } + WorkingGroup::Distribution => { + run_create_terminate_group_leader_role_proposal_with_slashing_execution_succeeds::(group); + } } } }