From bbc035fce05a5b7495cca330139e14a066263f67 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Fri, 22 Aug 2025 17:30:14 -0400 Subject: [PATCH 01/39] Basic framing for sub-subnets --- common/src/lib.rs | 34 ++++ pallets/subtensor/src/coinbase/block_step.rs | 2 + .../subtensor/src/coinbase/run_coinbase.rs | 2 +- pallets/subtensor/src/lib.rs | 32 ++++ pallets/subtensor/src/subnets/mod.rs | 1 + pallets/subtensor/src/subnets/subsubnet.rs | 161 ++++++++++++++++++ pallets/subtensor/src/tests/mod.rs | 1 + pallets/subtensor/src/tests/subsubnet.rs | 13 ++ runtime/src/lib.rs | 4 +- 9 files changed, 247 insertions(+), 3 deletions(-) create mode 100644 pallets/subtensor/src/subnets/subsubnet.rs create mode 100644 pallets/subtensor/src/tests/subsubnet.rs diff --git a/common/src/lib.rs b/common/src/lib.rs index a3882a88fc..ed7cc0fe88 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -223,6 +223,40 @@ pub mod time { pub const DAYS: BlockNumber = HOURS * 24; } +#[freeze_struct("8e576b32bb1bb664")] +#[repr(transparent)] +#[derive( + Deserialize, + Serialize, + Clone, + Copy, + Decode, + DecodeWithMemTracking, + Default, + Encode, + Eq, + Hash, + MaxEncodedLen, + Ord, + PartialEq, + PartialOrd, + RuntimeDebug, +)] +#[serde(transparent)] +pub struct SubId(u8); + +impl From for SubId { + fn from(value: u8) -> Self { + Self(value) + } +} + +impl From for u16 { + fn from(val: SubId) -> Self { + u16::from(val.0) + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/pallets/subtensor/src/coinbase/block_step.rs b/pallets/subtensor/src/coinbase/block_step.rs index 6a96090b05..6385a7f756 100644 --- a/pallets/subtensor/src/coinbase/block_step.rs +++ b/pallets/subtensor/src/coinbase/block_step.rs @@ -21,6 +21,8 @@ impl Pallet { Self::run_coinbase(block_emission); // --- 4. Set pending children on the epoch; but only after the coinbase has been run. Self::try_set_pending_children(block_number); + // --- 5. 
Update sub-subnet counts + Self::update_subsubnet_counts_if_needed(block_number); // Return ok. Ok(()) } diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index dcdab8072e..a71ba0d964 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -641,7 +641,7 @@ impl Pallet { // Run the epoch. let hotkey_emission: Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> = - Self::epoch(netuid, pending_alpha.saturating_add(pending_swapped)); + Self::epoch_with_subsubnets(netuid, pending_alpha.saturating_add(pending_swapped)); log::debug!("hotkey_emission: {hotkey_emission:?}"); // Compute the pending validator alpha. diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 173d9bd0f4..7fa6c8a919 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1797,6 +1797,38 @@ pub mod pallet { pub type CommitRevealWeightsVersion = StorageValue<_, u16, ValueQuery, DefaultCommitRevealWeightsVersion>; + /// ====================== + /// ==== Sub-subnets ===== + /// ====================== + #[pallet::type_value] + /// -- ITEM (Default number of sub-subnets) + pub fn DefaultSubsubnetCount() -> u8 { + 1 + } + #[pallet::type_value] + /// -- ITEM (Maximum number of sub-subnets) + pub fn MaxSubsubnetCount() -> u8 { + 8 + } + #[pallet::type_value] + /// -- ITEM (Number of tempos in subnet super-block) + pub fn SuperBlockTempos() -> u16 { + 20 + } + #[pallet::type_value] + /// -- ITEM (Maximum allowed sub-subnet count decrease per super-block) + pub fn GlobalSubsubnetDecreasePerSuperblock() -> u8 { + 1 + } + #[pallet::storage] + /// --- MAP ( netuid ) --> Number of sub-subnets desired by root or subnet owner. 
+ pub type SubsubnetCountDesired = + StorageMap<_, Twox64Concat, NetUid, u8, ValueQuery, DefaultSubsubnetCount>; + #[pallet::storage] + /// --- MAP ( netuid ) --> Current number of sub-subnets + pub type SubsubnetCountCurrent = + StorageMap<_, Twox64Concat, NetUid, u8, ValueQuery, DefaultSubsubnetCount>; + /// ================== /// ==== Genesis ===== /// ================== diff --git a/pallets/subtensor/src/subnets/mod.rs b/pallets/subtensor/src/subnets/mod.rs index a823773395..a3705af084 100644 --- a/pallets/subtensor/src/subnets/mod.rs +++ b/pallets/subtensor/src/subnets/mod.rs @@ -3,6 +3,7 @@ pub mod leasing; pub mod registration; pub mod serving; pub mod subnet; +pub mod subsubnet; pub mod symbols; pub mod uids; pub mod weights; diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs new file mode 100644 index 0000000000..816f4818bd --- /dev/null +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -0,0 +1,161 @@ +//! This file contains all tooling to work with sub-subnets +//! + +use super::*; +use alloc::collections::BTreeMap; +use safe_math::*; +use sp_runtime::SaturatedConversion; +use subtensor_runtime_common::{AlphaCurrency, NetUid, SubId}; + +pub type LeaseId = u32; + +pub type CurrencyOf = ::Currency; + +pub type BalanceOf = + as fungible::Inspect<::AccountId>>::Balance; + +/// Theoretical maximum of subnets on bittensor. This value is used in indexed +/// storage of epoch values for sub-subnets as +/// +/// `storage_index = netuid + sub_id * GLOBAL_MAX_SUBNET_COUNT` +/// +/// For sub_id = 0 this index results in netuid and provides backward compatibility +/// for subnets with default sub-subnet count of 1. +/// +/// Changing this value will require a migration of all epoch maps. 
+/// +pub const GLOBAL_MAX_SUBNET_COUNT: u16 = 1024; + +impl Pallet { + pub fn get_subsubnet_storage_index(netuid: NetUid, sub_id: SubId) -> NetUid { + u16::from(sub_id) + .saturating_mul(GLOBAL_MAX_SUBNET_COUNT) + .saturating_add(u16::from(netuid)) + .into() + } + + /// Set the desired valus of sub-subnet count for a subnet identified + /// by netuid + pub fn do_set_desired_subsubnet_count(netuid: NetUid, subsubnet_count: u8) -> DispatchResult { + // Make sure the subnet exists + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + // Count cannot be zero + ensure!(subsubnet_count > 0, Error::::InvalidValue); + + // Make sure we are not exceeding the max sub-subnet count + ensure!( + subsubnet_count <= MaxSubsubnetCount::::get(), + Error::::InvalidValue + ); + + SubsubnetCountDesired::::insert(netuid, subsubnet_count); + Ok(()) + } + + /// Update current count for a subnet identified by netuid + /// + /// - This function should be called in every block in run_counbase + /// - Cleans up all sub-subnet maps if count is reduced + /// - Decreases current subsubnet count by no more than `GlobalSubsubnetDecreasePerSuperblock` + /// + pub fn update_subsubnet_counts_if_needed(current_block: u64) { + // Run once per super-block + let super_block_tempos = u64::from(SuperBlockTempos::::get()); + Self::get_all_subnet_netuids().iter().for_each(|netuid| { + let super_block = super_block_tempos.saturating_mul(u64::from(Tempo::::get(netuid))); + if let Some(rem) = current_block.checked_rem(super_block) { + if rem == 0 { + let old_count = SubsubnetCountCurrent::::get(netuid); + let desired_count = SubsubnetCountDesired::::get(netuid); + let min_possible_count = old_count + .saturating_sub(GlobalSubsubnetDecreasePerSuperblock::::get()) + .max(1); + let new_count = desired_count.max(min_possible_count); + + if old_count > new_count { + + todo!(); + // Cleanup weights + // Cleanup StakeWeight + // Cleanup Active + // Cleanup Emission + // Cleanup Rank 
+ // Cleanup Trust + // Cleanup Consensus + // Cleanup Incentive + // Cleanup Dividends + // Cleanup PruningScores + // Cleanup ValidatorTrust + // Cleanup ValidatorPermit + } + + SubsubnetCountCurrent::::insert(netuid, new_count); + } + } + }); + } + + /// Split alpha emission in sub-subnet proportions + /// Currently splits evenly between sub-subnets, but the implementation + /// may change in the future + /// + pub fn split_emissions(netuid: NetUid, alpha: AlphaCurrency) -> Vec { + let subsubnet_count = u64::from(SubsubnetCountCurrent::::get(netuid)); + + // If there's any rounding error, credit it to subsubnet 0 + let per_subsubnet = u64::from(alpha).safe_div(subsubnet_count); + let rounding_err = + u64::from(alpha).saturating_sub(per_subsubnet.saturating_mul(subsubnet_count)); + + let mut result = vec![AlphaCurrency::from(per_subsubnet); subsubnet_count as usize]; + result[0] = result[0].saturating_add(AlphaCurrency::from(rounding_err)); + result + } + + /// Splits rao_emission between different sub-subnets using `split_emissions` function. + /// + /// Runs the epoch function for each sub-subnet and consolidates hotkey_emission + /// into a single vector. + /// + pub fn epoch_with_subsubnets( + netuid: NetUid, + rao_emission: AlphaCurrency, + ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + let aggregated: BTreeMap = + Self::split_emissions(netuid, rao_emission) + .into_iter() + .enumerate() + // Run epoch function for each subsubnet to distribute its portion of emissions + .flat_map(|(sub_id, emission)| { + // This is subsubnet ID, e.g. 
a 0-7 number + let sub_id_u8: u8 = sub_id.saturated_into(); + // This is netuid index for storing subsubnet data in storage maps and for using in + // epoch function + let subsub_netuid = + Self::get_subsubnet_storage_index(netuid, SubId::from(sub_id_u8)); + // epoch returns: Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> + Self::epoch(subsub_netuid, emission).into_iter() + }) + // Consolidate the hotkey emissions into a single BTreeMap + .fold(BTreeMap::new(), |mut acc, (hotkey, divs, incs)| { + acc.entry(hotkey) + .and_modify(|tot| { + tot.0 = tot.0.saturating_add(divs); + tot.1 = tot.1.saturating_add(incs); + }) + .or_insert((divs, incs)); + acc + }); + + // Remap BTreeMap back to Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> format + // for processing in run_coinbase + aggregated + .into_iter() + .map(|(hotkey, (divs, incs))| (hotkey, divs, incs)) + .collect() + } +} diff --git a/pallets/subtensor/src/tests/mod.rs b/pallets/subtensor/src/tests/mod.rs index b743d7c1ff..1eb922f711 100644 --- a/pallets/subtensor/src/tests/mod.rs +++ b/pallets/subtensor/src/tests/mod.rs @@ -21,6 +21,7 @@ mod serving; mod staking; mod staking2; mod subnet; +mod subsubnet; mod swap_coldkey; mod swap_hotkey; mod swap_hotkey_with_subnet; diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs new file mode 100644 index 0000000000..34c7ac1043 --- /dev/null +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -0,0 +1,13 @@ +#![allow( + clippy::arithmetic_side_effects, + clippy::indexing_slicing, + clippy::unwrap_used +)] + +use super::mock::*; + +#[test] +fn test_subsubnet_emission_proportions() { + new_test_ext(1).execute_with(|| { + }); +} diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index d7097b605c..120987a00a 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. 
// This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 304, + spec_version: 303, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, @@ -1144,7 +1144,7 @@ parameter_types! { pub const SubtensorInitialSenateRequiredStakePercentage: u64 = 1; // 1 percent of total stake pub const SubtensorInitialNetworkImmunity: u64 = 7 * 7200; pub const SubtensorInitialMinAllowedUids: u16 = 128; - pub const SubtensorInitialMinLockCost: u64 = prod_or_fast!(1_000_000_000_000, 100_000_000_000); // 1000 TAO for prod, 100 TAO for fast + pub const SubtensorInitialMinLockCost: u64 = 1_000_000_000_000; // 1000 TAO pub const SubtensorInitialSubnetOwnerCut: u16 = 11_796; // 18 percent // pub const SubtensorInitialSubnetLimit: u16 = 12; // (DEPRECATED) pub const SubtensorInitialNetworkLockReductionInterval: u64 = 14 * 7200; From 2c447fd99f46be260f4dd7d3488c52786a6f6c08 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Tue, 26 Aug 2025 18:54:59 -0400 Subject: [PATCH 02/39] Refactored epoch, tests failing --- common/src/lib.rs | 122 +++ pallets/admin-utils/src/tests/mod.rs | 9 +- pallets/subtensor/src/coinbase/root.rs | 29 +- pallets/subtensor/src/epoch/run_epoch.rs | 858 ++++++++++++++++-- pallets/subtensor/src/lib.rs | 30 +- pallets/subtensor/src/macros/dispatches.rs | 205 +++++ pallets/subtensor/src/macros/events.rs | 2 +- pallets/subtensor/src/macros/genesis.rs | 4 +- .../migrations/migrate_delete_subnet_21.rs | 10 +- .../src/migrations/migrate_delete_subnet_3.rs | 10 +- pallets/subtensor/src/rpc_info/metagraph.rs | 17 +- pallets/subtensor/src/rpc_info/neuron_info.rs | 14 +- pallets/subtensor/src/rpc_info/show_subnet.rs | 6 +- pallets/subtensor/src/rpc_info/subnet_info.rs | 6 +- pallets/subtensor/src/subnets/subsubnet.rs | 179 +++- pallets/subtensor/src/subnets/uids.rs | 14 +- pallets/subtensor/src/subnets/weights.rs | 654 ++++++++----- pallets/subtensor/src/swap/swap_hotkey.rs | 15 +- 
pallets/subtensor/src/tests/children.rs | 9 +- pallets/subtensor/src/tests/coinbase.rs | 22 +- pallets/subtensor/src/tests/consensus.rs | 3 +- pallets/subtensor/src/tests/epoch.rs | 77 +- pallets/subtensor/src/tests/staking.rs | 10 +- pallets/subtensor/src/tests/subsubnet.rs | 3 +- pallets/subtensor/src/tests/swap_hotkey.rs | 15 +- .../src/tests/swap_hotkey_with_subnet.rs | 15 +- pallets/subtensor/src/tests/uids.rs | 14 +- pallets/subtensor/src/tests/weights.rs | 45 +- pallets/subtensor/src/utils/misc.rs | 15 +- 29 files changed, 1894 insertions(+), 518 deletions(-) diff --git a/common/src/lib.rs b/common/src/lib.rs index ed7cc0fe88..2fd9ca30e6 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -245,6 +245,10 @@ pub mod time { #[serde(transparent)] pub struct SubId(u8); +impl SubId { + pub const MAIN: SubId = Self(0); +} + impl From for SubId { fn from(value: u8) -> Self { Self(value) @@ -257,6 +261,124 @@ impl From for u16 { } } +impl From for u64 { + fn from(val: SubId) -> Self { + u64::from(val.0) + } +} + +impl From for u8 { + fn from(val: SubId) -> Self { + u8::from(val.0) + } +} + +impl Display for SubId { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Display::fmt(&self.0, f) + } +} + +impl CompactAs for SubId { + type As = u8; + + fn encode_as(&self) -> &Self::As { + &self.0 + } + + fn decode_from(v: Self::As) -> Result { + Ok(Self(v)) + } +} + +impl From> for SubId { + fn from(c: Compact) -> Self { + c.0 + } +} + +impl TypeInfo for SubId { + type Identity = ::Identity; + fn type_info() -> scale_info::Type { + ::type_info() + } +} + +#[freeze_struct("2d995c5478e16d4d")] +#[repr(transparent)] +#[derive( + Deserialize, + Serialize, + Clone, + Copy, + Decode, + DecodeWithMemTracking, + Default, + Encode, + Eq, + Hash, + MaxEncodedLen, + Ord, + PartialEq, + PartialOrd, + RuntimeDebug, +)] +#[serde(transparent)] +pub struct NetUidStorageIndex(u16); + +impl NetUidStorageIndex { + pub const ROOT: NetUidStorageIndex = Self(0); +} + +impl Display 
for NetUidStorageIndex { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + Display::fmt(&self.0, f) + } +} + +impl CompactAs for NetUidStorageIndex { + type As = u16; + + fn encode_as(&self) -> &Self::As { + &self.0 + } + + fn decode_from(v: Self::As) -> Result { + Ok(Self(v)) + } +} + +impl From> for NetUidStorageIndex { + fn from(c: Compact) -> Self { + c.0 + } +} + +impl From for NetUidStorageIndex { + fn from(val: NetUid) -> Self { + val.0.into() + } +} + +impl From for u16 { + fn from(val: NetUidStorageIndex) -> Self { + val.0 + } +} + +impl From for NetUidStorageIndex { + fn from(value: u16) -> Self { + Self(value) + } +} + +impl TypeInfo for NetUidStorageIndex { + type Identity = ::Identity; + fn type_info() -> scale_info::Type { + ::type_info() + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 754befc805..2e85457231 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -827,7 +827,7 @@ fn test_sudo_set_bonds_moving_average() { let netuid = NetUid::from(1); let to_be_set: u64 = 10; add_network(netuid, 10); - let init_value: u64 = SubtensorModule::get_bonds_moving_average(netuid); + let init_value: u64 = SubtensorModule::get_bonds_moving_average(netuid.into()); assert_eq!( AdminUtils::sudo_set_bonds_moving_average( <::RuntimeOrigin>::signed(U256::from(1)), @@ -845,7 +845,7 @@ fn test_sudo_set_bonds_moving_average() { Err(Error::::SubnetDoesNotExist.into()) ); assert_eq!( - SubtensorModule::get_bonds_moving_average(netuid), + SubtensorModule::get_bonds_moving_average(netuid.into()), init_value ); assert_ok!(AdminUtils::sudo_set_bonds_moving_average( @@ -853,7 +853,10 @@ fn test_sudo_set_bonds_moving_average() { netuid, to_be_set )); - assert_eq!(SubtensorModule::get_bonds_moving_average(netuid), to_be_set); + assert_eq!( + SubtensorModule::get_bonds_moving_average(netuid.into()), + to_be_set + ); }); } diff --git 
a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index fe1878f397..6d2824aec9 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -17,12 +17,11 @@ use super::*; use frame_support::dispatch::Pays; -use frame_support::storage::IterableStorageDoubleMap; use frame_support::weights::Weight; use safe_math::*; use sp_core::Get; use substrate_fixed::types::I64F64; -use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, NetUidStorageIndex, TaoCurrency}; impl Pallet { /// Fetches the total count of root network validators @@ -410,6 +409,7 @@ impl Pallet { // --- 1. Return balance to subnet owner. let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); let reserved_amount = Self::get_subnet_locked_balance(netuid); + let subsubnets: u8 = SubsubnetCountCurrent::::get(netuid).into(); // --- 2. Remove network count. SubnetworkN::::remove(netuid); @@ -430,17 +430,16 @@ impl Pallet { let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let keys = Keys::::iter_prefix(netuid).collect::>(); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); + let _ = Bonds::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); // --- 8. Removes the weights for this subnet (do not remove). - let _ = Weights::::clear_prefix(netuid, u32::MAX, None); + for subid in 0..subsubnets { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); + } // --- 9. Iterate over stored weights and fill the matrix. - for (uid_i, weights_i) in - as IterableStorageDoubleMap>>::iter_prefix( - NetUid::ROOT, - ) - { + for (uid_i, weights_i) in Weights::::iter_prefix(NetUidStorageIndex::ROOT) { // Create a new vector to hold modified weights. 
let mut modified_weights = weights_i.clone(); // Iterate over each weight entry to potentially update it. @@ -450,7 +449,7 @@ impl Pallet { *weight = 0; // Set weight to 0 for the matching subnet_id. } } - Weights::::insert(NetUid::ROOT, uid_i, modified_weights); + Weights::::insert(NetUidStorageIndex::ROOT, uid_i, modified_weights); } // --- 10. Remove various network-related parameters. @@ -458,11 +457,17 @@ impl Pallet { Trust::::remove(netuid); Active::::remove(netuid); Emission::::remove(netuid); - Incentive::::remove(netuid); + for subid in 0..subsubnets { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + Incentive::::remove(netuid_index); + } Consensus::::remove(netuid); Dividends::::remove(netuid); PruningScores::::remove(netuid); - LastUpdate::::remove(netuid); + for subid in 0..subsubnets { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + LastUpdate::::remove(netuid_index); + } ValidatorPermit::::remove(netuid); ValidatorTrust::::remove(netuid); diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 2f302c2a5e..fc9bbd070f 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -1,19 +1,155 @@ use super::*; use crate::epoch::math::*; +use alloc::collections::BTreeMap; use frame_support::IterableStorageDoubleMap; use safe_math::*; +use sp_std::collections::btree_map::IntoIter; use sp_std::vec; use substrate_fixed::types::{I32F32, I64F64, I96F32}; -use subtensor_runtime_common::{AlphaCurrency, NetUid}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, SubId}; + +#[derive(Debug, Default)] +pub struct EpochTerms { + pub uid: usize, + pub dividend: u16, + pub incentive: u16, + pub validator_emission: AlphaCurrency, + pub server_emission: AlphaCurrency, + pub stake_weight: u16, + pub active: bool, + pub emission: AlphaCurrency, + pub rank: u16, + pub trust: u16, + pub consensus: u16, 
+ pub pruning_score: u16, + pub validator_trust: u16, + pub new_validator_permit: bool, + pub bond: Vec<(u16, u16)>, +} + +pub struct EpochOutput(pub BTreeMap); + +impl EpochOutput { + pub fn as_map(&self) -> &BTreeMap { + &self.0 + } +} + +impl IntoIterator for EpochOutput +where + T: frame_system::Config, + T::AccountId: Ord, +{ + type Item = (T::AccountId, EpochTerms); + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +#[macro_export] +macro_rules! extract_from_sorted_terms { + ($sorted:expr, $field:ident) => {{ + ($sorted) + .iter() + .copied() + .map(|t| t.$field) + .collect::>() + }}; +} impl Pallet { + /// Legacy epoch function interface (TODO: Is only used for tests, remove) + pub fn epoch( + netuid: NetUid, + rao_emission: AlphaCurrency, + ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + // Run subsubnet-style epoch + let output = Self::epoch_subsubnet(netuid, SubId::MAIN, rao_emission); + + // Persist values in legacy format + Self::persist_subsub_epoch_terms(netuid, SubId::MAIN, &output.as_map()); + Self::persist_netuid_epoch_terms(netuid, &output.as_map()); + + // Remap and return + output + .into_iter() + .map(|(hotkey, terms)| (hotkey, terms.validator_emission, terms.server_emission)) + .collect() + } + + /// Legacy epoch_dense function interface (TODO: Is only used for tests, remove) + pub fn epoch_dense( + netuid: NetUid, + rao_emission: AlphaCurrency, + ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + Self::epoch_dense_subsubnet(netuid, SubId::MAIN, rao_emission) + } + + /// Persists per-subsubnet epoch output in state + pub fn persist_subsub_epoch_terms( + netuid: NetUid, + subid: SubId, + output: &BTreeMap, + ) { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + let mut terms_sorted: sp_std::vec::Vec<&EpochTerms> = output.values().collect(); + terms_sorted.sort_unstable_by_key(|t| t.uid); + + let incentive = extract_from_sorted_terms!(terms_sorted, 
incentive); + let bonds: Vec> = terms_sorted + .iter() + .cloned() + .map(|t| t.bond.clone()) + .collect::>(); + + Incentive::::insert(netuid_index, incentive); + bonds.into_iter().enumerate().for_each(|(uid_usize, bond_vec)| { + let uid: u16 = uid_usize + .try_into() + .unwrap_or_default(); + Bonds::::insert(netuid_index, uid, bond_vec); + }); + } + + /// Persists per-netuid epoch output in state + pub fn persist_netuid_epoch_terms(netuid: NetUid, output: &BTreeMap) { + let mut terms_sorted: sp_std::vec::Vec<&EpochTerms> = output.values().collect(); + terms_sorted.sort_unstable_by_key(|t| t.uid); + + let active = extract_from_sorted_terms!(terms_sorted, active); + let emission = extract_from_sorted_terms!(terms_sorted, emission); + let rank = extract_from_sorted_terms!(terms_sorted, rank); + let trust = extract_from_sorted_terms!(terms_sorted, trust); + let consensus = extract_from_sorted_terms!(terms_sorted, consensus); + let dividend = extract_from_sorted_terms!(terms_sorted, dividend); + let pruning_score = extract_from_sorted_terms!(terms_sorted, pruning_score); + let validator_trust = extract_from_sorted_terms!(terms_sorted, validator_trust); + let new_validator_permit = extract_from_sorted_terms!(terms_sorted, new_validator_permit); + + Active::::insert(netuid, active.clone()); + Emission::::insert(netuid, emission); + Rank::::insert(netuid, rank); + Trust::::insert(netuid, trust); + Consensus::::insert(netuid, consensus); + Dividends::::insert(netuid, dividend); + PruningScores::::insert(netuid, pruning_score); + ValidatorTrust::::insert(netuid, validator_trust); + ValidatorPermit::::insert(netuid, new_validator_permit); + } + /// Calculates reward consensus and returns the emissions for uids/hotkeys in a given `netuid`. /// (Dense version used only for testing purposes.) 
#[allow(clippy::indexing_slicing)] - pub fn epoch_dense( + pub fn epoch_dense_subsubnet( netuid: NetUid, + subid: SubId, rao_emission: AlphaCurrency, ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + // Calculate netuid storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + // Get subnetwork size. let n: u16 = Self::get_subnetwork_n(netuid); log::trace!("n: {n:?}"); @@ -35,7 +171,7 @@ impl Pallet { log::trace!("activity_cutoff: {activity_cutoff:?}"); // Last update vector. - let last_update: Vec = Self::get_last_update(netuid); + let last_update: Vec = Self::get_last_update(netuid_index); log::trace!("Last update: {:?}", &last_update); // Inactive mask. @@ -222,12 +358,12 @@ impl Pallet { let mut ema_bonds: Vec>; if Yuma3On::::get(netuid) { // Access network bonds. - let mut bonds: Vec> = Self::get_bonds_fixed_proportion(netuid); + let mut bonds: Vec> = Self::get_bonds_fixed_proportion(netuid_index); inplace_mask_cols(&recently_registered, &mut bonds); // mask outdated bonds log::trace!("B: {:?}", &bonds); // Compute the Exponential Moving Average (EMA) of bonds. - ema_bonds = Self::compute_bonds(netuid, &weights_for_bonds, &bonds, &consensus); + ema_bonds = Self::compute_bonds(netuid_index, &weights_for_bonds, &bonds, &consensus); log::trace!("emaB: {:?}", &ema_bonds); // Normalize EMA bonds. @@ -249,7 +385,7 @@ impl Pallet { } else { // original Yuma - liquid alpha disabled // Access network bonds. - let mut bonds: Vec> = Self::get_bonds(netuid); + let mut bonds: Vec> = Self::get_bonds(netuid_index); // Remove bonds referring to neurons that have registered since last tempo. inplace_mask_cols(&recently_registered, &mut bonds); // mask recently registered bonds inplace_col_normalize(&mut bonds); // sum_i b_ij = 1 @@ -261,7 +397,7 @@ impl Pallet { log::trace!("ΔB: {:?}", &bonds_delta); // Compute the Exponential Moving Average (EMA) of bonds. 
- ema_bonds = Self::compute_ema_bonds_normal(&bonds_delta, &bonds, netuid); + ema_bonds = Self::compute_ema_bonds_normal(&bonds_delta, &bonds, netuid_index); inplace_col_normalize(&mut ema_bonds); // sum_i b_ij = 1 log::trace!("emaB: {:?}", &ema_bonds); @@ -391,7 +527,7 @@ impl Pallet { Rank::::insert(netuid, cloned_ranks); Trust::::insert(netuid, cloned_trust); Consensus::::insert(netuid, cloned_consensus); - Incentive::::insert(netuid, cloned_incentive); + Incentive::::insert(NetUidStorageIndex::from(netuid), cloned_incentive); Dividends::::insert(netuid, cloned_dividends); PruningScores::::insert(netuid, cloned_pruning_scores); ValidatorTrust::::insert(netuid, cloned_validator_trust); @@ -408,11 +544,11 @@ impl Pallet { let new_bonds_row: Vec<(u16, u16)> = (0..n) .zip(vec_fixed_proportions_to_u16(ema_bond.clone())) .collect(); - Bonds::::insert(netuid, i as u16, new_bonds_row); + Bonds::::insert(netuid_index, i as u16, new_bonds_row); } else if validator_permit { // Only overwrite the intersection. let new_empty_bonds_row: Vec<(u16, u16)> = vec![]; - Bonds::::insert(netuid, i as u16, new_empty_bonds_row); + Bonds::::insert(netuid_index, i as u16, new_empty_bonds_row); } }); @@ -441,11 +577,27 @@ impl Pallet { /// * 'debug' ( bool ): /// - Print debugging outputs. /// - #[allow(clippy::indexing_slicing)] - pub fn epoch( + pub fn epoch_subsubnet( netuid: NetUid, + subid: SubId, rao_emission: AlphaCurrency, - ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + ) -> EpochOutput { + // Calculate netuid storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + + // Initialize output keys (neuron hotkeys) and UIDs + let mut terms_map: BTreeMap = Keys::::iter_prefix(netuid) + .map(|(uid, hotkey)| { + ( + hotkey, + EpochTerms { + uid: uid as usize, + ..Default::default() + }, + ) + }) + .collect(); + // Get subnetwork size. 
let n = Self::get_subnetwork_n(netuid); log::trace!("Number of Neurons in Network: {n:?}"); @@ -467,7 +619,7 @@ impl Pallet { log::trace!("activity_cutoff: {activity_cutoff:?}"); // Last update vector. - let last_update: Vec = Self::get_last_update(netuid); + let last_update: Vec = Self::get_last_update(netuid_index); log::trace!("Last update: {:?}", &last_update); // Inactive mask. @@ -488,11 +640,6 @@ impl Pallet { // == Stake == // =========== - let hotkeys: Vec<(u16, T::AccountId)> = - as IterableStorageDoubleMap>::iter_prefix(netuid) - .collect(); - log::debug!("hotkeys: {:?}", &hotkeys); - // Access network stake as normalized vector. let (total_stake, _alpha_stake, _tao_stake): (Vec, Vec, Vec) = Self::get_stake_weights_for_network(netuid); @@ -588,14 +735,15 @@ impl Pallet { // helper: hotkey → uid let uid_of = |acct: &T::AccountId| -> Option { - hotkeys - .iter() - .find(|(_, a)| a == acct) - .map(|(uid, _)| *uid as usize) + if let Some(terms) = terms_map.get(acct) { + Some(terms.uid) + } else { + None + } }; // ---------- v2 ------------------------------------------------------ - for (who, q) in WeightCommits::::iter_prefix(netuid) { + for (who, q) in WeightCommits::::iter_prefix(netuid_index) { for (_, cb, _, _) in q.iter() { if !Self::is_commit_expired(netuid, *cb) { if let Some(i) = uid_of(&who) { @@ -688,7 +836,7 @@ impl Pallet { let mut ema_bonds: Vec>; if Yuma3On::::get(netuid) { // Access network bonds. - let mut bonds = Self::get_bonds_sparse_fixed_proportion(netuid); + let mut bonds = Self::get_bonds_sparse_fixed_proportion(netuid_index); log::trace!("Bonds: {:?}", &bonds); // Remove bonds referring to neurons that have registered since last tempo. @@ -705,7 +853,8 @@ impl Pallet { // Compute the Exponential Moving Average (EMA) of bonds. 
log::trace!("weights_for_bonds: {:?}", &weights_for_bonds); - ema_bonds = Self::compute_bonds_sparse(netuid, &weights_for_bonds, &bonds, &consensus); + ema_bonds = + Self::compute_bonds_sparse(netuid_index, &weights_for_bonds, &bonds, &consensus); log::trace!("emaB: {:?}", &ema_bonds); // Normalize EMA bonds. @@ -727,7 +876,7 @@ impl Pallet { } else { // original Yuma - liquid alpha disabled // Access network bonds. - let mut bonds: Vec> = Self::get_bonds_sparse(netuid); + let mut bonds: Vec> = Self::get_bonds_sparse(netuid_index); log::trace!("B: {:?}", &bonds); // Remove bonds referring to neurons that have registered since last tempo. @@ -756,7 +905,7 @@ impl Pallet { log::trace!("ΔB (norm): {:?}", &bonds_delta); // Compute the Exponential Moving Average (EMA) of bonds. - ema_bonds = Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid); + ema_bonds = Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid_index); // Normalize EMA bonds. inplace_col_normalize_sparse(&mut ema_bonds, n); // sum_i b_ij = 1 log::trace!("Exponential Moving Average Bonds: {:?}", &ema_bonds); @@ -855,9 +1004,9 @@ impl Pallet { let pruning_scores: Vec = normalized_combined_emission.clone(); log::trace!("Pruning Scores: {:?}", &pruning_scores); - // =================== - // == Value storage == - // =================== + // =========================== + // == Populate epoch output == + // =========================== let cloned_stake_weight: Vec = stake .iter() .map(|xi| fixed_proportion_to_u16(*xi)) @@ -888,51 +1037,550 @@ impl Pallet { .iter() .map(|xi| fixed_proportion_to_u16(*xi)) .collect::>(); - StakeWeight::::insert(netuid, cloned_stake_weight.clone()); - Active::::insert(netuid, active.clone()); - Emission::::insert(netuid, cloned_emission); - Rank::::insert(netuid, cloned_ranks); - Trust::::insert(netuid, cloned_trust); - Consensus::::insert(netuid, cloned_consensus); - Incentive::::insert(netuid, cloned_incentive); - Dividends::::insert(netuid, 
cloned_dividends); - PruningScores::::insert(netuid, cloned_pruning_scores); - ValidatorTrust::::insert(netuid, cloned_validator_trust); - ValidatorPermit::::insert(netuid, new_validator_permits.clone()); - new_validator_permits - .iter() - .zip(validator_permits) - .zip(ema_bonds) - .enumerate() - .for_each(|(i, ((new_permit, validator_permit), ema_bond))| { - // Set bonds only if uid retains validator permit, otherwise clear bonds. - if *new_permit { - let new_bonds_row: Vec<(u16, u16)> = ema_bond - .iter() - .map(|(j, value)| (*j, fixed_proportion_to_u16(*value))) - .collect(); - Bonds::::insert(netuid, i as u16, new_bonds_row); - } else if validator_permit { - // Only overwrite the intersection. - let new_empty_bonds_row: Vec<(u16, u16)> = vec![]; - Bonds::::insert(netuid, i as u16, new_empty_bonds_row); - } - }); + for (_hotkey, terms) in terms_map.iter_mut() { + terms.dividend = cloned_dividends.get(terms.uid).copied().unwrap_or_default(); + terms.incentive = cloned_incentive.get(terms.uid).copied().unwrap_or_default(); + terms.validator_emission = validator_emission + .get(terms.uid) + .copied() + .unwrap_or_default(); + terms.server_emission = server_emission.get(terms.uid).copied().unwrap_or_default(); + terms.stake_weight = cloned_stake_weight + .get(terms.uid) + .copied() + .unwrap_or_default(); + terms.active = active.get(terms.uid).copied().unwrap_or_default(); + terms.emission = cloned_emission.get(terms.uid).copied().unwrap_or_default(); + terms.rank = cloned_ranks.get(terms.uid).copied().unwrap_or_default(); + terms.trust = cloned_trust.get(terms.uid).copied().unwrap_or_default(); + terms.consensus = cloned_consensus.get(terms.uid).copied().unwrap_or_default(); + terms.pruning_score = cloned_pruning_scores + .get(terms.uid) + .copied() + .unwrap_or_default(); + terms.validator_trust = cloned_validator_trust + .get(terms.uid) + .copied() + .unwrap_or_default(); + terms.new_validator_permit = new_validator_permits + .get(terms.uid) + .copied() + 
.unwrap_or_default(); + let old_validator_permit = validator_permits + .get(terms.uid) + .copied() + .unwrap_or_default(); + + // Bonds + if terms.new_validator_permit { + let ema_bond = ema_bonds.get(terms.uid).cloned().unwrap_or_default(); + terms.bond = ema_bond + .iter() + .map(|(j, value)| (*j, fixed_proportion_to_u16(*value))) + .collect(); + } else if old_validator_permit { + // Only overwrite the intersection. + terms.bond = vec![]; + } + } - // Emission tuples ( hotkeys, server_emission, validator_emission ) - hotkeys - .into_iter() - .map(|(uid_i, hotkey)| { - ( - hotkey, - server_emission[uid_i as usize], - validator_emission[uid_i as usize], - ) - }) - .collect() + EpochOutput(terms_map) } + // Legacy epoch fn + // #[allow(clippy::indexing_slicing)] + // pub fn epoch( + // netuid: NetUid, + // rao_emission: AlphaCurrency, + // ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + // // Get subnetwork size. + // let n = Self::get_subnetwork_n(netuid); + // log::trace!("Number of Neurons in Network: {n:?}"); + + // // ====================== + // // == Active & updated == + // // ====================== + + // // Get current block. + // let current_block: u64 = Self::get_current_block_as_u64(); + // log::trace!("current_block: {current_block:?}"); + + // // Get tempo. + // let tempo: u64 = Self::get_tempo(netuid).into(); + // log::trace!("tempo:\n{tempo:?}\n"); + + // // Get activity cutoff. + // let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64; + // log::trace!("activity_cutoff: {activity_cutoff:?}"); + + // // Last update vector. + // let last_update: Vec = Self::get_last_update(netuid); + // log::trace!("Last update: {:?}", &last_update); + + // // Inactive mask. + // let inactive: Vec = last_update + // .iter() + // .map(|updated| updated.saturating_add(activity_cutoff) < current_block) + // .collect(); + // log::debug!("Inactive: {:?}", inactive.clone()); + + // // Logical negation of inactive. 
+ // let active: Vec = inactive.iter().map(|&b| !b).collect(); + + // // Block at registration vector (block when each neuron was most recently registered). + // let block_at_registration: Vec = Self::get_block_at_registration(netuid); + // log::trace!("Block at registration: {:?}", &block_at_registration); + + // // =========== + // // == Stake == + // // =========== + + // let hotkeys: Vec<(u16, T::AccountId)> = + // as IterableStorageDoubleMap>::iter_prefix(netuid) + // .collect(); + // log::debug!("hotkeys: {:?}", &hotkeys); + + // // Access network stake as normalized vector. + // let (total_stake, _alpha_stake, _tao_stake): (Vec, Vec, Vec) = + // Self::get_stake_weights_for_network(netuid); + + // // Get the minimum stake required. + // let min_stake = Self::get_stake_threshold(); + + // // Set stake of validators that doesn't meet the staking threshold to 0 as filter. + // let mut filtered_stake: Vec = total_stake + // .iter() + // .map(|&s| { + // if fixed64_to_u64(s) < min_stake { + // return I64F64::from(0); + // } + // s + // }) + // .collect(); + // log::debug!("Filtered stake: {:?}", &filtered_stake); + + // inplace_normalize_64(&mut filtered_stake); + // let stake: Vec = vec_fixed64_to_fixed32(filtered_stake); + // log::debug!("Normalised Stake: {:?}", &stake); + + // // ======================= + // // == Validator permits == + // // ======================= + + // // Get current validator permits. + // let validator_permits: Vec = Self::get_validator_permit(netuid); + // log::trace!("validator_permits: {validator_permits:?}"); + + // // Logical negation of validator_permits. + // let validator_forbids: Vec = validator_permits.iter().map(|&b| !b).collect(); + + // // Get max allowed validators. + // let max_allowed_validators: u16 = Self::get_max_allowed_validators(netuid); + // log::trace!("max_allowed_validators: {max_allowed_validators:?}"); + + // // Get new validator permits. 
+ // let new_validator_permits: Vec = + // is_topk_nonzero(&stake, max_allowed_validators as usize); + // log::trace!("new_validator_permits: {new_validator_permits:?}"); + + // // ================== + // // == Active Stake == + // // ================== + + // let mut active_stake: Vec = stake.clone(); + + // // Remove inactive stake. + // inplace_mask_vector(&inactive, &mut active_stake); + + // // Remove non-validator stake. + // inplace_mask_vector(&validator_forbids, &mut active_stake); + + // // Normalize active stake. + // inplace_normalize(&mut active_stake); + // log::trace!("Active Stake: {:?}", &active_stake); + + // // ============= + // // == Weights == + // // ============= + + // let owner_uid: Option = Self::get_owner_uid(netuid); + + // // Access network weights row unnormalized. + // let mut weights: Vec> = Self::get_weights_sparse(netuid); + // log::trace!("Weights: {:?}", &weights); + + // // Mask weights that are not from permitted validators. + // weights = mask_rows_sparse(&validator_forbids, &weights); + // log::trace!("Weights (permit): {:?}", &weights); + + // // Remove self-weight by masking diagonal; keep owner_uid self-weight. + // if let Some(owner_uid) = owner_uid { + // weights = mask_diag_sparse_except_index(&weights, owner_uid); + // } else { + // weights = mask_diag_sparse(&weights); + // } + // log::trace!("Weights (permit+diag): {:?}", &weights); + + // // Remove weights referring to deregistered neurons. 
+ // weights = vec_mask_sparse_matrix( + // &weights, + // &last_update, + // &block_at_registration, + // &|updated, registered| updated <= registered, + // ); + // log::trace!("Weights (permit+diag+outdate): {:?}", &weights); + + // if Self::get_commit_reveal_weights_enabled(netuid) { + // let mut commit_blocks: Vec = vec![u64::MAX; n as usize]; // MAX ⇒ “no active commit” + + // // helper: hotkey → uid + // let uid_of = |acct: &T::AccountId| -> Option { + // hotkeys + // .iter() + // .find(|(_, a)| a == acct) + // .map(|(uid, _)| *uid as usize) + // }; + + // // ---------- v2 ------------------------------------------------------ + // for (who, q) in WeightCommits::::iter_prefix(netuid) { + // for (_, cb, _, _) in q.iter() { + // if !Self::is_commit_expired(netuid, *cb) { + // if let Some(i) = uid_of(&who) { + // commit_blocks[i] = commit_blocks[i].min(*cb); + // } + // break; // earliest active found + // } + // } + // } + + // // ---------- v3 ------------------------------------------------------ + // for (_epoch, q) in TimelockedWeightCommits::::iter_prefix(netuid) { + // for (who, cb, ..) in q.iter() { + // if !Self::is_commit_expired(netuid, *cb) { + // if let Some(i) = uid_of(who) { + // commit_blocks[i] = commit_blocks[i].min(*cb); + // } + // } + // } + // } + + // weights = vec_mask_sparse_matrix( + // &weights, + // &commit_blocks, + // &block_at_registration, + // &|cb, reg| cb < reg, + // ); + + // log::trace!( + // "Commit-reveal column mask applied ({} masked rows)", + // commit_blocks.iter().filter(|&&cb| cb != u64::MAX).count() + // ); + // } + + // // Normalize remaining weights. 
+ // inplace_row_normalize_sparse(&mut weights); + // log::trace!("Weights (mask+norm): {:?}", &weights); + + // // ================================ + // // == Consensus, Validator Trust == + // // ================================ + + // // Compute preranks: r_j = SUM(i) w_ij * s_i + // let preranks: Vec = matmul_sparse(&weights, &active_stake, n); + // log::trace!("Ranks (before): {:?}", &preranks); + + // // Consensus majority ratio, e.g. 51%. + // let kappa: I32F32 = Self::get_float_kappa(netuid); + // // Calculate consensus as stake-weighted median of weights. + // let consensus: Vec = weighted_median_col_sparse(&active_stake, &weights, n, kappa); + // log::trace!("Consensus: {:?}", &consensus); + + // // Clip weights at majority consensus. + // let clipped_weights: Vec> = col_clip_sparse(&weights, &consensus); + // log::trace!("Clipped Weights: {:?}", &clipped_weights); + + // // Calculate validator trust as sum of clipped weights set by validator. + // let validator_trust: Vec = row_sum_sparse(&clipped_weights); + // log::trace!("Validator Trust: {:?}", &validator_trust); + + // // ============================= + // // == Ranks, Trust, Incentive == + // // ============================= + + // // Compute ranks: r_j = SUM(i) w_ij * s_i. + // let mut ranks: Vec = matmul_sparse(&clipped_weights, &active_stake, n); + // log::trace!("Ranks (after): {:?}", &ranks); + + // // Compute server trust: ratio of rank after vs. rank before. + // let trust: Vec = vecdiv(&ranks, &preranks); // range: I32F32(0, 1) + // log::trace!("Trust: {:?}", &trust); + + // inplace_normalize(&mut ranks); // range: I32F32(0, 1) + // let incentive: Vec = ranks.clone(); + // log::trace!("Incentive (=Rank): {:?}", &incentive); + + // // ========================= + // // == Bonds and Dividends == + // // ========================= + + // // Get validator bonds penalty in [0, 1]. 
+ // let bonds_penalty: I32F32 = Self::get_float_bonds_penalty(netuid); + // // Calculate weights for bonds, apply bonds penalty to weights. + // // bonds_penalty = 0: weights_for_bonds = weights.clone() + // // bonds_penalty = 1: weights_for_bonds = clipped_weights.clone() + // let weights_for_bonds: Vec> = + // interpolate_sparse(&weights, &clipped_weights, n, bonds_penalty); + + // let mut dividends: Vec; + // let mut ema_bonds: Vec>; + // if Yuma3On::::get(netuid) { + // // Access network bonds. + // let mut bonds = Self::get_bonds_sparse_fixed_proportion(netuid); + // log::trace!("Bonds: {:?}", &bonds); + + // // Remove bonds referring to neurons that have registered since last tempo. + // // Mask if: the last tempo block happened *before* the registration block + // // ==> last_tempo <= registered + // let last_tempo: u64 = current_block.saturating_sub(tempo); + // bonds = scalar_vec_mask_sparse_matrix( + // &bonds, + // last_tempo, + // &block_at_registration, + // &|last_tempo, registered| last_tempo <= registered, + // ); + // log::trace!("Bonds: (mask) {:?}", &bonds); + + // // Compute the Exponential Moving Average (EMA) of bonds. + // log::trace!("weights_for_bonds: {:?}", &weights_for_bonds); + // ema_bonds = Self::compute_bonds_sparse(netuid, &weights_for_bonds, &bonds, &consensus); + // log::trace!("emaB: {:?}", &ema_bonds); + + // // Normalize EMA bonds. 
+ // let mut ema_bonds_norm = ema_bonds.clone(); + // inplace_col_normalize_sparse(&mut ema_bonds_norm, n); // sum_i b_ij = 1 + // log::trace!("emaB norm: {:?}", &ema_bonds_norm); + + // // # === Dividend Calculation=== + // let total_bonds_per_validator: Vec = + // row_sum_sparse(&mat_vec_mul_sparse(&ema_bonds_norm, &incentive)); + // log::trace!( + // "total_bonds_per_validator: {:?}", + // &total_bonds_per_validator + // ); + + // dividends = vec_mul(&total_bonds_per_validator, &active_stake); + // inplace_normalize(&mut dividends); + // log::trace!("Dividends: {:?}", ÷nds); + // } else { + // // original Yuma - liquid alpha disabled + // // Access network bonds. + // let mut bonds: Vec> = Self::get_bonds_sparse(netuid); + // log::trace!("B: {:?}", &bonds); + + // // Remove bonds referring to neurons that have registered since last tempo. + // // Mask if: the last tempo block happened *before* the registration block + // // ==> last_tempo <= registered + // let last_tempo: u64 = current_block.saturating_sub(tempo); + // bonds = scalar_vec_mask_sparse_matrix( + // &bonds, + // last_tempo, + // &block_at_registration, + // &|last_tempo, registered| last_tempo <= registered, + // ); + // log::trace!("B (outdatedmask): {:?}", &bonds); + + // // Normalize remaining bonds: sum_i b_ij = 1. + // inplace_col_normalize_sparse(&mut bonds, n); + // log::trace!("B (mask+norm): {:?}", &bonds); + + // // Compute bonds delta column normalized. + // let mut bonds_delta: Vec> = + // row_hadamard_sparse(&weights_for_bonds, &active_stake); // ΔB = W◦S (outdated W masked) + // log::trace!("ΔB: {:?}", &bonds_delta); + + // // Normalize bonds delta. + // inplace_col_normalize_sparse(&mut bonds_delta, n); // sum_i b_ij = 1 + // log::trace!("ΔB (norm): {:?}", &bonds_delta); + + // // Compute the Exponential Moving Average (EMA) of bonds. + // ema_bonds = Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid); + // // Normalize EMA bonds. 
+ // inplace_col_normalize_sparse(&mut ema_bonds, n); // sum_i b_ij = 1 + // log::trace!("Exponential Moving Average Bonds: {:?}", &ema_bonds); + + // // Compute dividends: d_i = SUM(j) b_ij * inc_j. + // // range: I32F32(0, 1) + // dividends = matmul_transpose_sparse(&ema_bonds, &incentive); + // inplace_normalize(&mut dividends); + // log::trace!("Dividends: {:?}", ÷nds); + + // // Column max-upscale EMA bonds for storage: max_i w_ij = 1. + // inplace_col_max_upscale_sparse(&mut ema_bonds, n); + // } + + // // ================================= + // // == Emission and Pruning scores == + // // ================================= + + // // Compute normalized emission scores. range: I32F32(0, 1) + // let combined_emission: Vec = incentive + // .iter() + // .zip(dividends.clone()) + // .map(|(ii, di)| ii.saturating_add(di)) + // .collect(); + // let emission_sum: I32F32 = combined_emission.iter().sum(); + + // let mut normalized_server_emission: Vec = incentive.clone(); // Servers get incentive. + // let mut normalized_validator_emission: Vec = dividends.clone(); // Validators get dividends. + // let mut normalized_combined_emission: Vec = combined_emission.clone(); + // // Normalize on the sum of incentive + dividends. + // inplace_normalize_using_sum(&mut normalized_server_emission, emission_sum); + // inplace_normalize_using_sum(&mut normalized_validator_emission, emission_sum); + // inplace_normalize(&mut normalized_combined_emission); + + // // If emission is zero, replace emission with normalized stake. 
+ // if emission_sum == I32F32::from(0) { + // // no weights set | outdated weights | self_weights + // if is_zero(&active_stake) { + // // no active stake + // normalized_validator_emission.clone_from(&stake); // do not mask inactive, assumes stake is normalized + // normalized_combined_emission.clone_from(&stake); + // } else { + // normalized_validator_emission.clone_from(&active_stake); // emission proportional to inactive-masked normalized stake + // normalized_combined_emission.clone_from(&active_stake); + // } + // } + + // // Compute rao based emission scores. range: I96F32(0, rao_emission) + // let float_rao_emission: I96F32 = I96F32::saturating_from_num(rao_emission); + + // let server_emission: Vec = normalized_server_emission + // .iter() + // .map(|se: &I32F32| I96F32::saturating_from_num(*se).saturating_mul(float_rao_emission)) + // .collect(); + // let server_emission: Vec = server_emission + // .iter() + // .map(|e: &I96F32| e.saturating_to_num::().into()) + // .collect(); + + // let validator_emission: Vec = normalized_validator_emission + // .iter() + // .map(|ve: &I32F32| I96F32::saturating_from_num(*ve).saturating_mul(float_rao_emission)) + // .collect(); + // let validator_emission: Vec = validator_emission + // .iter() + // .map(|e: &I96F32| e.saturating_to_num::().into()) + // .collect(); + + // // Only used to track emission in storage. 
+ // let combined_emission: Vec = normalized_combined_emission + // .iter() + // .map(|ce: &I32F32| I96F32::saturating_from_num(*ce).saturating_mul(float_rao_emission)) + // .collect(); + // let combined_emission: Vec = combined_emission + // .iter() + // .map(|e: &I96F32| AlphaCurrency::from(e.saturating_to_num::())) + // .collect(); + + // log::trace!( + // "Normalized Server Emission: {:?}", + // &normalized_server_emission + // ); + // log::trace!("Server Emission: {:?}", &server_emission); + // log::trace!( + // "Normalized Validator Emission: {:?}", + // &normalized_validator_emission + // ); + // log::trace!("Validator Emission: {:?}", &validator_emission); + // log::trace!( + // "Normalized Combined Emission: {:?}", + // &normalized_combined_emission + // ); + // log::trace!("Combined Emission: {:?}", &combined_emission); + + // // Set pruning scores using combined emission scores. + // let pruning_scores: Vec = normalized_combined_emission.clone(); + // log::trace!("Pruning Scores: {:?}", &pruning_scores); + + // // =================== + // // == Value storage == + // // =================== + // let cloned_stake_weight: Vec = stake + // .iter() + // .map(|xi| fixed_proportion_to_u16(*xi)) + // .collect::>(); + // let cloned_emission = combined_emission.clone(); + // let cloned_ranks: Vec = ranks + // .iter() + // .map(|xi| fixed_proportion_to_u16(*xi)) + // .collect::>(); + // let cloned_trust: Vec = trust + // .iter() + // .map(|xi| fixed_proportion_to_u16(*xi)) + // .collect::>(); + // let cloned_consensus: Vec = consensus + // .iter() + // .map(|xi| fixed_proportion_to_u16(*xi)) + // .collect::>(); + // let cloned_incentive: Vec = incentive + // .iter() + // .map(|xi| fixed_proportion_to_u16(*xi)) + // .collect::>(); + // let cloned_dividends: Vec = dividends + // .iter() + // .map(|xi| fixed_proportion_to_u16(*xi)) + // .collect::>(); + // let cloned_pruning_scores: Vec = vec_max_upscale_to_u16(&pruning_scores); + // let cloned_validator_trust: Vec = 
validator_trust + // .iter() + // .map(|xi| fixed_proportion_to_u16(*xi)) + // .collect::>(); + // StakeWeight::::insert(netuid, cloned_stake_weight.clone()); + // Active::::insert(netuid, active.clone()); + // Emission::::insert(netuid, cloned_emission); + // Rank::::insert(netuid, cloned_ranks); + // Trust::::insert(netuid, cloned_trust); + // Consensus::::insert(netuid, cloned_consensus); + // Incentive::::insert(netuid, cloned_incentive); + // Dividends::::insert(netuid, cloned_dividends); + // PruningScores::::insert(netuid, cloned_pruning_scores); + // ValidatorTrust::::insert(netuid, cloned_validator_trust); + // ValidatorPermit::::insert(netuid, new_validator_permits.clone()); + + // new_validator_permits + // .iter() + // .zip(validator_permits) + // .zip(ema_bonds) + // .enumerate() + // .for_each(|(i, ((new_permit, validator_permit), ema_bond))| { + // // Set bonds only if uid retains validator permit, otherwise clear bonds. + // if *new_permit { + // let new_bonds_row: Vec<(u16, u16)> = ema_bond + // .iter() + // .map(|(j, value)| (*j, fixed_proportion_to_u16(*value))) + // .collect(); + // Bonds::::insert(netuid, i as u16, new_bonds_row); + // } else if validator_permit { + // // Only overwrite the intersection. 
+ // let new_empty_bonds_row: Vec<(u16, u16)> = vec![]; + // Bonds::::insert(netuid, i as u16, new_empty_bonds_row); + // } + // }); + + // // Emission tuples ( hotkeys, server_emission, validator_emission ) + // hotkeys + // .into_iter() + // .map(|(uid_i, hotkey)| { + // ( + // hotkey, + // server_emission[uid_i as usize], + // validator_emission[uid_i as usize], + // ) + // }) + // .collect() + // } + pub fn get_float_rho(netuid: NetUid) -> I32F32 { I32F32::saturating_from_num(Self::get_rho(netuid)) } @@ -963,11 +1611,12 @@ impl Pallet { pub fn get_weights_sparse(netuid: NetUid) -> Vec> { let n = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![]; n]; - for (uid_i, weights_i) in - as IterableStorageDoubleMap>>::iter_prefix( - netuid, - ) - .filter(|(uid_i, _)| *uid_i < n as u16) + for (uid_i, weights_i) in as IterableStorageDoubleMap< + NetUidStorageIndex, + u16, + Vec<(u16, u16)>, + >>::iter_prefix(NetUidStorageIndex::from(netuid)) + .filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, weight_ij) in weights_i.iter().filter(|(uid_j, _)| *uid_j < n as u16) { weights @@ -983,11 +1632,12 @@ impl Pallet { pub fn get_weights(netuid: NetUid) -> Vec> { let n = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![I32F32::saturating_from_num(0.0); n]; n]; - for (uid_i, weights_vec) in - as IterableStorageDoubleMap>>::iter_prefix( - netuid, - ) - .filter(|(uid_i, _)| *uid_i < n as u16) + for (uid_i, weights_vec) in as IterableStorageDoubleMap< + NetUidStorageIndex, + u16, + Vec<(u16, u16)>, + >>::iter_prefix(NetUidStorageIndex::from(netuid)) + .filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, weight_ij) in weights_vec .into_iter() @@ -1005,12 +1655,13 @@ impl Pallet { } /// Output unnormalized sparse bonds, input bonds are assumed to be column max-upscaled in u16. 
- pub fn get_bonds_sparse(netuid: NetUid) -> Vec> { + pub fn get_bonds_sparse(netuid_index: NetUidStorageIndex) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n = Self::get_subnetwork_n(netuid) as usize; let mut bonds: Vec> = vec![vec![]; n]; for (uid_i, bonds_vec) in - as IterableStorageDoubleMap>>::iter_prefix( - netuid, + Bonds::::iter_prefix( + netuid_index, ) .filter(|(uid_i, _)| *uid_i < n as u16) { @@ -1025,12 +1676,13 @@ impl Pallet { } /// Output unnormalized bonds in [n, n] matrix, input bonds are assumed to be column max-upscaled in u16. - pub fn get_bonds(netuid: NetUid) -> Vec> { + pub fn get_bonds(netuid_index: NetUidStorageIndex) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n: usize = Self::get_subnetwork_n(netuid) as usize; let mut bonds: Vec> = vec![vec![I32F32::saturating_from_num(0.0); n]; n]; for (uid_i, bonds_vec) in - as IterableStorageDoubleMap>>::iter_prefix( - netuid, + Bonds::::iter_prefix( + netuid_index, ) .filter(|(uid_i, _)| *uid_i < n as u16) { @@ -1046,7 +1698,7 @@ impl Pallet { bonds } - pub fn get_bonds_fixed_proportion(netuid: NetUid) -> Vec> { + pub fn get_bonds_fixed_proportion(netuid: NetUidStorageIndex) -> Vec> { let mut bonds = Self::get_bonds(netuid); bonds.iter_mut().for_each(|bonds_row| { bonds_row @@ -1056,7 +1708,7 @@ impl Pallet { bonds } - pub fn get_bonds_sparse_fixed_proportion(netuid: NetUid) -> Vec> { + pub fn get_bonds_sparse_fixed_proportion(netuid: NetUidStorageIndex) -> Vec> { let mut bonds = Self::get_bonds_sparse(netuid); bonds.iter_mut().for_each(|bonds_row| { bonds_row @@ -1078,7 +1730,7 @@ impl Pallet { pub fn compute_ema_bonds_normal_sparse( bonds_delta: &[Vec<(u16, I32F32)>], bonds: &[Vec<(u16, I32F32)>], - netuid: NetUid, + netuid: NetUidStorageIndex, ) -> Vec> { // Retrieve the bonds moving average for the given network ID and scale it down. 
let bonds_moving_average: I64F64 = @@ -1112,7 +1764,7 @@ impl Pallet { pub fn compute_ema_bonds_normal( bonds_delta: &[Vec], bonds: &[Vec], - netuid: NetUid, + netuid: NetUidStorageIndex, ) -> Vec> { // Retrieve the bonds moving average for the given network ID and scale it down. let bonds_moving_average: I64F64 = @@ -1146,11 +1798,13 @@ impl Pallet { /// # Returns: /// A vector of EMA bonds. pub fn compute_bonds( - netuid: NetUid, + netuid_index: NetUidStorageIndex, weights: &[Vec], // weights_for_bonds bonds: &[Vec], consensus: &[I32F32], ) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); + // Check if Liquid Alpha is enabled, consensus is not empty, and contains non-zero values. if LiquidAlphaOn::::get(netuid) && !consensus.is_empty() @@ -1167,7 +1821,7 @@ impl Pallet { mat_ema_alpha(weights, bonds, &alphas) } else { // Liquid Alpha is disabled, compute the liquid alpha value. - let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid); + let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid_index); // Compute the Exponential Moving Average (EMA) of bonds using the calculated alpha value. mat_ema(weights, bonds, alpha) @@ -1186,11 +1840,13 @@ impl Pallet { /// # Returns: /// A vector of EMA bonds. pub fn compute_bonds_sparse( - netuid: NetUid, + netuid_index: NetUidStorageIndex, weights: &[Vec<(u16, I32F32)>], bonds: &[Vec<(u16, I32F32)>], consensus: &[I32F32], ) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); + // Check if Liquid Alpha is enabled, consensus is not empty, and contains non-zero values. if LiquidAlphaOn::::get(netuid) && !consensus.is_empty() @@ -1207,7 +1863,7 @@ impl Pallet { mat_ema_alpha_sparse(weights, bonds, &alphas) } else { // Liquid Alpha is disabled, compute the liquid alpha value. 
- let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid); + let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid_index); // Compute the Exponential Moving Average (EMA) of bonds using the calculated alpha value. mat_ema_sparse(weights, bonds, alpha) @@ -1362,7 +2018,7 @@ impl Pallet { clamp_value(alpha, alpha_low, alpha_high) } - pub fn compute_disabled_liquid_alpha(netuid: NetUid) -> I32F32 { + pub fn compute_disabled_liquid_alpha(netuid: NetUidStorageIndex) -> I32F32 { // Retrieve the bonds moving average for the given network ID and scale it down. let bonds_moving_average: I64F64 = I64F64::from_num(Self::get_bonds_moving_average(netuid)) .saturating_div(I64F64::from_num(1_000_000)); @@ -1413,7 +2069,9 @@ impl Pallet { Ok(()) } - pub fn do_reset_bonds(netuid: NetUid, account_id: &T::AccountId) -> Result<(), DispatchError> { + pub fn do_reset_bonds(netuid_index: NetUidStorageIndex, account_id: &T::AccountId) -> Result<(), DispatchError> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); + // check bonds reset enabled for this subnet let bonds_reset_enabled: bool = Self::get_bonds_reset(netuid); if !bonds_reset_enabled { @@ -1421,9 +2079,9 @@ impl Pallet { } if let Ok(uid) = Self::get_uid_for_net_and_hotkey(netuid, account_id) { - for (i, bonds_vec) in Bonds::::iter_prefix(netuid) { + for (i, bonds_vec) in Bonds::::iter_prefix(netuid_index) { Bonds::::insert( - netuid, + netuid_index, i, bonds_vec .clone() diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 7fa6c8a919..da19d9d54e 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -85,7 +85,9 @@ pub mod pallet { use sp_std::vec::Vec; use substrate_fixed::types::{I96F32, U64F64}; use subtensor_macros::freeze_struct; - use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; + use subtensor_runtime_common::{ + AlphaCurrency, Currency, NetUid, NetUidStorageIndex, SubId, TaoCurrency, + }; 
#[cfg(not(feature = "std"))] use alloc::boxed::Box; @@ -1522,7 +1524,7 @@ pub mod pallet { #[pallet::storage] /// --- MAP ( netuid ) --> incentive pub type Incentive = - StorageMap<_, Identity, NetUid, Vec, ValueQuery, EmptyU16Vec>; + StorageMap<_, Identity, NetUidStorageIndex, Vec, ValueQuery, EmptyU16Vec>; #[pallet::storage] /// --- MAP ( netuid ) --> dividends pub type Dividends = @@ -1533,7 +1535,7 @@ pub mod pallet { #[pallet::storage] /// --- MAP ( netuid ) --> last_update pub type LastUpdate = - StorageMap<_, Identity, NetUid, Vec, ValueQuery, EmptyU64Vec>; + StorageMap<_, Identity, NetUidStorageIndex, Vec, ValueQuery, EmptyU64Vec>; #[pallet::storage] /// --- MAP ( netuid ) --> validator_trust pub type ValidatorTrust = @@ -1551,7 +1553,7 @@ pub mod pallet { pub type Weights = StorageDoubleMap< _, Identity, - NetUid, + NetUidStorageIndex, Identity, u16, Vec<(u16, u16)>, @@ -1563,7 +1565,7 @@ pub mod pallet { pub type Bonds = StorageDoubleMap< _, Identity, - NetUid, + NetUidStorageIndex, Identity, u16, Vec<(u16, u16)>, @@ -1670,7 +1672,7 @@ pub mod pallet { pub type WeightCommits = StorageDoubleMap< _, Twox64Concat, - NetUid, + NetUidStorageIndex, Twox64Concat, T::AccountId, VecDeque<(H256, u64, u64, u64)>, @@ -1802,13 +1804,13 @@ pub mod pallet { /// ====================== #[pallet::type_value] /// -- ITEM (Default number of sub-subnets) - pub fn DefaultSubsubnetCount() -> u8 { - 1 + pub fn DefaultSubsubnetCount() -> SubId { + SubId::from(1) } #[pallet::type_value] /// -- ITEM (Maximum number of sub-subnets) - pub fn MaxSubsubnetCount() -> u8 { - 8 + pub fn MaxSubsubnetCount() -> SubId { + SubId::from(8) } #[pallet::type_value] /// -- ITEM (Number of tempos in subnet super-block) @@ -1817,17 +1819,17 @@ pub mod pallet { } #[pallet::type_value] /// -- ITEM (Maximum allowed sub-subnet count decrease per super-block) - pub fn GlobalSubsubnetDecreasePerSuperblock() -> u8 { - 1 + pub fn GlobalSubsubnetDecreasePerSuperblock() -> SubId { + SubId::from(1) } 
#[pallet::storage] /// --- MAP ( netuid ) --> Number of sub-subnets desired by root or subnet owner. pub type SubsubnetCountDesired = - StorageMap<_, Twox64Concat, NetUid, u8, ValueQuery, DefaultSubsubnetCount>; + StorageMap<_, Twox64Concat, NetUid, SubId, ValueQuery, DefaultSubsubnetCount>; #[pallet::storage] /// --- MAP ( netuid ) --> Current number of sub-subnets pub type SubsubnetCountCurrent = - StorageMap<_, Twox64Concat, NetUid, u8, ValueQuery, DefaultSubsubnetCount>; + StorageMap<_, Twox64Concat, NetUid, SubId, ValueQuery, DefaultSubsubnetCount>; /// ================== /// ==== Genesis ===== diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 35439479ab..ad9fc8571c 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -94,6 +94,83 @@ mod dispatches { } } + /// --- Sets the caller weights for the incentive mechanism. The call can be + /// made from the hotkey account so is potentially insecure, however, the damage + /// of changing weights is minimal if caught early. This function includes all the + /// checks that the passed weights meet the requirements. Stored as u16s they represent + /// rational values in the range [0,1] which sum to 1 and can be interpreted as + /// probabilities. The specific weights determine how inflation propagates outward + /// from this peer. + /// + /// Note: The 16 bit integers weights should represent 1.0 as the max u16. + /// However, the function normalizes all integers to u16_max anyway. This means that if the sum of all + /// elements is larger or smaller than the amount of elements * u16_max, all elements + /// will be corrected for this deviation. + /// + /// # Args: + /// * `origin`: (Origin): + /// - The caller, a hotkey who wishes to set their weights. + /// + /// * `netuid` (u16): + /// - The network uid we are setting these weights on. 
+ /// + /// * `dests` (Vec): + /// - The edge endpoint for the weight, i.e. j for w_ij. + /// + /// * 'weights' (Vec): + /// - The u16 integer encoded weights. Interpreted as rational + /// values in the range [0,1]. They must sum to in32::MAX. + /// + /// * 'version_key' ( u64 ): + /// - The network version key to check if the validator is up to date. + /// + /// # Event: + /// * WeightsSet; + /// - On successfully setting the weights on chain. + /// + /// # Raises: + /// * 'SubNetworkDoesNotExist': + /// - Attempting to set weights on a non-existent network. + /// + /// * 'NotRegistered': + /// - Attempting to set weights from a non registered account. + /// + /// * 'WeightVecNotEqualSize': + /// - Attempting to set weights with uids not of same length. + /// + /// * 'DuplicateUids': + /// - Attempting to set weights with duplicate uids. + /// + /// * 'UidsLengthExceedUidsInSubNet': + /// - Attempting to set weights above the max allowed uids. + /// + /// * 'UidVecContainInvalidOne': + /// - Attempting to set weights with invalid uids. + /// + /// * 'WeightVecLengthIsLow': + /// - Attempting to set weights with fewer weights than min. + /// + /// * 'MaxWeightExceeded': + /// - Attempting to set weights with max value exceeding limit. + #[pallet::call_index(114)] + #[pallet::weight((Weight::from_parts(15_540_000_000, 0) + .saturating_add(T::DbWeight::get().reads(4111)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn set_sub_weights( + origin: OriginFor, + netuid: NetUid, + subid: SubId, + dests: Vec, + weights: Vec, + version_key: u64, + ) -> DispatchResult { + if Self::get_commit_reveal_weights_enabled(netuid) { + Err(Error::::CommitRevealEnabled.into()) + } else { + Self::do_set_sub_weights(origin, netuid, subid, dests, weights, version_key) + } + } + /// --- Allows a hotkey to set weights for multiple netuids as a batch. 
/// /// # Args: @@ -163,6 +240,38 @@ mod dispatches { Self::do_commit_weights(origin, netuid, commit_hash) } + /// ---- Used to commit a hash of your weight values to later be revealed. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The signature of the committing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `commit_hash` (`H256`): + /// - The hash representing the committed weights. + /// + /// # Raises: + /// * `CommitRevealDisabled`: + /// - Attempting to commit when the commit-reveal mechanism is disabled. + /// + /// * `TooManyUnrevealedCommits`: + /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. + /// + #[pallet::call_index(115)] + #[pallet::weight((Weight::from_parts(55_130_000, 0) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn commit_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit_hash: H256, + ) -> DispatchResult { + Self::do_commit_sub_weights(origin, netuid, subid, commit_hash) + } + /// --- Allows a hotkey to commit weight hashes for multiple netuids as a batch. /// /// # Args: @@ -249,6 +358,59 @@ mod dispatches { Self::do_reveal_weights(origin, netuid, uids, values, salt, version_key) } + /// ---- Used to reveal the weights for a previously committed hash. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The signature of the revealing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `uids` (`Vec`): + /// - The uids for the weights being revealed. + /// + /// * `values` (`Vec`): + /// - The values of the weights being revealed. + /// + /// * `salt` (`Vec`): + /// - The salt used to generate the commit hash. + /// + /// * `version_key` (`u64`): + /// - The network version key. 
+ /// + /// # Raises: + /// * `CommitRevealDisabled`: + /// - Attempting to reveal weights when the commit-reveal mechanism is disabled. + /// + /// * `NoWeightsCommitFound`: + /// - Attempting to reveal weights without an existing commit. + /// + /// * `ExpiredWeightCommit`: + /// - Attempting to reveal a weight commit that has expired. + /// + /// * `RevealTooEarly`: + /// - Attempting to reveal weights outside the valid reveal period. + /// + /// * `InvalidRevealCommitHashNotMatch`: + /// - The revealed hash does not match any committed hash. + /// + #[pallet::call_index(116)] + #[pallet::weight((Weight::from_parts(122_000_000, 0) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn reveal_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + uids: Vec, + values: Vec, + salt: Vec, + version_key: u64, + ) -> DispatchResult { + Self::do_reveal_sub_weights(origin, netuid, subid, uids, values, salt, version_key) + } + /// ---- Used to commit encrypted commit-reveal v3 weight values to later be revealed. /// /// # Args: @@ -291,6 +453,49 @@ mod dispatches { Self::do_commit_timelocked_weights(origin, netuid, commit, reveal_round, 4) } + /// ---- Used to commit encrypted commit-reveal v3 weight values to later be revealed. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The committing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `commit` (`Vec`): + /// - The encrypted compressed commit. + /// The steps for this are: + /// 1. Instantiate [`WeightsTlockPayload`] + /// 2. Serialize it using the `parity_scale_codec::Encode` trait + /// 3. Encrypt it following the steps (here)[https://github.com/ideal-lab5/tle/blob/f8e6019f0fb02c380ebfa6b30efb61786dede07b/timelock/src/tlock.rs#L283-L336] + /// to produce a [`TLECiphertext`] type. + /// 4. 
Serialize and compress using the `ark-serialize` `CanonicalSerialize` trait. + /// + /// * reveal_round (`u64`): + /// - The drand reveal round which will be available during epoch `n+1` from the current + /// epoch. + /// + /// # Raises: + /// * `CommitRevealV3Disabled`: + /// - Attempting to commit when the commit-reveal mechanism is disabled. + /// + /// * `TooManyUnrevealedCommits`: + /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. + /// + #[pallet::call_index(117)] + #[pallet::weight((Weight::from_parts(77_750_000, 0) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn commit_crv3_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit: BoundedVec>, + reveal_round: u64, + ) -> DispatchResult { + Self::do_commit_timelocked_sub_weights(origin, netuid, subid, commit, reveal_round, 4) + } + /// ---- The implementation for batch revealing committed weights. /// /// # Args: diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 2fab5ecdb4..2fc9517daf 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -41,7 +41,7 @@ mod events { TaoCurrency, ), /// a caller successfully sets their weights on a subnetwork. - WeightsSet(NetUid, u16), + WeightsSet(NetUidStorageIndex, u16), /// a new neuron account has been registered to the chain. NeuronRegistered(NetUid, u16, T::AccountId), /// multiple uids have been concurrently registered. 
diff --git a/pallets/subtensor/src/macros/genesis.rs b/pallets/subtensor/src/macros/genesis.rs index e50bf01d7d..b9378e38f6 100644 --- a/pallets/subtensor/src/macros/genesis.rs +++ b/pallets/subtensor/src/macros/genesis.rs @@ -96,9 +96,9 @@ mod genesis { Active::::mutate(netuid, |v| v.push(true)); Emission::::mutate(netuid, |v| v.push(0.into())); Consensus::::mutate(netuid, |v| v.push(0)); - Incentive::::mutate(netuid, |v| v.push(0)); + Incentive::::mutate(NetUidStorageIndex::from(netuid), |v| v.push(0)); Dividends::::mutate(netuid, |v| v.push(0)); - LastUpdate::::mutate(netuid, |v| v.push(block_number)); + LastUpdate::::mutate(NetUidStorageIndex::from(netuid), |v| v.push(block_number)); PruningScores::::mutate(netuid, |v| v.push(0)); ValidatorTrust::::mutate(netuid, |v| v.push(0)); ValidatorPermit::::mutate(netuid, |v| v.push(false)); diff --git a/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs b/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs index e6a8c72eae..58f880f1e4 100644 --- a/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs +++ b/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs @@ -6,7 +6,7 @@ use frame_support::{ }; use log::info; use sp_std::vec::Vec; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex}; /// Constant for logging purposes const LOG_TARGET: &str = "migrate_delete_subnet_21"; @@ -76,8 +76,8 @@ pub fn migrate_delete_subnet_21() -> Weight { // Remove incentive mechanism memory let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); - let _ = Weights::::clear_prefix(netuid, u32::MAX, None); + let _ = Bonds::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); + let _ = Weights::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); weight.saturating_accrue(T::DbWeight::get().writes(4)); @@ -86,11 +86,11 @@ pub fn 
migrate_delete_subnet_21() -> Weight { Trust::::remove(netuid); Active::::remove(netuid); Emission::::remove(netuid); - Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Consensus::::remove(netuid); Dividends::::remove(netuid); PruningScores::::remove(netuid); - LastUpdate::::remove(netuid); + LastUpdate::::remove(NetUidStorageIndex::from(netuid)); ValidatorPermit::::remove(netuid); ValidatorTrust::::remove(netuid); diff --git a/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs b/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs index c479bd613a..1cfb37d164 100644 --- a/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs +++ b/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs @@ -6,7 +6,7 @@ use frame_support::{ }; use log::info; use sp_std::vec::Vec; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex}; /// Constant for logging purposes const LOG_TARGET: &str = "migrate_delete_subnet_3"; @@ -78,8 +78,8 @@ pub fn migrate_delete_subnet_3() -> Weight { // Remove incentive mechanism memory let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); - let _ = Weights::::clear_prefix(netuid, u32::MAX, None); + let _ = Bonds::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); + let _ = Weights::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); weight.saturating_accrue(T::DbWeight::get().writes(4)); @@ -88,11 +88,11 @@ pub fn migrate_delete_subnet_3() -> Weight { Trust::::remove(netuid); Active::::remove(netuid); Emission::::remove(netuid); - Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Consensus::::remove(netuid); Dividends::::remove(netuid); PruningScores::::remove(netuid); - LastUpdate::::remove(netuid); + LastUpdate::::remove(NetUidStorageIndex::from(netuid)); 
ValidatorPermit::::remove(netuid); ValidatorTrust::::remove(netuid); diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index 7f9dc46bee..308c85cba6 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -7,7 +7,7 @@ use frame_support::pallet_prelude::{Decode, Encode}; use substrate_fixed::types::I64F64; use substrate_fixed::types::I96F32; use subtensor_macros::freeze_struct; -use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, TaoCurrency}; #[freeze_struct("6fc49d5a7dc0e339")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -727,7 +727,8 @@ impl Pallet { liquid_alpha_enabled: Self::get_liquid_alpha_enabled(netuid), // Bonds liquid enabled. alpha_high: Self::get_alpha_values(netuid).1.into(), // Alpha param high alpha_low: Self::get_alpha_values(netuid).0.into(), // Alpha param low - bonds_moving_avg: Self::get_bonds_moving_average(netuid).into(), // Bonds moving avg + bonds_moving_avg: Self::get_bonds_moving_average(NetUidStorageIndex::from(netuid)) + .into(), // Bonds moving avg // Metagraph info. 
hotkeys, // hotkey per UID @@ -740,7 +741,7 @@ impl Pallet { .into_iter() .map(Compact::from) .collect(), // Pruning per UID - last_update: LastUpdate::::get(netuid) + last_update: LastUpdate::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(), // Last update per UID @@ -752,7 +753,7 @@ impl Pallet { .into_iter() .map(Compact::from) .collect(), // Dividends per UID - incentives: Incentive::::get(netuid) + incentives: Incentive::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(), // Mining incentives per UID @@ -1113,7 +1114,9 @@ impl Pallet { }, Some(SelectiveMetagraphIndex::BondsMovingAvg) => SelectiveMetagraph { netuid: netuid.into(), - bonds_moving_avg: Some(Self::get_bonds_moving_average(netuid).into()), + bonds_moving_avg: Some( + Self::get_bonds_moving_average(NetUidStorageIndex::from(netuid)).into(), + ), ..Default::default() }, @@ -1198,7 +1201,7 @@ impl Pallet { Some(SelectiveMetagraphIndex::LastUpdate) => SelectiveMetagraph { netuid: netuid.into(), last_update: Some( - LastUpdate::::get(netuid) + LastUpdate::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(), @@ -1231,7 +1234,7 @@ impl Pallet { Some(SelectiveMetagraphIndex::Incentives) => SelectiveMetagraph { netuid: netuid.into(), incentives: Some( - Incentive::::get(netuid) + Incentive::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(), diff --git a/pallets/subtensor/src/rpc_info/neuron_info.rs b/pallets/subtensor/src/rpc_info/neuron_info.rs index 8eae264c6e..6e29a51ef5 100644 --- a/pallets/subtensor/src/rpc_info/neuron_info.rs +++ b/pallets/subtensor/src/rpc_info/neuron_info.rs @@ -2,7 +2,7 @@ use super::*; use frame_support::pallet_prelude::{Decode, Encode}; extern crate alloc; use codec::Compact; -use subtensor_runtime_common::{AlphaCurrency, NetUid}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex}; #[freeze_struct("9e5a291e7e71482d")] 
#[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -87,16 +87,16 @@ impl Pallet { let active = Self::get_active_for_uid(netuid, uid); let rank = Self::get_rank_for_uid(netuid, uid); let emission = Self::get_emission_for_uid(netuid, uid); - let incentive = Self::get_incentive_for_uid(netuid, uid); + let incentive = Self::get_incentive_for_uid(netuid.into(), uid); let consensus = Self::get_consensus_for_uid(netuid, uid); let trust = Self::get_trust_for_uid(netuid, uid); let validator_trust = Self::get_validator_trust_for_uid(netuid, uid); let dividends = Self::get_dividends_for_uid(netuid, uid); let pruning_score = Self::get_pruning_score_for_uid(netuid, uid); - let last_update = Self::get_last_update_for_uid(netuid, uid); + let last_update = Self::get_last_update_for_uid(NetUidStorageIndex::from(netuid), uid); let validator_permit = Self::get_validator_permit_for_uid(netuid, uid); - let weights = Weights::::get(netuid, uid) + let weights = Weights::::get(NetUidStorageIndex::from(netuid), uid) .into_iter() .filter_map(|(i, w)| { if w > 0 { @@ -107,7 +107,7 @@ impl Pallet { }) .collect::, Compact)>>(); - let bonds = >::get(netuid, uid) + let bonds = Bonds::::get(NetUidStorageIndex::from(netuid), uid) .iter() .filter_map(|(i, b)| { if *b > 0 { @@ -173,13 +173,13 @@ impl Pallet { let active = Self::get_active_for_uid(netuid, uid); let rank = Self::get_rank_for_uid(netuid, uid); let emission = Self::get_emission_for_uid(netuid, uid); - let incentive = Self::get_incentive_for_uid(netuid, uid); + let incentive = Self::get_incentive_for_uid(netuid.into(), uid); let consensus = Self::get_consensus_for_uid(netuid, uid); let trust = Self::get_trust_for_uid(netuid, uid); let validator_trust = Self::get_validator_trust_for_uid(netuid, uid); let dividends = Self::get_dividends_for_uid(netuid, uid); let pruning_score = Self::get_pruning_score_for_uid(netuid, uid); - let last_update = Self::get_last_update_for_uid(netuid, uid); + let last_update = 
Self::get_last_update_for_uid(NetUidStorageIndex::from(netuid), uid); let validator_permit = Self::get_validator_permit_for_uid(netuid, uid); let stake: Vec<(T::AccountId, Compact)> = vec![( diff --git a/pallets/subtensor/src/rpc_info/show_subnet.rs b/pallets/subtensor/src/rpc_info/show_subnet.rs index 2123345a4e..abd9670bb8 100644 --- a/pallets/subtensor/src/rpc_info/show_subnet.rs +++ b/pallets/subtensor/src/rpc_info/show_subnet.rs @@ -4,7 +4,7 @@ use crate::epoch::math::*; use codec::Compact; use frame_support::pallet_prelude::{Decode, Encode}; use substrate_fixed::types::I64F64; -use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, TaoCurrency}; #[freeze_struct("9354762261420485")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -103,7 +103,7 @@ impl Pallet { .into_iter() .map(Compact::from) .collect(); - let last_update: Vec> = LastUpdate::::get(netuid) + let last_update: Vec> = LastUpdate::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(); @@ -115,7 +115,7 @@ impl Pallet { .into_iter() .map(Compact::from) .collect(); - let incentives: Vec> = Incentive::::get(netuid) + let incentives: Vec> = Incentive::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(); diff --git a/pallets/subtensor/src/rpc_info/subnet_info.rs b/pallets/subtensor/src/rpc_info/subnet_info.rs index d1e0a05419..7ca8a8f948 100644 --- a/pallets/subtensor/src/rpc_info/subnet_info.rs +++ b/pallets/subtensor/src/rpc_info/subnet_info.rs @@ -4,7 +4,7 @@ use frame_support::storage::IterableStorageMap; extern crate alloc; use codec::Compact; use substrate_fixed::types::I32F32; -use subtensor_runtime_common::{NetUid, TaoCurrency}; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex, TaoCurrency}; #[freeze_struct("edd6bd3273dfea76")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -286,7 +286,7 @@ impl 
Pallet { let target_regs_per_interval = Self::get_target_registrations_per_interval(netuid); let min_burn = Self::get_min_burn(netuid); let max_burn = Self::get_max_burn(netuid); - let bonds_moving_avg = Self::get_bonds_moving_average(netuid); + let bonds_moving_avg = Self::get_bonds_moving_average(NetUidStorageIndex::from(netuid)); let max_regs_per_block = Self::get_max_registrations_per_block(netuid); let serving_rate_limit = Self::get_serving_rate_limit(netuid); let max_validators = Self::get_max_allowed_validators(netuid); @@ -349,7 +349,7 @@ impl Pallet { let target_regs_per_interval = Self::get_target_registrations_per_interval(netuid); let min_burn = Self::get_min_burn(netuid); let max_burn = Self::get_max_burn(netuid); - let bonds_moving_avg = Self::get_bonds_moving_average(netuid); + let bonds_moving_avg = Self::get_bonds_moving_average(NetUidStorageIndex::from(netuid)); let max_regs_per_block = Self::get_max_registrations_per_block(netuid); let serving_rate_limit = Self::get_serving_rate_limit(netuid); let max_validators = Self::get_max_allowed_validators(netuid); diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 816f4818bd..37e20299a9 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -2,10 +2,11 @@ //! 
use super::*; +use crate::epoch::run_epoch::EpochTerms; use alloc::collections::BTreeMap; use safe_math::*; -use sp_runtime::SaturatedConversion; -use subtensor_runtime_common::{AlphaCurrency, NetUid, SubId}; +use substrate_fixed::types::U64F64; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, SubId}; pub type LeaseId = u32; @@ -27,16 +28,62 @@ pub type BalanceOf = pub const GLOBAL_MAX_SUBNET_COUNT: u16 = 1024; impl Pallet { - pub fn get_subsubnet_storage_index(netuid: NetUid, sub_id: SubId) -> NetUid { + pub fn get_subsubnet_storage_index(netuid: NetUid, sub_id: SubId) -> NetUidStorageIndex { u16::from(sub_id) .saturating_mul(GLOBAL_MAX_SUBNET_COUNT) .saturating_add(u16::from(netuid)) .into() } + pub fn get_netuid_and_subid( + sub_or_netid: NetUidStorageIndex, + ) -> Result<(NetUid, SubId), Error> { + let maybe_netuid = u16::from(sub_or_netid).checked_rem(GLOBAL_MAX_SUBNET_COUNT); + if let Some(netuid_u16) = maybe_netuid { + let netuid = NetUid::from(netuid_u16); + + // Make sure the base subnet exists + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + // Extract sub_id + let sub_id_u8 = u8::try_from(u16::from(sub_or_netid).safe_div(GLOBAL_MAX_SUBNET_COUNT)) + .map_err(|_| Error::::SubNetworkDoesNotExist)?; + let sub_id = SubId::from(sub_id_u8); + + if SubsubnetCountCurrent::::get(netuid) > sub_id { + Ok((netuid, sub_id)) + } else { + Err(Error::::SubNetworkDoesNotExist.into()) + } + } else { + Err(Error::::SubNetworkDoesNotExist.into()) + } + } + + pub fn ensure_subsubnet_exists(netuid: NetUid, sub_id: SubId) -> DispatchResult { + // Make sure the base subnet exists + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + // Make sure the subsub limit is not exceeded + ensure!( + SubsubnetCountCurrent::::get(netuid) > sub_id, + Error::::SubNetworkDoesNotExist + ); + Ok(()) + } + /// Set the desired values of sub-subnet count for a subnet identified /// by netuid - pub fn 
do_set_desired_subsubnet_count(netuid: NetUid, subsubnet_count: u8) -> DispatchResult { + pub fn do_set_desired_subsubnet_count( + netuid: NetUid, + subsubnet_count: SubId, + ) -> DispatchResult { // Make sure the subnet exists ensure!( Self::if_subnet_exist(netuid), @@ -44,7 +91,7 @@ impl Pallet { ); // Count cannot be zero - ensure!(subsubnet_count > 0, Error::::InvalidValue); + ensure!(subsubnet_count > 0.into(), Error::::InvalidValue); // Make sure we are not exceeding the max sub-subnet count ensure!( @@ -69,15 +116,14 @@ impl Pallet { let super_block = super_block_tempos.saturating_mul(u64::from(Tempo::::get(netuid))); if let Some(rem) = current_block.checked_rem(super_block) { if rem == 0 { - let old_count = SubsubnetCountCurrent::::get(netuid); - let desired_count = SubsubnetCountDesired::::get(netuid); + let old_count = u8::from(SubsubnetCountCurrent::::get(netuid)); + let desired_count = u8::from(SubsubnetCountDesired::::get(netuid)); let min_possible_count = old_count - .saturating_sub(GlobalSubsubnetDecreasePerSuperblock::::get()) + .saturating_sub(u8::from(GlobalSubsubnetDecreasePerSuperblock::::get())) .max(1); let new_count = desired_count.max(min_possible_count); if old_count > new_count { - todo!(); // Cleanup weights // Cleanup StakeWeight @@ -93,7 +139,7 @@ impl Pallet { // Cleanup ValidatorPermit } - SubsubnetCountCurrent::::insert(netuid, new_count); + SubsubnetCountCurrent::::insert(netuid, SubId::from(new_count)); } } }); @@ -116,6 +162,23 @@ impl Pallet { result } + fn weighted_acc_u16(existing: u16, added: u16, weight: U64F64) -> u16 { + U64F64::saturating_from_num(existing) + .saturating_add(U64F64::saturating_from_num(added).saturating_mul(weight)) + .saturating_to_num::() + } + + fn weighted_acc_alpha( + existing: AlphaCurrency, + added: AlphaCurrency, + weight: U64F64, + ) -> AlphaCurrency { + U64F64::saturating_from_num(existing) + .saturating_add(U64F64::saturating_from_num(added).saturating_mul(weight)) + .saturating_to_num::() + 
.into() + } + /// Splits rao_emission between different sub-subnets using `split_emissions` function. /// /// Runs the epoch function for each sub-subnet and consolidates hotkey_emission @@ -125,37 +188,97 @@ impl Pallet { netuid: NetUid, rao_emission: AlphaCurrency, ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { - let aggregated: BTreeMap = + let aggregated: BTreeMap = Self::split_emissions(netuid, rao_emission) .into_iter() .enumerate() // Run epoch function for each subsubnet to distribute its portion of emissions - .flat_map(|(sub_id, emission)| { - // This is subsubnet ID, e.g. a 0-7 number - let sub_id_u8: u8 = sub_id.saturated_into(); - // This is netuid index for storing subsubnet data in storage maps and for using in - // epoch function - let subsub_netuid = - Self::get_subsubnet_storage_index(netuid, SubId::from(sub_id_u8)); - // epoch returns: Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> - Self::epoch(subsub_netuid, emission).into_iter() + .flat_map(|(sub_id_usize, sub_emission)| { + let sub_id_u8: u8 = sub_id_usize.try_into().unwrap_or_default(); + let sub_id = SubId::from(sub_id_u8); + + // Run epoch function on the subsubnet emission + let epoch_output = Self::epoch_subsubnet(netuid, sub_id, sub_emission); + Self::persist_subsub_epoch_terms(netuid, sub_id, &epoch_output.as_map()); + + // Calculate subsubnet weight from the split emission (not the other way because preserving + // emission accuracy is the priority) + let sub_weight = U64F64::saturating_from_num(sub_emission) + .safe_div(U64F64::saturating_from_num(rao_emission)); + + // Produce an iterator of (hotkey, (terms, sub_weight)) tuples + epoch_output + .0 + .into_iter() + .map(move |(hotkey, terms)| (hotkey, (terms, sub_weight))) }) // Consolidate the hotkey emissions into a single BTreeMap - .fold(BTreeMap::new(), |mut acc, (hotkey, divs, incs)| { + .fold(BTreeMap::new(), |mut acc, (hotkey, (terms, sub_weight))| { + + println!("Hotkey: {:?}, terms: {:?}", hotkey, terms); + 
acc.entry(hotkey) - .and_modify(|tot| { - tot.0 = tot.0.saturating_add(divs); - tot.1 = tot.1.saturating_add(incs); + .and_modify(|acc_terms| { + acc_terms.dividend = Self::weighted_acc_u16( + acc_terms.dividend, + terms.dividend, + sub_weight, + ); + acc_terms.validator_emission = Self::weighted_acc_alpha( + acc_terms.validator_emission, + terms.validator_emission, + sub_weight, + ); + acc_terms.server_emission = Self::weighted_acc_alpha( + acc_terms.server_emission, + terms.server_emission, + sub_weight, + ); + acc_terms.stake_weight = Self::weighted_acc_u16( + acc_terms.stake_weight, + terms.stake_weight, + sub_weight, + ); + acc_terms.active = acc_terms.active | terms.active; + acc_terms.emission = Self::weighted_acc_alpha( + acc_terms.emission, + terms.emission, + sub_weight, + ); + acc_terms.rank = + Self::weighted_acc_u16(acc_terms.rank, terms.rank, sub_weight); + acc_terms.trust = + Self::weighted_acc_u16(acc_terms.trust, terms.trust, sub_weight); + acc_terms.consensus = Self::weighted_acc_u16( + acc_terms.consensus, + terms.consensus, + sub_weight, + ); + acc_terms.pruning_score = Self::weighted_acc_u16( + acc_terms.pruning_score, + terms.pruning_score, + sub_weight, + ); + acc_terms.validator_trust = Self::weighted_acc_u16( + acc_terms.validator_trust, + terms.validator_trust, + sub_weight, + ); + acc_terms.new_validator_permit = + acc_terms.new_validator_permit | terms.new_validator_permit; }) - .or_insert((divs, incs)); + .or_insert(terms); acc }); - // Remap BTreeMap back to Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> format - // for processing in run_coinbase + // State updates from epoch function + Self::persist_netuid_epoch_terms(netuid, &aggregated); + + // Remap BTreeMap back to Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> format + // for processing emissions in run_coinbase aggregated .into_iter() - .map(|(hotkey, (divs, incs))| (hotkey, divs, incs)) + .map(|(hotkey, terms)| (hotkey, terms.validator_emission, terms.server_emission)) 
.collect() } } diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index f5a14c490b..b12aa24e25 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -22,9 +22,12 @@ impl Pallet { Emission::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0.into())); Trust::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); Consensus::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Incentive::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + for subid in 0..SubsubnetCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + Incentive::::mutate(netuid_index, |v| Self::set_element_at(v, neuron_index, 0)); + Bonds::::remove(netuid_index, neuron_uid); // Remove bonds for Validator. + } Dividends::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Bonds::::remove(netuid, neuron_uid); // Remove bonds for Validator. } /// Replace the neuron under this uid. 
@@ -93,9 +96,12 @@ impl Pallet { Active::::mutate(netuid, |v| v.push(true)); Emission::::mutate(netuid, |v| v.push(0.into())); Consensus::::mutate(netuid, |v| v.push(0)); - Incentive::::mutate(netuid, |v| v.push(0)); + for subid in 0..SubsubnetCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + Incentive::::mutate(netuid_index, |v| v.push(0)); + LastUpdate::::mutate(netuid_index, |v| v.push(block_number)); + } Dividends::::mutate(netuid, |v| v.push(0)); - LastUpdate::::mutate(netuid, |v| v.push(block_number)); PruningScores::::mutate(netuid, |v| v.push(0)); ValidatorTrust::::mutate(netuid, |v| v.push(0)); ValidatorPermit::::mutate(netuid, |v| v.push(false)); diff --git a/pallets/subtensor/src/subnets/weights.rs b/pallets/subtensor/src/subnets/weights.rs index 7d49e0d40a..bf9d573644 100644 --- a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -8,7 +8,7 @@ use sp_runtime::{ traits::{BlakeTwo256, Hash}, }; use sp_std::{collections::vec_deque::VecDeque, vec}; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; impl Pallet { /// ---- The implementation for committing weight hashes. @@ -44,6 +44,27 @@ impl Pallet { netuid: NetUid, commit_hash: H256, ) -> DispatchResult { + Self::internal_commit_weights(origin, netuid, SubId::MAIN, commit_hash) + } + + pub fn do_commit_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit_hash: H256, + ) -> DispatchResult { + Self::internal_commit_weights(origin, netuid, subid, commit_hash) + } + + fn internal_commit_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit_hash: H256, + ) -> DispatchResult { + // Calculate subnet storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + // 1. Verify the caller's signature (hotkey). 
let who = ensure_signed(origin)?; @@ -65,7 +86,8 @@ impl Pallet { let commit_block = Self::get_current_block_as_u64(); let neuron_uid = Self::get_uid_for_net_and_hotkey(netuid, &who)?; ensure!( - Self::check_rate_limit(netuid, neuron_uid, commit_block), + // Rate limiting should happen per sub-subnet, so use netuid_index here + Self::check_rate_limit(netuid_index, neuron_uid, commit_block), Error::::CommittingWeightsTooFast ); @@ -73,7 +95,7 @@ impl Pallet { let (first_reveal_block, last_reveal_block) = Self::get_reveal_blocks(netuid, commit_block); // 6. Retrieve or initialize the VecDeque of commits for the hotkey. - WeightCommits::::try_mutate(netuid, &who, |maybe_commits| -> DispatchResult { + WeightCommits::::try_mutate(netuid_index, &who, |maybe_commits| -> DispatchResult { let mut commits: VecDeque<(H256, u64, u64, u64)> = maybe_commits.take().unwrap_or_default(); @@ -104,7 +126,7 @@ impl Pallet { Self::deposit_event(Event::WeightsCommitted(who.clone(), netuid, commit_hash)); // 12. Update the last commit block for the hotkey's UID. - Self::set_last_update_for_uid(netuid, neuron_uid, commit_block); + Self::set_last_update_for_uid(netuid_index, neuron_uid, commit_block); // 13. Return success. 
Ok(()) @@ -233,6 +255,45 @@ impl Pallet { reveal_round: u64, commit_reveal_version: u16, ) -> DispatchResult { + Self::internal_commit_timelocked_weights( + origin, + netuid, + SubId::MAIN, + commit, + reveal_round, + commit_reveal_version, + ) + } + + pub fn do_commit_timelocked_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit: BoundedVec>, + reveal_round: u64, + commit_reveal_version: u16, + ) -> DispatchResult { + Self::internal_commit_timelocked_weights( + origin, + netuid, + subid, + commit, + reveal_round, + commit_reveal_version, + ) + } + + pub fn internal_commit_timelocked_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit: BoundedVec>, + reveal_round: u64, + commit_reveal_version: u16, + ) -> DispatchResult { + // Calculate netuid storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + // 1. Verify the caller's signature (hotkey). let who = ensure_signed(origin)?; @@ -260,7 +321,7 @@ impl Pallet { let commit_block = Self::get_current_block_as_u64(); let neuron_uid = Self::get_uid_for_net_and_hotkey(netuid, &who)?; ensure!( - Self::check_rate_limit(netuid, neuron_uid, commit_block), + Self::check_rate_limit(netuid_index, neuron_uid, commit_block), Error::::CommittingWeightsTooFast ); @@ -297,7 +358,7 @@ impl Pallet { )); // 10. Update the last commit block for the hotkey's UID. - Self::set_last_update_for_uid(netuid, neuron_uid, commit_block); + Self::set_last_update_for_uid(netuid_index, neuron_uid, commit_block); // 11. Return success. 
Ok(()) @@ -348,6 +409,33 @@ impl Pallet { salt: Vec, version_key: u64, ) -> DispatchResult { + Self::internal_reveal_weights(origin, netuid, SubId::MAIN, uids, values, salt, version_key) + } + + pub fn do_reveal_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + uids: Vec, + values: Vec, + salt: Vec, + version_key: u64, + ) -> DispatchResult { + Self::internal_reveal_weights(origin, netuid, subid, uids, values, salt, version_key) + } + + fn internal_reveal_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + uids: Vec, + values: Vec, + salt: Vec, + version_key: u64, + ) -> DispatchResult { + // Calculate netuid storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + // --- 1. Check the caller's signature (hotkey). let who = ensure_signed(origin.clone())?; @@ -360,80 +448,90 @@ impl Pallet { ); // --- 3. Mutate the WeightCommits to retrieve existing commits for the user. - WeightCommits::::try_mutate_exists(netuid, &who, |maybe_commits| -> DispatchResult { - let commits = maybe_commits - .as_mut() - .ok_or(Error::::NoWeightsCommitFound)?; - - // --- 4. Remove any expired commits from the front of the queue, collecting their hashes. - let mut expired_hashes = Vec::new(); - while let Some((hash, commit_block, _, _)) = commits.front() { - if Self::is_commit_expired(netuid, *commit_block) { - // Collect the expired commit hash - expired_hashes.push(*hash); - commits.pop_front(); - } else { - break; - } - } - - // --- 5. Hash the provided data. - let provided_hash: H256 = - Self::get_commit_hash(&who, netuid, &uids, &values, &salt, version_key); - - // --- 6. After removing expired commits, check if any commits are left. - if commits.is_empty() { - // Check if provided_hash matches any expired commits - if expired_hashes.contains(&provided_hash) { - return Err(Error::::ExpiredWeightCommit.into()); - } else { - return Err(Error::::NoWeightsCommitFound.into()); - } - } - - // --- 7. 
Search for the provided_hash in the non-expired commits. - if let Some(position) = commits - .iter() - .position(|(hash, _, _, _)| *hash == provided_hash) - { - // --- 8. Get the commit block for the commit being revealed. - let (_, commit_block, _, _) = commits - .get(position) + WeightCommits::::try_mutate_exists( + netuid_index, + &who, + |maybe_commits| -> DispatchResult { + let commits = maybe_commits + .as_mut() .ok_or(Error::::NoWeightsCommitFound)?; - // --- 9. Ensure the commit is ready to be revealed in the current block range. - ensure!( - Self::is_reveal_block_range(netuid, *commit_block), - Error::::RevealTooEarly - ); - - // --- 10. Remove all commits up to and including the one being revealed. - for _ in 0..=position { - commits.pop_front(); + // --- 4. Remove any expired commits from the front of the queue, collecting their hashes. + let mut expired_hashes = Vec::new(); + while let Some((hash, commit_block, _, _)) = commits.front() { + if Self::is_commit_expired(netuid, *commit_block) { + // Collect the expired commit hash + expired_hashes.push(*hash); + commits.pop_front(); + } else { + break; + } } - // --- 11. If the queue is now empty, remove the storage entry for the user. + // --- 5. Hash the provided data. + let provided_hash: H256 = + Self::get_commit_hash(&who, netuid, &uids, &values, &salt, version_key); + + // --- 6. After removing expired commits, check if any commits are left. if commits.is_empty() { - *maybe_commits = None; + // Check if provided_hash matches any expired commits + if expired_hashes.contains(&provided_hash) { + return Err(Error::::ExpiredWeightCommit.into()); + } else { + return Err(Error::::NoWeightsCommitFound.into()); + } } - // --- 12. Proceed to set the revealed weights. - Self::do_set_weights(origin, netuid, uids.clone(), values.clone(), version_key)?; + // --- 7. Search for the provided_hash in the non-expired commits. 
+ if let Some(position) = commits + .iter() + .position(|(hash, _, _, _)| *hash == provided_hash) + { + // --- 8. Get the commit block for the commit being revealed. + let (_, commit_block, _, _) = commits + .get(position) + .ok_or(Error::::NoWeightsCommitFound)?; + + // --- 9. Ensure the commit is ready to be revealed in the current block range. + ensure!( + Self::is_reveal_block_range(netuid, *commit_block), + Error::::RevealTooEarly + ); + + // --- 10. Remove all commits up to and including the one being revealed. + for _ in 0..=position { + commits.pop_front(); + } - // --- 13. Emit the WeightsRevealed event. - Self::deposit_event(Event::WeightsRevealed(who.clone(), netuid, provided_hash)); + // --- 11. If the queue is now empty, remove the storage entry for the user. + if commits.is_empty() { + *maybe_commits = None; + } - // --- 14. Return ok. - Ok(()) - } else { - // --- 15. The provided_hash does not match any non-expired commits. - if expired_hashes.contains(&provided_hash) { - Err(Error::::ExpiredWeightCommit.into()) + // --- 12. Proceed to set the revealed weights. + Self::do_set_weights( + origin, + netuid, + uids.clone(), + values.clone(), + version_key, + )?; + + // --- 13. Emit the WeightsRevealed event. + Self::deposit_event(Event::WeightsRevealed(who.clone(), netuid, provided_hash)); + + // --- 14. Return ok. + Ok(()) } else { - Err(Error::::InvalidRevealCommitHashNotMatch.into()) + // --- 15. The provided_hash does not match any non-expired commits. + if expired_hashes.contains(&provided_hash) { + Err(Error::::ExpiredWeightCommit.into()) + } else { + Err(Error::::InvalidRevealCommitHashNotMatch.into()) + } } - } - }) + }, + ) } /// ---- The implementation for batch revealing committed weights. @@ -483,6 +581,9 @@ impl Pallet { salts_list: Vec>, version_keys: Vec, ) -> DispatchResult { + // Calculate netuid storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, SubId::MAIN); + // --- 1. 
Check that the input lists are of the same length. let num_reveals = uids_list.len(); ensure!( @@ -504,176 +605,128 @@ impl Pallet { ); // --- 4. Mutate the WeightCommits to retrieve existing commits for the user. - WeightCommits::::try_mutate_exists(netuid, &who, |maybe_commits| -> DispatchResult { - let commits = maybe_commits - .as_mut() - .ok_or(Error::::NoWeightsCommitFound)?; - - // --- 5. Remove any expired commits from the front of the queue, collecting their hashes. - let mut expired_hashes = Vec::new(); - while let Some((hash, commit_block, _, _)) = commits.front() { - if Self::is_commit_expired(netuid, *commit_block) { - // Collect the expired commit hash - expired_hashes.push(*hash); - commits.pop_front(); - } else { - break; - } - } - - // --- 6. Prepare to collect all provided hashes and their corresponding reveals. - let mut provided_hashes = Vec::new(); - let mut reveals = Vec::new(); - let mut revealed_hashes: Vec = Vec::with_capacity(num_reveals); - - for ((uids, values), (salt, version_key)) in uids_list - .into_iter() - .zip(values_list) - .zip(salts_list.into_iter().zip(version_keys)) - { - // --- 6a. Hash the provided data. - let provided_hash: H256 = BlakeTwo256::hash_of(&( - who.clone(), - netuid, - uids.clone(), - values.clone(), - salt.clone(), - version_key, - )); - provided_hashes.push(provided_hash); - reveals.push((uids, values, version_key, provided_hash)); - } + WeightCommits::::try_mutate_exists( + netuid_index, + &who, + |maybe_commits| -> DispatchResult { + let commits = maybe_commits + .as_mut() + .ok_or(Error::::NoWeightsCommitFound)?; - // --- 7. Validate all reveals first to ensure atomicity. - for (_uids, _values, _version_key, provided_hash) in &reveals { - // --- 7a. Check if the provided_hash is in the non-expired commits. - if !commits - .iter() - .any(|(hash, _, _, _)| *hash == *provided_hash) - { - // --- 7b. If not found, check if it matches any expired commits. 
- if expired_hashes.contains(provided_hash) { - return Err(Error::::ExpiredWeightCommit.into()); + // --- 5. Remove any expired commits from the front of the queue, collecting their hashes. + let mut expired_hashes = Vec::new(); + while let Some((hash, commit_block, _, _)) = commits.front() { + if Self::is_commit_expired(netuid, *commit_block) { + // Collect the expired commit hash + expired_hashes.push(*hash); + commits.pop_front(); } else { - return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + break; } } - // --- 7c. Find the commit corresponding to the provided_hash. - let commit = commits - .iter() - .find(|(hash, _, _, _)| *hash == *provided_hash) - .ok_or(Error::::NoWeightsCommitFound)?; - - // --- 7d. Check if the commit is within the reveal window. - ensure!( - Self::is_reveal_block_range(netuid, commit.1), - Error::::RevealTooEarly - ); - } + // --- 6. Prepare to collect all provided hashes and their corresponding reveals. + let mut provided_hashes = Vec::new(); + let mut reveals = Vec::new(); + let mut revealed_hashes: Vec = Vec::with_capacity(num_reveals); - // --- 8. All reveals are valid. Proceed to remove and process each reveal. - for (uids, values, version_key, provided_hash) in reveals { - // --- 8a. Find the position of the provided_hash. - if let Some(position) = commits - .iter() - .position(|(hash, _, _, _)| *hash == provided_hash) + for ((uids, values), (salt, version_key)) in uids_list + .into_iter() + .zip(values_list) + .zip(salts_list.into_iter().zip(version_keys)) { - // --- 8b. Remove the commit from the queue. - commits.remove(position); + // --- 6a. Hash the provided data. + let provided_hash: H256 = BlakeTwo256::hash_of(&( + who.clone(), + netuid, + uids.clone(), + values.clone(), + salt.clone(), + version_key, + )); + provided_hashes.push(provided_hash); + reveals.push((uids, values, version_key, provided_hash)); + } - // --- 8c. Proceed to set the revealed weights. 
- Self::do_set_weights(origin.clone(), netuid, uids, values, version_key)?; + // --- 7. Validate all reveals first to ensure atomicity. + for (_uids, _values, _version_key, provided_hash) in &reveals { + // --- 7a. Check if the provided_hash is in the non-expired commits. + if !commits + .iter() + .any(|(hash, _, _, _)| *hash == *provided_hash) + { + // --- 7b. If not found, check if it matches any expired commits. + if expired_hashes.contains(provided_hash) { + return Err(Error::::ExpiredWeightCommit.into()); + } else { + return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + } + } - // --- 8d. Collect the revealed hash. - revealed_hashes.push(provided_hash); - } else if expired_hashes.contains(&provided_hash) { - return Err(Error::::ExpiredWeightCommit.into()); - } else { - return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + // --- 7c. Find the commit corresponding to the provided_hash. + let commit = commits + .iter() + .find(|(hash, _, _, _)| *hash == *provided_hash) + .ok_or(Error::::NoWeightsCommitFound)?; + + // --- 7d. Check if the commit is within the reveal window. + ensure!( + Self::is_reveal_block_range(netuid, commit.1), + Error::::RevealTooEarly + ); } - } - // --- 9. If the queue is now empty, remove the storage entry for the user. - if commits.is_empty() { - *maybe_commits = None; - } + // --- 8. All reveals are valid. Proceed to remove and process each reveal. + for (uids, values, version_key, provided_hash) in reveals { + // --- 8a. Find the position of the provided_hash. + if let Some(position) = commits + .iter() + .position(|(hash, _, _, _)| *hash == provided_hash) + { + // --- 8b. Remove the commit from the queue. + commits.remove(position); + + // --- 8c. Proceed to set the revealed weights. + Self::do_set_weights(origin.clone(), netuid, uids, values, version_key)?; + + // --- 8d. Collect the revealed hash. 
+ revealed_hashes.push(provided_hash); + } else if expired_hashes.contains(&provided_hash) { + return Err(Error::::ExpiredWeightCommit.into()); + } else { + return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + } + } - // --- 10. Emit the WeightsBatchRevealed event with all revealed hashes. - Self::deposit_event(Event::WeightsBatchRevealed( - who.clone(), - netuid, - revealed_hashes, - )); + // --- 9. If the queue is now empty, remove the storage entry for the user. + if commits.is_empty() { + *maybe_commits = None; + } - // --- 11. Return ok. - Ok(()) - }) + // --- 10. Emit the WeightsBatchRevealed event with all revealed hashes. + Self::deposit_event(Event::WeightsBatchRevealed( + who.clone(), + netuid, + revealed_hashes, + )); + + // --- 11. Return ok. + Ok(()) + }, + ) } - /// ---- The implementation for the extrinsic set_weights. - /// - /// # Args: - /// * 'origin': (RuntimeOrigin): - /// - The signature of the calling hotkey. - /// - /// * 'netuid' (u16): - /// - The u16 network identifier. - /// - /// * 'uids' ( Vec ): - /// - The uids of the weights to be set on the chain. - /// - /// * 'values' ( Vec ): - /// - The values of the weights to set on the chain. - /// - /// * 'version_key' ( u64 ): - /// - The network version key. - /// - /// # Event: - /// * WeightsSet; - /// - On successfully setting the weights on chain. - /// - /// # Raises: - /// * 'SubNetworkDoesNotExist': - /// - Attempting to set weights on a non-existent network. - /// - /// * 'NotRegistered': - /// - Attempting to set weights from a non registered account. - /// - /// * 'IncorrectWeightVersionKey': - /// - Attempting to set weights without having an up-to-date version_key. - /// - /// * 'SettingWeightsTooFast': - /// - Attempting to set weights faster than the weights_set_rate_limit. - /// - /// * 'NeuronNoValidatorPermit': - /// - Attempting to set non-self weights without a validator permit. 
- /// - /// * 'WeightVecNotEqualSize': - /// - Attempting to set weights with uids not of same length. - /// - /// * 'DuplicateUids': - /// - Attempting to set weights with duplicate uids. - /// - /// * 'UidsLengthExceedUidsInSubNet': - /// - Attempting to set weights above the max allowed uids. - /// - /// * 'UidVecContainInvalidOne': - /// - Attempting to set weights with invalid uids. - /// - /// * 'WeightVecLengthIsLow': - /// - Attempting to set weights with fewer weights than min. - /// - /// * 'MaxWeightExceeded': - /// - Attempting to set weights with max value exceeding limit. - /// - pub fn do_set_weights( + fn internal_set_weights( origin: T::RuntimeOrigin, netuid: NetUid, + subid: SubId, uids: Vec, values: Vec, version_key: u64, ) -> dispatch::DispatchResult { + // Calculate subnet storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + // --- 1. Check the caller's signature. This is the hotkey of a registered account. let hotkey = ensure_signed(origin)?; log::debug!( @@ -689,11 +742,8 @@ impl Pallet { Error::::WeightVecNotEqualSize ); - // --- 3. Check to see if this is a valid network. - ensure!( - Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist - ); + // --- 3. Check to see if this is a valid network and sub-subnet. + Self::ensure_subsubnet_exists(netuid, subid)?; // --- 4. Check to see if the number of uids is within the max allowed uids for this network. ensure!( @@ -724,7 +774,8 @@ impl Pallet { let current_block: u64 = Self::get_current_block_as_u64(); if !Self::get_commit_reveal_weights_enabled(netuid) { ensure!( - Self::check_rate_limit(netuid, neuron_uid, current_block), + // Rate limit should apply per sub-subnet, so use netuid_index here + Self::check_rate_limit(netuid_index, neuron_uid, current_block), Error::::SettingWeightsTooFast ); } @@ -764,22 +815,158 @@ impl Pallet { zipped_weights.push((*uid, *val)) } - // --- 17. Set weights under netuid, uid double map entry. 
- Weights::::insert(netuid, neuron_uid, zipped_weights); + // --- 17. Set weights under netuid_index (sub-subnet), uid double map entry. + Weights::::insert(netuid_index, neuron_uid, zipped_weights); // --- 18. Set the activity for the weights on this network. if !Self::get_commit_reveal_weights_enabled(netuid) { - Self::set_last_update_for_uid(netuid, neuron_uid, current_block); + Self::set_last_update_for_uid(netuid_index, neuron_uid, current_block); } // --- 19. Emit the tracking event. - log::debug!("WeightsSet( netuid:{netuid:?}, neuron_uid:{neuron_uid:?} )"); - Self::deposit_event(Event::WeightsSet(netuid, neuron_uid)); + log::debug!("WeightsSet( netuid:{netuid_index:?}, neuron_uid:{neuron_uid:?} )"); + Self::deposit_event(Event::WeightsSet(netuid_index, neuron_uid)); // --- 20. Return ok. Ok(()) } + /// ---- The implementation for the extrinsic set_weights. + /// + /// # Args: + /// * 'origin': (RuntimeOrigin): + /// - The signature of the calling hotkey. + /// + /// * 'netuid' (u16): + /// - The u16 network identifier. + /// + /// * 'uids' ( Vec ): + /// - The uids of the weights to be set on the chain. + /// + /// * 'values' ( Vec ): + /// - The values of the weights to set on the chain. + /// + /// * 'version_key' ( u64 ): + /// - The network version key. + /// + /// # Event: + /// * WeightsSet; + /// - On successfully setting the weights on chain. + /// + /// # Raises: + /// * 'SubNetworkDoesNotExist': + /// - Attempting to set weights on a non-existent network. + /// + /// * 'NotRegistered': + /// - Attempting to set weights from a non registered account. + /// + /// * 'IncorrectWeightVersionKey': + /// - Attempting to set weights without having an up-to-date version_key. + /// + /// * 'SettingWeightsTooFast': + /// - Attempting to set weights faster than the weights_set_rate_limit. + /// + /// * 'NeuronNoValidatorPermit': + /// - Attempting to set non-self weights without a validator permit. 
+ /// + /// * 'WeightVecNotEqualSize': + /// - Attempting to set weights with uids not of same length. + /// + /// * 'DuplicateUids': + /// - Attempting to set weights with duplicate uids. + /// + /// * 'UidsLengthExceedUidsInSubNet': + /// - Attempting to set weights above the max allowed uids. + /// + /// * 'UidVecContainInvalidOne': + /// - Attempting to set weights with invalid uids. + /// + /// * 'WeightVecLengthIsLow': + /// - Attempting to set weights with fewer weights than min. + /// + /// * 'MaxWeightExceeded': + /// - Attempting to set weights with max value exceeding limit. + /// + pub fn do_set_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + uids: Vec, + values: Vec, + version_key: u64, + ) -> dispatch::DispatchResult { + Self::internal_set_weights(origin, netuid, SubId::MAIN, uids, values, version_key) + } + + /// ---- The implementation for the extrinsic set_weights. + /// + /// # Args: + /// * 'origin': (RuntimeOrigin): + /// - The signature of the calling hotkey. + /// + /// * 'netuid' (u16): + /// - The u16 network identifier. + /// + /// * 'subid' (u8): + /// - The u8 identifier of sub-subnet. + /// + /// * 'uids' ( Vec ): + /// - The uids of the weights to be set on the chain. + /// + /// * 'values' ( Vec ): + /// - The values of the weights to set on the chain. + /// + /// * 'version_key' ( u64 ): + /// - The network version key. + /// + /// # Event: + /// * WeightsSet; + /// - On successfully setting the weights on chain. + /// + /// # Raises: + /// * 'SubNetworkDoesNotExist': + /// - Attempting to set weights on a non-existent network. + /// + /// * 'NotRegistered': + /// - Attempting to set weights from a non registered account. + /// + /// * 'IncorrectWeightVersionKey': + /// - Attempting to set weights without having an up-to-date version_key. + /// + /// * 'SettingWeightsTooFast': + /// - Attempting to set weights faster than the weights_set_rate_limit. 
+ /// + /// * 'NeuronNoValidatorPermit': + /// - Attempting to set non-self weights without a validator permit. + /// + /// * 'WeightVecNotEqualSize': + /// - Attempting to set weights with uids not of same length. + /// + /// * 'DuplicateUids': + /// - Attempting to set weights with duplicate uids. + /// + /// * 'UidsLengthExceedUidsInSubNet': + /// - Attempting to set weights above the max allowed uids. + /// + /// * 'UidVecContainInvalidOne': + /// - Attempting to set weights with invalid uids. + /// + /// * 'WeightVecLengthIsLow': + /// - Attempting to set weights with fewer weights than min. + /// + /// * 'MaxWeightExceeded': + /// - Attempting to set weights with max value exceeding limit. + /// + pub fn do_set_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + uids: Vec, + values: Vec, + version_key: u64, + ) -> dispatch::DispatchResult { + Self::internal_set_weights(origin, netuid, subid, uids, values, version_key) + } + /// ---- The implementation for the extrinsic batch_set_weights. /// /// This call runs a batch of set weights calls, continuing on errors. @@ -887,10 +1074,15 @@ impl Pallet { /// Checks if the neuron has set weights within the weights_set_rate_limit. /// - pub fn check_rate_limit(netuid: NetUid, neuron_uid: u16, current_block: u64) -> bool { + pub fn check_rate_limit( + netuid_index: NetUidStorageIndex, + neuron_uid: u16, + current_block: u64, + ) -> bool { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); if Self::is_uid_exist_on_network(netuid, neuron_uid) { // --- 1. Ensure that the diff between current and last_set weights is greater than limit. - let last_set_weights: u64 = Self::get_last_update_for_uid(netuid, neuron_uid); + let last_set_weights: u64 = Self::get_last_update_for_uid(netuid_index, neuron_uid); if last_set_weights == 0 { return true; } // (Storage default) Never set weights. 
diff --git a/pallets/subtensor/src/swap/swap_hotkey.rs b/pallets/subtensor/src/swap/swap_hotkey.rs index e233460e39..177708ed90 100644 --- a/pallets/subtensor/src/swap/swap_hotkey.rs +++ b/pallets/subtensor/src/swap/swap_hotkey.rs @@ -2,7 +2,7 @@ use super::*; use frame_support::weights::Weight; use sp_core::Get; use substrate_fixed::types::U64F64; -use subtensor_runtime_common::{Currency, NetUid}; +use subtensor_runtime_common::{Currency, NetUid, SubId}; impl Pallet { /// Swaps the hotkey of a coldkey account. @@ -414,10 +414,15 @@ impl Pallet { // 3.5 Swap WeightCommits // WeightCommits( hotkey ) --> Vec -- the weight commits for the hotkey. if is_network_member { - if let Ok(old_weight_commits) = WeightCommits::::try_get(netuid, old_hotkey) { - WeightCommits::::remove(netuid, old_hotkey); - WeightCommits::::insert(netuid, new_hotkey, old_weight_commits); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + for subid in 0..SubsubnetCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_subsubnet_storage_index(netuid, SubId::from(subid)); + if let Ok(old_weight_commits) = + WeightCommits::::try_get(netuid_index, old_hotkey) + { + WeightCommits::::remove(netuid_index, old_hotkey); + WeightCommits::::insert(netuid_index, new_hotkey, old_weight_commits); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + } } } diff --git a/pallets/subtensor/src/tests/children.rs b/pallets/subtensor/src/tests/children.rs index 41d25c8aea..6a6bc639b4 100644 --- a/pallets/subtensor/src/tests/children.rs +++ b/pallets/subtensor/src/tests/children.rs @@ -6,7 +6,7 @@ use super::mock::*; use approx::assert_abs_diff_eq; use frame_support::{assert_err, assert_noop, assert_ok}; use substrate_fixed::types::{I64F64, I96F32, U96F32}; -use subtensor_runtime_common::{AlphaCurrency, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::SwapHandler; use 
crate::{utils::rate_limiting::TransactionType, *}; @@ -2839,6 +2839,7 @@ fn test_set_weights_no_parent() { /// Test that drain_pending_emission sends childkey take fully to the nominators if childkey /// doesn't have its own stake, independently of parent hotkey take. +/// cargo test --package pallet-subtensor --lib -- tests::children::test_childkey_take_drain --exact --show-output #[allow(clippy::assertions_on_constants)] #[test] fn test_childkey_take_drain() { @@ -2914,12 +2915,12 @@ fn test_childkey_take_drain() { )); // Setup YUMA so that it creates emissions - Weights::::insert(netuid, 0, vec![(2, 0xFFFF)]); - Weights::::insert(netuid, 1, vec![(2, 0xFFFF)]); + Weights::::insert(NetUidStorageIndex::from(netuid), 0, vec![(2, 0xFFFF)]); + Weights::::insert(NetUidStorageIndex::from(netuid), 1, vec![(2, 0xFFFF)]); BlockAtRegistration::::set(netuid, 0, 1); BlockAtRegistration::::set(netuid, 1, 1); BlockAtRegistration::::set(netuid, 2, 1); - LastUpdate::::set(netuid, vec![2, 2, 2]); + LastUpdate::::set(NetUidStorageIndex::from(netuid), vec![2, 2, 2]); Kappa::::set(netuid, u16::MAX / 5); ActivityCutoff::::set(netuid, u16::MAX); // makes all stake active ValidatorPermit::::insert(netuid, vec![true, true, false]); diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index a660f1b815..f6b128c342 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -9,7 +9,7 @@ use frame_support::assert_ok; use pallet_subtensor_swap::position::PositionId; use sp_core::U256; use substrate_fixed::types::{I64F64, I96F32, U96F32}; -use subtensor_runtime_common::AlphaCurrency; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex}; use subtensor_swap_interface::SwapHandler; #[allow(clippy::arithmetic_side_effects)] @@ -2489,7 +2489,7 @@ fn test_drain_pending_emission_zero_emission() { run_to_block_no_epoch(netuid, 50); // Clear incentive and dividends. 
- Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Dividends::::remove(netuid); // Set the emission to be ZERO. @@ -2507,7 +2507,12 @@ fn test_drain_pending_emission_zero_emission() { assert_eq!(new_stake, init_stake.into()); // Check that the incentive and dividends are set by epoch. - assert!(Incentive::::get(netuid).iter().sum::() > 0); + assert!( + Incentive::::get(NetUidStorageIndex::from(netuid)) + .iter() + .sum::() + > 0 + ); assert!(Dividends::::get(netuid).iter().sum::() > 0); }); } @@ -2573,7 +2578,7 @@ fn test_run_coinbase_not_started() { )); // Clear incentive and dividends. - Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Dividends::::remove(netuid); // Step so tempo should run. @@ -2595,7 +2600,12 @@ fn test_run_coinbase_not_started() { assert_eq!(new_stake, init_stake.into()); // Check that the incentive and dividends are set. - assert!(Incentive::::get(netuid).iter().sum::() > 0); + assert!( + Incentive::::get(NetUidStorageIndex::from(netuid)) + .iter() + .sum::() + > 0 + ); assert!(Dividends::::get(netuid).iter().sum::() > 0); }); } @@ -2658,7 +2668,7 @@ fn test_run_coinbase_not_started_start_after() { )); // Clear incentive and dividends. - Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Dividends::::remove(netuid); // Step so tempo should run. 
diff --git a/pallets/subtensor/src/tests/consensus.rs b/pallets/subtensor/src/tests/consensus.rs index 6a7aa7d467..7eb65c3fc0 100644 --- a/pallets/subtensor/src/tests/consensus.rs +++ b/pallets/subtensor/src/tests/consensus.rs @@ -13,6 +13,7 @@ use sp_core::U256; use std::time::Instant; use substrate_fixed::transcendental::{PI, cos, ln, sqrt}; use substrate_fixed::types::{I32F32, I64F64}; +use subtensor_runtime_common::NetUidStorageIndex; pub fn fixed(val: f32) -> I32F32 { I32F32::from_num(val) @@ -134,7 +135,7 @@ fn uid_stats(netuid: NetUid, uid: u16) { ); log::info!( "incentive: {:?}", - SubtensorModule::get_incentive_for_uid(netuid, uid) + SubtensorModule::get_incentive_for_uid(NetUidStorageIndex::from(netuid), uid) ); log::info!( "dividend: {:?}", diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index bdf675648b..07c938be98 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -11,7 +11,7 @@ use frame_support::{assert_err, assert_ok}; use rand::{Rng, SeedableRng, distributions::Uniform, rngs::StdRng, seq::SliceRandom, thread_rng}; use sp_core::{Get, U256}; use substrate_fixed::types::I32F32; -use subtensor_runtime_common::{AlphaCurrency, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::SwapHandler; use super::mock::*; @@ -128,7 +128,7 @@ fn uid_stats(netuid: NetUid, uid: u16) { ); log::info!( "incentive: {:?}", - SubtensorModule::get_incentive_for_uid(netuid, uid) + SubtensorModule::get_incentive_for_uid(NetUidStorageIndex::from(netuid), uid) ); log::info!( "dividend: {:?}", @@ -595,7 +595,10 @@ fn test_1_graph() { assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 0); - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, uid), 0); + assert_eq!( + 
SubtensorModule::get_incentive_for_uid(netuid.into(), uid), + 0 + ); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 0); }); } @@ -657,7 +660,10 @@ fn test_10_graph() { assert_eq!(SubtensorModule::get_rank_for_uid(netuid, i as u16), 0); assert_eq!(SubtensorModule::get_trust_for_uid(netuid, i as u16), 0); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, i as u16), 0); - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, i as u16), 0); + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), i as u16), + 0 + ); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, i as u16), 0); assert_eq!( SubtensorModule::get_emission_for_uid(netuid, i as u16), @@ -705,7 +711,7 @@ fn test_512_graph() { false, u16::MAX, ); - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); for uid in validators { assert_eq!( SubtensorModule::get_total_stake_for_hotkey(&(U256::from(uid))), @@ -714,7 +720,10 @@ fn test_512_graph() { assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 0); - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, uid), 0); + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), + 0 + ); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 1023); // Note D = floor(1 / 64 * 65_535) = 1023 assert_eq!( SubtensorModule::get_emission_for_uid(netuid, uid), @@ -732,7 +741,10 @@ fn test_512_graph() { assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 146); // Note R = floor(1 / (512 - 64) * 65_535) = 146 assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 65535); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 146); // Note C = floor(1 / (512 - 64) * 65_535) = 146 - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, uid), 146); // Note I = floor(1 / (512 - 64) * 
65_535) = 146 + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), + 146 + ); // Note I = floor(1 / (512 - 64) * 65_535) = 146 assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 0); assert_eq!( SubtensorModule::get_emission_for_uid(netuid, uid), @@ -795,10 +807,10 @@ fn test_512_graph_random_weights() { bonds_penalty, ); - let bond = SubtensorModule::get_bonds(netuid); + let bond = SubtensorModule::get_bonds(netuid.into()); for uid in 0..network_n { rank.push(SubtensorModule::get_rank_for_uid(netuid, uid)); - incentive.push(SubtensorModule::get_incentive_for_uid(netuid, uid)); + incentive.push(SubtensorModule::get_incentive_for_uid(netuid.into(), uid)); dividend.push(SubtensorModule::get_dividends_for_uid(netuid, uid)); emission.push(SubtensorModule::get_emission_for_uid(netuid, uid)); bondv.push(bond[uid as usize][validator]); @@ -826,14 +838,14 @@ fn test_512_graph_random_weights() { bonds_penalty, ); // Assert that dense and sparse epoch results are equal - let bond = SubtensorModule::get_bonds(netuid); + let bond = SubtensorModule::get_bonds(netuid.into()); for uid in 0..network_n { assert_eq!( SubtensorModule::get_rank_for_uid(netuid, uid), rank[uid as usize] ); assert_eq!( - SubtensorModule::get_incentive_for_uid(netuid, uid), + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), incentive[uid as usize] ); assert_eq!( @@ -1070,7 +1082,7 @@ fn test_bonds() { E: [49999998, 99999999, 150000000, 200000001, 49998779, 100000610, 149996337, 200004272] P: [0.0499999989, 0.0999999992, 0.1500000006, 0.2000000011, 0.049998779, 0.1000006103, 0.1499963375, 0.2000042726] emaB: [[(4, 0.2499999937), (5, 0.2499999953), (6, 0.2499999937), (7, 0.2499999937)], [(4, 0.4999999942), (5, 0.499999997), (6, 0.4999999942), (7, 0.4999999942)], [(4, 0.7499999937), (5, 0.7499999981), (6, 0.7499999995), (7, 0.7499999995)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = 
SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][4], 16383); assert_eq!(bonds[1][4], 32767); assert_eq!(bonds[2][4], 49151); @@ -1118,7 +1130,7 @@ fn test_bonds() { E: [44998351, 101110561, 151667215, 202223870, 49998779, 100000610, 149996337, 200004272] P: [0.0449983515, 0.1011105615, 0.1516672159, 0.2022238704, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] emaB: [[(4, 0.2225175085), (5, 0.2225175085), (6, 0.2225175085), (7, 0.2225175085)], [(4, 0.499993208), (5, 0.4999932083), (6, 0.4999932083), (7, 0.4999932083)], [(4, 0.7499966028), (5, 0.7499966032), (6, 0.7499966032), (7, 0.7499966032)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][4], 14582); assert_eq!(bonds[1][4], 32767); assert_eq!(bonds[2][4], 49151); @@ -1155,7 +1167,7 @@ fn test_bonds() { E: [40496805, 90999783, 157929636, 210573773, 49998779, 100000610, 149996337, 200004272] P: [0.040496806, 0.0909997837, 0.157929636, 0.2105737738, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] emaB: [[(4, 0.192316476), (5, 0.192316476), (6, 0.192316476), (7, 0.192316476)], [(4, 0.4321515555), (5, 0.4321515558), (6, 0.4321515558), (7, 0.4321515558)], [(4, 0.7499967015), (5, 0.7499967027), (6, 0.7499967027), (7, 0.7499967027)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][4], 12603); assert_eq!(bonds[1][4], 28321); assert_eq!(bonds[2][4], 49151); @@ -1192,7 +1204,7 @@ fn test_bonds() { E: [99999999, 199999999, 299999999, 399999999, 0, 0, 0, 0] P: [0.0999999999, 0.2, 0.2999999998, 0.4, 0, 0, 0, 0] emaB: [[(4, 0.1923094518), (5, 0.1923094518), (6, 0.1923094518), (7, 0.1923094518)], [(4, 0.4321507583), (5, 0.4321507583), (6, 0.4321507583), (7, 0.4321507583)], [(4, 0.7499961846), (5, 0.7499961846), (6, 0.7499961846), 
(7, 0.7499961846)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][7], 12602); assert_eq!(bonds[1][7], 28320); assert_eq!(bonds[2][7], 49150); @@ -1228,7 +1240,7 @@ fn test_bonds() { E: [36443733, 81898628, 163565493, 218092144, 0, 0, 0, 500000000] P: [0.0364437331, 0.081898629, 0.1635654932, 0.2180921442, 0, 0, 0, 0.5] emaB: [[(4, 0.1922941932), (5, 0.1922941932), (6, 0.1922941932), (7, 0.1671024568)], [(4, 0.4321354993), (5, 0.4321354993), (6, 0.4321354993), (7, 0.3755230587)], [(4, 0.7499809256), (5, 0.7499809256), (6, 0.7499809256), (7, 0.749983425)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][7], 10951); assert_eq!(bonds[1][7], 24609); assert_eq!(bonds[2][7], 49150); @@ -1250,7 +1262,7 @@ fn test_bonds() { E: [32799427, 73706612, 168638129, 224855830, 0, 0, 0, 500000000] P: [0.0327994274, 0.0737066122, 0.1686381293, 0.2248558307, 0, 0, 0, 0.5] emaB: [[(4, 0.1922789337), (5, 0.1922789337), (6, 0.1922789337), (7, 0.1458686984)], [(4, 0.4321202405), (5, 0.4321202405), (6, 0.4321202405), (7, 0.3277949789)], [(4, 0.749965667), (5, 0.749965667), (6, 0.749965667), (7, 0.74998335)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][7], 9559); assert_eq!(bonds[1][7], 21482); assert_eq!(bonds[2][7], 49150); @@ -1272,7 +1284,7 @@ fn test_bonds() { E: [29518068, 66336137, 173203134, 230942659, 0, 0, 0, 500000000] P: [0.029518068, 0.0663361375, 0.1732031347, 0.2309426593, 0, 0, 0, 0.5] emaB: [[(4, 0.192263675), (5, 0.192263675), (6, 0.192263675), (7, 0.1278155716)], [(4, 0.4321049813), (5, 0.4321049813), (6, 0.4321049813), (7, 0.2872407278)], [(4, 0.7499504078), (5, 
0.7499504078), (6, 0.7499504078), (7, 0.7499832863)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][7], 8376); assert_eq!(bonds[1][7], 18824); assert_eq!(bonds[2][7], 49150); @@ -1408,7 +1420,7 @@ fn test_active_stake() { } else { SubtensorModule::epoch_dense(netuid, 1_000_000_000.into()); } - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); for uid in 0..n { // log::info!("\n{uid}" ); // uid_stats(netuid, uid); @@ -1473,7 +1485,7 @@ fn test_active_stake() { E: [274999999, 224999999, 250000000, 250000000] P: [0.275, 0.2249999999, 0.25, 0.25] P (u16): [65535, 53619, 59577, 59577] */ - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, 0), 36044); // Note D = floor((0.5 * 0.9 + 0.1) * 65_535) assert_eq!( SubtensorModule::get_emission_for_uid(netuid, 0), @@ -1537,7 +1549,7 @@ fn test_active_stake() { E: [272501132, 227498866, 250000000, 250000000] P: [0.272501133, 0.2274988669, 0.25, 0.25] P (u16): [65535, 54711, 60123, 60123] */ - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, 0), 35716); // Note D = floor((0.55 * 0.9 + 0.5 * 0.1) * 65_535) assert_eq!( SubtensorModule::get_emission_for_uid(netuid, 0), @@ -1736,7 +1748,7 @@ fn test_outdated_weights() { E: [250000000, 250000000, 500000000, 0] P: [0.25, 0.25, 0.5, 0] P (u16): [32767, 32767, 65535, 0] */ - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, 0), 32767); // Note D = floor(0.5 * 65_535) assert_eq!( SubtensorModule::get_emission_for_uid(netuid, 0), @@ -2035,7 +2047,7 @@ fn 
test_deregistered_miner_bonds() { } // Check the bond values for the servers - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); let bond_0_2 = bonds[0][2]; let bond_0_3 = bonds[0][3]; @@ -2107,7 +2119,7 @@ fn test_deregistered_miner_bonds() { } // Check the bond values for the servers - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); let bond_0_2_new = bonds[0][2]; let bond_0_3_new = bonds[0][3]; @@ -2483,11 +2495,15 @@ fn test_can_set_self_weight_as_subnet_owner() { // Set weight of 50% to each hotkey. // This includes a self-weight let fifty_percent: u16 = u16::MAX / 2; - Weights::::insert(netuid, 0, vec![(0, fifty_percent), (1, fifty_percent)]); + Weights::::insert( + NetUidStorageIndex::from(netuid), + 0, + vec![(0, fifty_percent), (1, fifty_percent)], + ); step_block(1); // Set updated so weights are valid - LastUpdate::::insert(netuid, vec![2, 0]); + LastUpdate::::insert(NetUidStorageIndex::from(netuid), vec![2, 0]); // Run epoch let hotkey_emission = SubtensorModule::epoch(netuid, to_emit.into()); @@ -2742,7 +2758,7 @@ fn run_epoch_and_check_bonds_dividends( target_dividends: &[f32], ) { run_epoch(netuid, sparse); - let bonds = SubtensorModule::get_bonds_fixed_proportion(netuid); + let bonds = SubtensorModule::get_bonds_fixed_proportion(netuid.into()); let dividends = SubtensorModule::get_dividends(netuid); let epsilon = I32F32::from_num(1e-3); @@ -3485,7 +3501,7 @@ fn test_yuma_3_bonds_reset() { if epoch == 20 { let hotkey = SubtensorModule::get_hotkey_for_net_and_uid(netuid, 3) .expect("Hotkey not found"); - let _ = SubtensorModule::do_reset_bonds(netuid, &hotkey); + let _ = SubtensorModule::do_reset_bonds(netuid.into(), &hotkey); } } 21 => { @@ -3650,7 +3666,10 @@ fn test_epoch_masks_incoming_to_sniped_uid_prevents_inheritance() { SubtensorModule::epoch(netuid, 1_000.into()); assert_eq!(SubtensorModule::get_rank_for_uid(netuid, new_uid), 0); - 
assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, new_uid), 0); + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), new_uid), + 0 + ); }); } diff --git a/pallets/subtensor/src/tests/staking.rs b/pallets/subtensor/src/tests/staking.rs index 8345d24fff..2013fe35ae 100644 --- a/pallets/subtensor/src/tests/staking.rs +++ b/pallets/subtensor/src/tests/staking.rs @@ -13,7 +13,9 @@ use sp_core::{Get, H256, U256}; use sp_runtime::traits::Dispatchable; use substrate_fixed::traits::FromFixed; use substrate_fixed::types::{I96F32, I110F18, U64F64, U96F32}; -use subtensor_runtime_common::{AlphaCurrency, Currency as CurrencyT, NetUid, TaoCurrency}; +use subtensor_runtime_common::{ + AlphaCurrency, Currency as CurrencyT, NetUid, NetUidStorageIndex, TaoCurrency, +}; use subtensor_swap_interface::{OrderType, SwapHandler}; use super::mock; @@ -2439,12 +2441,12 @@ fn test_mining_emission_distribution_validator_valiminer_miner() { )); // Setup YUMA so that it creates emissions - Weights::::insert(netuid, 0, vec![(1, 0xFFFF)]); - Weights::::insert(netuid, 1, vec![(2, 0xFFFF)]); + Weights::::insert(NetUidStorageIndex::from(netuid), 0, vec![(1, 0xFFFF)]); + Weights::::insert(NetUidStorageIndex::from(netuid), 1, vec![(2, 0xFFFF)]); BlockAtRegistration::::set(netuid, 0, 1); BlockAtRegistration::::set(netuid, 1, 1); BlockAtRegistration::::set(netuid, 2, 1); - LastUpdate::::set(netuid, vec![2, 2, 2]); + LastUpdate::::set(NetUidStorageIndex::from(netuid), vec![2, 2, 2]); Kappa::::set(netuid, u16::MAX / 5); ActivityCutoff::::set(netuid, u16::MAX); // makes all stake active ValidatorPermit::::insert(netuid, vec![true, true, false]); diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 34c7ac1043..4a88fa0d09 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -8,6 +8,5 @@ use super::mock::*; #[test] fn test_subsubnet_emission_proportions() { - 
new_test_ext(1).execute_with(|| { - }); + new_test_ext(1).execute_with(|| {}); } diff --git a/pallets/subtensor/src/tests/swap_hotkey.rs b/pallets/subtensor/src/tests/swap_hotkey.rs index 5a9ebf5127..6910946982 100644 --- a/pallets/subtensor/src/tests/swap_hotkey.rs +++ b/pallets/subtensor/src/tests/swap_hotkey.rs @@ -8,7 +8,7 @@ use frame_system::{Config, RawOrigin}; use sp_core::{Get, H160, H256, U256}; use sp_runtime::SaturatedConversion; use substrate_fixed::types::U64F64; -use subtensor_runtime_common::{AlphaCurrency, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::SwapHandler; use super::mock; @@ -326,7 +326,11 @@ fn test_swap_weight_commits() { add_network(netuid, 1, 1); IsNetworkMember::::insert(old_hotkey, netuid, true); - WeightCommits::::insert(netuid, old_hotkey, weight_commits.clone()); + WeightCommits::::insert( + NetUidStorageIndex::from(netuid), + old_hotkey, + weight_commits.clone(), + ); assert_ok!(SubtensorModule::perform_hotkey_swap_on_all_subnets( &old_hotkey, @@ -335,9 +339,12 @@ fn test_swap_weight_commits() { &mut weight )); - assert!(!WeightCommits::::contains_key(netuid, old_hotkey)); + assert!(!WeightCommits::::contains_key( + NetUidStorageIndex::from(netuid), + old_hotkey + )); assert_eq!( - WeightCommits::::get(netuid, new_hotkey), + WeightCommits::::get(NetUidStorageIndex::from(netuid), new_hotkey), Some(weight_commits) ); }); diff --git a/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs b/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs index 314f72c2bd..7ed0a4b355 100644 --- a/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs +++ b/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs @@ -5,7 +5,7 @@ use codec::Encode; use frame_support::weights::Weight; use frame_support::{assert_err, assert_noop, assert_ok}; use frame_system::{Config, RawOrigin}; -use subtensor_runtime_common::{AlphaCurrency, Currency, TaoCurrency}; +use 
subtensor_runtime_common::{AlphaCurrency, Currency, NetUidStorageIndex, TaoCurrency}; use super::mock::*; use crate::*; @@ -343,7 +343,11 @@ fn test_swap_weight_commits() { SubtensorModule::add_balance_to_coldkey_account(&coldkey, u64::MAX); IsNetworkMember::::insert(old_hotkey, netuid, true); - WeightCommits::::insert(netuid, old_hotkey, weight_commits.clone()); + WeightCommits::::insert( + NetUidStorageIndex::from(netuid), + old_hotkey, + weight_commits.clone(), + ); System::set_block_number(System::block_number() + HotkeySwapOnSubnetInterval::get()); assert_ok!(SubtensorModule::do_swap_hotkey( @@ -353,9 +357,12 @@ fn test_swap_weight_commits() { Some(netuid) )); - assert!(!WeightCommits::::contains_key(netuid, old_hotkey)); + assert!(!WeightCommits::::contains_key( + NetUidStorageIndex::from(netuid), + old_hotkey + )); assert_eq!( - WeightCommits::::get(netuid, new_hotkey), + WeightCommits::::get(NetUidStorageIndex::from(netuid), new_hotkey), Some(weight_commits) ); }); diff --git a/pallets/subtensor/src/tests/uids.rs b/pallets/subtensor/src/tests/uids.rs index bca6945b44..74fb074169 100644 --- a/pallets/subtensor/src/tests/uids.rs +++ b/pallets/subtensor/src/tests/uids.rs @@ -5,7 +5,7 @@ use crate::*; use frame_support::{assert_err, assert_ok}; use frame_system::Config; use sp_core::{H160, U256}; -use subtensor_runtime_common::AlphaCurrency; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex}; /******************************************** tests for uids.rs file @@ -63,13 +63,13 @@ fn test_replace_neuron() { Consensus::::mutate(netuid, |v| { SubtensorModule::set_element_at(v, neuron_uid as usize, 5u16) }); - Incentive::::mutate(netuid, |v| { + Incentive::::mutate(NetUidStorageIndex::from(netuid), |v| { SubtensorModule::set_element_at(v, neuron_uid as usize, 5u16) }); Dividends::::mutate(netuid, |v| { SubtensorModule::set_element_at(v, neuron_uid as usize, 5u16) }); - Bonds::::insert(netuid, neuron_uid, vec![(0, 1)]); + 
Bonds::::insert(NetUidStorageIndex::from(netuid), neuron_uid, vec![(0, 1)]); // serve axon mock address let ip: u128 = 1676056785; @@ -130,7 +130,7 @@ fn test_replace_neuron() { 0 ); assert_eq!( - SubtensorModule::get_incentive_for_uid(netuid, neuron_uid), + SubtensorModule::get_incentive_for_uid(netuid.into(), neuron_uid), 0 ); assert_eq!( @@ -145,7 +145,7 @@ fn test_replace_neuron() { assert_eq!(axon_info.ip_type, 0); // Check bonds are cleared. - assert_eq!(Bonds::::get(netuid, neuron_uid), vec![]); + assert_eq!(Bonds::::get(NetUidStorageIndex::from(netuid), neuron_uid), vec![]); assert_eq!(AssociatedEvmAddress::::get(netuid, neuron_uid), None); }); } @@ -189,7 +189,7 @@ fn test_bonds_cleared_on_replace() { let neuron_uid = neuron_uid.unwrap(); AssociatedEvmAddress::::insert(netuid, neuron_uid, (evm_address, 1)); // set non-default bonds - Bonds::::insert(netuid, neuron_uid, vec![(0, 1)]); + Bonds::::insert(NetUidStorageIndex::from(netuid), neuron_uid, vec![(0, 1)]); // Replace the neuron. SubtensorModule::replace_neuron(netuid, neuron_uid, &new_hotkey_account_id, block_number); @@ -214,7 +214,7 @@ fn test_bonds_cleared_on_replace() { assert_eq!(curr_hotkey.unwrap(), new_hotkey_account_id); // Check bonds are cleared. 
- assert_eq!(Bonds::::get(netuid, neuron_uid), vec![]); + assert_eq!(Bonds::::get(NetUidStorageIndex::from(netuid), neuron_uid), vec![]); assert_eq!(AssociatedEvmAddress::::get(netuid, neuron_uid), None); }); } diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index 4bce2ec3af..648befa3c1 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -21,7 +21,7 @@ use sp_runtime::{ }; use sp_std::collections::vec_deque::VecDeque; use substrate_fixed::types::I32F32; -use subtensor_runtime_common::TaoCurrency; +use subtensor_runtime_common::{NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::SwapHandler; use tle::{ curves::drand::TinyBLS381, @@ -525,7 +525,7 @@ fn test_reveal_weights_validate() { ); // Add the commit to the hotkey - WeightCommits::::mutate(netuid, hotkey, |maybe_commits| { + WeightCommits::::mutate(NetUidStorageIndex::from(netuid), hotkey, |maybe_commits| { let mut commits: VecDeque<(H256, u64, u64, u64)> = maybe_commits.take().unwrap_or_default(); commits.push_back(( @@ -2646,8 +2646,9 @@ fn test_commit_reveal_multiple_commits() { )); // Check that commits before the revealed one are removed - let remaining_commits = crate::WeightCommits::::get(netuid, hotkey) - .expect("expected 8 remaining commits"); + let remaining_commits = + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) + .expect("expected 8 remaining commits"); assert_eq!(remaining_commits.len(), 8); // 10 commits - 2 removed (index 0 and 1) // 4. 
Reveal the last commit next @@ -2662,7 +2663,8 @@ fn test_commit_reveal_multiple_commits() { )); // Remaining commits should have removed up to index 9 - let remaining_commits = crate::WeightCommits::::get(netuid, hotkey); + let remaining_commits = + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(remaining_commits.is_none()); // All commits removed // After revealing all commits, attempt to commit again should now succeed @@ -2907,7 +2909,8 @@ fn test_commit_reveal_multiple_commits() { )); // Check that the first commit has been removed - let remaining_commits = crate::WeightCommits::::get(netuid, hotkey); + let remaining_commits = + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(remaining_commits.is_none()); // Attempting to reveal the first commit should fail as it was removed @@ -3067,7 +3070,8 @@ fn test_expired_commits_handling_in_commit_and_reveal() { // 6. Verify that the number of unrevealed, non-expired commits is now 6 let commits: VecDeque<(H256, u64, u64, u64)> = - crate::WeightCommits::::get(netuid, hotkey).expect("Expected a commit"); + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) + .expect("Expected a commit"); assert_eq!(commits.len(), 6); // 5 non-expired commits from epoch 1 + new commit // 7. Attempt to reveal an expired commit (from epoch 0) @@ -3113,7 +3117,7 @@ fn test_expired_commits_handling_in_commit_and_reveal() { )); // 10. Verify that all commits have been revealed and the queue is empty - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(commits.is_none()); // 11. 
Attempt to reveal again, should fail with NoWeightsCommitFound @@ -3304,7 +3308,7 @@ fn test_reveal_at_exact_epoch() { Error::::ExpiredWeightCommit ); - crate::WeightCommits::::remove(netuid, hotkey); + crate::WeightCommits::::remove(NetUidStorageIndex::from(netuid), hotkey); } }); } @@ -3581,7 +3585,8 @@ fn test_commit_reveal_order_enforcement() { // Check that commits A and B are removed let remaining_commits = - crate::WeightCommits::::get(netuid, hotkey).expect("expected 1 remaining commit"); + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) + .expect("expected 1 remaining commit"); assert_eq!(remaining_commits.len(), 1); // Only commit C should remain // Attempt to reveal C (index 2), should succeed @@ -3776,7 +3781,7 @@ fn test_reveal_at_exact_block() { ); // Clean up for next iteration - crate::WeightCommits::::remove(netuid, hotkey); + crate::WeightCommits::::remove(NetUidStorageIndex::from(netuid), hotkey); } }); } @@ -3854,7 +3859,7 @@ fn test_successful_batch_reveal() { )); // 4. Ensure all commits are removed - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(commits.is_none()); }); } @@ -3955,8 +3960,8 @@ fn test_batch_reveal_with_expired_commits() { assert_err!(result, Error::::ExpiredWeightCommit); // 5. Expired commit is not removed until a successful call - let commits = - crate::WeightCommits::::get(netuid, hotkey).expect("Expected remaining commits"); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) + .expect("Expected remaining commits"); assert_eq!(commits.len(), 3); // 6. Try revealing the remaining commits @@ -3975,7 +3980,7 @@ fn test_batch_reveal_with_expired_commits() { )); // 7. 
Ensure all commits are removed - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(commits.is_none()); }); } @@ -4382,7 +4387,7 @@ fn test_batch_reveal_with_out_of_order_commits() { )); // 6. Ensure all commits are removed - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(commits.is_none()); }); } @@ -4446,7 +4451,7 @@ fn test_highly_concurrent_commits_and_reveals_with_multiple_hotkeys() { for i in 0..commits_per_hotkey { for hotkey in &hotkeys { - let current_commits = crate::WeightCommits::::get(netuid, hotkey) + let current_commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) .unwrap_or_default(); if current_commits.len() >= max_unrevealed_commits { continue; @@ -4795,7 +4800,7 @@ fn test_get_reveal_blocks() { assert_err!(result, Error::::NoWeightsCommitFound); // **15. 
Verify that All Commits Have Been Removed from Storage** - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!( commits.is_none(), "Commits should be cleared after successful reveal" @@ -4851,7 +4856,7 @@ fn test_commit_weights_rate_limit() { let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey).expect("expected uid"); - SubtensorModule::set_last_update_for_uid(netuid, neuron_uid, 0); + SubtensorModule::set_last_update_for_uid(NetUidStorageIndex::from(netuid), neuron_uid, 0); assert_ok!(SubtensorModule::commit_weights( RuntimeOrigin::signed(hotkey), @@ -5388,7 +5393,7 @@ fn test_do_commit_crv3_weights_committing_too_fast() { SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey).expect("Expected uid"); - SubtensorModule::set_last_update_for_uid(netuid, neuron_uid, 0); + SubtensorModule::set_last_update_for_uid(NetUidStorageIndex::from(netuid), neuron_uid, 0); assert_ok!(SubtensorModule::do_commit_timelocked_weights( RuntimeOrigin::signed(hotkey), diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 9fd6d27de7..475a3c1a22 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -8,7 +8,7 @@ use sp_core::Get; use sp_core::U256; use sp_runtime::Saturating; use substrate_fixed::types::{I32F32, U96F32}; -use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, TaoCurrency}; impl Pallet { pub fn ensure_subnet_owner_or_root( @@ -98,13 +98,13 @@ impl Pallet { pub fn get_consensus(netuid: NetUid) -> Vec { Consensus::::get(netuid) } - pub fn get_incentive(netuid: NetUid) -> Vec { + pub fn get_incentive(netuid: NetUidStorageIndex) -> Vec { Incentive::::get(netuid) } pub fn get_dividends(netuid: NetUid) -> 
Vec { Dividends::::get(netuid) } - pub fn get_last_update(netuid: NetUid) -> Vec { + pub fn get_last_update(netuid: NetUidStorageIndex) -> Vec { LastUpdate::::get(netuid) } pub fn get_pruning_score(netuid: NetUid) -> Vec { @@ -120,7 +120,7 @@ impl Pallet { // ================================== // ==== YumaConsensus UID params ==== // ================================== - pub fn set_last_update_for_uid(netuid: NetUid, uid: u16, last_update: u64) { + pub fn set_last_update_for_uid(netuid: NetUidStorageIndex, uid: u16, last_update: u64) { let mut updated_last_update_vec = Self::get_last_update(netuid); let Some(updated_last_update) = updated_last_update_vec.get_mut(uid as usize) else { return; @@ -183,7 +183,7 @@ impl Pallet { let vec = Consensus::::get(netuid); vec.get(uid as usize).copied().unwrap_or(0) } - pub fn get_incentive_for_uid(netuid: NetUid, uid: u16) -> u16 { + pub fn get_incentive_for_uid(netuid: NetUidStorageIndex, uid: u16) -> u16 { let vec = Incentive::::get(netuid); vec.get(uid as usize).copied().unwrap_or(0) } @@ -191,7 +191,7 @@ impl Pallet { let vec = Dividends::::get(netuid); vec.get(uid as usize).copied().unwrap_or(0) } - pub fn get_last_update_for_uid(netuid: NetUid, uid: u16) -> u64 { + pub fn get_last_update_for_uid(netuid: NetUidStorageIndex, uid: u16) -> u64 { let vec = LastUpdate::::get(netuid); vec.get(uid as usize).copied().unwrap_or(0) } @@ -576,7 +576,8 @@ impl Pallet { )); } - pub fn get_bonds_moving_average(netuid: NetUid) -> u64 { + pub fn get_bonds_moving_average(netuid_index: NetUidStorageIndex) -> u64 { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); BondsMovingAverage::::get(netuid) } pub fn set_bonds_moving_average(netuid: NetUid, bonds_moving_average: u64) { From 27ddaffc0a8d721613a90ae0751fadfacc7cd697 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Tue, 26 Aug 2025 19:18:26 -0400 Subject: [PATCH 03/39] Simplify bonds calculation (no subsubnet logic for ema) --- 
pallets/subtensor/src/epoch/run_epoch.rs | 61 +++++++++---------- pallets/subtensor/src/rpc_info/metagraph.rs | 4 +- pallets/subtensor/src/rpc_info/subnet_info.rs | 6 +- pallets/subtensor/src/tests/epoch.rs | 2 +- pallets/subtensor/src/tests/weights.rs | 34 +++++------ pallets/subtensor/src/utils/misc.rs | 3 +- 6 files changed, 52 insertions(+), 58 deletions(-) diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index fc9bbd070f..ab805a8259 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -286,7 +286,7 @@ impl Pallet { let owner_uid: Option = Self::get_owner_uid(netuid); // Access network weights row unnormalized. - let mut weights: Vec> = Self::get_weights(netuid); + let mut weights: Vec> = Self::get_weights(netuid_index); log::trace!("W: {:?}", &weights); // Mask weights that are not from permitted validators. @@ -363,7 +363,7 @@ impl Pallet { log::trace!("B: {:?}", &bonds); // Compute the Exponential Moving Average (EMA) of bonds. - ema_bonds = Self::compute_bonds(netuid_index, &weights_for_bonds, &bonds, &consensus); + ema_bonds = Self::compute_bonds(netuid, &weights_for_bonds, &bonds, &consensus); log::trace!("emaB: {:?}", &ema_bonds); // Normalize EMA bonds. @@ -397,7 +397,7 @@ impl Pallet { log::trace!("ΔB: {:?}", &bonds_delta); // Compute the Exponential Moving Average (EMA) of bonds. - ema_bonds = Self::compute_ema_bonds_normal(&bonds_delta, &bonds, netuid_index); + ema_bonds = Self::compute_ema_bonds_normal(&bonds_delta, &bonds, netuid); inplace_col_normalize(&mut ema_bonds); // sum_i b_ij = 1 log::trace!("emaB: {:?}", &ema_bonds); @@ -706,7 +706,7 @@ impl Pallet { let owner_uid: Option = Self::get_owner_uid(netuid); // Access network weights row unnormalized. 
- let mut weights: Vec> = Self::get_weights_sparse(netuid); + let mut weights: Vec> = Self::get_weights_sparse(netuid_index); log::trace!("Weights: {:?}", &weights); // Mask weights that are not from permitted validators. @@ -1608,47 +1608,42 @@ impl Pallet { } /// Output unnormalized sparse weights, input weights are assumed to be row max-upscaled in u16. - pub fn get_weights_sparse(netuid: NetUid) -> Vec> { + pub fn get_weights_sparse(netuid_index: NetUidStorageIndex) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![]; n]; - for (uid_i, weights_i) in as IterableStorageDoubleMap< - NetUidStorageIndex, - u16, - Vec<(u16, u16)>, - >>::iter_prefix(NetUidStorageIndex::from(netuid)) + for (uid_i, weights_i) in Weights::::iter_prefix(netuid_index) .filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, weight_ij) in weights_i.iter().filter(|(uid_j, _)| *uid_j < n as u16) { - weights - .get_mut(uid_i as usize) - .expect("uid_i is filtered to be less than n; qed") - .push((*uid_j, I32F32::saturating_from_num(*weight_ij))); + if let Some(row) = weights.get_mut(uid_i as usize) { + row.push((*uid_j, I32F32::saturating_from_num(*weight_ij))); + } else { + log::error!("uid_i {:?} is filtered to be less than n", uid_i); + } } } weights } /// Output unnormalized weights in [n, n] matrix, input weights are assumed to be row max-upscaled in u16. 
- pub fn get_weights(netuid: NetUid) -> Vec> { + pub fn get_weights(netuid_index: NetUidStorageIndex) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![I32F32::saturating_from_num(0.0); n]; n]; - for (uid_i, weights_vec) in as IterableStorageDoubleMap< - NetUidStorageIndex, - u16, - Vec<(u16, u16)>, - >>::iter_prefix(NetUidStorageIndex::from(netuid)) + for (uid_i, weights_vec) in Weights::::iter_prefix(netuid_index) .filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, weight_ij) in weights_vec .into_iter() .filter(|(uid_j, _)| *uid_j < n as u16) { - *weights + if let Some(cell) = weights .get_mut(uid_i as usize) - .expect("uid_i is filtered to be less than n; qed") - .get_mut(uid_j as usize) - .expect("uid_j is filtered to be less than n; qed") = - I32F32::saturating_from_num(weight_ij); + .and_then(|row| row.get_mut(uid_j as usize)) + { + *cell = I32F32::saturating_from_num(weight_ij); + } } } weights @@ -1730,8 +1725,10 @@ impl Pallet { pub fn compute_ema_bonds_normal_sparse( bonds_delta: &[Vec<(u16, I32F32)>], bonds: &[Vec<(u16, I32F32)>], - netuid: NetUidStorageIndex, + netuid_index: NetUidStorageIndex, ) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); + // Retrieve the bonds moving average for the given network ID and scale it down. let bonds_moving_average: I64F64 = I64F64::saturating_from_num(Self::get_bonds_moving_average(netuid)) @@ -1764,7 +1761,7 @@ impl Pallet { pub fn compute_ema_bonds_normal( bonds_delta: &[Vec], bonds: &[Vec], - netuid: NetUidStorageIndex, + netuid: NetUid, ) -> Vec> { // Retrieve the bonds moving average for the given network ID and scale it down. let bonds_moving_average: I64F64 = @@ -1798,13 +1795,11 @@ impl Pallet { /// # Returns: /// A vector of EMA bonds. 
pub fn compute_bonds( - netuid_index: NetUidStorageIndex, + netuid: NetUid, weights: &[Vec], // weights_for_bonds bonds: &[Vec], consensus: &[I32F32], ) -> Vec> { - let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); - // Check if Liquid Alpha is enabled, consensus is not empty, and contains non-zero values. if LiquidAlphaOn::::get(netuid) && !consensus.is_empty() @@ -1821,7 +1816,7 @@ impl Pallet { mat_ema_alpha(weights, bonds, &alphas) } else { // Liquid Alpha is disabled, compute the liquid alpha value. - let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid_index); + let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid); // Compute the Exponential Moving Average (EMA) of bonds using the calculated alpha value. mat_ema(weights, bonds, alpha) @@ -1863,7 +1858,7 @@ impl Pallet { mat_ema_alpha_sparse(weights, bonds, &alphas) } else { // Liquid Alpha is disabled, compute the liquid alpha value. - let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid_index); + let alpha: I32F32 = Self::compute_disabled_liquid_alpha(netuid); // Compute the Exponential Moving Average (EMA) of bonds using the calculated alpha value. mat_ema_sparse(weights, bonds, alpha) @@ -2018,7 +2013,7 @@ impl Pallet { clamp_value(alpha, alpha_low, alpha_high) } - pub fn compute_disabled_liquid_alpha(netuid: NetUidStorageIndex) -> I32F32 { + pub fn compute_disabled_liquid_alpha(netuid: NetUid) -> I32F32 { // Retrieve the bonds moving average for the given network ID and scale it down. 
let bonds_moving_average: I64F64 = I64F64::from_num(Self::get_bonds_moving_average(netuid)) .saturating_div(I64F64::from_num(1_000_000)); diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index 308c85cba6..bac19e5468 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -727,7 +727,7 @@ impl Pallet { liquid_alpha_enabled: Self::get_liquid_alpha_enabled(netuid), // Bonds liquid enabled. alpha_high: Self::get_alpha_values(netuid).1.into(), // Alpha param high alpha_low: Self::get_alpha_values(netuid).0.into(), // Alpha param low - bonds_moving_avg: Self::get_bonds_moving_average(NetUidStorageIndex::from(netuid)) + bonds_moving_avg: Self::get_bonds_moving_average(netuid) .into(), // Bonds moving avg // Metagraph info. @@ -1115,7 +1115,7 @@ impl Pallet { Some(SelectiveMetagraphIndex::BondsMovingAvg) => SelectiveMetagraph { netuid: netuid.into(), bonds_moving_avg: Some( - Self::get_bonds_moving_average(NetUidStorageIndex::from(netuid)).into(), + Self::get_bonds_moving_average(netuid).into(), ), ..Default::default() }, diff --git a/pallets/subtensor/src/rpc_info/subnet_info.rs b/pallets/subtensor/src/rpc_info/subnet_info.rs index 7ca8a8f948..d1e0a05419 100644 --- a/pallets/subtensor/src/rpc_info/subnet_info.rs +++ b/pallets/subtensor/src/rpc_info/subnet_info.rs @@ -4,7 +4,7 @@ use frame_support::storage::IterableStorageMap; extern crate alloc; use codec::Compact; use substrate_fixed::types::I32F32; -use subtensor_runtime_common::{NetUid, NetUidStorageIndex, TaoCurrency}; +use subtensor_runtime_common::{NetUid, TaoCurrency}; #[freeze_struct("edd6bd3273dfea76")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -286,7 +286,7 @@ impl Pallet { let target_regs_per_interval = Self::get_target_registrations_per_interval(netuid); let min_burn = Self::get_min_burn(netuid); let max_burn = Self::get_max_burn(netuid); - let bonds_moving_avg = 
Self::get_bonds_moving_average(NetUidStorageIndex::from(netuid)); + let bonds_moving_avg = Self::get_bonds_moving_average(netuid); let max_regs_per_block = Self::get_max_registrations_per_block(netuid); let serving_rate_limit = Self::get_serving_rate_limit(netuid); let max_validators = Self::get_max_allowed_validators(netuid); @@ -349,7 +349,7 @@ impl Pallet { let target_regs_per_interval = Self::get_target_registrations_per_interval(netuid); let min_burn = Self::get_min_burn(netuid); let max_burn = Self::get_max_burn(netuid); - let bonds_moving_avg = Self::get_bonds_moving_average(NetUidStorageIndex::from(netuid)); + let bonds_moving_avg = Self::get_bonds_moving_average(netuid); let max_regs_per_block = Self::get_max_registrations_per_block(netuid); let serving_rate_limit = Self::get_serving_rate_limit(netuid); let max_validators = Self::get_max_allowed_validators(netuid); diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index 07c938be98..3370f2973b 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -3712,7 +3712,7 @@ fn test_epoch_no_mask_when_commit_reveal_disabled() { for _ in 0..3 { SubtensorModule::epoch(netuid, 1.into()); assert!( - !SubtensorModule::get_weights_sparse(netuid)[0].is_empty(), + !SubtensorModule::get_weights_sparse(netuid.into())[0].is_empty(), "row visible when CR disabled" ); run_to_block(System::block_number() + tempo as u64 + 1); diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index 648befa3c1..19e085378a 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -1488,7 +1488,7 @@ fn test_set_weights_sum_larger_than_u16_max() { assert_ok!(result); // Get max-upscaled unnormalized weights. 
- let all_weights: Vec> = SubtensorModule::get_weights(netuid); + let all_weights: Vec> = SubtensorModule::get_weights(netuid.into()); let weights_set: &[I32F32] = &all_weights[neuron_uid as usize]; assert_eq!(weights_set[0], I32F32::from_num(u16::MAX)); assert_eq!(weights_set[1], I32F32::from_num(u16::MAX)); @@ -5113,7 +5113,7 @@ fn test_reveal_crv3_commits_success() { // Step epochs to run the epoch via the blockstep step_epochs(3, netuid); - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid1 as usize).cloned().unwrap_or_default(); assert!( @@ -5235,7 +5235,7 @@ fn test_reveal_crv3_commits_cannot_reveal_after_reveal_epoch() { step_epochs(3, netuid); // Verify that weights are not set - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse .get(neuron_uid1 as usize) .cloned() @@ -5270,7 +5270,7 @@ fn test_reveal_crv3_commits_cannot_reveal_after_reveal_epoch() { assert_ok!(SubtensorModule::reveal_crv3_commits(netuid)); // Verify that the weights for the neuron have not been set - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse .get(neuron_uid1 as usize) .cloned() @@ -5607,7 +5607,7 @@ fn test_reveal_crv3_commits_decryption_failure() { let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey) .expect("Failed to get neuron UID for hotkey") as usize; - let weights_matrix = SubtensorModule::get_weights(netuid); + let weights_matrix = SubtensorModule::get_weights(netuid.into()); let weights = weights_matrix.get(neuron_uid).cloned().unwrap_or_default(); assert!(weights.iter().all(|&w| w == I32F32::from_num(0))); }); @@ -5720,7 +5720,7 @@ fn 
test_reveal_crv3_commits_multiple_commits_some_fail_some_succeed() { // Verify that weights are set for hotkey1 let neuron_uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1) .expect("Failed to get neuron UID for hotkey1") as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights1 = weights_sparse.get(neuron_uid1).cloned().unwrap_or_default(); assert!( !weights1.is_empty(), @@ -5815,7 +5815,7 @@ fn test_reveal_crv3_commits_do_set_weights_failure() { // Verify that weights are not set due to `do_set_weights` failure let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey) .expect("Failed to get neuron UID for hotkey") as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid).cloned().unwrap_or_default(); assert!( weights.is_empty(), @@ -5893,7 +5893,7 @@ fn test_reveal_crv3_commits_payload_decoding_failure() { // Verify that weights are not set let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey) .expect("Failed to get neuron UID for hotkey") as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid).cloned().unwrap_or_default(); assert!( weights.is_empty(), @@ -5975,7 +5975,7 @@ fn test_reveal_crv3_commits_signature_deserialization_failure() { // Verify that weights are not set let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey) .expect("Failed to get neuron UID for hotkey") as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid).cloned().unwrap_or_default(); 
assert!( weights.is_empty(), @@ -6040,7 +6040,7 @@ fn test_reveal_crv3_commits_with_empty_commit_queue() { step_epochs(2, netuid); - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); assert!( weights_sparse.is_empty(), "Weights should be empty as there were no commits to reveal" @@ -6127,7 +6127,7 @@ fn test_reveal_crv3_commits_with_incorrect_identity_message() { // Verify that weights are not set due to decryption failure let neuron_uid = neuron_uid as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid).cloned().unwrap_or_default(); assert!( weights.is_empty(), @@ -6337,7 +6337,7 @@ fn test_reveal_crv3_commits_multiple_valid_commits_all_processed() { step_epochs(2, netuid); // ───── assertions ─────────────────────────────────────────────────── - let w_sparse = SubtensorModule::get_weights_sparse(netuid); + let w_sparse = SubtensorModule::get_weights_sparse(netuid.into()); for hk in hotkeys { let uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk).unwrap() as usize; assert!( @@ -6452,7 +6452,7 @@ fn test_reveal_crv3_commits_max_neurons() { step_epochs(2, netuid); // ───── verify weights ─────────────────────────────────────────────── - let w_sparse = SubtensorModule::get_weights_sparse(netuid); + let w_sparse = SubtensorModule::get_weights_sparse(netuid.into()); for hk in &committing_hotkeys { let uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, hk).unwrap() as usize; assert!( @@ -6682,7 +6682,7 @@ fn test_reveal_crv3_commits_hotkey_check() { // Step epochs to run the epoch via the blockstep step_epochs(3, netuid); - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid1 as 
usize).cloned().unwrap_or_default(); assert!( @@ -6799,7 +6799,7 @@ fn test_reveal_crv3_commits_hotkey_check() { // Step epochs to run the epoch via the blockstep step_epochs(3, netuid); - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid1 as usize).cloned().unwrap_or_default(); assert!( @@ -6937,7 +6937,7 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { step_block(1); // automatic reveal runs here - let weights = SubtensorModule::get_weights_sparse(netuid) + let weights = SubtensorModule::get_weights_sparse(netuid.into()) .get(uid as usize) .cloned() .unwrap_or_default(); @@ -7072,7 +7072,7 @@ fn test_reveal_crv3_commits_legacy_payload_success() { // ───────────────────────────────────── // 5 ▸ assertions // ───────────────────────────────────── - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let w1 = weights_sparse .get(uid1 as usize) .cloned() diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index 475a3c1a22..c127fa4c71 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -576,8 +576,7 @@ impl Pallet { )); } - pub fn get_bonds_moving_average(netuid_index: NetUidStorageIndex) -> u64 { - let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); + pub fn get_bonds_moving_average(netuid: NetUid) -> u64 { BondsMovingAverage::::get(netuid) } pub fn set_bonds_moving_average(netuid: NetUid, bonds_moving_average: u64) { From 9646bfc31ae809778894cdebe46f245f991bec8b Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 27 Aug 2025 12:52:24 -0400 Subject: [PATCH 04/39] Fix hotkey emission tuples --- pallets/subtensor/src/subnets/subsubnet.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git 
a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 37e20299a9..4f81f69e2e 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -214,9 +214,6 @@ impl Pallet { }) // Consolidate the hotkey emissions into a single BTreeMap .fold(BTreeMap::new(), |mut acc, (hotkey, (terms, sub_weight))| { - - println!("Hotkey: {:?}, terms: {:?}", hotkey, terms); - acc.entry(hotkey) .and_modify(|acc_terms| { acc_terms.dividend = Self::weighted_acc_u16( @@ -276,9 +273,10 @@ impl Pallet { // Remap BTreeMap back to Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> format // for processing emissions in run_coinbase + // Emission tuples ( hotkeys, server_emission, validator_emission ) aggregated .into_iter() - .map(|(hotkey, terms)| (hotkey, terms.validator_emission, terms.server_emission)) + .map(|(hotkey, terms)| (hotkey, terms.server_emission, terms.validator_emission)) .collect() } } From 1a061461b65ebbb9d96003be19bd1e9c5248ad2a Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 27 Aug 2025 15:14:04 -0400 Subject: [PATCH 05/39] Epoch refactored, all tests pass --- pallets/subtensor/src/epoch/run_epoch.rs | 42 ++++++++++----------- pallets/subtensor/src/rpc_info/metagraph.rs | 7 +--- pallets/subtensor/src/tests/epoch.rs | 9 ++++- pallets/subtensor/src/tests/uids.rs | 10 ++++- 4 files changed, 38 insertions(+), 30 deletions(-) diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index ab805a8259..56d121bc4e 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -75,7 +75,7 @@ impl Pallet { // Remap and return output .into_iter() - .map(|(hotkey, terms)| (hotkey, terms.validator_emission, terms.server_emission)) + .map(|(hotkey, terms)| (hotkey, terms.server_emission, terms.validator_emission)) .collect() } @@ -105,12 +105,13 @@ impl Pallet { .collect::>(); Incentive::::insert(netuid_index, 
incentive); - bonds.into_iter().enumerate().for_each(|(uid_usize, bond_vec)| { - let uid: u16 = uid_usize - .try_into() - .unwrap_or_default(); - Bonds::::insert(netuid_index, uid, bond_vec); - }); + bonds + .into_iter() + .enumerate() + .for_each(|(uid_usize, bond_vec)| { + let uid: u16 = uid_usize.try_into().unwrap_or_default(); + Bonds::::insert(netuid_index, uid, bond_vec); + }); } /// Persists per-netuid epoch output in state @@ -1612,8 +1613,8 @@ impl Pallet { let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![]; n]; - for (uid_i, weights_i) in Weights::::iter_prefix(netuid_index) - .filter(|(uid_i, _)| *uid_i < n as u16) + for (uid_i, weights_i) in + Weights::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, weight_ij) in weights_i.iter().filter(|(uid_j, _)| *uid_j < n as u16) { if let Some(row) = weights.get_mut(uid_i as usize) { @@ -1631,8 +1632,8 @@ impl Pallet { let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![I32F32::saturating_from_num(0.0); n]; n]; - for (uid_i, weights_vec) in Weights::::iter_prefix(netuid_index) - .filter(|(uid_i, _)| *uid_i < n as u16) + for (uid_i, weights_vec) in + Weights::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, weight_ij) in weights_vec .into_iter() @@ -1655,10 +1656,7 @@ impl Pallet { let n = Self::get_subnetwork_n(netuid) as usize; let mut bonds: Vec> = vec![vec![]; n]; for (uid_i, bonds_vec) in - Bonds::::iter_prefix( - netuid_index, - ) - .filter(|(uid_i, _)| *uid_i < n as u16) + Bonds::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, bonds_ij) in bonds_vec { bonds @@ -1676,10 +1674,7 @@ impl Pallet { let n: usize = Self::get_subnetwork_n(netuid) as usize; let mut bonds: Vec> = 
vec![vec![I32F32::saturating_from_num(0.0); n]; n]; for (uid_i, bonds_vec) in - Bonds::::iter_prefix( - netuid_index, - ) - .filter(|(uid_i, _)| *uid_i < n as u16) + Bonds::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, bonds_ij) in bonds_vec.into_iter().filter(|(uid_j, _)| *uid_j < n as u16) { *bonds @@ -1703,7 +1698,9 @@ impl Pallet { bonds } - pub fn get_bonds_sparse_fixed_proportion(netuid: NetUidStorageIndex) -> Vec> { + pub fn get_bonds_sparse_fixed_proportion( + netuid: NetUidStorageIndex, + ) -> Vec> { let mut bonds = Self::get_bonds_sparse(netuid); bonds.iter_mut().for_each(|bonds_row| { bonds_row @@ -2064,7 +2061,10 @@ impl Pallet { Ok(()) } - pub fn do_reset_bonds(netuid_index: NetUidStorageIndex, account_id: &T::AccountId) -> Result<(), DispatchError> { + pub fn do_reset_bonds( + netuid_index: NetUidStorageIndex, + account_id: &T::AccountId, + ) -> Result<(), DispatchError> { let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); // check bonds reset enabled for this subnet diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index bac19e5468..e65ddf0696 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -727,8 +727,7 @@ impl Pallet { liquid_alpha_enabled: Self::get_liquid_alpha_enabled(netuid), // Bonds liquid enabled. alpha_high: Self::get_alpha_values(netuid).1.into(), // Alpha param high alpha_low: Self::get_alpha_values(netuid).0.into(), // Alpha param low - bonds_moving_avg: Self::get_bonds_moving_average(netuid) - .into(), // Bonds moving avg + bonds_moving_avg: Self::get_bonds_moving_average(netuid).into(), // Bonds moving avg // Metagraph info. 
hotkeys, // hotkey per UID @@ -1114,9 +1113,7 @@ impl Pallet { }, Some(SelectiveMetagraphIndex::BondsMovingAvg) => SelectiveMetagraph { netuid: netuid.into(), - bonds_moving_avg: Some( - Self::get_bonds_moving_average(netuid).into(), - ), + bonds_moving_avg: Some(Self::get_bonds_moving_average(netuid).into()), ..Default::default() }, diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index 8d1b04351c..af72ac6924 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -2464,6 +2464,7 @@ fn test_blocks_since_last_step() { }); } +/// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::epoch::test_can_set_self_weight_as_subnet_owner --exact --show-output #[test] fn test_can_set_self_weight_as_subnet_owner() { new_test_ext(1).execute_with(|| { @@ -2510,8 +2511,12 @@ fn test_can_set_self_weight_as_subnet_owner() { // hotkey_emission is [(hotkey, incentive, dividend)] assert_eq!(hotkey_emission.len(), 2); - assert_eq!(hotkey_emission[0].0, subnet_owner_hotkey); - assert_eq!(hotkey_emission[1].0, other_hotkey); + assert!( + hotkey_emission + .iter() + .any(|(hk, _, _)| *hk == subnet_owner_hotkey) + ); + assert!(hotkey_emission.iter().any(|(hk, _, _)| *hk == other_hotkey)); log::debug!("hotkey_emission: {hotkey_emission:?}"); // Both should have received incentive emission diff --git a/pallets/subtensor/src/tests/uids.rs b/pallets/subtensor/src/tests/uids.rs index 74fb074169..4317337ffd 100644 --- a/pallets/subtensor/src/tests/uids.rs +++ b/pallets/subtensor/src/tests/uids.rs @@ -145,7 +145,10 @@ fn test_replace_neuron() { assert_eq!(axon_info.ip_type, 0); // Check bonds are cleared. 
- assert_eq!(Bonds::::get(NetUidStorageIndex::from(netuid), neuron_uid), vec![]); + assert_eq!( + Bonds::::get(NetUidStorageIndex::from(netuid), neuron_uid), + vec![] + ); assert_eq!(AssociatedEvmAddress::::get(netuid, neuron_uid), None); }); } @@ -214,7 +217,10 @@ fn test_bonds_cleared_on_replace() { assert_eq!(curr_hotkey.unwrap(), new_hotkey_account_id); // Check bonds are cleared. - assert_eq!(Bonds::::get(NetUidStorageIndex::from(netuid), neuron_uid), vec![]); + assert_eq!( + Bonds::::get(NetUidStorageIndex::from(netuid), neuron_uid), + vec![] + ); assert_eq!(AssociatedEvmAddress::::get(netuid, neuron_uid), None); }); } From 842dda4437fe9ade9f69cecbd1e003f7c5f71f9b Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 27 Aug 2025 15:23:36 -0400 Subject: [PATCH 06/39] happy clippy --- pallets/subtensor/src/epoch/run_epoch.rs | 22 ++++++++-------------- pallets/subtensor/src/subnets/subsubnet.rs | 11 ++++++----- 2 files changed, 14 insertions(+), 19 deletions(-) diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 56d121bc4e..2288cec48c 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -69,8 +69,8 @@ impl Pallet { let output = Self::epoch_subsubnet(netuid, SubId::MAIN, rao_emission); // Persist values in legacy format - Self::persist_subsub_epoch_terms(netuid, SubId::MAIN, &output.as_map()); - Self::persist_netuid_epoch_terms(netuid, &output.as_map()); + Self::persist_subsub_epoch_terms(netuid, SubId::MAIN, output.as_map()); + Self::persist_netuid_epoch_terms(netuid, output.as_map()); // Remap and return output @@ -735,20 +735,14 @@ impl Pallet { let mut commit_blocks: Vec = vec![u64::MAX; n as usize]; // MAX ⇒ “no active commit” // helper: hotkey → uid - let uid_of = |acct: &T::AccountId| -> Option { - if let Some(terms) = terms_map.get(acct) { - Some(terms.uid) - } else { - None - } - }; + let uid_of = |acct: &T::AccountId| terms_map.get(acct).map(|t| 
t.uid); // ---------- v2 ------------------------------------------------------ for (who, q) in WeightCommits::::iter_prefix(netuid_index) { for (_, cb, _, _) in q.iter() { if !Self::is_commit_expired(netuid, *cb) { - if let Some(i) = uid_of(&who) { - commit_blocks[i] = commit_blocks[i].min(*cb); + if let Some(cell) = uid_of(&who).and_then(|i| commit_blocks.get_mut(i)) { + *cell = (*cell).min(*cb); } break; // earliest active found } @@ -759,8 +753,8 @@ impl Pallet { for (_epoch, q) in TimelockedWeightCommits::::iter_prefix(netuid) { for (who, cb, ..) in q.iter() { if !Self::is_commit_expired(netuid, *cb) { - if let Some(i) = uid_of(who) { - commit_blocks[i] = commit_blocks[i].min(*cb); + if let Some(cell) = uid_of(who).and_then(|i| commit_blocks.get_mut(i)) { + *cell = (*cell).min(*cb); } } } @@ -1620,7 +1614,7 @@ impl Pallet { if let Some(row) = weights.get_mut(uid_i as usize) { row.push((*uid_j, I32F32::saturating_from_num(*weight_ij))); } else { - log::error!("uid_i {:?} is filtered to be less than n", uid_i); + log::error!("uid_i {uid_i:?} is filtered to be less than n"); } } } diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 4f81f69e2e..12f80a96e0 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -158,7 +158,9 @@ impl Pallet { u64::from(alpha).saturating_sub(per_subsubnet.saturating_mul(subsubnet_count)); let mut result = vec![AlphaCurrency::from(per_subsubnet); subsubnet_count as usize]; - result[0] = result[0].saturating_add(AlphaCurrency::from(rounding_err)); + if let Some(cell) = result.first_mut() { + *cell = cell.saturating_add(AlphaCurrency::from(rounding_err)); + } result } @@ -199,7 +201,7 @@ impl Pallet { // Run epoch function on the subsubnet emission let epoch_output = Self::epoch_subsubnet(netuid, sub_id, sub_emission); - Self::persist_subsub_epoch_terms(netuid, sub_id, &epoch_output.as_map()); + 
Self::persist_subsub_epoch_terms(netuid, sub_id, epoch_output.as_map()); // Calculate subsubnet weight from the split emission (not the other way because preserving // emission accuracy is the priority) @@ -236,7 +238,7 @@ impl Pallet { terms.stake_weight, sub_weight, ); - acc_terms.active = acc_terms.active | terms.active; + acc_terms.active |= terms.active; acc_terms.emission = Self::weighted_acc_alpha( acc_terms.emission, terms.emission, @@ -261,8 +263,7 @@ impl Pallet { terms.validator_trust, sub_weight, ); - acc_terms.new_validator_permit = - acc_terms.new_validator_permit | terms.new_validator_permit; + acc_terms.new_validator_permit |= terms.new_validator_permit; }) .or_insert(terms); acc From 4e1e7bf9a428f2814ec71f62a649de71ebfc1d53 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 27 Aug 2025 15:42:35 -0400 Subject: [PATCH 07/39] Add state cleanup on subsubnet reduction, cleanup --- pallets/subtensor/src/epoch/run_epoch.rs | 493 --------------------- pallets/subtensor/src/subnets/subsubnet.rs | 32 +- 2 files changed, 19 insertions(+), 506 deletions(-) diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 2288cec48c..9c6377601e 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -1083,499 +1083,6 @@ impl Pallet { EpochOutput(terms_map) } - // Legacy epoch fn - // #[allow(clippy::indexing_slicing)] - // pub fn epoch( - // netuid: NetUid, - // rao_emission: AlphaCurrency, - // ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { - // // Get subnetwork size. - // let n = Self::get_subnetwork_n(netuid); - // log::trace!("Number of Neurons in Network: {n:?}"); - - // // ====================== - // // == Active & updated == - // // ====================== - - // // Get current block. - // let current_block: u64 = Self::get_current_block_as_u64(); - // log::trace!("current_block: {current_block:?}"); - - // // Get tempo. 
- // let tempo: u64 = Self::get_tempo(netuid).into(); - // log::trace!("tempo:\n{tempo:?}\n"); - - // // Get activity cutoff. - // let activity_cutoff: u64 = Self::get_activity_cutoff(netuid) as u64; - // log::trace!("activity_cutoff: {activity_cutoff:?}"); - - // // Last update vector. - // let last_update: Vec = Self::get_last_update(netuid); - // log::trace!("Last update: {:?}", &last_update); - - // // Inactive mask. - // let inactive: Vec = last_update - // .iter() - // .map(|updated| updated.saturating_add(activity_cutoff) < current_block) - // .collect(); - // log::debug!("Inactive: {:?}", inactive.clone()); - - // // Logical negation of inactive. - // let active: Vec = inactive.iter().map(|&b| !b).collect(); - - // // Block at registration vector (block when each neuron was most recently registered). - // let block_at_registration: Vec = Self::get_block_at_registration(netuid); - // log::trace!("Block at registration: {:?}", &block_at_registration); - - // // =========== - // // == Stake == - // // =========== - - // let hotkeys: Vec<(u16, T::AccountId)> = - // as IterableStorageDoubleMap>::iter_prefix(netuid) - // .collect(); - // log::debug!("hotkeys: {:?}", &hotkeys); - - // // Access network stake as normalized vector. - // let (total_stake, _alpha_stake, _tao_stake): (Vec, Vec, Vec) = - // Self::get_stake_weights_for_network(netuid); - - // // Get the minimum stake required. - // let min_stake = Self::get_stake_threshold(); - - // // Set stake of validators that doesn't meet the staking threshold to 0 as filter. 
- // let mut filtered_stake: Vec = total_stake - // .iter() - // .map(|&s| { - // if fixed64_to_u64(s) < min_stake { - // return I64F64::from(0); - // } - // s - // }) - // .collect(); - // log::debug!("Filtered stake: {:?}", &filtered_stake); - - // inplace_normalize_64(&mut filtered_stake); - // let stake: Vec = vec_fixed64_to_fixed32(filtered_stake); - // log::debug!("Normalised Stake: {:?}", &stake); - - // // ======================= - // // == Validator permits == - // // ======================= - - // // Get current validator permits. - // let validator_permits: Vec = Self::get_validator_permit(netuid); - // log::trace!("validator_permits: {validator_permits:?}"); - - // // Logical negation of validator_permits. - // let validator_forbids: Vec = validator_permits.iter().map(|&b| !b).collect(); - - // // Get max allowed validators. - // let max_allowed_validators: u16 = Self::get_max_allowed_validators(netuid); - // log::trace!("max_allowed_validators: {max_allowed_validators:?}"); - - // // Get new validator permits. - // let new_validator_permits: Vec = - // is_topk_nonzero(&stake, max_allowed_validators as usize); - // log::trace!("new_validator_permits: {new_validator_permits:?}"); - - // // ================== - // // == Active Stake == - // // ================== - - // let mut active_stake: Vec = stake.clone(); - - // // Remove inactive stake. - // inplace_mask_vector(&inactive, &mut active_stake); - - // // Remove non-validator stake. - // inplace_mask_vector(&validator_forbids, &mut active_stake); - - // // Normalize active stake. - // inplace_normalize(&mut active_stake); - // log::trace!("Active Stake: {:?}", &active_stake); - - // // ============= - // // == Weights == - // // ============= - - // let owner_uid: Option = Self::get_owner_uid(netuid); - - // // Access network weights row unnormalized. 
- // let mut weights: Vec> = Self::get_weights_sparse(netuid); - // log::trace!("Weights: {:?}", &weights); - - // // Mask weights that are not from permitted validators. - // weights = mask_rows_sparse(&validator_forbids, &weights); - // log::trace!("Weights (permit): {:?}", &weights); - - // // Remove self-weight by masking diagonal; keep owner_uid self-weight. - // if let Some(owner_uid) = owner_uid { - // weights = mask_diag_sparse_except_index(&weights, owner_uid); - // } else { - // weights = mask_diag_sparse(&weights); - // } - // log::trace!("Weights (permit+diag): {:?}", &weights); - - // // Remove weights referring to deregistered neurons. - // weights = vec_mask_sparse_matrix( - // &weights, - // &last_update, - // &block_at_registration, - // &|updated, registered| updated <= registered, - // ); - // log::trace!("Weights (permit+diag+outdate): {:?}", &weights); - - // if Self::get_commit_reveal_weights_enabled(netuid) { - // let mut commit_blocks: Vec = vec![u64::MAX; n as usize]; // MAX ⇒ “no active commit” - - // // helper: hotkey → uid - // let uid_of = |acct: &T::AccountId| -> Option { - // hotkeys - // .iter() - // .find(|(_, a)| a == acct) - // .map(|(uid, _)| *uid as usize) - // }; - - // // ---------- v2 ------------------------------------------------------ - // for (who, q) in WeightCommits::::iter_prefix(netuid) { - // for (_, cb, _, _) in q.iter() { - // if !Self::is_commit_expired(netuid, *cb) { - // if let Some(i) = uid_of(&who) { - // commit_blocks[i] = commit_blocks[i].min(*cb); - // } - // break; // earliest active found - // } - // } - // } - - // // ---------- v3 ------------------------------------------------------ - // for (_epoch, q) in TimelockedWeightCommits::::iter_prefix(netuid) { - // for (who, cb, ..) 
in q.iter() { - // if !Self::is_commit_expired(netuid, *cb) { - // if let Some(i) = uid_of(who) { - // commit_blocks[i] = commit_blocks[i].min(*cb); - // } - // } - // } - // } - - // weights = vec_mask_sparse_matrix( - // &weights, - // &commit_blocks, - // &block_at_registration, - // &|cb, reg| cb < reg, - // ); - - // log::trace!( - // "Commit-reveal column mask applied ({} masked rows)", - // commit_blocks.iter().filter(|&&cb| cb != u64::MAX).count() - // ); - // } - - // // Normalize remaining weights. - // inplace_row_normalize_sparse(&mut weights); - // log::trace!("Weights (mask+norm): {:?}", &weights); - - // // ================================ - // // == Consensus, Validator Trust == - // // ================================ - - // // Compute preranks: r_j = SUM(i) w_ij * s_i - // let preranks: Vec = matmul_sparse(&weights, &active_stake, n); - // log::trace!("Ranks (before): {:?}", &preranks); - - // // Consensus majority ratio, e.g. 51%. - // let kappa: I32F32 = Self::get_float_kappa(netuid); - // // Calculate consensus as stake-weighted median of weights. - // let consensus: Vec = weighted_median_col_sparse(&active_stake, &weights, n, kappa); - // log::trace!("Consensus: {:?}", &consensus); - - // // Clip weights at majority consensus. - // let clipped_weights: Vec> = col_clip_sparse(&weights, &consensus); - // log::trace!("Clipped Weights: {:?}", &clipped_weights); - - // // Calculate validator trust as sum of clipped weights set by validator. - // let validator_trust: Vec = row_sum_sparse(&clipped_weights); - // log::trace!("Validator Trust: {:?}", &validator_trust); - - // // ============================= - // // == Ranks, Trust, Incentive == - // // ============================= - - // // Compute ranks: r_j = SUM(i) w_ij * s_i. - // let mut ranks: Vec = matmul_sparse(&clipped_weights, &active_stake, n); - // log::trace!("Ranks (after): {:?}", &ranks); - - // // Compute server trust: ratio of rank after vs. rank before. 
- // let trust: Vec = vecdiv(&ranks, &preranks); // range: I32F32(0, 1) - // log::trace!("Trust: {:?}", &trust); - - // inplace_normalize(&mut ranks); // range: I32F32(0, 1) - // let incentive: Vec = ranks.clone(); - // log::trace!("Incentive (=Rank): {:?}", &incentive); - - // // ========================= - // // == Bonds and Dividends == - // // ========================= - - // // Get validator bonds penalty in [0, 1]. - // let bonds_penalty: I32F32 = Self::get_float_bonds_penalty(netuid); - // // Calculate weights for bonds, apply bonds penalty to weights. - // // bonds_penalty = 0: weights_for_bonds = weights.clone() - // // bonds_penalty = 1: weights_for_bonds = clipped_weights.clone() - // let weights_for_bonds: Vec> = - // interpolate_sparse(&weights, &clipped_weights, n, bonds_penalty); - - // let mut dividends: Vec; - // let mut ema_bonds: Vec>; - // if Yuma3On::::get(netuid) { - // // Access network bonds. - // let mut bonds = Self::get_bonds_sparse_fixed_proportion(netuid); - // log::trace!("Bonds: {:?}", &bonds); - - // // Remove bonds referring to neurons that have registered since last tempo. - // // Mask if: the last tempo block happened *before* the registration block - // // ==> last_tempo <= registered - // let last_tempo: u64 = current_block.saturating_sub(tempo); - // bonds = scalar_vec_mask_sparse_matrix( - // &bonds, - // last_tempo, - // &block_at_registration, - // &|last_tempo, registered| last_tempo <= registered, - // ); - // log::trace!("Bonds: (mask) {:?}", &bonds); - - // // Compute the Exponential Moving Average (EMA) of bonds. - // log::trace!("weights_for_bonds: {:?}", &weights_for_bonds); - // ema_bonds = Self::compute_bonds_sparse(netuid, &weights_for_bonds, &bonds, &consensus); - // log::trace!("emaB: {:?}", &ema_bonds); - - // // Normalize EMA bonds. 
- // let mut ema_bonds_norm = ema_bonds.clone(); - // inplace_col_normalize_sparse(&mut ema_bonds_norm, n); // sum_i b_ij = 1 - // log::trace!("emaB norm: {:?}", &ema_bonds_norm); - - // // # === Dividend Calculation=== - // let total_bonds_per_validator: Vec = - // row_sum_sparse(&mat_vec_mul_sparse(&ema_bonds_norm, &incentive)); - // log::trace!( - // "total_bonds_per_validator: {:?}", - // &total_bonds_per_validator - // ); - - // dividends = vec_mul(&total_bonds_per_validator, &active_stake); - // inplace_normalize(&mut dividends); - // log::trace!("Dividends: {:?}", ÷nds); - // } else { - // // original Yuma - liquid alpha disabled - // // Access network bonds. - // let mut bonds: Vec> = Self::get_bonds_sparse(netuid); - // log::trace!("B: {:?}", &bonds); - - // // Remove bonds referring to neurons that have registered since last tempo. - // // Mask if: the last tempo block happened *before* the registration block - // // ==> last_tempo <= registered - // let last_tempo: u64 = current_block.saturating_sub(tempo); - // bonds = scalar_vec_mask_sparse_matrix( - // &bonds, - // last_tempo, - // &block_at_registration, - // &|last_tempo, registered| last_tempo <= registered, - // ); - // log::trace!("B (outdatedmask): {:?}", &bonds); - - // // Normalize remaining bonds: sum_i b_ij = 1. - // inplace_col_normalize_sparse(&mut bonds, n); - // log::trace!("B (mask+norm): {:?}", &bonds); - - // // Compute bonds delta column normalized. - // let mut bonds_delta: Vec> = - // row_hadamard_sparse(&weights_for_bonds, &active_stake); // ΔB = W◦S (outdated W masked) - // log::trace!("ΔB: {:?}", &bonds_delta); - - // // Normalize bonds delta. - // inplace_col_normalize_sparse(&mut bonds_delta, n); // sum_i b_ij = 1 - // log::trace!("ΔB (norm): {:?}", &bonds_delta); - - // // Compute the Exponential Moving Average (EMA) of bonds. - // ema_bonds = Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid); - // // Normalize EMA bonds. 
- // inplace_col_normalize_sparse(&mut ema_bonds, n); // sum_i b_ij = 1 - // log::trace!("Exponential Moving Average Bonds: {:?}", &ema_bonds); - - // // Compute dividends: d_i = SUM(j) b_ij * inc_j. - // // range: I32F32(0, 1) - // dividends = matmul_transpose_sparse(&ema_bonds, &incentive); - // inplace_normalize(&mut dividends); - // log::trace!("Dividends: {:?}", ÷nds); - - // // Column max-upscale EMA bonds for storage: max_i w_ij = 1. - // inplace_col_max_upscale_sparse(&mut ema_bonds, n); - // } - - // // ================================= - // // == Emission and Pruning scores == - // // ================================= - - // // Compute normalized emission scores. range: I32F32(0, 1) - // let combined_emission: Vec = incentive - // .iter() - // .zip(dividends.clone()) - // .map(|(ii, di)| ii.saturating_add(di)) - // .collect(); - // let emission_sum: I32F32 = combined_emission.iter().sum(); - - // let mut normalized_server_emission: Vec = incentive.clone(); // Servers get incentive. - // let mut normalized_validator_emission: Vec = dividends.clone(); // Validators get dividends. - // let mut normalized_combined_emission: Vec = combined_emission.clone(); - // // Normalize on the sum of incentive + dividends. - // inplace_normalize_using_sum(&mut normalized_server_emission, emission_sum); - // inplace_normalize_using_sum(&mut normalized_validator_emission, emission_sum); - // inplace_normalize(&mut normalized_combined_emission); - - // // If emission is zero, replace emission with normalized stake. 
- // if emission_sum == I32F32::from(0) { - // // no weights set | outdated weights | self_weights - // if is_zero(&active_stake) { - // // no active stake - // normalized_validator_emission.clone_from(&stake); // do not mask inactive, assumes stake is normalized - // normalized_combined_emission.clone_from(&stake); - // } else { - // normalized_validator_emission.clone_from(&active_stake); // emission proportional to inactive-masked normalized stake - // normalized_combined_emission.clone_from(&active_stake); - // } - // } - - // // Compute rao based emission scores. range: I96F32(0, rao_emission) - // let float_rao_emission: I96F32 = I96F32::saturating_from_num(rao_emission); - - // let server_emission: Vec = normalized_server_emission - // .iter() - // .map(|se: &I32F32| I96F32::saturating_from_num(*se).saturating_mul(float_rao_emission)) - // .collect(); - // let server_emission: Vec = server_emission - // .iter() - // .map(|e: &I96F32| e.saturating_to_num::().into()) - // .collect(); - - // let validator_emission: Vec = normalized_validator_emission - // .iter() - // .map(|ve: &I32F32| I96F32::saturating_from_num(*ve).saturating_mul(float_rao_emission)) - // .collect(); - // let validator_emission: Vec = validator_emission - // .iter() - // .map(|e: &I96F32| e.saturating_to_num::().into()) - // .collect(); - - // // Only used to track emission in storage. 
- // let combined_emission: Vec = normalized_combined_emission - // .iter() - // .map(|ce: &I32F32| I96F32::saturating_from_num(*ce).saturating_mul(float_rao_emission)) - // .collect(); - // let combined_emission: Vec = combined_emission - // .iter() - // .map(|e: &I96F32| AlphaCurrency::from(e.saturating_to_num::())) - // .collect(); - - // log::trace!( - // "Normalized Server Emission: {:?}", - // &normalized_server_emission - // ); - // log::trace!("Server Emission: {:?}", &server_emission); - // log::trace!( - // "Normalized Validator Emission: {:?}", - // &normalized_validator_emission - // ); - // log::trace!("Validator Emission: {:?}", &validator_emission); - // log::trace!( - // "Normalized Combined Emission: {:?}", - // &normalized_combined_emission - // ); - // log::trace!("Combined Emission: {:?}", &combined_emission); - - // // Set pruning scores using combined emission scores. - // let pruning_scores: Vec = normalized_combined_emission.clone(); - // log::trace!("Pruning Scores: {:?}", &pruning_scores); - - // // =================== - // // == Value storage == - // // =================== - // let cloned_stake_weight: Vec = stake - // .iter() - // .map(|xi| fixed_proportion_to_u16(*xi)) - // .collect::>(); - // let cloned_emission = combined_emission.clone(); - // let cloned_ranks: Vec = ranks - // .iter() - // .map(|xi| fixed_proportion_to_u16(*xi)) - // .collect::>(); - // let cloned_trust: Vec = trust - // .iter() - // .map(|xi| fixed_proportion_to_u16(*xi)) - // .collect::>(); - // let cloned_consensus: Vec = consensus - // .iter() - // .map(|xi| fixed_proportion_to_u16(*xi)) - // .collect::>(); - // let cloned_incentive: Vec = incentive - // .iter() - // .map(|xi| fixed_proportion_to_u16(*xi)) - // .collect::>(); - // let cloned_dividends: Vec = dividends - // .iter() - // .map(|xi| fixed_proportion_to_u16(*xi)) - // .collect::>(); - // let cloned_pruning_scores: Vec = vec_max_upscale_to_u16(&pruning_scores); - // let cloned_validator_trust: Vec = 
validator_trust - // .iter() - // .map(|xi| fixed_proportion_to_u16(*xi)) - // .collect::>(); - // StakeWeight::::insert(netuid, cloned_stake_weight.clone()); - // Active::::insert(netuid, active.clone()); - // Emission::::insert(netuid, cloned_emission); - // Rank::::insert(netuid, cloned_ranks); - // Trust::::insert(netuid, cloned_trust); - // Consensus::::insert(netuid, cloned_consensus); - // Incentive::::insert(netuid, cloned_incentive); - // Dividends::::insert(netuid, cloned_dividends); - // PruningScores::::insert(netuid, cloned_pruning_scores); - // ValidatorTrust::::insert(netuid, cloned_validator_trust); - // ValidatorPermit::::insert(netuid, new_validator_permits.clone()); - - // new_validator_permits - // .iter() - // .zip(validator_permits) - // .zip(ema_bonds) - // .enumerate() - // .for_each(|(i, ((new_permit, validator_permit), ema_bond))| { - // // Set bonds only if uid retains validator permit, otherwise clear bonds. - // if *new_permit { - // let new_bonds_row: Vec<(u16, u16)> = ema_bond - // .iter() - // .map(|(j, value)| (*j, fixed_proportion_to_u16(*value))) - // .collect(); - // Bonds::::insert(netuid, i as u16, new_bonds_row); - // } else if validator_permit { - // // Only overwrite the intersection. 
- // let new_empty_bonds_row: Vec<(u16, u16)> = vec![]; - // Bonds::::insert(netuid, i as u16, new_empty_bonds_row); - // } - // }); - - // // Emission tuples ( hotkeys, server_emission, validator_emission ) - // hotkeys - // .into_iter() - // .map(|(uid_i, hotkey)| { - // ( - // hotkey, - // server_emission[uid_i as usize], - // validator_emission[uid_i as usize], - // ) - // }) - // .collect() - // } - pub fn get_float_rho(netuid: NetUid) -> I32F32 { I32F32::saturating_from_num(Self::get_rho(netuid)) } diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 12f80a96e0..9452cbf09f 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -124,19 +124,25 @@ impl Pallet { let new_count = desired_count.max(min_possible_count); if old_count > new_count { - todo!(); - // Cleanup weights - // Cleanup StakeWeight - // Cleanup Active - // Cleanup Emission - // Cleanup Rank - // Cleanup Trust - // Cleanup Consensus - // Cleanup Incentive - // Cleanup Dividends - // Cleanup PruningScores - // Cleanup ValidatorTrust - // Cleanup ValidatorPermit + for subid in new_count..old_count { + let netuid_index = + Self::get_subsubnet_storage_index(*netuid, SubId::from(subid)); + + // Cleanup Weights + let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup Incentive + Incentive::::remove(netuid_index); + + // Cleanup LastUpdate + LastUpdate::::remove(netuid_index); + + // Cleanup Bonds + let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup WeightCommits + let _ = WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + } } SubsubnetCountCurrent::::insert(netuid, SubId::from(new_count)); From fff6f0b7423463e1c1c91010b3f1fa127c59a61e Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 27 Aug 2025 21:40:14 +0000 Subject: [PATCH 08/39] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 34 
+++++++++++----------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index ad9fc8571c..fef807ff4d 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -78,7 +78,7 @@ mod dispatches { /// - Attempting to set weights with max value exceeding limit. #[pallet::call_index(0)] #[pallet::weight((Weight::from_parts(15_540_000_000, 0) - .saturating_add(T::DbWeight::get().reads(4111)) + .saturating_add(T::DbWeight::get().reads(4112_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn set_weights( origin: OriginFor, @@ -197,9 +197,9 @@ mod dispatches { /// - On failure for each failed item in the batch. /// #[pallet::call_index(80)] - #[pallet::weight((Weight::from_parts(95_160_000, 0) - .saturating_add(T::DbWeight::get().reads(14)) - .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(19_180_000, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(0_u64)), DispatchClass::Normal, Pays::No))] pub fn batch_set_weights( origin: OriginFor, netuids: Vec>, @@ -229,8 +229,8 @@ mod dispatches { /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. /// #[pallet::call_index(96)] - #[pallet::weight((Weight::from_parts(55_130_000, 0) - .saturating_add(T::DbWeight::get().reads(7)) + #[pallet::weight((Weight::from_parts(67_770_000, 0) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_weights( origin: T::RuntimeOrigin, @@ -295,8 +295,8 @@ mod dispatches { /// - On failure for each failed item in the batch. 
/// #[pallet::call_index(100)] - #[pallet::weight((Weight::from_parts(82_010_000, 0) - .saturating_add(T::DbWeight::get().reads(8)) + #[pallet::weight((Weight::from_parts(106_600_000, 0) + .saturating_add(T::DbWeight::get().reads(10_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn batch_commit_weights( origin: OriginFor, @@ -345,7 +345,7 @@ mod dispatches { /// #[pallet::call_index(97)] #[pallet::weight((Weight::from_parts(122_000_000, 0) - .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().reads(17_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn reveal_weights( origin: T::RuntimeOrigin, @@ -442,7 +442,7 @@ mod dispatches { /// #[pallet::call_index(99)] #[pallet::weight((Weight::from_parts(77_750_000, 0) - .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_crv3_weights( origin: T::RuntimeOrigin, @@ -537,7 +537,7 @@ mod dispatches { /// - The input vectors are of mismatched lengths. 
#[pallet::call_index(98)] #[pallet::weight((Weight::from_parts(412_000_000, 0) - .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().reads(17_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)), DispatchClass::Normal, Pays::No))] pub fn batch_reveal_weights( origin: T::RuntimeOrigin, @@ -1095,7 +1095,7 @@ mod dispatches { /// #[pallet::call_index(6)] #[pallet::weight((Weight::from_parts(197_900_000, 0) - .saturating_add(T::DbWeight::get().reads(26)) + .saturating_add(T::DbWeight::get().reads(27_u64)) .saturating_add(T::DbWeight::get().writes(23)), DispatchClass::Normal, Pays::Yes))] pub fn register( origin: OriginFor, @@ -1112,7 +1112,7 @@ mod dispatches { /// Register the hotkey to root network #[pallet::call_index(62)] #[pallet::weight((Weight::from_parts(111_700_000, 0) - .saturating_add(T::DbWeight::get().reads(23)) + .saturating_add(T::DbWeight::get().reads(24_u64)) .saturating_add(T::DbWeight::get().writes(20)), DispatchClass::Normal, Pays::No))] pub fn root_register(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_root_register(origin, hotkey) @@ -1130,7 +1130,7 @@ mod dispatches { /// User register a new subnetwork via burning token #[pallet::call_index(7)] #[pallet::weight((Weight::from_parts(354_200_000, 0) - .saturating_add(T::DbWeight::get().reads(49)) + .saturating_add(T::DbWeight::get().reads(50_u64)) .saturating_add(T::DbWeight::get().writes(43)), DispatchClass::Normal, Pays::Yes))] pub fn burned_register( origin: OriginFor, @@ -1399,7 +1399,7 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(59)] #[pallet::weight((Weight::from_parts(235_400_000, 0) - .saturating_add(T::DbWeight::get().reads(36)) + .saturating_add(T::DbWeight::get().reads(37_u64)) .saturating_add(T::DbWeight::get().writes(52)), DispatchClass::Normal, Pays::No))] pub fn register_network(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_register_network(origin, &hotkey, 1, None) @@ 
-1744,7 +1744,7 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(79)] #[pallet::weight((Weight::from_parts(234_200_000, 0) - .saturating_add(T::DbWeight::get().reads(35)) + .saturating_add(T::DbWeight::get().reads(36_u64)) .saturating_add(T::DbWeight::get().writes(51)), DispatchClass::Normal, Pays::No))] pub fn register_network_with_identity( origin: OriginFor, @@ -2407,7 +2407,7 @@ mod dispatches { /// - The client (bittensor-drand) version #[pallet::call_index(113)] #[pallet::weight((Weight::from_parts(64_530_000, 0) - .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_timelocked_weights( origin: T::RuntimeOrigin, From f251b010c6d79938e05da3464bc4614e4aca77e6 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 27 Aug 2025 19:02:19 -0400 Subject: [PATCH 09/39] Add test plan --- pallets/subtensor/src/subnets/subsubnet.rs | 2 +- pallets/subtensor/src/tests/subsubnet.rs | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 9452cbf09f..f2de8b347d 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -25,7 +25,7 @@ pub type BalanceOf = /// /// Changing this value will require a migration of all epoch maps. 
/// -pub const GLOBAL_MAX_SUBNET_COUNT: u16 = 1024; +pub const GLOBAL_MAX_SUBNET_COUNT: u16 = 4096; impl Pallet { pub fn get_subsubnet_storage_index(netuid: NetUid, sub_id: SubId) -> NetUidStorageIndex { diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 4a88fa0d09..73bc323829 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -4,8 +4,28 @@ clippy::unwrap_used )] +/// Test plan: +/// - [ ] Netuid index math (with SubsubnetCountCurrent limiting) +/// - [ ] Emissions are split proportionally +/// - [ ] Sum of split emissions is equal to rao_emission passed to epoch +/// - [ ] Weights can be set/commited/revealed by subsubnet +/// - [ ] Rate limiting is enforced by subsubnet +/// - [ ] Bonds are applied per subsubnet +/// - [ ] Incentives are per subsubnet +/// - [ ] Subsubnet limit can be set up to 8 (with admin pallet) +/// - [ ] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block +/// - [ ] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared +/// - [ ] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms +/// - [ ] Subnet epoch terms persist in state +/// - [ ] Subsubnet epoch terms persist in state + use super::mock::*; +#[test] +fn test_index_from_netuid_and_subnet() { + new_test_ext(1).execute_with(|| {}); +} + #[test] fn test_subsubnet_emission_proportions() { new_test_ext(1).execute_with(|| {}); From df6e32b59c56acb3de55f6ece544770b917c1997 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 28 Aug 2025 12:55:21 -0400 Subject: [PATCH 10/39] Convert TimelockedWeightCommits to be per-subnet and use NetUidStorageIndex --- .../subtensor/src/coinbase/reveal_commits.rs | 277 +++++++++--------- pallets/subtensor/src/epoch/run_epoch.rs | 2 +- pallets/subtensor/src/lib.rs | 6 +- pallets/subtensor/src/macros/events.rs | 4 +- 
.../migrate_crv3_commits_add_block.rs | 5 +- pallets/subtensor/src/subnets/subsubnet.rs | 7 + pallets/subtensor/src/subnets/weights.rs | 56 ++-- pallets/subtensor/src/tests/migration.rs | 32 +- pallets/subtensor/src/tests/subsubnet.rs | 1 - pallets/subtensor/src/tests/weights.rs | 42 ++- 10 files changed, 238 insertions(+), 194 deletions(-) diff --git a/pallets/subtensor/src/coinbase/reveal_commits.rs b/pallets/subtensor/src/coinbase/reveal_commits.rs index e7bc6dc008..d0c068303b 100644 --- a/pallets/subtensor/src/coinbase/reveal_commits.rs +++ b/pallets/subtensor/src/coinbase/reveal_commits.rs @@ -3,7 +3,7 @@ use ark_serialize::CanonicalDeserialize; use codec::Decode; use frame_support::{dispatch, traits::OriginTrait}; use scale_info::prelude::collections::VecDeque; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, SubId}; use tle::{ curves::drand::TinyBLS381, stream_ciphers::AESGCMStreamCipherProvider, @@ -44,152 +44,159 @@ impl Pallet { // Weights revealed must have been committed during epoch `cur_epoch - reveal_period`. let reveal_epoch = cur_epoch.saturating_sub(reveal_period); - // Clean expired commits - for (epoch, _) in TimelockedWeightCommits::::iter_prefix(netuid) { - if epoch < reveal_epoch { - TimelockedWeightCommits::::remove(netuid, epoch); - } - } + // All subsubnets share the same epoch, so the reveal_period/reveal_epoch are also the same + // Reveal for all subsubnets + for subid in 0..SubsubnetCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); - // No commits to reveal until at least epoch reveal_period. - if cur_epoch < reveal_period { - log::trace!("Failed to reveal commit for subnet {netuid} Too early"); - return Ok(()); - } - - let mut entries = TimelockedWeightCommits::::take(netuid, reveal_epoch); - let mut unrevealed = VecDeque::new(); - - // Keep popping items off the front of the queue until we successfully reveal a commit. 
- while let Some((who, commit_block, serialized_compresssed_commit, round_number)) = - entries.pop_front() - { - // Try to get the round number from pallet_drand. - let pulse = match pallet_drand::Pulses::::get(round_number) { - Some(p) => p, - None => { - // Round number used was not found on the chain. Skip this commit. - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} on block {commit_block} due to missing round number {round_number}; will retry every block in reveal epoch." - ); - unrevealed.push_back(( - who, - commit_block, - serialized_compresssed_commit, - round_number, - )); - continue; + // Clean expired commits + for (epoch, _) in TimelockedWeightCommits::::iter_prefix(netuid_index) { + if epoch < reveal_epoch { + TimelockedWeightCommits::::remove(netuid_index, epoch); } - }; + } - let reader = &mut &serialized_compresssed_commit[..]; - let commit = match TLECiphertext::::deserialize_compressed(reader) { - Ok(c) => c, - Err(e) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error deserializing the commit: {e:?}" - ); - continue; - } - }; - - let signature_bytes = pulse - .signature - .strip_prefix(b"0x") - .unwrap_or(&pulse.signature); - - let sig_reader = &mut &signature_bytes[..]; - let sig = match ::SignatureGroup::deserialize_compressed( - sig_reader, - ) { - Ok(s) => s, - Err(e) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error deserializing signature from drand pallet: {e:?}" - ); - continue; - } - }; + // No commits to reveal until at least epoch reveal_period. 
+ if cur_epoch < reveal_period { + log::trace!("Failed to reveal commit for subsubnet {netuid_index} Too early"); + return Ok(()); + } - let decrypted_bytes: Vec = match tld::( - commit, sig, - ) { - Ok(d) => d, - Err(e) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error decrypting the commit: {e:?}" - ); - continue; - } - }; - - // ------------------------------------------------------------------ - // Try to decode payload with the new and legacy formats. - // ------------------------------------------------------------------ - let (uids, values, version_key) = { - let mut reader_new = &decrypted_bytes[..]; - if let Ok(payload) = WeightsTlockPayload::decode(&mut reader_new) { - // Verify hotkey matches committer - let mut hk_reader = &payload.hotkey[..]; - match T::AccountId::decode(&mut hk_reader) { - Ok(decoded_hotkey) if decoded_hotkey == who => { - (payload.uids, payload.values, payload.version_key) - } - Ok(_) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to hotkey mismatch in payload" - ); - continue; - } - Err(e) => { - let mut reader_legacy = &decrypted_bytes[..]; - match LegacyWeightsTlockPayload::decode(&mut reader_legacy) { - Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), - Err(_) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error deserializing hotkey: {e:?}" - ); - continue; + let mut entries = TimelockedWeightCommits::::take(netuid_index, reveal_epoch); + let mut unrevealed = VecDeque::new(); + + // Keep popping items off the front of the queue until we successfully reveal a commit. + while let Some((who, commit_block, serialized_compresssed_commit, round_number)) = + entries.pop_front() + { + // Try to get the round number from pallet_drand. + let pulse = match pallet_drand::Pulses::::get(round_number) { + Some(p) => p, + None => { + // Round number used was not found on the chain. 
Skip this commit. + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} on block {commit_block} due to missing round number {round_number}; will retry every block in reveal epoch." + ); + unrevealed.push_back(( + who, + commit_block, + serialized_compresssed_commit, + round_number, + )); + continue; + } + }; + + let reader = &mut &serialized_compresssed_commit[..]; + let commit = match TLECiphertext::::deserialize_compressed(reader) { + Ok(c) => c, + Err(e) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error deserializing the commit: {e:?}" + ); + continue; + } + }; + + let signature_bytes = pulse + .signature + .strip_prefix(b"0x") + .unwrap_or(&pulse.signature); + + let sig_reader = &mut &signature_bytes[..]; + let sig = match ::SignatureGroup::deserialize_compressed( + sig_reader, + ) { + Ok(s) => s, + Err(e) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error deserializing signature from drand pallet: {e:?}" + ); + continue; + } + }; + + let decrypted_bytes: Vec = match tld::( + commit, sig, + ) { + Ok(d) => d, + Err(e) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error decrypting the commit: {e:?}" + ); + continue; + } + }; + + // ------------------------------------------------------------------ + // Try to decode payload with the new and legacy formats. 
+ // ------------------------------------------------------------------ + let (uids, values, version_key) = { + let mut reader_new = &decrypted_bytes[..]; + if let Ok(payload) = WeightsTlockPayload::decode(&mut reader_new) { + // Verify hotkey matches committer + let mut hk_reader = &payload.hotkey[..]; + match T::AccountId::decode(&mut hk_reader) { + Ok(decoded_hotkey) if decoded_hotkey == who => { + (payload.uids, payload.values, payload.version_key) + } + Ok(_) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to hotkey mismatch in payload" + ); + continue; + } + Err(e) => { + let mut reader_legacy = &decrypted_bytes[..]; + match LegacyWeightsTlockPayload::decode(&mut reader_legacy) { + Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), + Err(_) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error deserializing hotkey: {e:?}" + ); + continue; + } } } } - } - } else { - // Fallback to legacy payload - let mut reader_legacy = &decrypted_bytes[..]; - match LegacyWeightsTlockPayload::decode(&mut reader_legacy) { - Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), - Err(e) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error deserializing both payload formats: {e:?}" - ); - continue; + } else { + // Fallback to legacy payload + let mut reader_legacy = &decrypted_bytes[..]; + match LegacyWeightsTlockPayload::decode(&mut reader_legacy) { + Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), + Err(e) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error deserializing both payload formats: {e:?}" + ); + continue; + } } } + }; + + // ------------------------------------------------------------------ + // Apply weights + // ------------------------------------------------------------------ + if let Err(e) = 
Self::do_set_sub_weights( + T::RuntimeOrigin::signed(who.clone()), + netuid, + SubId::from(subid), + uids, + values, + version_key, + ) { + log::trace!( + "Failed to `do_set_sub_weights` for subsubnet {netuid_index} submitted by {who:?}: {e:?}" + ); + continue; } - }; - - // ------------------------------------------------------------------ - // Apply weights - // ------------------------------------------------------------------ - if let Err(e) = Self::do_set_weights( - T::RuntimeOrigin::signed(who.clone()), - netuid, - uids, - values, - version_key, - ) { - log::trace!( - "Failed to `do_set_weights` for subnet {netuid} submitted by {who:?}: {e:?}" - ); - continue; - } - Self::deposit_event(Event::TimelockedWeightsRevealed(netuid, who)); - } + Self::deposit_event(Event::TimelockedWeightsRevealed(netuid_index, who)); + } - if !unrevealed.is_empty() { - TimelockedWeightCommits::::insert(netuid, reveal_epoch, unrevealed); + if !unrevealed.is_empty() { + TimelockedWeightCommits::::insert(netuid_index, reveal_epoch, unrevealed); + } } Ok(()) diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 9c6377601e..e44ff5b7ef 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -750,7 +750,7 @@ impl Pallet { } // ---------- v3 ------------------------------------------------------ - for (_epoch, q) in TimelockedWeightCommits::::iter_prefix(netuid) { + for (_epoch, q) in TimelockedWeightCommits::::iter_prefix(netuid_index) { for (who, cb, ..) 
in q.iter() { if !Self::is_commit_expired(netuid, *cb) { if let Some(cell) = uid_of(who).and_then(|i| commit_blocks.get_mut(i)) { diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 3052105a9e..961109c200 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1689,7 +1689,7 @@ pub mod pallet { pub type TimelockedWeightCommits = StorageDoubleMap< _, Twox64Concat, - NetUid, + NetUidStorageIndex, Twox64Concat, u64, // epoch key VecDeque<( @@ -1706,7 +1706,7 @@ pub mod pallet { pub type CRV3WeightCommits = StorageDoubleMap< _, Twox64Concat, - NetUid, + NetUidStorageIndex, Twox64Concat, u64, // epoch key VecDeque<( @@ -1722,7 +1722,7 @@ pub mod pallet { pub type CRV3WeightCommitsV2 = StorageDoubleMap< _, Twox64Concat, - NetUid, + NetUidStorageIndex, Twox64Concat, u64, // epoch key VecDeque<( diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 2fc9517daf..0259863cd8 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -406,12 +406,12 @@ mod events { /// - **netuid**: The network identifier. /// - **commit_hash**: The hash representing the committed weights. /// - **reveal_round**: The round at which weights can be revealed. - TimelockedWeightsCommitted(T::AccountId, NetUid, H256, u64), + TimelockedWeightsCommitted(T::AccountId, NetUidStorageIndex, H256, u64), /// Timelocked Weights have been successfully revealed. /// /// - **netuid**: The network identifier. /// - **who**: The account ID of the user revealing the weights. 
- TimelockedWeightsRevealed(NetUid, T::AccountId), + TimelockedWeightsRevealed(NetUidStorageIndex, T::AccountId), } } diff --git a/pallets/subtensor/src/migrations/migrate_crv3_commits_add_block.rs b/pallets/subtensor/src/migrations/migrate_crv3_commits_add_block.rs index 27f2fe6d65..bf5a0bb2b5 100644 --- a/pallets/subtensor/src/migrations/migrate_crv3_commits_add_block.rs +++ b/pallets/subtensor/src/migrations/migrate_crv3_commits_add_block.rs @@ -22,9 +22,10 @@ pub fn migrate_crv3_commits_add_block() -> Weight { log::info!("Running migration '{}'", String::from_utf8_lossy(&mig_name)); // iterate over *all* (netuid, epoch, queue) triples - for (netuid, epoch, old_q) in CRV3WeightCommits::::drain() { + for (netuid_index, epoch, old_q) in CRV3WeightCommits::::drain() { total_weight = total_weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + let (netuid, _) = Pallet::::get_netuid_and_subid(netuid_index).unwrap_or_default(); let commit_block = Pallet::::get_first_block_of_epoch(netuid, epoch); // convert VecDeque<(who,cipher,rnd)> → VecDeque<(who,cb,cipher,rnd)> @@ -34,7 +35,7 @@ pub fn migrate_crv3_commits_add_block() -> Weight { .collect(); // write back under *new* storage definition - CRV3WeightCommitsV2::::insert(netuid, epoch, new_q); + CRV3WeightCommitsV2::::insert(netuid_index, epoch, new_q); } // mark as done diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index f2de8b347d..eaeadd38bd 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -142,6 +142,13 @@ impl Pallet { // Cleanup WeightCommits let _ = WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup TimelockedWeightCommits + let _ = TimelockedWeightCommits::::clear_prefix( + netuid_index, + u32::MAX, + None, + ); } } diff --git a/pallets/subtensor/src/subnets/weights.rs b/pallets/subtensor/src/subnets/weights.rs index f60d59f376..f9393cd6bd 100644 --- 
a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -334,37 +334,41 @@ impl Pallet { false => Self::get_epoch_index(netuid, cur_block), }; - TimelockedWeightCommits::::try_mutate(netuid, cur_epoch, |commits| -> DispatchResult { - // 7. Verify that the number of unrevealed commits is within the allowed limit. + TimelockedWeightCommits::::try_mutate( + netuid_index, + cur_epoch, + |commits| -> DispatchResult { + // 7. Verify that the number of unrevealed commits is within the allowed limit. - let unrevealed_commits_for_who = commits - .iter() - .filter(|(account, _, _, _)| account == &who) - .count(); - ensure!( - unrevealed_commits_for_who < 10, - Error::::TooManyUnrevealedCommits - ); + let unrevealed_commits_for_who = commits + .iter() + .filter(|(account, _, _, _)| account == &who) + .count(); + ensure!( + unrevealed_commits_for_who < 10, + Error::::TooManyUnrevealedCommits + ); - // 8. Append the new commit with calculated reveal blocks. - // Hash the commit before it is moved, for the event - let commit_hash = BlakeTwo256::hash(&commit); - commits.push_back((who.clone(), cur_block, commit, reveal_round)); + // 8. Append the new commit with calculated reveal blocks. + // Hash the commit before it is moved, for the event + let commit_hash = BlakeTwo256::hash(&commit); + commits.push_back((who.clone(), cur_block, commit, reveal_round)); - // 9. Emit the WeightsCommitted event - Self::deposit_event(Event::TimelockedWeightsCommitted( - who.clone(), - netuid, - commit_hash, - reveal_round, - )); + // 9. Emit the WeightsCommitted event + Self::deposit_event(Event::TimelockedWeightsCommitted( + who.clone(), + netuid_index, + commit_hash, + reveal_round, + )); - // 10. Update the last commit block for the hotkey's UID. - Self::set_last_update_for_uid(netuid_index, neuron_uid, commit_block); + // 10. Update the last commit block for the hotkey's UID. + Self::set_last_update_for_uid(netuid_index, neuron_uid, commit_block); - // 11. 
Return success. - Ok(()) - }) + // 11. Return success. + Ok(()) + }, + ) } /// ---- The implementation for revealing committed weights. diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index e93aab7669..f19d8dec4d 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -22,7 +22,7 @@ use sp_io::hashing::twox_128; use sp_runtime::traits::Zero; use substrate_fixed::types::I96F32; use substrate_fixed::types::extra::U2; -use subtensor_runtime_common::TaoCurrency; +use subtensor_runtime_common::{NetUidStorageIndex, TaoCurrency}; #[allow(clippy::arithmetic_side_effects)] fn close(value: u64, target: u64, eps: u64) { @@ -1063,10 +1063,17 @@ fn test_migrate_crv3_commits_add_block() { let old_queue: VecDeque<_> = VecDeque::from(vec![(who, ciphertext.clone(), round)]); - CRV3WeightCommits::::insert(netuid, epoch, old_queue.clone()); + CRV3WeightCommits::::insert( + NetUidStorageIndex::from(netuid), + epoch, + old_queue.clone(), + ); // Sanity: entry decodes under old alias - assert_eq!(CRV3WeightCommits::::get(netuid, epoch), old_queue); + assert_eq!( + CRV3WeightCommits::::get(NetUidStorageIndex::from(netuid), epoch), + old_queue + ); assert!( !HasMigrationRun::::get(MIG_NAME.to_vec()), @@ -1091,11 +1098,11 @@ fn test_migrate_crv3_commits_add_block() { // Old storage must be empty (drained) assert!( - CRV3WeightCommits::::get(netuid, epoch).is_empty(), + CRV3WeightCommits::::get(NetUidStorageIndex::from(netuid), epoch).is_empty(), "old queue should have been drained" ); - let new_q = CRV3WeightCommitsV2::::get(netuid, epoch); + let new_q = CRV3WeightCommitsV2::::get(NetUidStorageIndex::from(netuid), epoch); assert_eq!(new_q.len(), 1, "exactly one migrated element expected"); let (who2, commit_block, cipher2, round2) = new_q.front().cloned().unwrap(); @@ -1318,18 +1325,23 @@ fn test_migrate_crv3_v2_to_timelocked() { VecDeque::from(vec![(who, commit_block, ciphertext.clone(), 
round)]); // Insert under the deprecated alias - CRV3WeightCommitsV2::::insert(netuid, epoch, old_queue.clone()); + CRV3WeightCommitsV2::::insert( + NetUidStorageIndex::from(netuid), + epoch, + old_queue.clone(), + ); // Sanity: entry decodes under old alias assert_eq!( - CRV3WeightCommitsV2::::get(netuid, epoch), + CRV3WeightCommitsV2::::get(NetUidStorageIndex::from(netuid), epoch), old_queue, "pre-migration: old queue should be present" ); // Destination should be empty pre-migration assert!( - TimelockedWeightCommits::::get(netuid, epoch).is_empty(), + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), epoch) + .is_empty(), "pre-migration: destination should be empty" ); @@ -1356,12 +1368,12 @@ fn test_migrate_crv3_v2_to_timelocked() { // Old storage must be empty (drained) assert!( - CRV3WeightCommitsV2::::get(netuid, epoch).is_empty(), + CRV3WeightCommitsV2::::get(NetUidStorageIndex::from(netuid), epoch).is_empty(), "old queue should have been drained" ); // New storage must match exactly - let new_q = TimelockedWeightCommits::::get(netuid, epoch); + let new_q = TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), epoch); assert_eq!( new_q, old_queue, "migrated queue must exactly match the old queue" diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 73bc323829..7730dfcb0f 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -18,7 +18,6 @@ /// - [ ] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms /// - [ ] Subnet epoch terms persist in state /// - [ ] Subsubnet epoch terms persist in state - use super::mock::*; #[test] diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index 3b3873b7d3..a52ca67607 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -5299,7 +5299,8 @@ fn test_do_commit_crv3_weights_success() { 
let cur_epoch = SubtensorModule::get_epoch_index(netuid, SubtensorModule::get_current_block_as_u64()); - let commits = TimelockedWeightCommits::::get(netuid, cur_epoch); + let commits = + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), cur_epoch); assert_eq!(commits.len(), 1); assert_eq!(commits[0].0, hotkey); assert_eq!(commits[0].2, commit_data); @@ -6154,7 +6155,8 @@ fn test_multiple_commits_by_same_hotkey_within_limit() { let cur_epoch = SubtensorModule::get_epoch_index(netuid, SubtensorModule::get_current_block_as_u64()); - let commits = TimelockedWeightCommits::::get(netuid, cur_epoch); + let commits = + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), cur_epoch); assert_eq!( commits.len(), 10, @@ -6189,7 +6191,7 @@ fn test_reveal_crv3_commits_removes_past_epoch_commits() { let bounded_commit = vec![epoch as u8; 5].try_into().expect("bounded vec"); assert_ok!(TimelockedWeightCommits::::try_mutate( - netuid, + NetUidStorageIndex::from(netuid), epoch, |q| -> DispatchResult { q.push_back((hotkey, cur_block, bounded_commit, reveal_round)); @@ -6199,8 +6201,14 @@ fn test_reveal_crv3_commits_removes_past_epoch_commits() { } // Sanity – both epochs presently hold a commit. - assert!(!TimelockedWeightCommits::::get(netuid, past_epoch).is_empty()); - assert!(!TimelockedWeightCommits::::get(netuid, reveal_epoch).is_empty()); + assert!( + !TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), past_epoch) + .is_empty() + ); + assert!( + !TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), reveal_epoch) + .is_empty() + ); // --------------------------------------------------------------------- // Run the reveal pass WITHOUT a pulse – only expiry housekeeping runs. 
@@ -6209,13 +6217,15 @@ fn test_reveal_crv3_commits_removes_past_epoch_commits() { // past_epoch (< reveal_epoch) must be gone assert!( - TimelockedWeightCommits::::get(netuid, past_epoch).is_empty(), + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), past_epoch) + .is_empty(), "expired epoch {past_epoch} should be cleared" ); // reveal_epoch queue is *kept* because its commit could still be revealed later. assert!( - !TimelockedWeightCommits::::get(netuid, reveal_epoch).is_empty(), + !TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), reveal_epoch) + .is_empty(), "reveal-epoch {reveal_epoch} must be retained until commit can be revealed" ); }); @@ -6891,10 +6901,11 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { )); // epoch in which commit was stored - let stored_epoch = TimelockedWeightCommits::::iter_prefix(netuid) - .next() - .map(|(e, _)| e) - .expect("commit stored"); + let stored_epoch = + TimelockedWeightCommits::::iter_prefix(NetUidStorageIndex::from(netuid)) + .next() + .map(|(e, _)| e) + .expect("commit stored"); // first block of reveal epoch (commit_epoch + RP) let first_reveal_epoch = stored_epoch + SubtensorModule::get_reveal_period(netuid); @@ -6905,7 +6916,8 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { // run *one* block inside reveal epoch without pulse → commit should stay queued step_block(1); assert!( - !TimelockedWeightCommits::::get(netuid, stored_epoch).is_empty(), + !TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), stored_epoch) + .is_empty(), "commit must remain queued when pulse is missing" ); @@ -6933,7 +6945,8 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { assert!(!weights.is_empty(), "weights must be set after pulse"); assert!( - TimelockedWeightCommits::::get(netuid, stored_epoch).is_empty(), + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), stored_epoch) + .is_empty(), "queue should be empty after successful reveal" ); }); @@ -7076,7 
+7089,8 @@ fn test_reveal_crv3_commits_legacy_payload_success() { // commit should be gone assert!( - TimelockedWeightCommits::::get(netuid, commit_epoch).is_empty(), + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), commit_epoch) + .is_empty(), "commit storage should be cleaned after reveal" ); }); From 5dec4269c9b8a1df37b904fb9a70e890e7427348 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 28 Aug 2025 19:57:46 -0400 Subject: [PATCH 11/39] Add netuid index math tests --- pallets/subtensor/src/subnets/subsubnet.rs | 16 +++- pallets/subtensor/src/tests/subsubnet.rs | 104 ++++++++++++++++++--- 2 files changed, 102 insertions(+), 18 deletions(-) diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index eaeadd38bd..cca8df95db 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -27,6 +27,10 @@ pub type BalanceOf = /// pub const GLOBAL_MAX_SUBNET_COUNT: u16 = 4096; +// Theoretical maximum number of subsubnets per subnet +// GLOBAL_MAX_SUBNET_COUNT * MAX_SUBSUBNET_COUNT_PER_SUBNET should be 0x10000 +pub const MAX_SUBSUBNET_COUNT_PER_SUBNET: u8 = 16; + impl Pallet { pub fn get_subsubnet_storage_index(netuid: NetUid, sub_id: SubId) -> NetUidStorageIndex { u16::from(sub_id) @@ -36,9 +40,9 @@ impl Pallet { } pub fn get_netuid_and_subid( - sub_or_netid: NetUidStorageIndex, + netuid_index: NetUidStorageIndex, ) -> Result<(NetUid, SubId), Error> { - let maybe_netuid = u16::from(sub_or_netid).checked_rem(GLOBAL_MAX_SUBNET_COUNT); + let maybe_netuid = u16::from(netuid_index).checked_rem(GLOBAL_MAX_SUBNET_COUNT); if let Some(netuid_u16) = maybe_netuid { let netuid = NetUid::from(netuid_u16); @@ -49,7 +53,7 @@ impl Pallet { ); // Extract sub_id - let sub_id_u8 = u8::try_from(u16::from(sub_or_netid).safe_div(GLOBAL_MAX_SUBNET_COUNT)) + let sub_id_u8 = u8::try_from(u16::from(netuid_index).safe_div(GLOBAL_MAX_SUBNET_COUNT)) .map_err(|_| 
Error::::SubNetworkDoesNotExist)?; let sub_id = SubId::from(sub_id_u8); @@ -99,6 +103,12 @@ impl Pallet { Error::::InvalidValue ); + // Make sure we are not allowing numbers that will break the math + ensure!( + subsubnet_count <= SubId::from(MAX_SUBSUBNET_COUNT_PER_SUBNET), + Error::::InvalidValue + ); + SubsubnetCountDesired::::insert(netuid, subsubnet_count); Ok(()) } diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 7730dfcb0f..41c66f0da1 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -4,25 +4,99 @@ clippy::unwrap_used )] -/// Test plan: -/// - [ ] Netuid index math (with SubsubnetCountCurrent limiting) -/// - [ ] Emissions are split proportionally -/// - [ ] Sum of split emissions is equal to rao_emission passed to epoch -/// - [ ] Weights can be set/commited/revealed by subsubnet -/// - [ ] Rate limiting is enforced by subsubnet -/// - [ ] Bonds are applied per subsubnet -/// - [ ] Incentives are per subsubnet -/// - [ ] Subsubnet limit can be set up to 8 (with admin pallet) -/// - [ ] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block -/// - [ ] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared -/// - [ ] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms -/// - [ ] Subnet epoch terms persist in state -/// - [ ] Subsubnet epoch terms persist in state +// Run all tests +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::subsubnet --show-output + +// Test plan: +// - [x] Netuid index math (with SubsubnetCountCurrent limiting) +// - [ ] Emissions are split proportionally +// - [ ] Sum of split emissions is equal to rao_emission passed to epoch +// - [ ] Weights can be set/commited/revealed by subsubnet +// - [ ] Rate limiting is enforced by subsubnet +// - [ ] Bonds are 
applied per subsubnet +// - [ ] Incentives are per subsubnet +// - [ ] Subsubnet limit can be set up to 8 (with admin pallet) +// - [ ] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block +// - [ ] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared +// - [ ] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms +// - [ ] Subnet epoch terms persist in state +// - [ ] Subsubnet epoch terms persist in state + use super::mock::*; +use crate::subnets::subsubnet::{GLOBAL_MAX_SUBNET_COUNT, MAX_SUBSUBNET_COUNT_PER_SUBNET}; +use crate::*; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; #[test] fn test_index_from_netuid_and_subnet() { - new_test_ext(1).execute_with(|| {}); + new_test_ext(1).execute_with(|| { + [ + (0_u16, 0_u8), + (GLOBAL_MAX_SUBNET_COUNT / 2, 1), + (GLOBAL_MAX_SUBNET_COUNT / 2, 7), + (GLOBAL_MAX_SUBNET_COUNT / 2, 14), + (GLOBAL_MAX_SUBNET_COUNT / 2, 15), + (GLOBAL_MAX_SUBNET_COUNT - 1, 1), + (GLOBAL_MAX_SUBNET_COUNT - 1, 7), + (GLOBAL_MAX_SUBNET_COUNT - 1, 14), + (GLOBAL_MAX_SUBNET_COUNT - 1, 15), + ] + .iter() + .for_each(|(netuid, sub_id)| { + let idx = SubtensorModule::get_subsubnet_storage_index( + NetUid::from(*netuid), + SubId::from(*sub_id), + ); + let expected = *sub_id as u64 * GLOBAL_MAX_SUBNET_COUNT as u64 + *netuid as u64; + assert_eq!(idx, NetUidStorageIndex::from(expected as u16)); + }); + }); +} + +#[test] +fn test_netuid_and_subnet_from_index() { + new_test_ext(1).execute_with(|| { + [ + 0_u16, + 1, + 14, + 15, + 16, + 17, + GLOBAL_MAX_SUBNET_COUNT - 1, + GLOBAL_MAX_SUBNET_COUNT, + GLOBAL_MAX_SUBNET_COUNT + 1, + 0xFFFE / 2, + 0xFFFE, + 0xFFFF, + ] + .iter() + .for_each(|netuid_index| { + let expected_netuid = (*netuid_index as u64 % GLOBAL_MAX_SUBNET_COUNT as u64) as u16; + let expected_subid = (*netuid_index as u64 / GLOBAL_MAX_SUBNET_COUNT as u64) as u8; + + // Allow subnet ID + 
NetworksAdded::::insert(NetUid::from(expected_netuid), true); + SubsubnetCountCurrent::::insert( + NetUid::from(expected_netuid), + SubId::from(expected_subid + 1), + ); + + let (netuid, subid) = + SubtensorModule::get_netuid_and_subid(NetUidStorageIndex::from(*netuid_index)) + .unwrap(); + assert_eq!(netuid, NetUid::from(expected_netuid as u16)); + assert_eq!(subid, SubId::from(expected_subid as u8)); + }); + }); +} + +#[test] +fn test_netuid_index_math_constants() { + assert_eq!( + GLOBAL_MAX_SUBNET_COUNT as u64 * MAX_SUBSUBNET_COUNT_PER_SUBNET as u64, + 0x10000 + ); } #[test] From 9ddf6dc85f24cf504ab3993531fe9002ebdd3697 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 28 Aug 2025 20:50:45 -0400 Subject: [PATCH 12/39] Add tests for subsubnets --- pallets/subtensor/src/tests/subsubnet.rs | 266 +++++++++++++++++++++++ 1 file changed, 266 insertions(+) diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 41c66f0da1..afb6006dac 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -9,6 +9,8 @@ // Test plan: // - [x] Netuid index math (with SubsubnetCountCurrent limiting) +// - [x] Sub-subnet validity tests +// - [x] do_set_desired tests // - [ ] Emissions are split proportionally // - [ ] Sum of split emissions is equal to rao_emission passed to epoch // - [ ] Weights can be set/commited/revealed by subsubnet @@ -25,6 +27,9 @@ use super::mock::*; use crate::subnets::subsubnet::{GLOBAL_MAX_SUBNET_COUNT, MAX_SUBSUBNET_COUNT_PER_SUBNET}; use crate::*; +use frame_support::{assert_noop, assert_ok}; +use sp_core::U256; +use sp_std::collections::vec_deque::VecDeque; use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; #[test] @@ -99,6 +104,267 @@ fn test_netuid_index_math_constants() { ); } +#[test] +fn ensure_subsubnet_exists_ok() { + new_test_ext(1).execute_with(|| { + let netuid: NetUid = 3u16.into(); + let sub_id = SubId::from(1u8); + + // ensure base 
subnet exists + NetworksAdded::::insert(NetUid::from(netuid), true); + + // Allow at least 2 sub-subnets (so sub_id = 1 is valid) + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + assert_ok!(SubtensorModule::ensure_subsubnet_exists(netuid, sub_id)); + }); +} + +#[test] +fn ensure_subsubnet_fails_when_base_subnet_missing() { + new_test_ext(1).execute_with(|| { + let netuid: NetUid = 7u16.into(); + let sub_id = SubId::from(0u8); + + // Intentionally DO NOT create the base subnet + + assert_noop!( + SubtensorModule::ensure_subsubnet_exists(netuid, sub_id), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn ensure_subsubnet_fails_when_subid_out_of_range() { + new_test_ext(1).execute_with(|| { + let netuid: NetUid = 9u16.into(); + NetworksAdded::::insert(NetUid::from(netuid), true); + + // Current allowed sub-subnet count is 2 => valid sub_ids: {0, 1} + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + + // sub_id == 2 is out of range (must be < 2) + let sub_id_eq = SubId::from(2u8); + assert_noop!( + SubtensorModule::ensure_subsubnet_exists(netuid, sub_id_eq), + Error::::SubNetworkDoesNotExist + ); + + // sub_id > 2 is also out of range + let sub_id_gt = SubId::from(3u8); + assert_noop!( + SubtensorModule::ensure_subsubnet_exists(netuid, sub_id_gt), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn do_set_desired_subsubnet_count_ok_minimal() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(3u16); + NetworksAdded::::insert(NetUid::from(3u16), true); // base subnet exists + + assert_ok!(SubtensorModule::do_set_desired_subsubnet_count( + netuid, + SubId::from(1u8) + )); + + assert_eq!(SubsubnetCountDesired::::get(netuid), SubId::from(1u8)); + }); +} + +#[test] +fn do_set_desired_subsubnet_count_ok_at_effective_cap() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(4u16); + NetworksAdded::::insert(NetUid::from(4u16), true); // base subnet exists + + // Effective bound is min(runtime cap, 
compile-time cap) + let runtime_cap = MaxSubsubnetCount::::get(); // e.g., SubId::from(8) + let compile_cap = SubId::from(MAX_SUBSUBNET_COUNT_PER_SUBNET); + let bound = if runtime_cap <= compile_cap { + runtime_cap + } else { + compile_cap + }; + + assert_ok!(SubtensorModule::do_set_desired_subsubnet_count( + netuid, bound + )); + assert_eq!(SubsubnetCountDesired::::get(netuid), bound); + }); +} + +#[test] +fn do_set_desired_fails_when_base_subnet_missing() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(7u16); + // No NetworksAdded insert => base subnet absent + + assert_noop!( + SubtensorModule::do_set_desired_subsubnet_count(netuid, SubId::from(1u8)), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn do_set_desired_fails_for_zero() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(9u16); + NetworksAdded::::insert(NetUid::from(9u16), true); // base subnet exists + + assert_noop!( + SubtensorModule::do_set_desired_subsubnet_count(netuid, SubId::from(0u8)), + Error::::InvalidValue + ); + }); +} + +#[test] +fn do_set_desired_fails_when_over_runtime_cap() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(11u16); + NetworksAdded::::insert(NetUid::from(11u16), true); // base subnet exists + + // Runtime cap is 8 (per function), so 9 must fail + assert_noop!( + SubtensorModule::do_set_desired_subsubnet_count(netuid, SubId::from(9u8)), + Error::::InvalidValue + ); + }); +} + +#[test] +fn do_set_desired_fails_when_over_compile_time_cap() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(12u16); + NetworksAdded::::insert(NetUid::from(12u16), true); // base subnet exists + + let too_big = SubId::from(MAX_SUBSUBNET_COUNT_PER_SUBNET + 1); + assert_noop!( + SubtensorModule::do_set_desired_subsubnet_count(netuid, too_big), + Error::::InvalidValue + ); + }); +} + +#[test] +fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { + new_test_ext(1).execute_with(|| { + let hotkey = 
U256::from(1); + + // Base subnet exists + let netuid = NetUid::from(42u16); + NetworksAdded::::insert(NetUid::from(42u16), true); + + // super_block = SuperBlockTempos() * Tempo(netuid) + Tempo::::insert(netuid, 1u16); + let super_block = + u64::from(SuperBlockTempos::::get()) * u64::from(Tempo::::get(netuid)); + + // Choose counts so result is deterministic for ANY decrease-per-superblock. + // Let dec = GlobalSubsubnetDecreasePerSuperblock(); set old = dec + 3. + let dec: u8 = u8::from(GlobalSubsubnetDecreasePerSuperblock::::get()); + let old = SubId::from(dec.saturating_add(3)); // ≥3 + let desired = SubId::from(1u8); + // min_possible = max(old - dec, 1) = 3 → new_count = 3 + SubsubnetCountCurrent::::insert(netuid, old); + SubsubnetCountDesired::::insert(netuid, desired); + + // Seed data at a kept subid (2) and a removed subid (3) + let idx_keep = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(2u8)); + let idx_rm3 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(3u8)); + + Weights::::insert(idx_keep, 0u16, vec![(1u16, 1u16)]); + Incentive::::insert(idx_keep, vec![1u16]); + LastUpdate::::insert(idx_keep, vec![123u64]); + Bonds::::insert(idx_keep, 0u16, vec![(1u16, 2u16)]); + WeightCommits::::insert( + idx_keep, + hotkey, + VecDeque::from([(sp_core::H256::zero(), 1u64, 2u64, 3u64)]), + ); + TimelockedWeightCommits::::insert( + idx_keep, + 1u64, + VecDeque::from([(hotkey, 1u64, Default::default(), Default::default())]), + ); + + Weights::::insert(idx_rm3, 0u16, vec![(9u16, 9u16)]); + Incentive::::insert(idx_rm3, vec![9u16]); + LastUpdate::::insert(idx_rm3, vec![999u64]); + Bonds::::insert(idx_rm3, 0u16, vec![(9u16, 9u16)]); + WeightCommits::::insert( + idx_rm3, + hotkey, + VecDeque::from([(sp_core::H256::zero(), 1u64, 2u64, 3u64)]), + ); + TimelockedWeightCommits::::insert( + idx_rm3, + 1u64, + VecDeque::from([(hotkey, 1u64, Default::default(), Default::default())]), + ); + + // Act exactly on a super-block boundary + 
SubtensorModule::update_subsubnet_counts_if_needed(2 * super_block); + + // New count is 3 + assert_eq!(SubsubnetCountCurrent::::get(netuid), SubId::from(3u8)); + + // Kept prefix intact + assert_eq!(Incentive::::get(idx_keep), vec![1u16]); + assert!(Weights::::iter_prefix(idx_keep).next().is_some()); + assert!(LastUpdate::::contains_key(idx_keep)); + assert!(Bonds::::iter_prefix(idx_keep).next().is_some()); + assert!(WeightCommits::::contains_key(idx_keep, hotkey)); + assert!(TimelockedWeightCommits::::contains_key( + idx_keep, 1u64 + )); + + // Removed prefix (subid 3) cleared + assert!(Weights::::iter_prefix(idx_rm3).next().is_none()); + assert_eq!(Incentive::::get(idx_rm3), Vec::::new()); + assert!(!LastUpdate::::contains_key(idx_rm3)); + assert!(Bonds::::iter_prefix(idx_rm3).next().is_none()); + assert!(!WeightCommits::::contains_key(idx_rm3, hotkey)); + assert!(!TimelockedWeightCommits::::contains_key( + idx_rm3, 1u64 + )); + }); +} + +#[test] +fn update_subsubnet_counts_no_change_when_not_superblock() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(100u16); + NetworksAdded::::insert(NetUid::from(100u16), true); + + Tempo::::insert(netuid, 1u16); + let super_block = + u64::from(SuperBlockTempos::::get()) * u64::from(Tempo::::get(netuid)); + + // Setup counts as in the previous test + let dec: u8 = u8::from(GlobalSubsubnetDecreasePerSuperblock::::get()); + let old = SubId::from(dec.saturating_add(3)); + let desired = SubId::from(1u8); + SubsubnetCountCurrent::::insert(netuid, old); + SubsubnetCountDesired::::insert(netuid, desired); + + // Marker value at a subid that would be kept if a change happened + let idx_mark = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(2u8)); + Incentive::::insert(idx_mark, vec![77u16]); + + // Act on a non-boundary + SubtensorModule::update_subsubnet_counts_if_needed(super_block - 1); + + // Nothing changes + assert_eq!(SubsubnetCountCurrent::::get(netuid), old); + 
assert_eq!(Incentive::::get(idx_mark), vec![77u16]); + }); +} + #[test] fn test_subsubnet_emission_proportions() { new_test_ext(1).execute_with(|| {}); From 163c3f755f32aaeae36cc5f773bbb3ca7176f78f Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 28 Aug 2025 21:04:29 -0400 Subject: [PATCH 13/39] Fix Bonds cleanup on subnet removal --- pallets/subtensor/src/coinbase/root.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 6d2824aec9..74c75f5624 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -430,7 +430,10 @@ impl Pallet { let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let keys = Keys::::iter_prefix(netuid).collect::>(); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - let _ = Bonds::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); + for subid in 0..subsubnets { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); + } // --- 8. Removes the weights for this subnet (do not remove). for subid in 0..subsubnets { From db2aa8c577ddf08fcc39d853d6e0116769866b27 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Fri, 29 Aug 2025 17:51:05 -0400 Subject: [PATCH 14/39] Add more tests for subsubnets. 
Merge test plan with bit --- pallets/subtensor/src/tests/subsubnet.rs | 230 +++++++++++++++++++++-- 1 file changed, 219 insertions(+), 11 deletions(-) diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index afb6006dac..09e43b6f13 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -11,22 +11,29 @@ // - [x] Netuid index math (with SubsubnetCountCurrent limiting) // - [x] Sub-subnet validity tests // - [x] do_set_desired tests -// - [ ] Emissions are split proportionally -// - [ ] Sum of split emissions is equal to rao_emission passed to epoch +// - [x] Emissions are split proportionally +// - [x] Sum of split emissions is equal to rao_emission passed to epoch +// - [ ] Only subnet owner or root can set desired subsubnet count // - [ ] Weights can be set/commited/revealed by subsubnet -// - [ ] Rate limiting is enforced by subsubnet -// - [ ] Bonds are applied per subsubnet -// - [ ] Incentives are per subsubnet -// - [ ] Subsubnet limit can be set up to 8 (with admin pallet) -// - [ ] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block -// - [ ] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared +// - [ ] Prevent weight setting/commitment/revealing above subsubnet_limit_in_force +// - [ ] When a miner is deregistered, their weights are cleaned across all subsubnets +// - [ ] Weight setting rate limiting is enforced by subsubnet +// - [x] Bonds are applied per subsubnet +// - [x] Incentives are per subsubnet +// - [x] Per-subsubnet incentives are distributed proportionally to miner weights +// - [x] Subsubnet limit can be set up to 8 (with admin pallet) +// - [x] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block +// - [x] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and 
WeightCommits are cleared // - [ ] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms // - [ ] Subnet epoch terms persist in state -// - [ ] Subsubnet epoch terms persist in state +// - [x] Subsubnet epoch terms persist in state +// - [ ] "Yuma Emergency Mode" (consensus sum is 0 for a subsubnet), emission distributed by stake +// - [ ] Miner with no weights on any subsubnet receives no reward use super::mock::*; use crate::subnets::subsubnet::{GLOBAL_MAX_SUBNET_COUNT, MAX_SUBSUBNET_COUNT_PER_SUBNET}; use crate::*; +use approx::assert_abs_diff_eq; use frame_support::{assert_noop, assert_ok}; use sp_core::U256; use sp_std::collections::vec_deque::VecDeque; @@ -366,6 +373,207 @@ fn update_subsubnet_counts_no_change_when_not_superblock() { } #[test] -fn test_subsubnet_emission_proportions() { - new_test_ext(1).execute_with(|| {}); +fn split_emissions_even_division() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(5u16); + SubsubnetCountCurrent::::insert(netuid, SubId::from(5u8)); // 5 sub-subnets + let out = SubtensorModule::split_emissions(netuid, AlphaCurrency::from(25u64)); + assert_eq!(out, vec![AlphaCurrency::from(5u64); 5]); + }); +} + +#[test] +fn split_emissions_rounding_to_first() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(6u16); + SubsubnetCountCurrent::::insert(netuid, SubId::from(4u8)); // 4 sub-subnets + let out = SubtensorModule::split_emissions(netuid, AlphaCurrency::from(10u64)); // 10 / 4 = 2, rem=2 + assert_eq!( + out, + vec![ + AlphaCurrency::from(4u64), // 2 + remainder(2) + AlphaCurrency::from(2u64), + AlphaCurrency::from(2u64), + AlphaCurrency::from(2u64), + ] + ); + }); +} + +/// Seeds a 2-neuron and 2-subsubnet subnet so `epoch_subsubnet` produces non-zero +/// incentives & dividends. +/// Returns the sub-subnet storage index. 
+pub fn mock_epoch_state(netuid: NetUid, ck0: U256, hk0: U256, ck1: U256, hk1: U256) { + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + + // Base subnet exists; 2 neurons. + NetworksAdded::::insert(NetUid::from(u16::from(netuid)), true); + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + SubnetworkN::::insert(netuid, 2); + + // Register two neurons (UID 0,1) → keys drive `get_subnetwork_n`. + Keys::::insert(netuid, 0u16, hk0.clone()); + Keys::::insert(netuid, 1u16, hk1.clone()); + + // Make both ACTIVE: recent updates & old registrations. + Tempo::::insert(netuid, 1u16); + ActivityCutoff::::insert(netuid, u16::MAX); // large cutoff keeps them active + LastUpdate::::insert(idx0, vec![2, 2]); + LastUpdate::::insert(idx1, vec![2, 2]); + BlockAtRegistration::::insert(netuid, 0, 1u64); // registered long ago + BlockAtRegistration::::insert(netuid, 1, 1u64); + + // Add stake + let stake_amount = AlphaCurrency::from(1_000_000_000); // 1 Alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk0, + &ck0, + netuid, + stake_amount, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + stake_amount, + ); + + // Non-zero stake above threshold; permit both as validators. + StakeThreshold::::put(0u64); + ValidatorPermit::::insert(netuid, vec![true, true]); + + // Simple weights, setting for each other on both subsubnets + Weights::::insert(idx0, 0, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); + Weights::::insert(idx0, 1, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); + Weights::::insert(idx1, 0, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); + Weights::::insert(idx1, 1, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); + + // Keep weight masking off for simplicity. 
+ CommitRevealWeightsEnabled::::insert(netuid, false); + Yuma3On::::insert(netuid, false); +} + +pub fn mock_3_neurons(netuid: NetUid, hk: U256) { + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + + SubnetworkN::::insert(netuid, 3); + Keys::::insert(netuid, 2u16, hk.clone()); + LastUpdate::::insert(idx0, vec![2, 2, 2]); + LastUpdate::::insert(idx1, vec![2, 2, 2]); + BlockAtRegistration::::insert(netuid, 2, 1u64); +} + +#[test] +fn epoch_with_subsubnets_produces_per_subsubnet_incentive() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1u16); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let ck0 = U256::from(1); + let hk0 = U256::from(2); + let ck1 = U256::from(3); + let hk1 = U256::from(4); + let emission = AlphaCurrency::from(1_000_000_000); + + mock_epoch_state(netuid, ck0, hk0, ck1, hk1); + SubtensorModule::epoch_with_subsubnets(netuid, emission); + + let actual_incentive_sub0 = Incentive::::get(idx0); + let actual_incentive_sub1 = Incentive::::get(idx1); + let expected_incentive = 0xFFFF / 2; + assert_eq!(actual_incentive_sub0[0], expected_incentive); + assert_eq!(actual_incentive_sub0[1], expected_incentive); + assert_eq!(actual_incentive_sub1[0], expected_incentive); + assert_eq!(actual_incentive_sub1[1], expected_incentive); + }); +} + +#[test] +fn epoch_with_subsubnets_updates_bonds() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1u16); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let ck0 = U256::from(1); + let hk0 = U256::from(2); + let ck1 = U256::from(3); + let hk1 = U256::from(4); + let emission = AlphaCurrency::from(1_000_000_000); + + mock_epoch_state(netuid, 
ck0, hk0, ck1, hk1); + + // Cause bonds to be asymmetric on diff subsubnets + Weights::::insert(idx1, 0, vec![(0u16, 0xFFFF), (1u16, 0)]); + Weights::::insert(idx1, 1, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); + + SubtensorModule::epoch_with_subsubnets(netuid, emission); + + let bonds_uid0_sub0 = Bonds::::get(idx0, 0); + let bonds_uid1_sub0 = Bonds::::get(idx0, 1); + let bonds_uid0_sub1 = Bonds::::get(idx1, 0); + let bonds_uid1_sub1 = Bonds::::get(idx1, 1); + + // Subsubnet 0: UID0 fully bonds to UID1, UID1 fully bonds to UID0 + assert_eq!(bonds_uid0_sub0, vec![(1, 65535)]); + assert_eq!(bonds_uid1_sub0, vec![(0, 65535)]); + + // Subsubnet 1: UID0 no bond to UID1, UID1 fully bonds to UID0 + assert_eq!(bonds_uid0_sub1, vec![]); + assert_eq!(bonds_uid1_sub1, vec![(0, 65535)]); + }); +} + +#[test] +fn epoch_with_subsubnets_incentives_proportional_to_weights() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1u16); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let ck0 = U256::from(1); + let hk0 = U256::from(2); + let ck1 = U256::from(3); + let hk1 = U256::from(4); + let hk2 = U256::from(6); + let emission = AlphaCurrency::from(1_000_000_000); + + mock_epoch_state(netuid, ck0, hk0, ck1, hk1); + mock_3_neurons(netuid, hk2); + + // Need 3 neurons for this: One validator that will be setting weights to 2 miners + ValidatorPermit::::insert(netuid, vec![true, false, false]); + + // Set greater weight to uid1 on sub-subnet 0 and to uid2 on subsubnet 1 + Weights::::insert(idx0, 0, vec![(1u16, 0xFFFF / 5 * 4), (2u16, 0xFFFF / 5)]); + Weights::::insert(idx1, 0, vec![(1u16, 0xFFFF / 5), (2u16, 0xFFFF / 5 * 4)]); + + SubtensorModule::epoch_with_subsubnets(netuid, emission); + + let actual_incentive_sub0 = Incentive::::get(idx0); + let actual_incentive_sub1 = Incentive::::get(idx1); + + let expected_incentive_high = 0xFFFF / 5 * 4; + let 
expected_incentive_low = 0xFFFF / 5; + assert_abs_diff_eq!( + actual_incentive_sub0[1], + expected_incentive_high, + epsilon = 1 + ); + assert_abs_diff_eq!( + actual_incentive_sub0[2], + expected_incentive_low, + epsilon = 1 + ); + assert_abs_diff_eq!( + actual_incentive_sub1[1], + expected_incentive_low, + epsilon = 1 + ); + assert_abs_diff_eq!( + actual_incentive_sub1[2], + expected_incentive_high, + epsilon = 1 + ); + }); } From 07a2f7059e54fdfcbb1570c0f0710c7c95903c9a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sat, 30 Aug 2025 15:29:50 +0000 Subject: [PATCH 15/39] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 3ca50fac5d..fff6c14f0e 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -197,9 +197,9 @@ mod dispatches { /// - On failure for each failed item in the batch. /// #[pallet::call_index(80)] - #[pallet::weight((Weight::from_parts(19_330_000, 0) - .saturating_add(T::DbWeight::get().reads(1_u64)) - .saturating_add(T::DbWeight::get().writes(0_u64)), DispatchClass::Normal, Pays::No))] + #[pallet::weight((Weight::from_parts(100_500_000, 0) + .saturating_add(T::DbWeight::get().reads(15_u64)) + .saturating_add(T::DbWeight::get().writes(2_u64)), DispatchClass::Normal, Pays::No))] pub fn batch_set_weights( origin: OriginFor, netuids: Vec>, @@ -790,7 +790,7 @@ mod dispatches { /// - Errors stemming from transaction pallet. 
/// #[pallet::call_index(2)] - #[pallet::weight((Weight::from_parts(340_400_000, 0) + #[pallet::weight((Weight::from_parts(439_200_000, 0) .saturating_add(T::DbWeight::get().reads(26)) .saturating_add(T::DbWeight::get().writes(15)), DispatchClass::Normal, Pays::Yes))] pub fn add_stake( @@ -2406,7 +2406,7 @@ mod dispatches { /// * commit_reveal_version (`u16`): /// - The client (bittensor-drand) version #[pallet::call_index(113)] - #[pallet::weight((Weight::from_parts(64_530_000, 0) + #[pallet::weight((Weight::from_parts(84_020_000, 0) .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_timelocked_weights( From 241c33c3097d1ba850279f867273a5162ce78b64 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 2 Sep 2025 18:06:36 +0000 Subject: [PATCH 16/39] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 311aecf667..4d31a5829d 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -198,7 +198,7 @@ mod dispatches { /// #[pallet::call_index(80)] #[pallet::weight((Weight::from_parts(95_460_000, 0) - .saturating_add(T::DbWeight::get().reads(14_u64)) + .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)), DispatchClass::Normal, Pays::No))] pub fn batch_set_weights( origin: OriginFor, @@ -678,7 +678,7 @@ mod dispatches { /// - Errors stemming from transaction pallet. 
/// #[pallet::call_index(2)] - #[pallet::weight((Weight::from_parts(439_200_000, 0) + #[pallet::weight((Weight::from_parts(340_800_000, 0) .saturating_add(T::DbWeight::get().reads(26)) .saturating_add(T::DbWeight::get().writes(15)), DispatchClass::Normal, Pays::Yes))] pub fn add_stake( @@ -999,7 +999,7 @@ mod dispatches { /// Register the hotkey to root network #[pallet::call_index(62)] - #[pallet::weight((Weight::from_parts(111_700_000, 0) + #[pallet::weight((Weight::from_parts(135_900_000, 0) .saturating_add(T::DbWeight::get().reads(24_u64)) .saturating_add(T::DbWeight::get().writes(20)), DispatchClass::Normal, Pays::No))] pub fn root_register(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { From 0d8234f48d6857d42a017a8817c1315697740025 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 3 Sep 2025 12:31:27 -0400 Subject: [PATCH 17/39] Add weight setting/removing tests --- pallets/subtensor/src/macros/dispatches.rs | 49 ++ pallets/subtensor/src/macros/events.rs | 6 +- pallets/subtensor/src/subnets/uids.rs | 18 +- pallets/subtensor/src/subnets/weights.rs | 42 +- pallets/subtensor/src/tests/epoch.rs | 1 + pallets/subtensor/src/tests/subsubnet.rs | 568 +++++++++++++++++- pallets/subtensor/src/tests/weights.rs | 2 +- .../subtensor/src/transaction_extension.rs | 6 +- 8 files changed, 663 insertions(+), 29 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 311aecf667..759bcc06a8 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -2254,5 +2254,54 @@ mod dispatches { commit_reveal_version, ) } + + /// ---- Used to commit timelock encrypted commit-reveal weight values to later be revealed. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The committing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `subid` (`u8`): + /// - The u8 subsubnet identifier. 
+ /// + /// * `commit` (`Vec`): + /// - The encrypted compressed commit. + /// The steps for this are: + /// 1. Instantiate [`WeightsTlockPayload`] + /// 2. Serialize it using the `parity_scale_codec::Encode` trait + /// 3. Encrypt it following the steps (here)[https://github.com/ideal-lab5/tle/blob/f8e6019f0fb02c380ebfa6b30efb61786dede07b/timelock/src/tlock.rs#L283-L336] + /// to produce a [`TLECiphertext`] type. + /// 4. Serialize and compress using the `ark-serialize` `CanonicalSerialize` trait. + /// + /// * reveal_round (`u64`): + /// - The drand reveal round which will be avaliable during epoch `n+1` from the current + /// epoch. + /// + /// * commit_reveal_version (`u16`): + /// - The client (bittensor-drand) version + #[pallet::call_index(118)] + #[pallet::weight((Weight::from_parts(84_020_000, 0) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn commit_timelocked_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit: BoundedVec>, + reveal_round: u64, + commit_reveal_version: u16, + ) -> DispatchResult { + Self::do_commit_timelocked_sub_weights( + origin, + netuid, + subid, + commit, + reveal_round, + commit_reveal_version, + ) + } } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 0259863cd8..57bde5a374 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -242,20 +242,20 @@ mod events { /// - **who**: The account ID of the user committing the weights. /// - **netuid**: The network identifier. /// - **commit_hash**: The hash representing the committed weights. - CRV3WeightsCommitted(T::AccountId, NetUid, H256), + CRV3WeightsCommitted(T::AccountId, NetUidStorageIndex, H256), /// Weights have been successfully committed. /// /// - **who**: The account ID of the user committing the weights. /// - **netuid**: The network identifier. 
/// - **commit_hash**: The hash representing the committed weights. - WeightsCommitted(T::AccountId, NetUid, H256), + WeightsCommitted(T::AccountId, NetUidStorageIndex, H256), /// Weights have been successfully revealed. /// /// - **who**: The account ID of the user revealing the weights. /// - **netuid**: The network identifier. /// - **commit_hash**: The hash of the revealed weights. - WeightsRevealed(T::AccountId, NetUid, H256), + WeightsRevealed(T::AccountId, NetUidStorageIndex, H256), /// Weights have been successfully batch revealed. /// diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index 4c029862e4..ce0b14cc1c 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -16,7 +16,8 @@ impl Pallet { } } - /// Resets the trust, emission, consensus, incentive, dividends of the neuron to default + /// Resets the trust, emission, consensus, incentive, dividends, bonds, and weights of + /// the neuron to default pub fn clear_neuron(netuid: NetUid, neuron_uid: u16) { let neuron_index: usize = neuron_uid.into(); Emission::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0.into())); @@ -26,6 +27,21 @@ impl Pallet { let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); Incentive::::mutate(netuid_index, |v| Self::set_element_at(v, neuron_index, 0)); Bonds::::remove(netuid_index, neuron_uid); // Remove bonds for Validator. 
+ + // Clear weights set BY the neuron_uid + Weights::::remove(netuid_index, neuron_uid); + + // Set weights FOR the neuron_uid to 0 + let all_uids: Vec = Weights::::iter_key_prefix(netuid_index).collect(); + for uid in all_uids { + Weights::::mutate(netuid_index, uid, |weight_vec: &mut Vec<(u16, u16)>| { + for (weight_uid, w) in weight_vec.iter_mut() { + if *weight_uid == neuron_uid { + *w = 0; + } + } + }); + } } Dividends::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); } diff --git a/pallets/subtensor/src/subnets/weights.rs b/pallets/subtensor/src/subnets/weights.rs index f9393cd6bd..0c1ad9efd7 100644 --- a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -64,6 +64,9 @@ impl Pallet { subid: SubId, commit_hash: H256, ) -> DispatchResult { + // Ensure netuid and subid exist + Self::ensure_subsubnet_exists(netuid, subid)?; + // Calculate subnet storage index let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); @@ -125,7 +128,7 @@ impl Pallet { *maybe_commits = Some(commits); // 11. Emit the WeightsCommitted event - Self::deposit_event(Event::WeightsCommitted(who.clone(), netuid, commit_hash)); + Self::deposit_event(Event::WeightsCommitted(who.clone(), netuid_index, commit_hash)); // 12. Update the last commit block for the hotkey's UID. Self::set_last_update_for_uid(netuid_index, neuron_uid, commit_block); @@ -293,6 +296,9 @@ impl Pallet { reveal_round: u64, commit_reveal_version: u16, ) -> DispatchResult { + // Ensure netuid and subid exist + Self::ensure_subsubnet_exists(netuid, subid)?; + // Calculate netuid storage index let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); @@ -476,7 +482,7 @@ impl Pallet { // --- 5. Hash the provided data. let provided_hash: H256 = - Self::get_commit_hash(&who, netuid, &uids, &values, &salt, version_key); + Self::get_commit_hash(&who, netuid_index, &uids, &values, &salt, version_key); // --- 6. 
After removing expired commits, check if any commits are left. if commits.is_empty() { @@ -515,16 +521,17 @@ impl Pallet { } // --- 12. Proceed to set the revealed weights. - Self::do_set_weights( + Self::do_set_sub_weights( origin, netuid, + subid, uids.clone(), values.clone(), version_key, )?; // --- 13. Emit the WeightsRevealed event. - Self::deposit_event(Event::WeightsRevealed(who.clone(), netuid, provided_hash)); + Self::deposit_event(Event::WeightsRevealed(who.clone(), netuid_index, provided_hash)); // --- 14. Return ok. Ok(()) @@ -1085,17 +1092,20 @@ impl Pallet { neuron_uid: u16, current_block: u64, ) -> bool { - let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); - if Self::is_uid_exist_on_network(netuid, neuron_uid) { - // --- 1. Ensure that the diff between current and last_set weights is greater than limit. - let last_set_weights: u64 = Self::get_last_update_for_uid(netuid_index, neuron_uid); - if last_set_weights == 0 { - return true; - } // (Storage default) Never set weights. - return current_block.saturating_sub(last_set_weights) - >= Self::get_weights_set_rate_limit(netuid); + let maybe_netuid_and_subid = Self::get_netuid_and_subid(netuid_index); + if let Ok((netuid, _)) = maybe_netuid_and_subid { + if Self::is_uid_exist_on_network(netuid, neuron_uid) { + // --- 1. Ensure that the diff between current and last_set weights is greater than limit. + let last_set_weights: u64 = Self::get_last_update_for_uid(netuid_index, neuron_uid); + if last_set_weights == 0 { + return true; + } // (Storage default) Never set weights. + return current_block.saturating_sub(last_set_weights) + >= Self::get_weights_set_rate_limit(netuid); + } } - // --- 3. Non registered peers cant pass. + + // --- 3. Non registered peers cant pass. 
Neither can non-existing subid false } @@ -1291,13 +1301,13 @@ impl Pallet { pub fn get_commit_hash( who: &T::AccountId, - netuid: NetUid, + netuid_index: NetUidStorageIndex, uids: &[u16], values: &[u16], salt: &[u16], version_key: u64, ) -> H256 { - BlakeTwo256::hash_of(&(who.clone(), netuid, uids, values, salt, version_key)) + BlakeTwo256::hash_of(&(who.clone(), netuid_index, uids, values, salt, version_key)) } pub fn find_commit_block_via_hash(hash: H256) -> Option { diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index af72ac6924..fa628013ac 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -1570,6 +1570,7 @@ fn test_active_stake() { } // Test that epoch masks out outdated weights and bonds of validators on deregistered servers. +// cargo test --package pallet-subtensor --lib -- tests::epoch::test_outdated_weights --exact --show-output #[test] fn test_outdated_weights() { new_test_ext(1).execute_with(|| { diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 09e43b6f13..1761a9d595 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -14,9 +14,13 @@ // - [x] Emissions are split proportionally // - [x] Sum of split emissions is equal to rao_emission passed to epoch // - [ ] Only subnet owner or root can set desired subsubnet count -// - [ ] Weights can be set/commited/revealed by subsubnet -// - [ ] Prevent weight setting/commitment/revealing above subsubnet_limit_in_force -// - [ ] When a miner is deregistered, their weights are cleaned across all subsubnets +// - [x] Weights can be set by subsubnet +// - [x] Weights can be commited/revealed by subsubnet +// - [x] Weights can be commited/revealed in crv3 by subsubnet +// - [x] Prevent weight setting/commitment/revealing above subsubnet_limit_in_force +// - [x] Prevent weight commitment/revealing above subsubnet_limit_in_force 
+// - [x] Prevent weight commitment/revealing in crv3 above subsubnet_limit_in_force +// - [x] When a miner is deregistered, their weights are cleaned across all subsubnets // - [ ] Weight setting rate limiting is enforced by subsubnet // - [x] Bonds are applied per subsubnet // - [x] Incentives are per subsubnet @@ -28,16 +32,32 @@ // - [ ] Subnet epoch terms persist in state // - [x] Subsubnet epoch terms persist in state // - [ ] "Yuma Emergency Mode" (consensus sum is 0 for a subsubnet), emission distributed by stake -// - [ ] Miner with no weights on any subsubnet receives no reward +// - [x] Miner with no weights on any subsubnet receives no reward use super::mock::*; +use crate::coinbase::reveal_commits::WeightsTlockPayload; use crate::subnets::subsubnet::{GLOBAL_MAX_SUBNET_COUNT, MAX_SUBSUBNET_COUNT_PER_SUBNET}; use crate::*; use approx::assert_abs_diff_eq; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use codec::Encode; use frame_support::{assert_noop, assert_ok}; -use sp_core::U256; +use frame_system::RawOrigin; +use pallet_drand::types::Pulse; +use rand_chacha::{ChaCha20Rng, rand_core::SeedableRng}; +use sha2::Digest; +use sp_core::{H256, U256}; +use sp_runtime::traits::{BlakeTwo256, Hash}; use sp_std::collections::vec_deque::VecDeque; use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; +use substrate_fixed::types::I32F32; +use tle::{ + curves::drand::TinyBLS381, + ibe::fullident::Identity, + stream_ciphers::AESGCMStreamCipherProvider, + tlock::tle, +}; +use w3f_bls::EngineBLS; #[test] fn test_index_from_netuid_and_subnet() { @@ -577,3 +597,541 @@ fn epoch_with_subsubnets_incentives_proportional_to_weights() { ); }); } + +#[test] +fn epoch_with_subsubnets_no_weight_no_incentive() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1u16); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let 
ck0 = U256::from(1); + let hk0 = U256::from(2); + let ck1 = U256::from(3); + let hk1 = U256::from(4); + let hk2 = U256::from(5); // No weight miner + let emission = AlphaCurrency::from(1_000_000_000); + + mock_epoch_state(netuid, ck0, hk0, ck1, hk1); + mock_3_neurons(netuid, hk2); + + // Need 3 neurons for this: One validator that will be setting weights to 2 miners + ValidatorPermit::::insert(netuid, vec![true, false, false]); + + // Set no weight to uid2 on sub-subnet 0 and 1 + Weights::::insert(idx0, 0, vec![(1u16, 1), (2u16, 0)]); + Weights::::insert(idx1, 0, vec![(1u16, 1), (2u16, 0)]); + + SubtensorModule::epoch_with_subsubnets(netuid, emission); + + let actual_incentive_sub0 = Incentive::::get(idx0); + let actual_incentive_sub1 = Incentive::::get(idx1); + let expected_incentive = 0xFFFF; + assert_eq!(actual_incentive_sub0[0], 0); + assert_eq!(actual_incentive_sub0[1], expected_incentive); + assert_eq!(actual_incentive_sub0[2], 0); + assert_eq!(actual_incentive_sub1[0], 0); + assert_eq!(actual_incentive_sub1[1], expected_incentive); + assert_eq!(actual_incentive_sub1[2], 0); + assert_eq!(actual_incentive_sub0.len(), 3); + assert_eq!(actual_incentive_sub1.len(), 3); + }); +} + +#[test] +fn neuron_dereg_cleans_weights_across_subids() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(77u16); + let neuron_uid: u16 = 1; // we'll deregister UID=1 + // two sub-subnets + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + + // Setup initial map values + Emission::::insert(netuid, vec![AlphaCurrency::from(1u64), AlphaCurrency::from(9u64), AlphaCurrency::from(3u64)]); + Trust::::insert(netuid, vec![11u16, 99u16, 33u16]); + Consensus::::insert(netuid, vec![21u16, 88u16, 44u16]); + Dividends::::insert(netuid, vec![7u16, 77u16, 17u16]); + + // Clearing per-subid maps + for sub in [0u8, 1u8] { + let idx = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(sub)); + + // Incentive vector: position 1 should become 0 + 
Incentive::::insert(idx, vec![10u16, 20u16, 30u16]); + + // Row set BY neuron_uid (to be removed) + Weights::::insert(idx, neuron_uid, vec![(0u16, 5u16)]); + Bonds::::insert(idx, neuron_uid, vec![(0u16, 6u16)]); + + // Rows FOR neuron_uid inside other validators' vecs => value should be set to 0 (not removed) + Weights::::insert(idx, 0u16, vec![(neuron_uid, 7u16), (42u16, 3u16)]); + Bonds::::insert(idx, 0u16, vec![(neuron_uid, 8u16), (42u16, 4u16)]); + } + + // Act + SubtensorModule::clear_neuron(netuid, neuron_uid); + + // Top-level zeroed at index 1, others intact + let e = Emission::::get(netuid); + assert_eq!(e[0], 1u64.into()); + assert_eq!(e[1], 0u64.into()); + assert_eq!(e[2], 3u64.into()); + + let t = Trust::::get(netuid); + assert_eq!(t, vec![11, 0, 33]); + + let c = Consensus::::get(netuid); + assert_eq!(c, vec![21, 0, 44]); + + let d = Dividends::::get(netuid); + assert_eq!(d, vec![7, 0, 17]); + + // Per-subid cleanup + for sub in [0u8, 1u8] { + let idx = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(sub)); + + // Incentive element at index 1 set to 0 + let inc = Incentive::::get(idx); + assert_eq!(inc, vec![10, 0, 30]); + + // Rows BY neuron_uid removed + assert!(!Weights::::contains_key(idx, neuron_uid)); + assert!(!Bonds::::contains_key(idx, neuron_uid)); + + // In other rows, entries FOR neuron_uid are zeroed, others unchanged + let w0 = Weights::::get(idx, 0u16); + assert!(w0.iter().any(|&(u, w)| u == neuron_uid && w == 0)); + assert!(w0.iter().any(|&(u, w)| u == 42 && w == 3)); + } + }); +} + +#[test] +fn clear_neuron_handles_absent_rows_gracefully() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(55u16); + SubsubnetCountCurrent::::insert(netuid, SubId::from(1u8)); // single sub-subnet + + // Minimal vectors with non-zero at index 0 (we will clear UID=0) + Emission::::insert(netuid, vec![AlphaCurrency::from(5u64)]); + Trust::::insert(netuid, vec![5u16]); + Consensus::::insert(netuid, vec![6u16]); + 
Dividends::::insert(netuid, vec![7u16]); + + // No Weights/Bonds rows at all → function should not panic + let neuron_uid: u16 = 0; + SubtensorModule::clear_neuron(netuid, neuron_uid); + + // All zeroed at index 0 + assert_eq!(Emission::::get(netuid), vec![AlphaCurrency::from(0u64)]); + assert_eq!(Trust::::get(netuid), vec![0u16]); + assert_eq!(Consensus::::get(netuid), vec![0u16]); + assert_eq!(Dividends::::get(netuid), vec![0u16]); + }); +} + +#[test] +fn test_set_sub_weights_happy_path_sets_row_under_subid() { + new_test_ext(0).execute_with(|| { + let netuid = NetUid::from(1); + let tempo: u16 = 13; + add_network_disable_commit_reveal(netuid, tempo, 0); + + // Register validator (caller) and a destination neuron + let hk1 = U256::from(55); + let ck1 = U256::from(66); + let hk2 = U256::from(77); + let ck2 = U256::from(88); + let hk3 = U256::from(99); + let ck3 = U256::from(111); + register_ok_neuron(netuid, hk1, ck1, 0); + register_ok_neuron(netuid, hk2, ck2, 0); + register_ok_neuron(netuid, hk3, ck3, 0); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk1).expect("caller uid"); + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk2).expect("dest uid 1"); + let uid3 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk3).expect("dest uid 2"); + + // Make caller a permitted validator with stake + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hk1, &ck1, netuid, 1.into()); + + // Have at least two sub-subnets; write under subid = 1 + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + let subid = SubId::from(1u8); + + // Call extrinsic + let dests = vec![uid2, uid3]; + let weights = vec![88u16, 0xFFFF]; + assert_ok!(SubtensorModule::set_sub_weights( + RawOrigin::Signed(hk1).into(), + netuid, + subid, + dests.clone(), + 
weights.clone(), + 0, // version_key + )); + + // Verify row exists under the chosen subid and not under a different subid + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + assert_eq!(Weights::::get(idx1, uid1), vec![(uid2, 88u16), (uid3, 0xFFFF)]); + + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0u8)); + assert!(Weights::::get(idx0, uid1).is_empty()); + }); +} + +#[test] +fn test_set_sub_weights_above_subsubnet_count_fails() { + new_test_ext(0).execute_with(|| { + let netuid = NetUid::from(1); + let tempo: u16 = 13; + add_network_disable_commit_reveal(netuid, tempo, 0); + + // Register validator (caller) and a destination neuron + let hk1 = U256::from(55); + let ck1 = U256::from(66); + let hk2 = U256::from(77); + let ck2 = U256::from(88); + register_ok_neuron(netuid, hk1, ck1, 0); + register_ok_neuron(netuid, hk2, ck2, 0); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk1).expect("caller uid"); + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk2).expect("dest uid 1"); + + // Make caller a permitted validator with stake + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hk1, &ck1, netuid, 1.into()); + + // Have exactly two sub-subnets; write under subid = 1 + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + let subid_above = SubId::from(2u8); + + // Call extrinsic + let dests = vec![uid2]; + let weights = vec![88u16]; + assert_noop!( + SubtensorModule::set_sub_weights( + RawOrigin::Signed(hk1).into(), + netuid, + subid_above, + dests.clone(), + weights.clone(), + 0, // version_key + ), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn test_commit_reveal_sub_weights_ok() { + new_test_ext(1).execute_with(|| { + System::set_block_number(0); + + let netuid = 
NetUid::from(1); + let tempo: u16 = 13; + add_network(netuid, tempo, 0); + + // Three neurons: validator (caller) + two destinations + let hk1 = U256::from(55); let ck1 = U256::from(66); + let hk2 = U256::from(77); let ck2 = U256::from(88); + let hk3 = U256::from(99); let ck3 = U256::from(111); + register_ok_neuron(netuid, hk1, ck1, 0); + register_ok_neuron(netuid, hk2, ck2, 0); + register_ok_neuron(netuid, hk3, ck3, 0); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk1).unwrap(); // caller + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk2).unwrap(); + let uid3 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk3).unwrap(); + + // Enable commit-reveal path and make caller a validator with stake + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_weights_set_rate_limit(netuid, 5); + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hk1, &ck1, netuid, 1.into()); + + // Ensure sub-subnet exists; write under subid = 1 + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + let subid = SubId::from(1u8); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0u8)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + + // Prepare payload and commit hash (include subid!) 
+ let dests = vec![uid2, uid3]; + let weights = vec![88u16, 0xFFFFu16]; + let salt: Vec = vec![1,2,3,4,5,6,7,8]; + let version_key: u64 = 0; + let commit_hash: H256 = BlakeTwo256::hash_of(&(hk1, idx1, dests.clone(), weights.clone(), salt.clone(), version_key)); + + // Commit in epoch 0 + assert_ok!(SubtensorModule::commit_sub_weights(RuntimeOrigin::signed(hk1), netuid, subid, commit_hash)); + + // Advance one epoch, then reveal + step_epochs(1, netuid); + assert_ok!(SubtensorModule::reveal_sub_weights( + RuntimeOrigin::signed(hk1), + netuid, + subid, + dests.clone(), + weights.clone(), + salt, + version_key + )); + + // Verify weights stored under the chosen subid (normalized keeps max=0xFFFF here) + assert_eq!(Weights::::get(idx1, uid1), vec![(uid2, 88u16), (uid3, 0xFFFFu16)]); + + // And not under a different subid + assert!(Weights::::get(idx0, uid1).is_empty()); + }); +} + +#[test] +fn test_commit_reveal_above_subsubnet_count_fails() { + new_test_ext(1).execute_with(|| { + System::set_block_number(0); + + let netuid = NetUid::from(1); + let tempo: u16 = 13; + add_network(netuid, tempo, 0); + + // Two neurons: validator (caller) + miner + let hk1 = U256::from(55); let ck1 = U256::from(66); + let hk2 = U256::from(77); let ck2 = U256::from(88); + register_ok_neuron(netuid, hk1, ck1, 0); + register_ok_neuron(netuid, hk2, ck2, 0); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk1).unwrap(); // caller + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk2).unwrap(); + + // Enable commit-reveal path and make caller a validator with stake + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_weights_set_rate_limit(netuid, 5); + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hk1, &ck1, netuid, 1.into()); + + 
// Ensure there are two subsubnets: 0 and 1 + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + let subid_above = SubId::from(2u8); // non-existing sub-subnet + let idx2 = SubtensorModule::get_subsubnet_storage_index(netuid, subid_above); + + // Prepare payload and commit hash + let dests = vec![uid2]; + let weights = vec![88u16]; + let salt: Vec = vec![1,2,3,4,5,6,7,8]; + let version_key: u64 = 0; + let commit_hash: H256 = BlakeTwo256::hash_of(&(hk1, idx2, dests.clone(), weights.clone(), salt.clone(), version_key)); + + // Commit in epoch 0 + assert_noop!( + SubtensorModule::commit_sub_weights(RuntimeOrigin::signed(hk1), netuid, subid_above, commit_hash), + Error::::SubNetworkDoesNotExist + ); + + // Advance one epoch, then attempt to reveal + step_epochs(1, netuid); + assert_noop!( + SubtensorModule::reveal_sub_weights( + RuntimeOrigin::signed(hk1), + netuid, + subid_above, + dests.clone(), + weights.clone(), + salt, + version_key + ), + Error::::NoWeightsCommitFound + ); + + // Verify that weights didn't update + assert!(Weights::::get(idx2, uid1).is_empty()); + assert!(Weights::::get(idx2, uid2).is_empty()); + }); +} + +#[test] +fn test_reveal_crv3_commits_sub_success() { + new_test_ext(100).execute_with(|| { + System::set_block_number(0); + + let netuid = NetUid::from(1); + let subid = SubId::from(1u8); // write under sub-subnet #1 + let hotkey1: AccountId = U256::from(1); + let hotkey2: AccountId = U256::from(2); + let reveal_round: u64 = 1000; + + add_network(netuid, 5, 0); + // ensure we actually have subid=1 available + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + + // Register neurons and set up configs + register_ok_neuron(netuid, hotkey1, U256::from(3), 100_000); + register_ok_neuron(netuid, hotkey2, U256::from(4), 100_000); + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + 
assert_ok!(SubtensorModule::set_reveal_period(netuid, 3)); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1).expect("uid1"); + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey2).expect("uid2"); + + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::set_validator_permit_for_uid(netuid, uid2, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(3), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(4), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hotkey1, &U256::from(3), netuid, 1.into()); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &U256::from(4), netuid, 1.into()); + + let version_key = SubtensorModule::get_weights_version_key(netuid); + + // Payload (same as legacy; subid is provided to the extrinsic) + let payload = WeightsTlockPayload { + hotkey: hotkey1.encode(), + values: vec![10, 20], + uids: vec![uid1, uid2], + version_key, + }; + let serialized_payload = payload.encode(); + + // Public key + encrypt + let esk = [2; 32]; + let rng = ChaCha20Rng::seed_from_u64(0); + let pk_bytes = hex::decode("83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a").unwrap(); + let pub_key = ::PublicKeyGroup::deserialize_compressed(&*pk_bytes).unwrap(); + + let message = { + let mut hasher = sha2::Sha256::new(); + hasher.update(reveal_round.to_be_bytes()); + hasher.finalize().to_vec() + }; + let identity = Identity::new(b"", vec![message]); + + let ct = tle::(pub_key, esk, &serialized_payload, identity, rng).expect("encrypt"); + let mut commit_bytes = Vec::new(); + ct.serialize_compressed(&mut commit_bytes).expect("serialize"); + + // Commit (sub variant) + assert_ok!(SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey1), + netuid, + subid, + 
commit_bytes.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + )); + + // Inject drand pulse for the reveal round + let sig_bytes = hex::decode("b44679b9a59af2ec876b1a6b1ad52ea9b1615fc3982b19576350f93447cb1125e342b73a8dd2bacbe47e4b6b63ed5e39").unwrap(); + pallet_drand::Pulses::::insert( + reveal_round, + Pulse { + round: reveal_round, + randomness: vec![0; 32].try_into().unwrap(), + signature: sig_bytes.try_into().unwrap(), + }, + ); + + // Run epochs so the commit is processed + step_epochs(3, netuid); + + // Verify weights applied under the selected subid index + let idx = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + let weights_sparse = SubtensorModule::get_weights_sparse(idx); + let row = weights_sparse.get(uid1 as usize).cloned().unwrap_or_default(); + assert!(!row.is_empty(), "expected weights set for validator uid1 under subid"); + + // Compare rounded normalized weights to expected proportions (like legacy test) + let expected: Vec<(u16, I32F32)> = payload.uids.iter().zip(payload.values.iter()).map(|(&u,&v)|(u, I32F32::from_num(v))).collect(); + let total: I32F32 = row.iter().map(|(_, w)| *w).sum(); + let normalized: Vec<(u16, I32F32)> = row.iter().map(|&(u,w)| (u, w * I32F32::from_num(30) / total)).collect(); + + for ((ua, wa), (ub, wb)) in normalized.iter().zip(expected.iter()) { + assert_eq!(ua, ub); + let actual = wa.to_num::().round() as i64; + let expect = wb.to_num::(); + assert_ne!(actual, 0, "actual weight for uid {} is zero", ua); + assert_eq!(actual, expect, "weight mismatch for uid {}", ua); + } + }); +} + +#[test] +fn test_crv3_above_subsubnet_count_fails() { + new_test_ext(100).execute_with(|| { + System::set_block_number(0); + + let netuid = NetUid::from(1); + let subid_above = SubId::from(2u8); // non-existing sub-subnet + let hotkey1: AccountId = U256::from(1); + let hotkey2: AccountId = U256::from(2); + let reveal_round: u64 = 1000; + + add_network(netuid, 5, 
0); + // ensure we actually have subid=1 available + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + + // Register neurons and set up configs + register_ok_neuron(netuid, hotkey1, U256::from(3), 100_000); + register_ok_neuron(netuid, hotkey2, U256::from(4), 100_000); + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 3)); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1).expect("uid1"); + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey2).expect("uid2"); + + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(3), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hotkey1, &U256::from(3), netuid, 1.into()); + + let version_key = SubtensorModule::get_weights_version_key(netuid); + + // Payload (same as legacy; subid is provided to the extrinsic) + let payload = WeightsTlockPayload { + hotkey: hotkey1.encode(), + values: vec![10, 20], + uids: vec![uid1, uid2], + version_key, + }; + let serialized_payload = payload.encode(); + + // Public key + encrypt + let esk = [2; 32]; + let rng = ChaCha20Rng::seed_from_u64(0); + let pk_bytes = hex::decode("83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a").unwrap(); + let pub_key = ::PublicKeyGroup::deserialize_compressed(&*pk_bytes).unwrap(); + + let message = { + let mut hasher = sha2::Sha256::new(); + hasher.update(reveal_round.to_be_bytes()); + hasher.finalize().to_vec() + }; + let identity = Identity::new(b"", vec![message]); + + let ct = tle::(pub_key, esk, &serialized_payload, identity, rng).expect("encrypt"); + let mut commit_bytes = Vec::new(); + 
ct.serialize_compressed(&mut commit_bytes).expect("serialize"); + + // Commit (sub variant) + assert_noop!( + SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey1), + netuid, + subid_above, + commit_bytes.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + ), + Error::::SubNetworkDoesNotExist + ); + }); +} diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index 191ad5ce47..5cc624b644 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -341,7 +341,7 @@ fn test_reveal_weights_validate() { }); let commit_hash: H256 = - SubtensorModule::get_commit_hash(&who, netuid, &dests, &weights, &salt, version_key); + SubtensorModule::get_commit_hash(&who, NetUidStorageIndex::from(netuid), &dests, &weights, &salt, version_key); let commit_block = SubtensorModule::get_current_block_as_u64(); let (first_reveal_block, last_reveal_block) = SubtensorModule::get_reveal_blocks(netuid, commit_block); diff --git a/pallets/subtensor/src/transaction_extension.rs b/pallets/subtensor/src/transaction_extension.rs index 537562fab7..ba65c3afe5 100644 --- a/pallets/subtensor/src/transaction_extension.rs +++ b/pallets/subtensor/src/transaction_extension.rs @@ -16,7 +16,7 @@ use sp_runtime::transaction_validity::{ use sp_std::marker::PhantomData; use sp_std::vec::Vec; use subtensor_macros::freeze_struct; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex}; #[freeze_struct("2e02eb32e5cb25d3")] #[derive(Default, Encode, Decode, DecodeWithMemTracking, Clone, Eq, PartialEq, TypeInfo)] @@ -148,7 +148,7 @@ where if Self::check_weights_min_stake(who, *netuid) { let provided_hash = Pallet::::get_commit_hash( who, - *netuid, + NetUidStorageIndex::from(*netuid), uids, values, salt, @@ -185,7 +185,7 @@ where .map(|i| { Pallet::::get_commit_hash( who, - *netuid, + 
NetUidStorageIndex::from(*netuid), uids_list.get(i).unwrap_or(&Vec::new()), values_list.get(i).unwrap_or(&Vec::new()), salts_list.get(i).unwrap_or(&Vec::new()), From 100f6e20173360c7303cf89fee0626282c83bc6b Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 3 Sep 2025 13:09:11 -0400 Subject: [PATCH 18/39] Add rate limit test for committing timelocked weights --- pallets/subtensor/src/tests/subsubnet.rs | 90 ++++++++++++++++++++++++ 1 file changed, 90 insertions(+) diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 1761a9d595..e25edea950 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -1135,3 +1135,93 @@ fn test_crv3_above_subsubnet_count_fails() { ); }); } + +#[test] +fn test_do_commit_crv3_sub_weights_committing_too_fast() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1); + let subid = SubId::from(1u8); + let hotkey: AccountId = U256::from(1); + let commit_data_1: Vec = vec![1, 2, 3]; + let commit_data_2: Vec = vec![4, 5, 6]; + let reveal_round: u64 = 1000; + + add_network(netuid, 5, 0); + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); // allow subids {0,1} + + register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); + SubtensorModule::set_weights_set_rate_limit(netuid, 5); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + + let uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey).expect("uid"); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + SubtensorModule::set_last_update_for_uid(idx1, uid, 0); + + // make validator with stake + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_validator_permit_for_uid(netuid, uid, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(2), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &U256::from(2), netuid, 1.into()); + + // first commit OK on subid=1 + 
assert_ok!(SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey), + netuid, + subid, + commit_data_1.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + )); + + // immediate second commit on SAME subid blocked + assert_noop!( + SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey), + netuid, + subid, + commit_data_2.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + ), + Error::::CommittingWeightsTooFast + ); + + // BUT committing too soon on a DIFFERENT subid is allowed + let other_subid = SubId::from(0u8); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, other_subid); + SubtensorModule::set_last_update_for_uid(idx0, uid, 0); // baseline like above + assert_ok!(SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey), + netuid, + other_subid, + commit_data_2.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + )); + + // still too fast on original subid after 2 blocks + step_block(2); + assert_noop!( + SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey), + netuid, + subid, + commit_data_2.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + ), + Error::::CommittingWeightsTooFast + ); + + // after enough blocks, OK again on original subid + step_block(3); + assert_ok!(SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey), + netuid, + subid, + commit_data_2.try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + )); + }); +} From 88dd86c5cb96dfd1aff40d615162cdd8f5f7e3a5 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 3 Sep 2025 13:50:51 -0400 Subject: [PATCH 19/39] Add admin-until call to set desired subsubnet count and test --- 
pallets/admin-utils/src/lib.rs | 17 ++++++++- pallets/admin-utils/src/tests/mod.rs | 45 +++++++++++++++++++++++- pallets/subtensor/src/tests/epoch.rs | 2 +- pallets/subtensor/src/tests/subsubnet.rs | 4 +-- 4 files changed, 63 insertions(+), 5 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index abc5e7a443..2df34f42df 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -31,7 +31,7 @@ pub mod pallet { use pallet_subtensor::utils::rate_limiting::TransactionType; use sp_runtime::BoundedVec; use substrate_fixed::types::I96F32; - use subtensor_runtime_common::{NetUid, TaoCurrency}; + use subtensor_runtime_common::{NetUid, SubId, TaoCurrency}; /// The main data structure of the module. #[pallet::pallet] @@ -1591,6 +1591,21 @@ pub mod pallet { pallet_subtensor::Pallet::::set_owner_immune_neuron_limit(netuid, immune_neurons)?; Ok(()) } + + /// Sets the desired number of subsubnets in a subnet + #[pallet::call_index(73)] + #[pallet::weight(Weight::from_parts(15_000_000, 0) + .saturating_add(::DbWeight::get().reads(1_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)))] + pub fn sudo_set_desired_subsubnet_count( + origin: OriginFor, + netuid: NetUid, + subsub_count: SubId, + ) -> DispatchResult { + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::do_set_desired_subsubnet_count(netuid, subsub_count)?; + Ok(()) + } } } diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index bf1b115dd6..e8cb71a9b9 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -11,7 +11,7 @@ use pallet_subtensor::Event; use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{Get, Pair, U256, ed25519}; use substrate_fixed::types::I96F32; -use subtensor_runtime_common::{Currency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{Currency, NetUid, SubId, TaoCurrency}; use 
crate::Error; use crate::pallet::PrecompileEnable; @@ -1954,3 +1954,46 @@ fn test_sudo_set_commit_reveal_version() { ); }); } + +#[test] +fn test_sudo_set_desired_subsubnet_count() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + let ss_count_ok = SubId::from(8); + let ss_count_bad = SubId::from(9); + + let sn_owner = U256::from(1324); + add_network(netuid, 10); + // Set the Subnet Owner + SubnetOwner::::insert(netuid, sn_owner); + + assert_eq!( + AdminUtils::sudo_set_desired_subsubnet_count( + <::RuntimeOrigin>::signed(U256::from(1)), + netuid, + ss_count_ok + ), + Err(DispatchError::BadOrigin) + ); + assert_noop!( + AdminUtils::sudo_set_desired_subsubnet_count( + RuntimeOrigin::root(), + netuid, + ss_count_bad + ), + pallet_subtensor::Error::::InvalidValue + ); + + assert_ok!(AdminUtils::sudo_set_desired_subsubnet_count( + <::RuntimeOrigin>::root(), + netuid, + ss_count_ok + )); + + assert_ok!(AdminUtils::sudo_set_desired_subsubnet_count( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + ss_count_ok + )); + }); +} \ No newline at end of file diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index fa628013ac..1b29fb7118 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -1570,7 +1570,7 @@ fn test_active_stake() { } // Test that epoch masks out outdated weights and bonds of validators on deregistered servers. 
-// cargo test --package pallet-subtensor --lib -- tests::epoch::test_outdated_weights --exact --show-output +// #[test] fn test_outdated_weights() { new_test_ext(1).execute_with(|| { diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index e25edea950..c4c043b4f9 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -13,7 +13,7 @@ // - [x] do_set_desired tests // - [x] Emissions are split proportionally // - [x] Sum of split emissions is equal to rao_emission passed to epoch -// - [ ] Only subnet owner or root can set desired subsubnet count +// - [x] Only subnet owner or root can set desired subsubnet count (pallet admin test) // - [x] Weights can be set by subsubnet // - [x] Weights can be commited/revealed by subsubnet // - [x] Weights can be commited/revealed in crv3 by subsubnet @@ -21,7 +21,7 @@ // - [x] Prevent weight commitment/revealing above subsubnet_limit_in_force // - [x] Prevent weight commitment/revealing in crv3 above subsubnet_limit_in_force // - [x] When a miner is deregistered, their weights are cleaned across all subsubnets -// - [ ] Weight setting rate limiting is enforced by subsubnet +// - [x] Weight setting rate limiting is enforced by subsubnet // - [x] Bonds are applied per subsubnet // - [x] Incentives are per subsubnet // - [x] Per-subsubnet incentives are distributed proportionally to miner weights From 4b3d2d39d1bdcb0458d72cb231bfa1d1031b6094 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 3 Sep 2025 17:15:26 -0400 Subject: [PATCH 20/39] Add settable emission split between subsubnets --- pallets/admin-utils/src/lib.rs | 17 ++- pallets/admin-utils/src/tests/mod.rs | 4 +- pallets/subtensor/src/lib.rs | 4 + pallets/subtensor/src/macros/dispatches.rs | 2 +- pallets/subtensor/src/subnets/subsubnet.rs | 129 ++++++++++++----- pallets/subtensor/src/subnets/uids.rs | 2 +- pallets/subtensor/src/subnets/weights.rs | 12 +- 
pallets/subtensor/src/tests/epoch.rs | 2 +- pallets/subtensor/src/tests/subsubnet.rs | 155 ++++++++++++++++----- pallets/subtensor/src/tests/weights.rs | 10 +- 10 files changed, 258 insertions(+), 79 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 2df34f42df..ac01f5ae9a 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -9,7 +9,7 @@ pub use pallet::*; // - we could use a type parameter for `AuthorityId`, but there is // no sense for this as GRANDPA's `AuthorityId` is not a parameter -- it's always the same use sp_consensus_grandpa::AuthorityList; -use sp_runtime::{DispatchResult, RuntimeAppPublic, traits::Member}; +use sp_runtime::{DispatchResult, RuntimeAppPublic, Vec, traits::Member}; mod benchmarking; @@ -1606,6 +1606,21 @@ pub mod pallet { pallet_subtensor::Pallet::::do_set_desired_subsubnet_count(netuid, subsub_count)?; Ok(()) } + + /// Sets the emission split between subsubnets in a subnet + #[pallet::call_index(74)] + #[pallet::weight(Weight::from_parts(15_000_000, 0) + .saturating_add(::DbWeight::get().reads(1_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)))] + pub fn sudo_set_subsubnet_emission_split( + origin: OriginFor, + netuid: NetUid, + maybe_split: Option>, + ) -> DispatchResult { + pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + pallet_subtensor::Pallet::::do_set_emission_split(netuid, maybe_split)?; + Ok(()) + } } } diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index e8cb71a9b9..f5a6876ec3 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -1991,9 +1991,9 @@ fn test_sudo_set_desired_subsubnet_count() { )); assert_ok!(AdminUtils::sudo_set_desired_subsubnet_count( - <::RuntimeOrigin>::signed(sn_owner), + <::RuntimeOrigin>::signed(sn_owner), netuid, ss_count_ok )); }); -} \ No newline at end of file +} diff --git a/pallets/subtensor/src/lib.rs 
b/pallets/subtensor/src/lib.rs index 961109c200..a08587934c 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1835,6 +1835,10 @@ pub mod pallet { /// --- MAP ( netuid ) --> Current number of sub-subnets pub type SubsubnetCountCurrent = StorageMap<_, Twox64Concat, NetUid, SubId, ValueQuery, DefaultSubsubnetCount>; + #[pallet::storage] + /// --- MAP ( netuid ) --> Normalized vector of emission split proportion between subsubnets + pub type SubsubnetEmissionSplit = + StorageMap<_, Twox64Concat, NetUid, Vec, OptionQuery>; /// ================== /// ==== Genesis ===== diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 070c8ca366..9c423aa6fb 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -2263,7 +2263,7 @@ mod dispatches { /// /// * `netuid` (`u16`): /// - The u16 network identifier. - /// + /// /// * `subid` (`u8`): /// - The u8 subsubnet identifier. 
/// diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index cca8df95db..d72c696c8b 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -117,7 +117,8 @@ impl Pallet { /// /// - This function should be called in every block in run_counbase /// - Cleans up all sub-subnet maps if count is reduced - /// - Decreases current subsubnet count by no more than `GlobalSubsubnetDecreasePerSuperblock` + /// - Decreases or increases current subsubnet count by no more than + /// `GlobalSubsubnetDecreasePerSuperblock` /// pub fn update_subsubnet_counts_if_needed(current_block: u64) { // Run once per super-block @@ -128,59 +129,113 @@ impl Pallet { if rem == 0 { let old_count = u8::from(SubsubnetCountCurrent::::get(netuid)); let desired_count = u8::from(SubsubnetCountDesired::::get(netuid)); - let min_possible_count = old_count + let min_capped_count = old_count .saturating_sub(u8::from(GlobalSubsubnetDecreasePerSuperblock::::get())) .max(1); - let new_count = desired_count.max(min_possible_count); - - if old_count > new_count { - for subid in new_count..old_count { - let netuid_index = - Self::get_subsubnet_storage_index(*netuid, SubId::from(subid)); + let max_capped_count = old_count + .saturating_add(u8::from(GlobalSubsubnetDecreasePerSuperblock::::get())); + let new_count = desired_count.max(min_capped_count).min(max_capped_count); + + if old_count != new_count { + if old_count > new_count { + for subid in new_count..old_count { + let netuid_index = + Self::get_subsubnet_storage_index(*netuid, SubId::from(subid)); + + // Cleanup Weights + let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup Incentive + Incentive::::remove(netuid_index); + + // Cleanup LastUpdate + LastUpdate::::remove(netuid_index); + + // Cleanup Bonds + let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup WeightCommits + let _ = + 
WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup TimelockedWeightCommits + let _ = TimelockedWeightCommits::::clear_prefix( + netuid_index, + u32::MAX, + None, + ); + } + } - // Cleanup Weights - let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); + SubsubnetCountCurrent::::insert(netuid, SubId::from(new_count)); - // Cleanup Incentive - Incentive::::remove(netuid_index); + // Reset split back to even + SubsubnetEmissionSplit::::remove(netuid); + } + } + } + }); + } - // Cleanup LastUpdate - LastUpdate::::remove(netuid_index); + pub fn do_set_emission_split(netuid: NetUid, maybe_split: Option>) -> DispatchResult { + // Make sure the subnet exists + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); - // Cleanup Bonds - let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); + if let Some(split) = maybe_split { + // Check the length + ensure!(!split.is_empty(), Error::::InvalidValue); + ensure!( + split.len() <= u8::from(SubsubnetCountCurrent::::get(netuid)) as usize, + Error::::InvalidValue + ); - // Cleanup WeightCommits - let _ = WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + // Check that values add up to 65535 + let total: u64 = split.iter().map(|s| *s as u64).sum(); + ensure!(total <= u16::MAX as u64, Error::::InvalidValue); - // Cleanup TimelockedWeightCommits - let _ = TimelockedWeightCommits::::clear_prefix( - netuid_index, - u32::MAX, - None, - ); - } - } + SubsubnetEmissionSplit::::insert(netuid, split); + } else { + SubsubnetEmissionSplit::::remove(netuid); + } - SubsubnetCountCurrent::::insert(netuid, SubId::from(new_count)); - } - } - }); + Ok(()) } /// Split alpha emission in sub-subnet proportions - /// Currently splits evenly between sub-subnets, but the implementation - /// may change in the future + /// stored in SubsubnetEmissionSplit /// pub fn split_emissions(netuid: NetUid, alpha: AlphaCurrency) -> Vec { let subsubnet_count = 
u64::from(SubsubnetCountCurrent::::get(netuid)); + let maybe_split = SubsubnetEmissionSplit::::get(netuid); + + // Unset split means even distribution + let mut result: Vec = if let Some(split) = maybe_split { + split + .iter() + .map(|s| { + AlphaCurrency::from( + (u64::from(alpha) as u128) + .saturating_mul(*s as u128) + .safe_div(u16::MAX as u128) as u64, + ) + }) + .collect() + } else { + let per_subsubnet = u64::from(alpha).safe_div(subsubnet_count); + vec![AlphaCurrency::from(per_subsubnet); subsubnet_count as usize] + }; - // If there's any rounding error, credit it to subsubnet 0 - let per_subsubnet = u64::from(alpha).safe_div(subsubnet_count); - let rounding_err = - u64::from(alpha).saturating_sub(per_subsubnet.saturating_mul(subsubnet_count)); + // Trim / extend and pad with zeroes if result is shorter than subsubnet_count + if result.len() != subsubnet_count as usize { + result.resize(subsubnet_count as usize, 0u64.into()); // pad with AlphaCurrency::from(0) + } - let mut result = vec![AlphaCurrency::from(per_subsubnet); subsubnet_count as usize]; + // If there's any rounding error or lost due to truncation emission, credit it to subsubnet 0 + let rounding_err = + u64::from(alpha).saturating_sub(result.iter().map(|s| u64::from(*s)).sum()); if let Some(cell) = result.first_mut() { *cell = cell.saturating_add(AlphaCurrency::from(rounding_err)); } diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index ce0b14cc1c..2ec6869bad 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -16,7 +16,7 @@ impl Pallet { } } - /// Resets the trust, emission, consensus, incentive, dividends, bonds, and weights of + /// Resets the trust, emission, consensus, incentive, dividends, bonds, and weights of /// the neuron to default pub fn clear_neuron(netuid: NetUid, neuron_uid: u16) { let neuron_index: usize = neuron_uid.into(); diff --git a/pallets/subtensor/src/subnets/weights.rs 
b/pallets/subtensor/src/subnets/weights.rs index 0c1ad9efd7..b751630d85 100644 --- a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -128,7 +128,11 @@ impl Pallet { *maybe_commits = Some(commits); // 11. Emit the WeightsCommitted event - Self::deposit_event(Event::WeightsCommitted(who.clone(), netuid_index, commit_hash)); + Self::deposit_event(Event::WeightsCommitted( + who.clone(), + netuid_index, + commit_hash, + )); // 12. Update the last commit block for the hotkey's UID. Self::set_last_update_for_uid(netuid_index, neuron_uid, commit_block); @@ -531,7 +535,11 @@ impl Pallet { )?; // --- 13. Emit the WeightsRevealed event. - Self::deposit_event(Event::WeightsRevealed(who.clone(), netuid_index, provided_hash)); + Self::deposit_event(Event::WeightsRevealed( + who.clone(), + netuid_index, + provided_hash, + )); // --- 14. Return ok. Ok(()) diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index 1b29fb7118..fec978a51d 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -1570,7 +1570,7 @@ fn test_active_stake() { } // Test that epoch masks out outdated weights and bonds of validators on deregistered servers. 
-// +// #[test] fn test_outdated_weights() { new_test_ext(1).execute_with(|| { diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index c4c043b4f9..78fbefa681 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -27,12 +27,15 @@ // - [x] Per-subsubnet incentives are distributed proportionally to miner weights // - [x] Subsubnet limit can be set up to 8 (with admin pallet) // - [x] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block +// - [ ] When subsubnet limit is increased, increase is GlobalSubsubnetDecreasePerSuperblock per super-block // - [x] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared // - [ ] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms // - [ ] Subnet epoch terms persist in state // - [x] Subsubnet epoch terms persist in state // - [ ] "Yuma Emergency Mode" (consensus sum is 0 for a subsubnet), emission distributed by stake // - [x] Miner with no weights on any subsubnet receives no reward +// - [ ] SubsubnetEmissionSplit is reset on super-block on subsubnet count increase +// - [ ] SubsubnetEmissionSplit is reset on super-block on subsubnet count decrease use super::mock::*; use crate::coinbase::reveal_commits::WeightsTlockPayload; @@ -49,13 +52,11 @@ use sha2::Digest; use sp_core::{H256, U256}; use sp_runtime::traits::{BlakeTwo256, Hash}; use sp_std::collections::vec_deque::VecDeque; -use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; use substrate_fixed::types::I32F32; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; use tle::{ - curves::drand::TinyBLS381, - ibe::fullident::Identity, - stream_ciphers::AESGCMStreamCipherProvider, - tlock::tle, + curves::drand::TinyBLS381, ibe::fullident::Identity, + stream_ciphers::AESGCMStreamCipherProvider, tlock::tle, }; use 
w3f_bls::EngineBLS; @@ -117,8 +118,8 @@ fn test_netuid_and_subnet_from_index() { let (netuid, subid) = SubtensorModule::get_netuid_and_subid(NetUidStorageIndex::from(*netuid_index)) .unwrap(); - assert_eq!(netuid, NetUid::from(expected_netuid as u16)); - assert_eq!(subid, SubId::from(expected_subid as u8)); + assert_eq!(netuid, NetUid::from(expected_netuid)); + assert_eq!(subid, SubId::from(expected_subid)); }); }); } @@ -420,6 +421,26 @@ fn split_emissions_rounding_to_first() { }); } +#[test] +fn split_emissions_fibbonacci() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(5u16); + SubsubnetCountCurrent::::insert(netuid, SubId::from(5u8)); // 5 sub-subnets + SubsubnetEmissionSplit::::insert(netuid, vec![3450, 6899, 10348, 17247, 27594]); + let out = SubtensorModule::split_emissions(netuid, AlphaCurrency::from(19u64)); + assert_eq!( + out, + vec![ + AlphaCurrency::from(1u64), + AlphaCurrency::from(2u64), + AlphaCurrency::from(3u64), + AlphaCurrency::from(5u64), + AlphaCurrency::from(8u64), + ] + ); + }); +} + /// Seeds a 2-neuron and 2-subsubnet subnet so `epoch_subsubnet` produces non-zero /// incentives & dividends. /// Returns the sub-subnet storage index. @@ -433,8 +454,8 @@ pub fn mock_epoch_state(netuid: NetUid, ck0: U256, hk0: U256, ck1: U256, hk1: U2 SubnetworkN::::insert(netuid, 2); // Register two neurons (UID 0,1) → keys drive `get_subnetwork_n`. - Keys::::insert(netuid, 0u16, hk0.clone()); - Keys::::insert(netuid, 1u16, hk1.clone()); + Keys::::insert(netuid, 0u16, hk0); + Keys::::insert(netuid, 1u16, hk1); // Make both ACTIVE: recent updates & old registrations. 
Tempo::::insert(netuid, 1u16); @@ -479,7 +500,7 @@ pub fn mock_3_neurons(netuid: NetUid, hk: U256) { let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); SubnetworkN::::insert(netuid, 3); - Keys::::insert(netuid, 2u16, hk.clone()); + Keys::::insert(netuid, 2u16, hk); LastUpdate::::insert(idx0, vec![2, 2, 2]); LastUpdate::::insert(idx1, vec![2, 2, 2]); BlockAtRegistration::::insert(netuid, 2, 1u64); @@ -646,7 +667,14 @@ fn neuron_dereg_cleans_weights_across_subids() { SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); // Setup initial map values - Emission::::insert(netuid, vec![AlphaCurrency::from(1u64), AlphaCurrency::from(9u64), AlphaCurrency::from(3u64)]); + Emission::::insert( + netuid, + vec![ + AlphaCurrency::from(1u64), + AlphaCurrency::from(9u64), + AlphaCurrency::from(3u64), + ], + ); Trust::::insert(netuid, vec![11u16, 99u16, 33u16]); Consensus::::insert(netuid, vec![21u16, 88u16, 44u16]); Dividends::::insert(netuid, vec![7u16, 77u16, 17u16]); @@ -722,7 +750,10 @@ fn clear_neuron_handles_absent_rows_gracefully() { SubtensorModule::clear_neuron(netuid, neuron_uid); // All zeroed at index 0 - assert_eq!(Emission::::get(netuid), vec![AlphaCurrency::from(0u64)]); + assert_eq!( + Emission::::get(netuid), + vec![AlphaCurrency::from(0u64)] + ); assert_eq!(Trust::::get(netuid), vec![0u16]); assert_eq!(Consensus::::get(netuid), vec![0u16]); assert_eq!(Dividends::::get(netuid), vec![0u16]); @@ -755,7 +786,12 @@ fn test_set_sub_weights_happy_path_sets_row_under_subid() { SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); - SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hk1, &ck1, netuid, 1.into()); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + 1.into(), + ); // Have at least two sub-subnets; write under subid = 1 
SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); @@ -775,7 +811,10 @@ fn test_set_sub_weights_happy_path_sets_row_under_subid() { // Verify row exists under the chosen subid and not under a different subid let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, subid); - assert_eq!(Weights::::get(idx1, uid1), vec![(uid2, 88u16), (uid3, 0xFFFF)]); + assert_eq!( + Weights::::get(idx1, uid1), + vec![(uid2, 88u16), (uid3, 0xFFFF)] + ); let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0u8)); assert!(Weights::::get(idx0, uid1).is_empty()); @@ -804,7 +843,12 @@ fn test_set_sub_weights_above_subsubnet_count_fails() { SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); - SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hk1, &ck1, netuid, 1.into()); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + 1.into(), + ); // Have exactly two sub-subnets; write under subid = 1 SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); @@ -837,9 +881,12 @@ fn test_commit_reveal_sub_weights_ok() { add_network(netuid, tempo, 0); // Three neurons: validator (caller) + two destinations - let hk1 = U256::from(55); let ck1 = U256::from(66); - let hk2 = U256::from(77); let ck2 = U256::from(88); - let hk3 = U256::from(99); let ck3 = U256::from(111); + let hk1 = U256::from(55); + let ck1 = U256::from(66); + let hk2 = U256::from(77); + let ck2 = U256::from(88); + let hk3 = U256::from(99); + let ck3 = U256::from(111); register_ok_neuron(netuid, hk1, ck1, 0); register_ok_neuron(netuid, hk2, ck2, 0); register_ok_neuron(netuid, hk3, ck3, 0); @@ -854,7 +901,12 @@ fn test_commit_reveal_sub_weights_ok() { SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); 
SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); - SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hk1, &ck1, netuid, 1.into()); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + 1.into(), + ); // Ensure sub-subnet exists; write under subid = 1 SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); @@ -865,12 +917,24 @@ fn test_commit_reveal_sub_weights_ok() { // Prepare payload and commit hash (include subid!) let dests = vec![uid2, uid3]; let weights = vec![88u16, 0xFFFFu16]; - let salt: Vec = vec![1,2,3,4,5,6,7,8]; + let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; let version_key: u64 = 0; - let commit_hash: H256 = BlakeTwo256::hash_of(&(hk1, idx1, dests.clone(), weights.clone(), salt.clone(), version_key)); + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hk1, + idx1, + dests.clone(), + weights.clone(), + salt.clone(), + version_key, + )); // Commit in epoch 0 - assert_ok!(SubtensorModule::commit_sub_weights(RuntimeOrigin::signed(hk1), netuid, subid, commit_hash)); + assert_ok!(SubtensorModule::commit_sub_weights( + RuntimeOrigin::signed(hk1), + netuid, + subid, + commit_hash + )); // Advance one epoch, then reveal step_epochs(1, netuid); @@ -885,7 +949,10 @@ fn test_commit_reveal_sub_weights_ok() { )); // Verify weights stored under the chosen subid (normalized keeps max=0xFFFF here) - assert_eq!(Weights::::get(idx1, uid1), vec![(uid2, 88u16), (uid3, 0xFFFFu16)]); + assert_eq!( + Weights::::get(idx1, uid1), + vec![(uid2, 88u16), (uid3, 0xFFFFu16)] + ); // And not under a different subid assert!(Weights::::get(idx0, uid1).is_empty()); @@ -902,8 +969,10 @@ fn test_commit_reveal_above_subsubnet_count_fails() { add_network(netuid, tempo, 0); // Two neurons: validator (caller) + miner - let hk1 = U256::from(55); let ck1 = U256::from(66); - let hk2 = U256::from(77); let ck2 = U256::from(88); + let hk1 = U256::from(55); + let ck1 = U256::from(66); + let hk2 = U256::from(77); + let ck2 = 
U256::from(88); register_ok_neuron(netuid, hk1, ck1, 0); register_ok_neuron(netuid, hk2, ck2, 0); @@ -916,7 +985,12 @@ fn test_commit_reveal_above_subsubnet_count_fails() { SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); - SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hk1, &ck1, netuid, 1.into()); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + 1.into(), + ); // Ensure there are two subsubnets: 0 and 1 SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); @@ -926,13 +1000,25 @@ fn test_commit_reveal_above_subsubnet_count_fails() { // Prepare payload and commit hash let dests = vec![uid2]; let weights = vec![88u16]; - let salt: Vec = vec![1,2,3,4,5,6,7,8]; + let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; let version_key: u64 = 0; - let commit_hash: H256 = BlakeTwo256::hash_of(&(hk1, idx2, dests.clone(), weights.clone(), salt.clone(), version_key)); + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hk1, + idx2, + dests.clone(), + weights.clone(), + salt.clone(), + version_key, + )); // Commit in epoch 0 assert_noop!( - SubtensorModule::commit_sub_weights(RuntimeOrigin::signed(hk1), netuid, subid_above, commit_hash), + SubtensorModule::commit_sub_weights( + RuntimeOrigin::signed(hk1), + netuid, + subid_above, + commit_hash + ), Error::::SubNetworkDoesNotExist ); @@ -1057,8 +1143,8 @@ fn test_reveal_crv3_commits_sub_success() { assert_eq!(ua, ub); let actual = wa.to_num::().round() as i64; let expect = wb.to_num::(); - assert_ne!(actual, 0, "actual weight for uid {} is zero", ua); - assert_eq!(actual, expect, "weight mismatch for uid {}", ua); + assert_ne!(actual, 0, "actual weight for uid {ua} is zero"); + assert_eq!(actual, expect, "weight mismatch for uid {ua}"); } }); } @@ -1161,7 +1247,12 @@ fn test_do_commit_crv3_sub_weights_committing_too_fast() { 
SubtensorModule::set_stake_threshold(0); SubtensorModule::set_validator_permit_for_uid(netuid, uid, true); SubtensorModule::add_balance_to_coldkey_account(&U256::from(2), 1); - SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hotkey, &U256::from(2), netuid, 1.into()); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &U256::from(2), + netuid, + 1.into(), + ); // first commit OK on subid=1 assert_ok!(SubtensorModule::commit_timelocked_sub_weights( diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index 5cc624b644..bc9af5cf07 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -340,8 +340,14 @@ fn test_reveal_weights_validate() { version_key, }); - let commit_hash: H256 = - SubtensorModule::get_commit_hash(&who, NetUidStorageIndex::from(netuid), &dests, &weights, &salt, version_key); + let commit_hash: H256 = SubtensorModule::get_commit_hash( + &who, + NetUidStorageIndex::from(netuid), + &dests, + &weights, + &salt, + version_key, + ); let commit_block = SubtensorModule::get_current_block_as_u64(); let (first_reveal_block, last_reveal_block) = SubtensorModule::get_reveal_blocks(netuid, commit_block); From afa8129482f26054522ecdfb0d49925f56519a38 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 3 Sep 2025 17:59:43 -0400 Subject: [PATCH 21/39] Add per-subsubnet RPC for metagraph --- pallets/subtensor/rpc/src/lib.rs | 64 +++++++++++++- pallets/subtensor/runtime-api/src/lib.rs | 5 +- pallets/subtensor/src/rpc_info/metagraph.rs | 98 ++++++++++++++++++++- runtime/src/lib.rs | 11 +++ 4 files changed, 175 insertions(+), 3 deletions(-) diff --git a/pallets/subtensor/rpc/src/lib.rs b/pallets/subtensor/rpc/src/lib.rs index e3d5d8f1c1..ea46695142 100644 --- a/pallets/subtensor/rpc/src/lib.rs +++ b/pallets/subtensor/rpc/src/lib.rs @@ -9,7 +9,7 @@ use jsonrpsee::{ use sp_blockchain::HeaderBackend; use sp_runtime::{AccountId32, 
traits::Block as BlockT}; use std::sync::Arc; -use subtensor_runtime_common::{NetUid, TaoCurrency}; +use subtensor_runtime_common::{NetUid, SubId, TaoCurrency}; use sp_api::ProvideRuntimeApi; @@ -72,6 +72,15 @@ pub trait SubtensorCustomApi { fn get_all_metagraphs(&self, at: Option) -> RpcResult>; #[method(name = "subnetInfo_getMetagraph")] fn get_metagraph(&self, netuid: NetUid, at: Option) -> RpcResult>; + #[method(name = "subnetInfo_getAllSubMetagraphs")] + fn get_all_submetagraphs(&self, at: Option) -> RpcResult>; + #[method(name = "subnetInfo_getSubMetagraph")] + fn get_submetagraph( + &self, + netuid: NetUid, + subid: SubId, + at: Option, + ) -> RpcResult>; #[method(name = "subnetInfo_getSubnetState")] fn get_subnet_state(&self, netuid: NetUid, at: Option) -> RpcResult>; #[method(name = "subnetInfo_getLockCost")] @@ -83,6 +92,14 @@ pub trait SubtensorCustomApi { metagraph_index: Vec, at: Option, ) -> RpcResult>; + #[method(name = "subnetInfo_getSelectiveSubMetagraph")] + fn get_selective_submetagraph( + &self, + netuid: NetUid, + subid: SubId, + metagraph_index: Vec, + at: Option, + ) -> RpcResult>; } pub struct SubtensorCustom { @@ -319,6 +336,16 @@ where } } + fn get_all_submetagraphs(&self, at: Option<::Hash>) -> RpcResult> { + let api = self.client.runtime_api(); + let at = at.unwrap_or_else(|| self.client.info().best_hash); + + match api.get_all_submetagraphs(at) { + Ok(result) => Ok(result.encode()), + Err(e) => Err(Error::RuntimeError(format!("Unable to get metagraps: {e:?}")).into()), + } + } + fn get_dynamic_info( &self, netuid: NetUid, @@ -352,6 +379,23 @@ where } } + fn get_submetagraph( + &self, + netuid: NetUid, + subid: SubId, + at: Option<::Hash>, + ) -> RpcResult> { + let api = self.client.runtime_api(); + let at = at.unwrap_or_else(|| self.client.info().best_hash); + match api.get_submetagraph(at, netuid, subid) { + Ok(result) => Ok(result.encode()), + Err(e) => Err(Error::RuntimeError(format!( + "Unable to get dynamic subnets info: {e:?}" + 
)) + .into()), + } + } + fn get_subnet_state( &self, netuid: NetUid, @@ -427,4 +471,22 @@ where } } } + + fn get_selective_submetagraph( + &self, + netuid: NetUid, + subid: SubId, + metagraph_index: Vec, + at: Option<::Hash>, + ) -> RpcResult> { + let api = self.client.runtime_api(); + let at = at.unwrap_or_else(|| self.client.info().best_hash); + + match api.get_selective_submetagraph(at, netuid, subid, metagraph_index) { + Ok(result) => Ok(result.encode()), + Err(e) => { + Err(Error::RuntimeError(format!("Unable to get selective metagraph: {e:?}")).into()) + } + } + } } diff --git a/pallets/subtensor/runtime-api/src/lib.rs b/pallets/subtensor/runtime-api/src/lib.rs index 42d12eb686..3ec76df45f 100644 --- a/pallets/subtensor/runtime-api/src/lib.rs +++ b/pallets/subtensor/runtime-api/src/lib.rs @@ -12,7 +12,7 @@ use pallet_subtensor::rpc_info::{ subnet_info::{SubnetHyperparams, SubnetHyperparamsV2, SubnetInfo, SubnetInfov2}, }; use sp_runtime::AccountId32; -use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, SubId, TaoCurrency}; // Here we declare the runtime API. It is implemented it the `impl` block in // src/neuron_info.rs, src/subnet_info.rs, and src/delegate_info.rs @@ -40,9 +40,12 @@ sp_api::decl_runtime_apis! 
{ fn get_all_dynamic_info() -> Vec>>; fn get_all_metagraphs() -> Vec>>; fn get_metagraph(netuid: NetUid) -> Option>; + fn get_all_submetagraphs() -> Vec>>; + fn get_submetagraph(netuid: NetUid, subid: SubId) -> Option>; fn get_dynamic_info(netuid: NetUid) -> Option>; fn get_subnet_state(netuid: NetUid) -> Option>; fn get_selective_metagraph(netuid: NetUid, metagraph_indexes: Vec) -> Option>; + fn get_selective_submetagraph(netuid: NetUid, subid: SubId, metagraph_indexes: Vec) -> Option>; } pub trait StakeInfoRuntimeApi { diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index e65ddf0696..0f6fa067c9 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -7,7 +7,7 @@ use frame_support::pallet_prelude::{Decode, Encode}; use substrate_fixed::types::I64F64; use substrate_fixed::types::I96F32; use subtensor_macros::freeze_struct; -use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, SubId, TaoCurrency}; #[freeze_struct("6fc49d5a7dc0e339")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -796,6 +796,45 @@ impl Pallet { metagraphs } + pub fn get_submetagraph(netuid: NetUid, subid: SubId) -> Option> { + if Self::ensure_subsubnet_exists(netuid, subid).is_err() { + return None; + } + + // Get netuid metagraph + let maybe_meta = Self::get_metagraph(netuid); + if let Some(mut meta) = maybe_meta { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + + // Update with subsubnet information + meta.netuid = NetUid::from(u16::from(netuid_index)).into(); + meta.last_update = LastUpdate::::get(netuid_index) + .into_iter() + .map(Compact::from) + .collect(); + meta.incentives = Incentive::::get(netuid_index) + .into_iter() + .map(Compact::from) + .collect(); + + Some(meta) + } else { + None + } + } + + pub fn get_all_submetagraphs() 
-> Vec>> { + let netuids = Self::get_all_subnet_netuids(); + let mut metagraphs = Vec::>>::new(); + for netuid in netuids.clone().iter() { + let subsub_count = u8::from(SubsubnetCountCurrent::::get(netuid)); + for subid in 0..subsub_count { + metagraphs.push(Self::get_submetagraph(*netuid, SubId::from(subid))); + } + } + metagraphs + } + pub fn get_selective_metagraph( netuid: NetUid, metagraph_indexes: Vec, @@ -812,6 +851,23 @@ impl Pallet { } } + pub fn get_selective_submetagraph( + netuid: NetUid, + subid: SubId, + metagraph_indexes: Vec, + ) -> Option> { + if !Self::if_subnet_exist(netuid) { + None + } else { + let mut result = SelectiveMetagraph::default(); + for index in metagraph_indexes.iter() { + let value = Self::get_single_selective_submetagraph(netuid, subid, *index); + result.merge_value(&value, *index as usize); + } + Some(result) + } + } + fn get_single_selective_metagraph( netuid: NetUid, metagraph_index: u16, @@ -1375,6 +1431,46 @@ impl Pallet { } } + fn get_single_selective_submetagraph( + netuid: NetUid, + subid: SubId, + metagraph_index: u16, + ) -> SelectiveMetagraph { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + + // Default to netuid, replace as needed for subid + match SelectiveMetagraphIndex::from_index(metagraph_index as usize) { + Some(SelectiveMetagraphIndex::Incentives) => SelectiveMetagraph { + netuid: netuid.into(), + incentives: Some( + Incentive::::get(NetUidStorageIndex::from(netuid)) + .into_iter() + .map(Compact::from) + .collect(), + ), + ..Default::default() + }, + + Some(SelectiveMetagraphIndex::LastUpdate) => SelectiveMetagraph { + netuid: netuid.into(), + last_update: Some( + LastUpdate::::get(NetUidStorageIndex::from(netuid)) + .into_iter() + .map(Compact::from) + .collect(), + ), + ..Default::default() + }, + + _ => { + let mut meta = Self::get_single_selective_metagraph(netuid, metagraph_index); + // Replace netuid with index + meta.netuid = NetUid::from(u16::from(netuid_index)).into(); + meta 
+ } + } + } + fn get_validators(netuid: NetUid) -> SelectiveMetagraph { let stake_threshold = Self::get_stake_threshold(); let hotkeys: Vec<(u16, T::AccountId)> = diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 7675c962c6..7d1b701171 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -2314,6 +2314,10 @@ impl_runtime_apis! { SubtensorModule::get_metagraph(netuid) } + fn get_submetagraph(netuid: NetUid, subid: SubId) -> Option> { + SubtensorModule::get_submetagraph(netuid, subid) + } + fn get_subnet_state(netuid: NetUid) -> Option> { SubtensorModule::get_subnet_state(netuid) } @@ -2322,6 +2326,10 @@ impl_runtime_apis! { SubtensorModule::get_all_metagraphs() } + fn get_all_submetagraphs() -> Vec>> { + SubtensorModule::get_all_submetagraphs() + } + fn get_all_dynamic_info() -> Vec>> { SubtensorModule::get_all_dynamic_info() } @@ -2330,6 +2338,9 @@ impl_runtime_apis! { SubtensorModule::get_selective_metagraph(netuid, metagraph_indexes) } + fn get_selective_submetagraph(netuid: NetUid, subid: SubId, metagraph_indexes: Vec) -> Option> { + SubtensorModule::get_selective_submetagraph(netuid, subid, metagraph_indexes) + } } impl subtensor_custom_rpc_runtime_api::StakeInfoRuntimeApi for Runtime { From 452390d97667eca8da245e192f51a86ede84b913 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 08:25:00 -0400 Subject: [PATCH 22/39] Use epoch index for super-block calculation --- pallets/subtensor/src/subnets/subsubnet.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index d72c696c8b..9c2ba7086f 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -124,8 +124,8 @@ impl Pallet { // Run once per super-block let super_block_tempos = u64::from(SuperBlockTempos::::get()); Self::get_all_subnet_netuids().iter().for_each(|netuid| { - let super_block = 
super_block_tempos.saturating_mul(u64::from(Tempo::::get(netuid))); - if let Some(rem) = current_block.checked_rem(super_block) { + let epoch_index = Self::get_epoch_index(*netuid, current_block); + if let Some(rem) = epoch_index.checked_rem(super_block_tempos) { if rem == 0 { let old_count = u8::from(SubsubnetCountCurrent::::get(netuid)); let desired_count = u8::from(SubsubnetCountDesired::::get(netuid)); From 23537679b8d4f326d6d3d504883544394f440aba Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 11:50:40 -0400 Subject: [PATCH 23/39] Fix super-block decrease test, add super-block increase test --- pallets/subtensor/src/subnets/subsubnet.rs | 4 +-- pallets/subtensor/src/tests/subsubnet.rs | 39 ++++++++++++++++++---- 2 files changed, 35 insertions(+), 8 deletions(-) diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 9c2ba7086f..cdee6db745 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -124,8 +124,8 @@ impl Pallet { // Run once per super-block let super_block_tempos = u64::from(SuperBlockTempos::::get()); Self::get_all_subnet_netuids().iter().for_each(|netuid| { - let epoch_index = Self::get_epoch_index(*netuid, current_block); - if let Some(rem) = epoch_index.checked_rem(super_block_tempos) { + let super_block = super_block_tempos.saturating_mul(u64::from(Tempo::::get(netuid))); + if let Some(rem) = current_block.saturating_add(u16::from(*netuid) as u64).checked_rem(super_block) { if rem == 0 { let old_count = u8::from(SubsubnetCountCurrent::::get(netuid)); let desired_count = u8::from(SubsubnetCountDesired::::get(netuid)); diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 78fbefa681..2c1f4f2051 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -287,17 +287,17 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { 
let netuid = NetUid::from(42u16); NetworksAdded::::insert(NetUid::from(42u16), true); - // super_block = SuperBlockTempos() * Tempo(netuid) - Tempo::::insert(netuid, 1u16); + // super_block = SuperBlockTempos() * Tempo(netuid) - netuid + Tempo::::insert(netuid, 360u16); let super_block = - u64::from(SuperBlockTempos::::get()) * u64::from(Tempo::::get(netuid)); + u64::from(SuperBlockTempos::::get()) * u64::from(Tempo::::get(netuid)) - u16::from(netuid) as u64; // Choose counts so result is deterministic for ANY decrease-per-superblock. // Let dec = GlobalSubsubnetDecreasePerSuperblock(); set old = dec + 3. let dec: u8 = u8::from(GlobalSubsubnetDecreasePerSuperblock::::get()); - let old = SubId::from(dec.saturating_add(3)); // ≥3 + let old = SubId::from(dec.saturating_add(3)); let desired = SubId::from(1u8); - // min_possible = max(old - dec, 1) = 3 → new_count = 3 + // min_capped = max(old - dec, 1) = 3 => new_count = 3 SubsubnetCountCurrent::::insert(netuid, old); SubsubnetCountDesired::::insert(netuid, desired); @@ -336,7 +336,7 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { ); // Act exactly on a super-block boundary - SubtensorModule::update_subsubnet_counts_if_needed(2 * super_block); + SubtensorModule::update_subsubnet_counts_if_needed(super_block); // New count is 3 assert_eq!(SubsubnetCountCurrent::::get(netuid), SubId::from(3u8)); @@ -363,6 +363,33 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { }); } +#[test] +fn update_subsubnet_counts_increases_on_superblock() { + new_test_ext(1).execute_with(|| { + // Base subnet exists + let netuid = NetUid::from(42u16); + NetworksAdded::::insert(NetUid::from(42u16), true); + + // super_block = SuperBlockTempos() * Tempo(netuid) - netuid + Tempo::::insert(netuid, 360u16); + let super_block = + u64::from(SuperBlockTempos::::get()) * u64::from(Tempo::::get(netuid)) - u16::from(netuid) as u64; + + // Choose counts so result is deterministic for ANY increase-per-superblock. 
+ let inc: u8 = u8::from(GlobalSubsubnetDecreasePerSuperblock::::get()); + let old = SubId::from(1u8); + let desired = SubId::from(5u8); + SubsubnetCountCurrent::::insert(netuid, old); + SubsubnetCountDesired::::insert(netuid, desired); + + // Act exactly on a super-block boundary + SubtensorModule::update_subsubnet_counts_if_needed(super_block); + + // New count is old + inc + assert_eq!(SubsubnetCountCurrent::::get(netuid), SubId::from(1 + inc)); + }); +} + #[test] fn update_subsubnet_counts_no_change_when_not_superblock() { new_test_ext(1).execute_with(|| { From 0eaf230f6a7d73c2b84581cf5a76506122d00d19 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 11:54:42 -0400 Subject: [PATCH 24/39] Add testing of SubsubnetEmissionSplit reset --- pallets/subtensor/src/subnets/subsubnet.rs | 5 +++- pallets/subtensor/src/tests/subsubnet.rs | 33 ++++++++++++++++------ 2 files changed, 29 insertions(+), 9 deletions(-) diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index cdee6db745..63f2aa0a0d 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -125,7 +125,10 @@ impl Pallet { let super_block_tempos = u64::from(SuperBlockTempos::::get()); Self::get_all_subnet_netuids().iter().for_each(|netuid| { let super_block = super_block_tempos.saturating_mul(u64::from(Tempo::::get(netuid))); - if let Some(rem) = current_block.saturating_add(u16::from(*netuid) as u64).checked_rem(super_block) { + if let Some(rem) = current_block + .saturating_add(u16::from(*netuid) as u64) + .checked_rem(super_block) + { if rem == 0 { let old_count = u8::from(SubsubnetCountCurrent::::get(netuid)); let desired_count = u8::from(SubsubnetCountDesired::::get(netuid)); diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 2c1f4f2051..7a51da0768 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ 
b/pallets/subtensor/src/tests/subsubnet.rs @@ -27,15 +27,15 @@ // - [x] Per-subsubnet incentives are distributed proportionally to miner weights // - [x] Subsubnet limit can be set up to 8 (with admin pallet) // - [x] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block -// - [ ] When subsubnet limit is increased, increase is GlobalSubsubnetDecreasePerSuperblock per super-block +// - [x] When subsubnet limit is increased, increase is GlobalSubsubnetDecreasePerSuperblock per super-block // - [x] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared // - [ ] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms // - [ ] Subnet epoch terms persist in state // - [x] Subsubnet epoch terms persist in state // - [ ] "Yuma Emergency Mode" (consensus sum is 0 for a subsubnet), emission distributed by stake // - [x] Miner with no weights on any subsubnet receives no reward -// - [ ] SubsubnetEmissionSplit is reset on super-block on subsubnet count increase -// - [ ] SubsubnetEmissionSplit is reset on super-block on subsubnet count decrease +// - [x] SubsubnetEmissionSplit is reset on super-block on subsubnet count increase +// - [x] SubsubnetEmissionSplit is reset on super-block on subsubnet count decrease use super::mock::*; use crate::coinbase::reveal_commits::WeightsTlockPayload; @@ -289,8 +289,9 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { // super_block = SuperBlockTempos() * Tempo(netuid) - netuid Tempo::::insert(netuid, 360u16); - let super_block = - u64::from(SuperBlockTempos::::get()) * u64::from(Tempo::::get(netuid)) - u16::from(netuid) as u64; + let super_block = u64::from(SuperBlockTempos::::get()) + * u64::from(Tempo::::get(netuid)) + - u16::from(netuid) as u64; // Choose counts so result is deterministic for ANY decrease-per-superblock. // Let dec = GlobalSubsubnetDecreasePerSuperblock(); set old = dec + 3. 
@@ -301,6 +302,9 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { SubsubnetCountCurrent::::insert(netuid, old); SubsubnetCountDesired::::insert(netuid, desired); + // Set non-default subnet emission split + SubsubnetEmissionSplit::::insert(netuid, vec![123u16, 234u16, 345u16]); + // Seed data at a kept subid (2) and a removed subid (3) let idx_keep = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(2u8)); let idx_rm3 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(3u8)); @@ -360,6 +364,9 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { assert!(!TimelockedWeightCommits::::contains_key( idx_rm3, 1u64 )); + + // SubsubnetEmissionSplit is reset on super-block + assert!(SubsubnetEmissionSplit::::get(netuid).is_none()); }); } @@ -372,8 +379,9 @@ fn update_subsubnet_counts_increases_on_superblock() { // super_block = SuperBlockTempos() * Tempo(netuid) - netuid Tempo::::insert(netuid, 360u16); - let super_block = - u64::from(SuperBlockTempos::::get()) * u64::from(Tempo::::get(netuid)) - u16::from(netuid) as u64; + let super_block = u64::from(SuperBlockTempos::::get()) + * u64::from(Tempo::::get(netuid)) + - u16::from(netuid) as u64; // Choose counts so result is deterministic for ANY increase-per-superblock. 
let inc: u8 = u8::from(GlobalSubsubnetDecreasePerSuperblock::::get()); @@ -382,11 +390,20 @@ fn update_subsubnet_counts_increases_on_superblock() { SubsubnetCountCurrent::::insert(netuid, old); SubsubnetCountDesired::::insert(netuid, desired); + // Set non-default subnet emission split + SubsubnetEmissionSplit::::insert(netuid, vec![123u16, 234u16, 345u16]); + // Act exactly on a super-block boundary SubtensorModule::update_subsubnet_counts_if_needed(super_block); // New count is old + inc - assert_eq!(SubsubnetCountCurrent::::get(netuid), SubId::from(1 + inc)); + assert_eq!( + SubsubnetCountCurrent::::get(netuid), + SubId::from(1 + inc) + ); + + // SubsubnetEmissionSplit is reset on super-block + assert!(SubsubnetEmissionSplit::::get(netuid).is_none()); }); } From 412a24766d39361a1485eac555ef1c36f4267c98 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 14:05:34 -0400 Subject: [PATCH 25/39] Add emission tests --- pallets/subtensor/src/subnets/subsubnet.rs | 52 +++- pallets/subtensor/src/tests/coinbase.rs | 1 + pallets/subtensor/src/tests/subsubnet.rs | 297 ++++++++++++++++++++- 3 files changed, 343 insertions(+), 7 deletions(-) diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 63f2aa0a0d..98f339bfb5 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -286,8 +286,11 @@ impl Pallet { // Calculate subsubnet weight from the split emission (not the other way because preserving // emission accuracy is the priority) - let sub_weight = U64F64::saturating_from_num(sub_emission) - .safe_div(U64F64::saturating_from_num(rao_emission)); + // For zero emission the first subsubnet gets full weight + let sub_weight = U64F64::saturating_from_num(sub_emission).safe_div_or( + U64F64::saturating_from_num(rao_emission), + U64F64::saturating_from_num(if sub_id_u8 == 0 { 1 } else { 0 }), + ); // Produce an iterator of (hotkey, (terms, sub_weight)) tuples 
epoch_output @@ -346,7 +349,50 @@ impl Pallet { ); acc_terms.new_validator_permit |= terms.new_validator_permit; }) - .or_insert(terms); + .or_insert_with(|| { + // weighted insert for the first sub-subnet seen for this hotkey + EpochTerms { + uid: terms.uid, + dividend: Self::weighted_acc_u16(0, terms.dividend, sub_weight), + incentive: Self::weighted_acc_u16(0, terms.incentive, sub_weight), + validator_emission: Self::weighted_acc_alpha( + 0u64.into(), + terms.validator_emission, + sub_weight, + ), + server_emission: Self::weighted_acc_alpha( + 0u64.into(), + terms.server_emission, + sub_weight, + ), + stake_weight: Self::weighted_acc_u16( + 0, + terms.stake_weight, + sub_weight, + ), + active: terms.active, // booleans are ORed across subs + emission: Self::weighted_acc_alpha( + 0u64.into(), + terms.emission, + sub_weight, + ), + rank: Self::weighted_acc_u16(0, terms.rank, sub_weight), + trust: Self::weighted_acc_u16(0, terms.trust, sub_weight), + consensus: Self::weighted_acc_u16(0, terms.consensus, sub_weight), + pruning_score: Self::weighted_acc_u16( + 0, + terms.pruning_score, + sub_weight, + ), + validator_trust: Self::weighted_acc_u16( + 0, + terms.validator_trust, + sub_weight, + ), + new_validator_permit: terms.new_validator_permit, + bond: Vec::new(), // aggregated map doesn’t use bonds; keep empty + } + }); acc }); diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index d27e42f445..a196cfc00e 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -2441,6 +2441,7 @@ fn test_drain_pending_emission_no_miners_all_drained() { }); } +// cargo test --package pallet-subtensor --lib -- tests::coinbase::test_drain_pending_emission_zero_emission --exact --show-output #[test] fn test_drain_pending_emission_zero_emission() { new_test_ext(1).execute_with(|| { diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 
7a51da0768..4f4902df42 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -29,10 +29,10 @@ // - [x] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block // - [x] When subsubnet limit is increased, increase is GlobalSubsubnetDecreasePerSuperblock per super-block // - [x] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared -// - [ ] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms -// - [ ] Subnet epoch terms persist in state +// - [x] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms +// - [x] Subnet epoch terms persist in state // - [x] Subsubnet epoch terms persist in state -// - [ ] "Yuma Emergency Mode" (consensus sum is 0 for a subsubnet), emission distributed by stake +// - [x] "Yuma Emergency Mode" (consensus sum is 0 for a subsubnet), emission distributed by stake // - [x] Miner with no weights on any subsubnet receives no reward // - [x] SubsubnetEmissionSplit is reset on super-block on subsubnet count increase // - [x] SubsubnetEmissionSplit is reset on super-block on subsubnet count decrease @@ -41,6 +41,7 @@ use super::mock::*; use crate::coinbase::reveal_commits::WeightsTlockPayload; use crate::subnets::subsubnet::{GLOBAL_MAX_SUBNET_COUNT, MAX_SUBSUBNET_COUNT_PER_SUBNET}; use crate::*; +use alloc::collections::BTreeMap; use approx::assert_abs_diff_eq; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use codec::Encode; @@ -52,7 +53,7 @@ use sha2::Digest; use sp_core::{H256, U256}; use sp_runtime::traits::{BlakeTwo256, Hash}; use sp_std::collections::vec_deque::VecDeque; -use substrate_fixed::types::I32F32; +use substrate_fixed::types::{I32F32, U64F64}; use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; use tle::{ curves::drand::TinyBLS381, ibe::fullident::Identity, @@ -663,6 +664,190 @@ fn 
epoch_with_subsubnets_incentives_proportional_to_weights() { }); } +#[test] +fn epoch_with_subsubnets_persists_and_aggregates_all_terms() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1u16); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + + // Three neurons: validator (uid=0) + two miners (uid=1,2) + let ck0 = U256::from(1); + let hk0 = U256::from(2); + let ck1 = U256::from(3); + let hk1 = U256::from(4); + let hk2 = U256::from(6); + let emission = AlphaCurrency::from(1_000_000_000u64); + + // Healthy minimal state and 3rd neuron + mock_epoch_state(netuid, ck0, hk0, ck1, hk1); + mock_3_neurons(netuid, hk2); + let uid0 = 0_usize; + let uid1 = 1_usize; + let uid2 = 2_usize; + + // Two sub-subnets with non-equal split (~25% / 75%) + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + let split0 = u16::MAX / 4; + let split1 = u16::MAX - split0; + SubsubnetEmissionSplit::::insert(netuid, vec![split0, split1]); + + // One validator; skew weights differently per sub-subnet + ValidatorPermit::::insert(netuid, vec![true, false, false]); + // sub 0: uid1 heavy, uid2 light + Weights::::insert( + idx0, + 0, + vec![(1u16, 0xFFFF / 5 * 3), (2u16, 0xFFFF / 5 * 2)], + ); + // sub 1: uid1 light, uid2 heavy + Weights::::insert(idx1, 0, vec![(1u16, 0xFFFF / 5), (2u16, 0xFFFF / 5 * 4)]); + + // Per-sub emissions (and weights used for aggregation) + let subsubnet_emissions = SubtensorModule::split_emissions(netuid, emission); + let w0 = U64F64::from_num(u64::from(subsubnet_emissions[0])) + / U64F64::from_num(u64::from(emission)); + let w1 = U64F64::from_num(u64::from(subsubnet_emissions[1])) + / U64F64::from_num(u64::from(emission)); + assert_abs_diff_eq!(w0.to_num::(), 0.25, epsilon = 0.0001); + assert_abs_diff_eq!(w1.to_num::(), 0.75, epsilon = 0.0001); + + // Get per-subsubnet epoch outputs to build expectations + let out0 = 
SubtensorModule::epoch_subsubnet(netuid, SubId::from(0), subsubnet_emissions[0]); + let out1 = SubtensorModule::epoch_subsubnet(netuid, SubId::from(1), subsubnet_emissions[1]); + + // Now run the real aggregated path (also persists terms) + let agg = SubtensorModule::epoch_with_subsubnets(netuid, emission); + + // hotkey -> (server_emission_u64, validator_emission_u64) + let agg_map: BTreeMap = agg + .into_iter() + .map(|(hk, se, ve)| (hk, (u64::from(se), u64::from(ve)))) + .collect(); + + // Helper to fetch per-sub terms by hotkey + let terms0 = |hk: &U256| out0.0.get(hk).unwrap(); + let terms1 = |hk: &U256| out1.0.get(hk).unwrap(); + + // Returned aggregated emissions match weighted sums + for hk in [&hk1, &hk2] { + let (got_se, got_ve) = agg_map.get(hk).cloned().expect("present"); + let t0 = terms0(hk); + let t1 = terms1(hk); + let exp_se = (U64F64::saturating_from_num(u64::from(t0.server_emission)) * w0 + + U64F64::saturating_from_num(u64::from(t1.server_emission)) * w1) + .saturating_to_num::(); + let exp_ve = (U64F64::saturating_from_num(u64::from(t0.validator_emission)) * w0 + + U64F64::saturating_from_num(u64::from(t1.validator_emission)) * w1) + .saturating_to_num::(); + assert_abs_diff_eq!(u64::from(got_se), exp_se, epsilon = 1); + assert_abs_diff_eq!(u64::from(got_ve), exp_ve, epsilon = 1); + } + + // Persisted per-subsubnet Incentive vectors match per-sub terms + let inc0 = Incentive::::get(idx0); + let inc1 = Incentive::::get(idx1); + let exp_inc0 = { + let mut v = vec![0u16; 3]; + v[terms0(&hk0).uid] = terms0(&hk0).incentive; + v[terms0(&hk1).uid] = terms0(&hk1).incentive; + v[terms0(&hk2).uid] = terms0(&hk2).incentive; + v + }; + let exp_inc1 = { + let mut v = vec![0u16; 3]; + v[terms1(&hk0).uid] = terms1(&hk0).incentive; + v[terms1(&hk1).uid] = terms1(&hk1).incentive; + v[terms1(&hk2).uid] = terms1(&hk2).incentive; + v + }; + for (a, e) in inc0.iter().zip(exp_inc0.iter()) { + assert_abs_diff_eq!(*a, *e, epsilon = 1); + } + for (a, e) in 
inc1.iter().zip(exp_inc1.iter()) { + assert_abs_diff_eq!(*a, *e, epsilon = 1); + } + + // Persisted Bonds for validator (uid0) exist and mirror per-sub terms + let b0 = Bonds::::get(idx0, 0u16); + let b1 = Bonds::::get(idx1, 0u16); + let exp_b0 = &terms0(&hk0).bond; + let exp_b1 = &terms1(&hk0).bond; + + assert!(!b0.is_empty(), "bonds sub0 empty"); + assert!(!b1.is_empty(), "bonds sub1 empty"); + assert_eq!(b0.len(), exp_b0.len()); + assert_eq!(b1.len(), exp_b1.len()); + for ((u_a, w_a), (u_e, w_e)) in b0.iter().zip(exp_b0.iter()) { + assert_eq!(u_a, u_e); + assert_abs_diff_eq!(*w_a, *w_e, epsilon = 1); + } + for ((u_a, w_a), (u_e, w_e)) in b1.iter().zip(exp_b1.iter()) { + assert_eq!(u_a, u_e); + assert_abs_diff_eq!(*w_a, *w_e, epsilon = 1); + } + + // Persisted subnet-level terms are weighted/OR aggregates of sub-subnets + // Fetch persisted vectors + let active = Active::::get(netuid); + let emission_v = Emission::::get(netuid); + let rank_v = Rank::::get(netuid); + let trust_v = Trust::::get(netuid); + let cons_v = Consensus::::get(netuid); + let div_v = Dividends::::get(netuid); + let prun_v = PruningScores::::get(netuid); + let vtrust_v = ValidatorTrust::::get(netuid); + let vperm_v = ValidatorPermit::::get(netuid); + + // Helpers for weighted u16 / u64 + let wu16 = |a: u16, b: u16| -> u16 { + (U64F64::saturating_from_num(a) * w0 + U64F64::saturating_from_num(b) * w1) + .saturating_to_num::() + }; + let wu64 = |a: u64, b: u64| -> u64 { + (U64F64::saturating_from_num(a) * w0 + U64F64::saturating_from_num(b) * w1) + .saturating_to_num::() + }; + + // For each UID, compute expected aggregate from out0/out1 terms + let check_uid = |uid: usize, hk: &U256| { + let t0 = terms0(hk); + let t1 = terms1(hk); + + // Active & ValidatorPermit are OR-aggregated + assert_eq!(active[uid], t0.active || t1.active); + assert_eq!( + vperm_v[uid], + t0.new_validator_permit || t1.new_validator_permit + ); + + // Emission (u64) + let exp_em = wu64(u64::from(t0.emission), 
u64::from(t1.emission)); + assert_abs_diff_eq!(u64::from(emission_v[uid]), exp_em, epsilon = 1); + + // u16 terms + assert_abs_diff_eq!(rank_v[uid], wu16(t0.rank, t1.rank), epsilon = 1); + assert_abs_diff_eq!(trust_v[uid], wu16(t0.trust, t1.trust), epsilon = 1); + assert_abs_diff_eq!(cons_v[uid], wu16(t0.consensus, t1.consensus), epsilon = 1); + assert_abs_diff_eq!(div_v[uid], wu16(t0.dividend, t1.dividend), epsilon = 1); + assert_abs_diff_eq!( + prun_v[uid], + wu16(t0.pruning_score, t1.pruning_score), + epsilon = 1 + ); + assert_abs_diff_eq!( + vtrust_v[uid], + wu16(t0.validator_trust, t1.validator_trust), + epsilon = 1 + ); + }; + + check_uid(uid0, &hk0); + check_uid(uid1, &hk1); + check_uid(uid2, &hk2); + }); +} + #[test] fn epoch_with_subsubnets_no_weight_no_incentive() { new_test_ext(1).execute_with(|| { @@ -1360,3 +1545,107 @@ fn test_do_commit_crv3_sub_weights_committing_too_fast() { )); }); } + +#[test] +fn epoch_subsubnet_emergency_mode_distributes_by_stake() { + new_test_ext(1).execute_with(|| { + // setup a single sub-subnet where consensus sum becomes 0 + let netuid = NetUid::from(1u16); + let subid = SubId::from(1u8); + let idx = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + let tempo: u16 = 5; + add_network(netuid, tempo, 0); + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); // allow subids {0,1} + SubtensorModule::set_max_registrations_per_block(netuid, 4); + SubtensorModule::set_target_registrations_per_interval(netuid, 4); + + // three neurons: make ALL permitted validators so active_stake is non-zero + let hk0 = U256::from(10); + let ck0 = U256::from(11); + let hk1 = U256::from(20); + let ck1 = U256::from(21); + let hk2 = U256::from(30); + let ck2 = U256::from(31); + let hk3 = U256::from(40); // miner + let ck3 = U256::from(41); + register_ok_neuron(netuid, hk0, ck0, 0); + register_ok_neuron(netuid, hk1, ck1, 0); + register_ok_neuron(netuid, hk2, ck2, 0); + register_ok_neuron(netuid, hk3, ck3, 0); + + // active + recent 
updates so they're all active + let now = SubtensorModule::get_current_block_as_u64(); + ActivityCutoff::::insert(netuid, 1_000u16); + LastUpdate::::insert(idx, vec![now, now, now, now]); + + // All staking validators permitted => active_stake = stake + ValidatorPermit::::insert(netuid, vec![true, true, true, false]); + SubtensorModule::set_stake_threshold(0); + + // force ZERO consensus/incentive path: no weights/bonds + // (leave Weights/Bonds empty for all rows on this sub-subnet) + + // stake proportions: uid0:uid1:uid2 = 10:30:60 + SubtensorModule::add_balance_to_coldkey_account(&ck0, 10); + SubtensorModule::add_balance_to_coldkey_account(&ck1, 30); + SubtensorModule::add_balance_to_coldkey_account(&ck2, 60); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk0, + &ck0, + netuid, + AlphaCurrency::from(10), + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + AlphaCurrency::from(30), + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk2, + &ck2, + netuid, + AlphaCurrency::from(60), + ); + + let emission = AlphaCurrency::from(1_000_000u64); + + // --- act: run epoch on this sub-subnet only --- + let out = SubtensorModule::epoch_subsubnet(netuid, subid, emission); + + // collect validator emissions per hotkey + let t0 = out.0.get(&hk0).unwrap(); + let t1 = out.0.get(&hk1).unwrap(); + let t2 = out.0.get(&hk2).unwrap(); + let t3 = out.0.get(&hk3).unwrap(); + + // In emergency mode (consensus sum == 0): + // - validator_emission is distributed by (active) stake proportions + // - server_emission remains zero (incentive path is zero) + assert_eq!(u64::from(t0.server_emission), 0); + assert_eq!(u64::from(t1.server_emission), 0); + assert_eq!(u64::from(t2.server_emission), 0); + assert_eq!(u64::from(t3.server_emission), 0); + + // expected splits by stake: 10%, 30%, 60% of total emission + let e = u64::from(emission); + let exp0 = e / 10; // 10% + let exp1 = e * 3 / 10; // 
30% + let exp2 = e * 6 / 10; // 60% + + // allow tiny rounding drift from fixed-point conversions + assert_abs_diff_eq!(u64::from(t0.validator_emission), exp0, epsilon = 2); + assert_abs_diff_eq!(u64::from(t1.validator_emission), exp1, epsilon = 2); + assert_abs_diff_eq!(u64::from(t2.validator_emission), exp2, epsilon = 2); + assert_eq!(u64::from(t3.validator_emission), 0); + + // all emission goes to validators + assert_abs_diff_eq!( + u64::from(t0.validator_emission) + + u64::from(t1.validator_emission) + + u64::from(t2.validator_emission), + e, + epsilon = 2 + ); + }); +} From 700a8828de105f0b4fa063309b23b3282fd658f2 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 14:11:06 -0400 Subject: [PATCH 26/39] Format --- pallets/admin-utils/src/tests/mod.rs | 2 +- pallets/subtensor/src/macros/dispatches.rs | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index b1f88b6826..3403bbf97b 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2113,4 +2113,4 @@ fn test_sudo_set_desired_subsubnet_count() { ss_count_ok )); }); -} \ No newline at end of file +} diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 8215c846a4..1b6c0c13b3 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -94,7 +94,7 @@ mod dispatches { } } - /// --- Sets the caller weights for the incentive mechanism for subsubnets. The call + /// --- Sets the caller weights for the incentive mechanism for subsubnets. The call /// can be made from the hotkey account so is potentially insecure, however, the damage /// of changing weights is minimal if caught early. This function includes all the /// checks that the passed weights meet the requirements. 
Stored as u16s they represent @@ -113,7 +113,7 @@ mod dispatches { /// /// * `netuid` (u16): /// - The network uid we are setting these weights on. - /// + /// /// * `subid` (`u8`): /// - The u8 subsubnet identifier. /// @@ -251,7 +251,7 @@ mod dispatches { /// /// * `netuid` (`u16`): /// - The u16 network identifier. - /// + /// /// * `subid` (`u8`): /// - The u8 subsubnet identifier. /// @@ -372,7 +372,7 @@ mod dispatches { /// /// * `netuid` (`u16`): /// - The u16 network identifier. - /// + /// /// * `subid` (`u8`): /// - The u8 subsubnet identifier. /// @@ -470,7 +470,7 @@ mod dispatches { /// /// * `netuid` (`u16`): /// - The u16 network identifier. - /// + /// /// * `subid` (`u8`): /// - The u8 subsubnet identifier. /// @@ -2293,7 +2293,7 @@ mod dispatches { Ok(()) } - /// ---- Used to commit timelock encrypted commit-reveal weight values to later be revealed for + /// ---- Used to commit timelock encrypted commit-reveal weight values to later be revealed for /// a subsubnet. /// /// # Args: From b919b5de03f14ff9cce02dfc6e53b69557e6e327 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 14:21:58 -0400 Subject: [PATCH 27/39] Fix merge --- pallets/admin-utils/src/tests/mod.rs | 1 + pallets/subtensor/src/macros/dispatches.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 3403bbf97b..79c237f526 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -1955,6 +1955,7 @@ fn test_sudo_set_commit_reveal_version() { }); } +#[test] fn test_sudo_set_min_burn() { new_test_ext().execute_with(|| { let netuid = NetUid::from(1); diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index 1b6c0c13b3..a94f44f849 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -155,7 +155,7 @@ mod dispatches { /// /// * 
'MaxWeightExceeded': /// - Attempting to set weights with max value exceeding limit. - #[pallet::call_index(114)] + #[pallet::call_index(119)] #[pallet::weight((Weight::from_parts(15_540_000_000, 0) .saturating_add(T::DbWeight::get().reads(4111)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] From 56453e994af958f525d6882fb09274799046482d Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 16:08:25 -0400 Subject: [PATCH 28/39] Fix clippy --- pallets/subtensor/src/subnets/subsubnet.rs | 4 ++++ runtime/src/lib.rs | 7 ++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 98f339bfb5..73dcef569a 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -67,6 +67,10 @@ impl Pallet { } } + pub fn get_current_subsubnet_count(netuid: NetUid) -> SubId { + SubsubnetCountCurrent::::get(netuid) + } + pub fn ensure_subsubnet_exists(netuid: NetUid, sub_id: SubId) -> DispatchResult { // Make sure the base subnet exists ensure!( diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 73ddfb751f..347f117a31 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1056,7 +1056,12 @@ pub struct ResetBondsOnCommit; impl OnMetadataCommitment for ResetBondsOnCommit { #[cfg(not(feature = "runtime-benchmarks"))] fn on_metadata_commitment(netuid: NetUid, address: &AccountId) { - let _ = SubtensorModule::do_reset_bonds(netuid, address); + // Reset bonds for each subsubnet of this subnet + let subsub_count = SubtensorModule::get_current_subsubnet_count(netuid); + for subid in 0..u8::from(subsub_count) { + let netuid_index = SubtensorModule::get_subsubnet_storage_index(netuid, subid.into()); + let _ = SubtensorModule::do_reset_bonds(netuid, address); + } } #[cfg(feature = "runtime-benchmarks")] From 5811d0f77920c5ae48ff64a3f46dfa8a8150aebc Mon Sep 17 00:00:00 2001 From: Greg Zaitsev 
Date: Thu, 4 Sep 2025 16:34:44 -0400 Subject: [PATCH 29/39] Fix clippy --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 347f117a31..3ab68aa3f5 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -1060,7 +1060,7 @@ impl OnMetadataCommitment for ResetBondsOnCommit { let subsub_count = SubtensorModule::get_current_subsubnet_count(netuid); for subid in 0..u8::from(subsub_count) { let netuid_index = SubtensorModule::get_subsubnet_storage_index(netuid, subid.into()); - let _ = SubtensorModule::do_reset_bonds(netuid, address); + let _ = SubtensorModule::do_reset_bonds(netuid_index, address); } } From 2cd3f5eb713fcf205280b216297017e97fad210a Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 18:15:58 -0400 Subject: [PATCH 30/39] Fix emission aggregation --- pallets/subtensor/src/subnets/subsubnet.rs | 31 ++++++++-------------- pallets/subtensor/src/tests/subsubnet.rs | 14 +++++----- 2 files changed, 18 insertions(+), 27 deletions(-) diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index 73dcef569a..a00e8a5bfe 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -306,21 +306,20 @@ impl Pallet { .fold(BTreeMap::new(), |mut acc, (hotkey, (terms, sub_weight))| { acc.entry(hotkey) .and_modify(|acc_terms| { + // Server and validator emission come from subsubnet emission and need to be added up + acc_terms.validator_emission = acc_terms + .validator_emission + .saturating_add(terms.validator_emission); + acc_terms.server_emission = acc_terms + .server_emission + .saturating_add(terms.server_emission); + + // The rest of the terms need to be aggregated as weighted sum acc_terms.dividend = Self::weighted_acc_u16( acc_terms.dividend, terms.dividend, sub_weight, ); - acc_terms.validator_emission = Self::weighted_acc_alpha( - acc_terms.validator_emission, - 
terms.validator_emission, - sub_weight, - ); - acc_terms.server_emission = Self::weighted_acc_alpha( - acc_terms.server_emission, - terms.server_emission, - sub_weight, - ); acc_terms.stake_weight = Self::weighted_acc_u16( acc_terms.stake_weight, terms.stake_weight, @@ -359,16 +358,8 @@ impl Pallet { uid: terms.uid, dividend: Self::weighted_acc_u16(0, terms.dividend, sub_weight), incentive: Self::weighted_acc_u16(0, terms.incentive, sub_weight), - validator_emission: Self::weighted_acc_alpha( - 0u64.into(), - terms.validator_emission, - sub_weight, - ), - server_emission: Self::weighted_acc_alpha( - 0u64.into(), - terms.server_emission, - sub_weight, - ), + validator_emission: terms.validator_emission, + server_emission: terms.server_emission, stake_weight: Self::weighted_acc_u16( 0, terms.stake_weight, diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 4f4902df42..08612ef684 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -729,17 +729,17 @@ fn epoch_with_subsubnets_persists_and_aggregates_all_terms() { let terms0 = |hk: &U256| out0.0.get(hk).unwrap(); let terms1 = |hk: &U256| out1.0.get(hk).unwrap(); - // Returned aggregated emissions match weighted sums + // Returned aggregated emissions match plain sums of subsubnet emissions for hk in [&hk1, &hk2] { let (got_se, got_ve) = agg_map.get(hk).cloned().expect("present"); let t0 = terms0(hk); let t1 = terms1(hk); - let exp_se = (U64F64::saturating_from_num(u64::from(t0.server_emission)) * w0 - + U64F64::saturating_from_num(u64::from(t1.server_emission)) * w1) - .saturating_to_num::(); - let exp_ve = (U64F64::saturating_from_num(u64::from(t0.validator_emission)) * w0 - + U64F64::saturating_from_num(u64::from(t1.validator_emission)) * w1) - .saturating_to_num::(); + let exp_se = (U64F64::saturating_from_num(u64::from(t0.server_emission)) + + U64F64::saturating_from_num(u64::from(t1.server_emission))) + 
.saturating_to_num::(); + let exp_ve = (U64F64::saturating_from_num(u64::from(t0.validator_emission)) + + U64F64::saturating_from_num(u64::from(t1.validator_emission))) + .saturating_to_num::(); assert_abs_diff_eq!(u64::from(got_se), exp_se, epsilon = 1); assert_abs_diff_eq!(u64::from(got_ve), exp_ve, epsilon = 1); } From f991ecac5d48dc17a0f913cb955af5bf7132d511 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Thu, 4 Sep 2025 18:29:02 -0400 Subject: [PATCH 31/39] Spec bump --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 3ab68aa3f5..e7ad12af7b 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 309, + spec_version: 310, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From 75ea5962dfa2a3724a5f5b78133e4b74e169a8ae Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Fri, 5 Sep 2025 00:37:06 +0000 Subject: [PATCH 32/39] auto-update benchmark weights --- pallets/subtensor/src/macros/dispatches.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index a94f44f849..340bba171a 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -932,7 +932,7 @@ mod dispatches { /// - The ip type v4 or v6. 
/// #[pallet::call_index(5)] - #[pallet::weight((Weight::from_parts(28_660_000, 0) + #[pallet::weight((Weight::from_parts(42_000_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_prometheus( @@ -2249,7 +2249,7 @@ mod dispatches { /// - The client (bittensor-drand) version #[pallet::call_index(113)] #[pallet::weight((Weight::from_parts(80_690_000, 0) - .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_timelocked_weights( origin: T::RuntimeOrigin, From f45d30767253a99d4908b3c19a76a5376e7a5aa2 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Mon, 8 Sep 2025 18:06:20 -0400 Subject: [PATCH 33/39] Remove subsubnet throttling --- pallets/subtensor/src/coinbase/block_step.rs | 2 - pallets/subtensor/src/lib.rs | 9 -- pallets/subtensor/src/subnets/subsubnet.rs | 97 +++++--------- pallets/subtensor/src/tests/subsubnet.rs | 127 ++++++------------- 4 files changed, 70 insertions(+), 165 deletions(-) diff --git a/pallets/subtensor/src/coinbase/block_step.rs b/pallets/subtensor/src/coinbase/block_step.rs index 6385a7f756..6a96090b05 100644 --- a/pallets/subtensor/src/coinbase/block_step.rs +++ b/pallets/subtensor/src/coinbase/block_step.rs @@ -21,8 +21,6 @@ impl Pallet { Self::run_coinbase(block_emission); // --- 4. Set pending children on the epoch; but only after the coinbase has been run. Self::try_set_pending_children(block_number); - // --- 5. Update sub-subnet counts - Self::update_subsubnet_counts_if_needed(block_number); // Return ok. 
Ok(()) } diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index 9493956405..7be36801db 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1829,15 +1829,6 @@ pub mod pallet { pub fn SuperBlockTempos() -> u16 { 20 } - #[pallet::type_value] - /// -- ITEM (Maximum allowed sub-subnet count decrease per super-block) - pub fn GlobalSubsubnetDecreasePerSuperblock() -> SubId { - SubId::from(1) - } - #[pallet::storage] - /// --- MAP ( netuid ) --> Number of sub-subnets desired by root or subnet owner. - pub type SubsubnetCountDesired = - StorageMap<_, Twox64Concat, NetUid, SubId, ValueQuery, DefaultSubsubnetCount>; #[pallet::storage] /// --- MAP ( netuid ) --> Current number of sub-subnets pub type SubsubnetCountCurrent = diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs index a00e8a5bfe..904c380463 100644 --- a/pallets/subtensor/src/subnets/subsubnet.rs +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -88,10 +88,7 @@ impl Pallet { /// Set the desired valus of sub-subnet count for a subnet identified /// by netuid - pub fn do_set_desired_subsubnet_count( - netuid: NetUid, - subsubnet_count: SubId, - ) -> DispatchResult { + pub fn do_set_subsubnet_count(netuid: NetUid, subsubnet_count: SubId) -> DispatchResult { // Make sure the subnet exists ensure!( Self::if_subnet_exist(netuid), @@ -113,75 +110,49 @@ impl Pallet { Error::::InvalidValue ); - SubsubnetCountDesired::::insert(netuid, subsubnet_count); + Self::update_subsubnet_counts_if_needed(netuid, subsubnet_count); + Ok(()) } /// Update current count for a subnet identified by netuid - /// - /// - This function should be called in every block in run_counbase /// - Cleans up all sub-subnet maps if count is reduced - /// - Decreases or increases current subsubnet count by no more than - /// `GlobalSubsubnetDecreasePerSuperblock` /// - pub fn update_subsubnet_counts_if_needed(current_block: u64) { - // Run once per 
super-block - let super_block_tempos = u64::from(SuperBlockTempos::::get()); - Self::get_all_subnet_netuids().iter().for_each(|netuid| { - let super_block = super_block_tempos.saturating_mul(u64::from(Tempo::::get(netuid))); - if let Some(rem) = current_block - .saturating_add(u16::from(*netuid) as u64) - .checked_rem(super_block) - { - if rem == 0 { - let old_count = u8::from(SubsubnetCountCurrent::::get(netuid)); - let desired_count = u8::from(SubsubnetCountDesired::::get(netuid)); - let min_capped_count = old_count - .saturating_sub(u8::from(GlobalSubsubnetDecreasePerSuperblock::::get())) - .max(1); - let max_capped_count = old_count - .saturating_add(u8::from(GlobalSubsubnetDecreasePerSuperblock::::get())); - let new_count = desired_count.max(min_capped_count).min(max_capped_count); - - if old_count != new_count { - if old_count > new_count { - for subid in new_count..old_count { - let netuid_index = - Self::get_subsubnet_storage_index(*netuid, SubId::from(subid)); - - // Cleanup Weights - let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); - - // Cleanup Incentive - Incentive::::remove(netuid_index); - - // Cleanup LastUpdate - LastUpdate::::remove(netuid_index); - - // Cleanup Bonds - let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); - - // Cleanup WeightCommits - let _ = - WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); - - // Cleanup TimelockedWeightCommits - let _ = TimelockedWeightCommits::::clear_prefix( - netuid_index, - u32::MAX, - None, - ); - } - } + pub fn update_subsubnet_counts_if_needed(netuid: NetUid, new_count: SubId) { + let old_count = u8::from(SubsubnetCountCurrent::::get(netuid)); + let new_count_u8 = u8::from(new_count); + if old_count != new_count_u8 { + if old_count > new_count_u8 { + for subid in new_count_u8..old_count { + let netuid_index = + Self::get_subsubnet_storage_index(netuid, SubId::from(subid)); + + // Cleanup Weights + let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); + + // 
Cleanup Incentive + Incentive::::remove(netuid_index); - SubsubnetCountCurrent::::insert(netuid, SubId::from(new_count)); + // Cleanup LastUpdate + LastUpdate::::remove(netuid_index); - // Reset split back to even - SubsubnetEmissionSplit::::remove(netuid); - } + // Cleanup Bonds + let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup WeightCommits + let _ = WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup TimelockedWeightCommits + let _ = + TimelockedWeightCommits::::clear_prefix(netuid_index, u32::MAX, None); } } - }); + + SubsubnetCountCurrent::::insert(netuid, SubId::from(new_count)); + + // Reset split back to even + SubsubnetEmissionSplit::::remove(netuid); + } } pub fn do_set_emission_split(netuid: NetUid, maybe_split: Option>) -> DispatchResult { diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs index 08612ef684..8b128a7241 100644 --- a/pallets/subtensor/src/tests/subsubnet.rs +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -26,16 +26,14 @@ // - [x] Incentives are per subsubnet // - [x] Per-subsubnet incentives are distributed proportionally to miner weights // - [x] Subsubnet limit can be set up to 8 (with admin pallet) -// - [x] When subsubnet limit is reduced, reduction is GlobalSubsubnetDecreasePerSuperblock per super-block -// - [x] When subsubnet limit is increased, increase is GlobalSubsubnetDecreasePerSuperblock per super-block // - [x] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared // - [x] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms // - [x] Subnet epoch terms persist in state // - [x] Subsubnet epoch terms persist in state // - [x] "Yuma Emergency Mode" (consensus sum is 0 for a subsubnet), emission distributed by stake // - [x] Miner with no weights on any subsubnet receives no reward -// - [x] SubsubnetEmissionSplit is reset on super-block on 
subsubnet count increase -// - [x] SubsubnetEmissionSplit is reset on super-block on subsubnet count decrease +// - [x] SubsubnetEmissionSplit is reset on subsubnet count increase +// - [x] SubsubnetEmissionSplit is reset on subsubnet count decrease use super::mock::*; use crate::coinbase::reveal_commits::WeightsTlockPayload; @@ -189,22 +187,22 @@ fn ensure_subsubnet_fails_when_subid_out_of_range() { } #[test] -fn do_set_desired_subsubnet_count_ok_minimal() { +fn do_set_subsubnet_count_ok_minimal() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(3u16); NetworksAdded::::insert(NetUid::from(3u16), true); // base subnet exists - assert_ok!(SubtensorModule::do_set_desired_subsubnet_count( + assert_ok!(SubtensorModule::do_set_subsubnet_count( netuid, SubId::from(1u8) )); - assert_eq!(SubsubnetCountDesired::::get(netuid), SubId::from(1u8)); + assert_eq!(SubsubnetCountCurrent::::get(netuid), SubId::from(1u8)); }); } #[test] -fn do_set_desired_subsubnet_count_ok_at_effective_cap() { +fn do_set_subsubnet_count_ok_at_effective_cap() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(4u16); NetworksAdded::::insert(NetUid::from(4u16), true); // base subnet exists @@ -218,69 +216,67 @@ fn do_set_desired_subsubnet_count_ok_at_effective_cap() { compile_cap }; - assert_ok!(SubtensorModule::do_set_desired_subsubnet_count( - netuid, bound - )); - assert_eq!(SubsubnetCountDesired::::get(netuid), bound); + assert_ok!(SubtensorModule::do_set_subsubnet_count(netuid, bound)); + assert_eq!(SubsubnetCountCurrent::::get(netuid), bound); }); } #[test] -fn do_set_desired_fails_when_base_subnet_missing() { +fn do_set_fails_when_base_subnet_missing() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(7u16); // No NetworksAdded insert => base subnet absent assert_noop!( - SubtensorModule::do_set_desired_subsubnet_count(netuid, SubId::from(1u8)), + SubtensorModule::do_set_subsubnet_count(netuid, SubId::from(1u8)), Error::::SubNetworkDoesNotExist ); }); } 
#[test] -fn do_set_desired_fails_for_zero() { +fn do_set_fails_for_zero() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(9u16); NetworksAdded::::insert(NetUid::from(9u16), true); // base subnet exists assert_noop!( - SubtensorModule::do_set_desired_subsubnet_count(netuid, SubId::from(0u8)), + SubtensorModule::do_set_subsubnet_count(netuid, SubId::from(0u8)), Error::::InvalidValue ); }); } #[test] -fn do_set_desired_fails_when_over_runtime_cap() { +fn do_set_fails_when_over_runtime_cap() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(11u16); NetworksAdded::::insert(NetUid::from(11u16), true); // base subnet exists // Runtime cap is 8 (per function), so 9 must fail assert_noop!( - SubtensorModule::do_set_desired_subsubnet_count(netuid, SubId::from(9u8)), + SubtensorModule::do_set_subsubnet_count(netuid, SubId::from(9u8)), Error::::InvalidValue ); }); } #[test] -fn do_set_desired_fails_when_over_compile_time_cap() { +fn do_set_fails_when_over_compile_time_cap() { new_test_ext(1).execute_with(|| { let netuid = NetUid::from(12u16); NetworksAdded::::insert(NetUid::from(12u16), true); // base subnet exists let too_big = SubId::from(MAX_SUBSUBNET_COUNT_PER_SUBNET + 1); assert_noop!( - SubtensorModule::do_set_desired_subsubnet_count(netuid, too_big), + SubtensorModule::do_set_subsubnet_count(netuid, too_big), Error::::InvalidValue ); }); } #[test] -fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { +fn update_subsubnet_counts_decreases_and_cleans() { new_test_ext(1).execute_with(|| { let hotkey = U256::from(1); @@ -288,27 +284,17 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { let netuid = NetUid::from(42u16); NetworksAdded::::insert(NetUid::from(42u16), true); - // super_block = SuperBlockTempos() * Tempo(netuid) - netuid - Tempo::::insert(netuid, 360u16); - let super_block = u64::from(SuperBlockTempos::::get()) - * u64::from(Tempo::::get(netuid)) - - u16::from(netuid) as u64; - - // Choose counts so result 
is deterministic for ANY decrease-per-superblock. - // Let dec = GlobalSubsubnetDecreasePerSuperblock(); set old = dec + 3. - let dec: u8 = u8::from(GlobalSubsubnetDecreasePerSuperblock::::get()); - let old = SubId::from(dec.saturating_add(3)); - let desired = SubId::from(1u8); - // min_capped = max(old - dec, 1) = 3 => new_count = 3 + // Choose counts so result is deterministic. + let old = SubId::from(3); + let desired = SubId::from(2u8); SubsubnetCountCurrent::::insert(netuid, old); - SubsubnetCountDesired::::insert(netuid, desired); // Set non-default subnet emission split SubsubnetEmissionSplit::::insert(netuid, vec![123u16, 234u16, 345u16]); - // Seed data at a kept subid (2) and a removed subid (3) - let idx_keep = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(2u8)); - let idx_rm3 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(3u8)); + // Seed data at a kept subid (1) and a removed subid (2) + let idx_keep = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1u8)); + let idx_rm3 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(2u8)); Weights::::insert(idx_keep, 0u16, vec![(1u16, 1u16)]); Incentive::::insert(idx_keep, vec![1u16]); @@ -340,11 +326,11 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { VecDeque::from([(hotkey, 1u64, Default::default(), Default::default())]), ); - // Act exactly on a super-block boundary - SubtensorModule::update_subsubnet_counts_if_needed(super_block); + // Act + SubtensorModule::update_subsubnet_counts_if_needed(netuid, desired); - // New count is 3 - assert_eq!(SubsubnetCountCurrent::::get(netuid), SubId::from(3u8)); + // New count is as desired + assert_eq!(SubsubnetCountCurrent::::get(netuid), desired); // Kept prefix intact assert_eq!(Incentive::::get(idx_keep), vec![1u16]); @@ -366,78 +352,37 @@ fn update_subsubnet_counts_decreases_and_cleans_on_superblock() { idx_rm3, 1u64 )); - // SubsubnetEmissionSplit is reset on super-block + // 
SubsubnetEmissionSplit is reset assert!(SubsubnetEmissionSplit::::get(netuid).is_none()); }); } #[test] -fn update_subsubnet_counts_increases_on_superblock() { +fn update_subsubnet_counts_increases() { new_test_ext(1).execute_with(|| { // Base subnet exists let netuid = NetUid::from(42u16); NetworksAdded::::insert(NetUid::from(42u16), true); - // super_block = SuperBlockTempos() * Tempo(netuid) - netuid - Tempo::::insert(netuid, 360u16); - let super_block = u64::from(SuperBlockTempos::::get()) - * u64::from(Tempo::::get(netuid)) - - u16::from(netuid) as u64; - - // Choose counts so result is deterministic for ANY increase-per-superblock. - let inc: u8 = u8::from(GlobalSubsubnetDecreasePerSuperblock::::get()); + // Choose counts let old = SubId::from(1u8); - let desired = SubId::from(5u8); + let desired = SubId::from(2u8); SubsubnetCountCurrent::::insert(netuid, old); - SubsubnetCountDesired::::insert(netuid, desired); // Set non-default subnet emission split SubsubnetEmissionSplit::::insert(netuid, vec![123u16, 234u16, 345u16]); - // Act exactly on a super-block boundary - SubtensorModule::update_subsubnet_counts_if_needed(super_block); + // Act + SubtensorModule::update_subsubnet_counts_if_needed(netuid, desired); - // New count is old + inc - assert_eq!( - SubsubnetCountCurrent::::get(netuid), - SubId::from(1 + inc) - ); + // New count is as desired + assert_eq!(SubsubnetCountCurrent::::get(netuid), desired); - // SubsubnetEmissionSplit is reset on super-block + // SubsubnetEmissionSplit is reset assert!(SubsubnetEmissionSplit::::get(netuid).is_none()); }); } -#[test] -fn update_subsubnet_counts_no_change_when_not_superblock() { - new_test_ext(1).execute_with(|| { - let netuid = NetUid::from(100u16); - NetworksAdded::::insert(NetUid::from(100u16), true); - - Tempo::::insert(netuid, 1u16); - let super_block = - u64::from(SuperBlockTempos::::get()) * u64::from(Tempo::::get(netuid)); - - // Setup counts as in the previous test - let dec: u8 = 
u8::from(GlobalSubsubnetDecreasePerSuperblock::::get()); - let old = SubId::from(dec.saturating_add(3)); - let desired = SubId::from(1u8); - SubsubnetCountCurrent::::insert(netuid, old); - SubsubnetCountDesired::::insert(netuid, desired); - - // Marker value at a subid that would be kept if a change happened - let idx_mark = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(2u8)); - Incentive::::insert(idx_mark, vec![77u16]); - - // Act on a non-boundary - SubtensorModule::update_subsubnet_counts_if_needed(super_block - 1); - - // Nothing changes - assert_eq!(SubsubnetCountCurrent::::get(netuid), old); - assert_eq!(Incentive::::get(idx_mark), vec![77u16]); - }); -} - #[test] fn split_emissions_even_division() { new_test_ext(1).execute_with(|| { From 7caedb1d53fbc7b41cde7f4dbb16b99928e7be06 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Mon, 8 Sep 2025 18:20:04 -0400 Subject: [PATCH 34/39] Cleanup admin-util for setting subsubnet counts --- pallets/admin-utils/src/lib.rs | 4 ++-- pallets/admin-utils/src/tests/mod.rs | 14 +++++--------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index e64fee4a84..37579c4e32 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1625,13 +1625,13 @@ pub mod pallet { #[pallet::weight(Weight::from_parts(15_000_000, 0) .saturating_add(::DbWeight::get().reads(1_u64)) .saturating_add(::DbWeight::get().writes(1_u64)))] - pub fn sudo_set_desired_subsubnet_count( + pub fn sudo_set_subsubnet_count( origin: OriginFor, netuid: NetUid, subsub_count: SubId, ) -> DispatchResult { pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; - pallet_subtensor::Pallet::::do_set_desired_subsubnet_count(netuid, subsub_count)?; + pallet_subtensor::Pallet::::do_set_subsubnet_count(netuid, subsub_count)?; Ok(()) } diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 
79c237f526..ae50059142 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -2074,7 +2074,7 @@ fn test_sudo_set_max_burn() { } #[test] -fn test_sudo_set_desired_subsubnet_count() { +fn test_sudo_set_subsubnet_count() { new_test_ext().execute_with(|| { let netuid = NetUid::from(1); let ss_count_ok = SubId::from(8); @@ -2086,7 +2086,7 @@ fn test_sudo_set_desired_subsubnet_count() { SubnetOwner::::insert(netuid, sn_owner); assert_eq!( - AdminUtils::sudo_set_desired_subsubnet_count( + AdminUtils::sudo_set_subsubnet_count( <::RuntimeOrigin>::signed(U256::from(1)), netuid, ss_count_ok @@ -2094,21 +2094,17 @@ fn test_sudo_set_desired_subsubnet_count() { Err(DispatchError::BadOrigin) ); assert_noop!( - AdminUtils::sudo_set_desired_subsubnet_count( - RuntimeOrigin::root(), - netuid, - ss_count_bad - ), + AdminUtils::sudo_set_subsubnet_count(RuntimeOrigin::root(), netuid, ss_count_bad), pallet_subtensor::Error::::InvalidValue ); - assert_ok!(AdminUtils::sudo_set_desired_subsubnet_count( + assert_ok!(AdminUtils::sudo_set_subsubnet_count( <::RuntimeOrigin>::root(), netuid, ss_count_ok )); - assert_ok!(AdminUtils::sudo_set_desired_subsubnet_count( + assert_ok!(AdminUtils::sudo_set_subsubnet_count( <::RuntimeOrigin>::signed(sn_owner), netuid, ss_count_ok From a257909b8b5b9785c1262e7cec914e2edfa36221 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Tue, 9 Sep 2025 12:15:28 -0400 Subject: [PATCH 35/39] Add regular rate limiting for subsubnet parameters --- pallets/admin-utils/src/lib.rs | 26 ++++++++++++++++++-- pallets/subtensor/src/lib.rs | 6 ++--- pallets/subtensor/src/utils/rate_limiting.rs | 4 +++ 3 files changed, 31 insertions(+), 5 deletions(-) diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs index 2131d4dbce..7a18794c8f 100644 --- a/pallets/admin-utils/src/lib.rs +++ b/pallets/admin-utils/src/lib.rs @@ -1630,8 +1630,19 @@ pub mod pallet { netuid: NetUid, subsub_count: SubId, ) -> DispatchResult { - 
pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::SubsubnetParameterUpdate], + )?; + pallet_subtensor::Pallet::::do_set_subsubnet_count(netuid, subsub_count)?; + + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::SubsubnetParameterUpdate], + ); Ok(()) } @@ -1645,8 +1656,19 @@ pub mod pallet { netuid: NetUid, maybe_split: Option>, ) -> DispatchResult { - pallet_subtensor::Pallet::::ensure_subnet_owner_or_root(origin, netuid)?; + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::SubsubnetParameterUpdate], + )?; + pallet_subtensor::Pallet::::do_set_emission_split(netuid, maybe_split)?; + + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::SubsubnetParameterUpdate], + ); Ok(()) } } diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index da4dfe9e1c..bb68912465 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1825,9 +1825,9 @@ pub mod pallet { SubId::from(8) } #[pallet::type_value] - /// -- ITEM (Number of tempos in subnet super-block) - pub fn SuperBlockTempos() -> u16 { - 20 + /// -- ITEM (Rate limit for subsubnet count updates) + pub fn SetSubsubnetCountRateLimit() -> u64 { + 7200 } #[pallet::storage] /// --- MAP ( netuid ) --> Current number of sub-subnets diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index de75086ea1..e68b7f066f 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -11,6 +11,7 @@ pub enum TransactionType { RegisterNetwork, SetWeightsVersionKey, SetSNOwnerHotkey, + SubsubnetParameterUpdate, } /// Implement conversion from TransactionType to u16 @@ -23,6 +24,7 @@ impl From for u16 
{ TransactionType::RegisterNetwork => 3, TransactionType::SetWeightsVersionKey => 4, TransactionType::SetSNOwnerHotkey => 5, + TransactionType::SubsubnetParameterUpdate => 7, } } } @@ -36,6 +38,7 @@ impl From for TransactionType { 3 => TransactionType::RegisterNetwork, 4 => TransactionType::SetWeightsVersionKey, 5 => TransactionType::SetSNOwnerHotkey, + 7 => TransactionType::SubsubnetParameterUpdate, _ => TransactionType::Unknown, } } @@ -50,6 +53,7 @@ impl Pallet { TransactionType::SetChildren => 150, // 30 minutes TransactionType::SetChildkeyTake => TxChildkeyTakeRateLimit::::get(), TransactionType::RegisterNetwork => NetworkRateLimit::::get(), + TransactionType::SubsubnetParameterUpdate => SetSubsubnetCountRateLimit::::get(), TransactionType::Unknown => 0, // Default to no limit for unknown types (no limit) _ => 0, From 75ba336d64ef02fc62a843fc09b77eb751e05911 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Tue, 9 Sep 2025 17:21:56 -0400 Subject: [PATCH 36/39] Custom subsubnet count setting rate limit for fast blocks --- pallets/subtensor/src/lib.rs | 4 ++-- pallets/subtensor/src/utils/rate_limiting.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index bb68912465..b2bd5d8532 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -1826,8 +1826,8 @@ pub mod pallet { } #[pallet::type_value] /// -- ITEM (Rate limit for subsubnet count updates) - pub fn SetSubsubnetCountRateLimit() -> u64 { - 7200 + pub fn SubsubnetCountSetRateLimit() -> u64 { + prod_or_fast!(7_200, 0) } #[pallet::storage] /// --- MAP ( netuid ) --> Current number of sub-subnets diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs index e68b7f066f..e346279a42 100644 --- a/pallets/subtensor/src/utils/rate_limiting.rs +++ b/pallets/subtensor/src/utils/rate_limiting.rs @@ -53,7 +53,7 @@ impl Pallet { TransactionType::SetChildren => 150, // 30 
minutes TransactionType::SetChildkeyTake => TxChildkeyTakeRateLimit::::get(), TransactionType::RegisterNetwork => NetworkRateLimit::::get(), - TransactionType::SubsubnetParameterUpdate => SetSubsubnetCountRateLimit::::get(), + TransactionType::SubsubnetParameterUpdate => SubsubnetCountSetRateLimit::::get(), TransactionType::Unknown => 0, // Default to no limit for unknown types (no limit) _ => 0, From 9c221adf159b7388d7d0310e0aaaf3dd946fbe03 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Tue, 9 Sep 2025 17:42:43 -0400 Subject: [PATCH 37/39] Fix merge --- pallets/subtensor/src/epoch/run_epoch.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index ccb1f191a3..3dfcf0ac05 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -108,7 +108,7 @@ impl Pallet { let server_emission = extract_from_sorted_terms!(terms_sorted, server_emission); Self::deposit_event(Event::IncentiveAlphaEmittedToMiners { - netuid, + netuid: netuid_index, emissions: server_emission, }); From fe056c6647b41ed7111710d74cfec3294bf3d456 Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 10 Sep 2025 11:38:46 -0400 Subject: [PATCH 38/39] Spec bump --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index e85cc4bb6e..5832b53206 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. 
- spec_version: 314, + spec_version: 315, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1, From d55e68c49e906d44bad8d78f7f08ebf61e20873f Mon Sep 17 00:00:00 2001 From: Greg Zaitsev Date: Wed, 10 Sep 2025 12:34:46 -0400 Subject: [PATCH 39/39] Spec bump --- runtime/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs index 5832b53206..d49d5147e9 100644 --- a/runtime/src/lib.rs +++ b/runtime/src/lib.rs @@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // `spec_version`, and `authoring_version` are the same between Wasm and native. // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use // the compatible custom types. - spec_version: 315, + spec_version: 316, impl_version: 1, apis: RUNTIME_API_VERSIONS, transaction_version: 1,