diff --git a/common/src/lib.rs b/common/src/lib.rs
index 82f0916894..26aa6b2f13 100644
--- a/common/src/lib.rs
+++ b/common/src/lib.rs
@@ -224,6 +224,162 @@ pub mod time {
     pub const DAYS: BlockNumber = HOURS * 24;
 }
 
+#[freeze_struct("8e576b32bb1bb664")]
+#[repr(transparent)]
+#[derive(
+    Deserialize,
+    Serialize,
+    Clone,
+    Copy,
+    Decode,
+    DecodeWithMemTracking,
+    Default,
+    Encode,
+    Eq,
+    Hash,
+    MaxEncodedLen,
+    Ord,
+    PartialEq,
+    PartialOrd,
+    RuntimeDebug,
+)]
+#[serde(transparent)]
+pub struct SubId(u8);
+
+impl SubId {
+    pub const MAIN: SubId = Self(0);
+}
+
+impl From<u8> for SubId {
+    fn from(value: u8) -> Self {
+        Self(value)
+    }
+}
+
+impl From<SubId> for u16 {
+    fn from(val: SubId) -> Self {
+        u16::from(val.0)
+    }
+}
+
+impl From<SubId> for u64 {
+    fn from(val: SubId) -> Self {
+        u64::from(val.0)
+    }
+}
+
+impl From<SubId> for u8 {
+    fn from(val: SubId) -> Self {
+        u8::from(val.0)
+    }
+}
+
+impl Display for SubId {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        Display::fmt(&self.0, f)
+    }
+}
+
+impl CompactAs for SubId {
+    type As = u8;
+
+    fn encode_as(&self) -> &Self::As {
+        &self.0
+    }
+
+    fn decode_from(v: Self::As) -> Result<Self, codec::Error> {
+        Ok(Self(v))
+    }
+}
+
+impl From<Compact<SubId>> for SubId {
+    fn from(c: Compact<SubId>) -> Self {
+        c.0
+    }
+}
+
+impl TypeInfo for SubId {
+    type Identity = <u8 as TypeInfo>::Identity;
+    fn type_info() -> scale_info::Type {
+        <u8 as TypeInfo>::type_info()
+    }
+}
+
+#[freeze_struct("2d995c5478e16d4d")]
+#[repr(transparent)]
+#[derive(
+    Deserialize,
+    Serialize,
+    Clone,
+    Copy,
+    Decode,
+    DecodeWithMemTracking,
+    Default,
+    Encode,
+    Eq,
+    Hash,
+    MaxEncodedLen,
+    Ord,
+    PartialEq,
+    PartialOrd,
+    RuntimeDebug,
+)]
+#[serde(transparent)]
+pub struct NetUidStorageIndex(u16);
+
+impl NetUidStorageIndex {
+    pub const ROOT: NetUidStorageIndex = Self(0);
+}
+
+impl Display for NetUidStorageIndex {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        Display::fmt(&self.0, f)
+    }
+}
+
+impl CompactAs for NetUidStorageIndex {
+    type As = u16;
+
+    fn encode_as(&self) -> &Self::As {
+        &self.0
+    }
+
+    fn decode_from(v: Self::As) -> Result<Self, codec::Error> {
+        Ok(Self(v))
+    }
+}
+
+impl From<Compact<NetUidStorageIndex>> for NetUidStorageIndex {
+    fn from(c: Compact<NetUidStorageIndex>) -> Self {
+        c.0
+    }
+}
+
+impl From<NetUid> for NetUidStorageIndex {
+    fn from(val: NetUid) -> Self {
+        val.0.into()
+    }
+}
+
+impl From<NetUidStorageIndex> for u16 {
+    fn from(val: NetUidStorageIndex) -> Self {
+        val.0
+    }
+}
+
+impl From<u16> for NetUidStorageIndex {
+    fn from(value: u16) -> Self {
+        Self(value)
+    }
+}
+
+impl TypeInfo for NetUidStorageIndex {
+    type Identity = <u16 as TypeInfo>::Identity;
+    fn type_info() -> scale_info::Type {
+        <u16 as TypeInfo>::type_info()
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/pallets/admin-utils/src/lib.rs b/pallets/admin-utils/src/lib.rs
index 5808d53a70..4af202132f 100644
--- a/pallets/admin-utils/src/lib.rs
+++ b/pallets/admin-utils/src/lib.rs
@@ -9,7 +9,7 @@ pub use pallet::*;
 // - we could use a type parameter for `AuthorityId`, but there is
 //   no sense for this as GRANDPA's `AuthorityId` is not a parameter -- it's always the same
 use sp_consensus_grandpa::AuthorityList;
-use sp_runtime::{DispatchResult, RuntimeAppPublic, traits::Member};
+use sp_runtime::{DispatchResult, RuntimeAppPublic, Vec, traits::Member};
 
 mod benchmarking;
 
@@ -28,7 +28,7 @@ pub mod pallet {
     use pallet_subtensor::utils::rate_limiting::TransactionType;
     use sp_runtime::BoundedVec;
     use substrate_fixed::types::I96F32;
-    use subtensor_runtime_common::{NetUid, TaoCurrency};
+    use subtensor_runtime_common::{NetUid, SubId, TaoCurrency};
 
     /// The main data structure of the module.
#[pallet::pallet] @@ -1846,6 +1846,58 @@ pub mod pallet { log::debug!("OwnerHyperparamRateLimitSet( limit: {limit:?} ) "); Ok(()) } + + /// Sets the desired number of subsubnets in a subnet + #[pallet::call_index(76)] + #[pallet::weight(Weight::from_parts(15_000_000, 0) + .saturating_add(::DbWeight::get().reads(1_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)))] + pub fn sudo_set_subsubnet_count( + origin: OriginFor, + netuid: NetUid, + subsub_count: SubId, + ) -> DispatchResult { + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::SubsubnetParameterUpdate], + )?; + + pallet_subtensor::Pallet::::do_set_subsubnet_count(netuid, subsub_count)?; + + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::SubsubnetParameterUpdate], + ); + Ok(()) + } + + /// Sets the emission split between subsubnets in a subnet + #[pallet::call_index(77)] + #[pallet::weight(Weight::from_parts(15_000_000, 0) + .saturating_add(::DbWeight::get().reads(1_u64)) + .saturating_add(::DbWeight::get().writes(1_u64)))] + pub fn sudo_set_subsubnet_emission_split( + origin: OriginFor, + netuid: NetUid, + maybe_split: Option>, + ) -> DispatchResult { + let maybe_owner = pallet_subtensor::Pallet::::ensure_sn_owner_or_root_with_limits( + origin, + netuid, + &[TransactionType::SubsubnetParameterUpdate], + )?; + + pallet_subtensor::Pallet::::do_set_emission_split(netuid, maybe_split)?; + + pallet_subtensor::Pallet::::record_owner_rl( + maybe_owner, + netuid, + &[TransactionType::SubsubnetParameterUpdate], + ); + Ok(()) + } } } diff --git a/pallets/admin-utils/src/tests/mod.rs b/pallets/admin-utils/src/tests/mod.rs index 9b0197860c..25b1b89607 100644 --- a/pallets/admin-utils/src/tests/mod.rs +++ b/pallets/admin-utils/src/tests/mod.rs @@ -11,7 +11,7 @@ use pallet_subtensor::Event; use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{Get, Pair, U256, ed25519}; use substrate_fixed::types::I96F32; -use subtensor_runtime_common::{Currency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{Currency, NetUid, SubId, TaoCurrency}; use crate::Error; use crate::pallet::PrecompileEnable; @@ -827,7 +827,7 @@ fn test_sudo_set_bonds_moving_average() { let netuid = NetUid::from(1); let to_be_set: u64 = 10; add_network(netuid, 10); - let init_value: u64 = SubtensorModule::get_bonds_moving_average(netuid); + let init_value: u64 = SubtensorModule::get_bonds_moving_average(netuid.into()); assert_eq!( AdminUtils::sudo_set_bonds_moving_average( <::RuntimeOrigin>::signed(U256::from(1)), @@ -845,7 +845,7 @@ fn test_sudo_set_bonds_moving_average() { Err(Error::::SubnetDoesNotExist.into()) ); assert_eq!( - SubtensorModule::get_bonds_moving_average(netuid), + SubtensorModule::get_bonds_moving_average(netuid.into()), init_value ); assert_ok!(AdminUtils::sudo_set_bonds_moving_average( @@ -853,7 +853,10 @@ fn test_sudo_set_bonds_moving_average() { netuid, to_be_set )); - assert_eq!(SubtensorModule::get_bonds_moving_average(netuid), to_be_set); + assert_eq!( + SubtensorModule::get_bonds_moving_average(netuid.into()), + to_be_set + ); }); } @@ -2227,3 +2230,42 @@ fn test_sudo_set_max_burn() { ); }); } + +#[test] +fn test_sudo_set_subsubnet_count() { + new_test_ext().execute_with(|| { + let netuid = NetUid::from(1); + let ss_count_ok = SubId::from(8); + let ss_count_bad = SubId::from(9); + + let sn_owner = U256::from(1324); + add_network(netuid, 10); + // Set the Subnet Owner + SubnetOwner::::insert(netuid, sn_owner); + + 
assert_eq!( + AdminUtils::sudo_set_subsubnet_count( + <::RuntimeOrigin>::signed(U256::from(1)), + netuid, + ss_count_ok + ), + Err(DispatchError::BadOrigin) + ); + assert_noop!( + AdminUtils::sudo_set_subsubnet_count(RuntimeOrigin::root(), netuid, ss_count_bad), + pallet_subtensor::Error::::InvalidValue + ); + + assert_ok!(AdminUtils::sudo_set_subsubnet_count( + <::RuntimeOrigin>::root(), + netuid, + ss_count_ok + )); + + assert_ok!(AdminUtils::sudo_set_subsubnet_count( + <::RuntimeOrigin>::signed(sn_owner), + netuid, + ss_count_ok + )); + }); +} diff --git a/pallets/subtensor/rpc/src/lib.rs b/pallets/subtensor/rpc/src/lib.rs index e3d5d8f1c1..ea46695142 100644 --- a/pallets/subtensor/rpc/src/lib.rs +++ b/pallets/subtensor/rpc/src/lib.rs @@ -9,7 +9,7 @@ use jsonrpsee::{ use sp_blockchain::HeaderBackend; use sp_runtime::{AccountId32, traits::Block as BlockT}; use std::sync::Arc; -use subtensor_runtime_common::{NetUid, TaoCurrency}; +use subtensor_runtime_common::{NetUid, SubId, TaoCurrency}; use sp_api::ProvideRuntimeApi; @@ -72,6 +72,15 @@ pub trait SubtensorCustomApi { fn get_all_metagraphs(&self, at: Option) -> RpcResult>; #[method(name = "subnetInfo_getMetagraph")] fn get_metagraph(&self, netuid: NetUid, at: Option) -> RpcResult>; + #[method(name = "subnetInfo_getAllSubMetagraphs")] + fn get_all_submetagraphs(&self, at: Option) -> RpcResult>; + #[method(name = "subnetInfo_getSubMetagraph")] + fn get_submetagraph( + &self, + netuid: NetUid, + subid: SubId, + at: Option, + ) -> RpcResult>; #[method(name = "subnetInfo_getSubnetState")] fn get_subnet_state(&self, netuid: NetUid, at: Option) -> RpcResult>; #[method(name = "subnetInfo_getLockCost")] @@ -83,6 +92,14 @@ pub trait SubtensorCustomApi { metagraph_index: Vec, at: Option, ) -> RpcResult>; + #[method(name = "subnetInfo_getSelectiveSubMetagraph")] + fn get_selective_submetagraph( + &self, + netuid: NetUid, + subid: SubId, + metagraph_index: Vec, + at: Option, + ) -> RpcResult>; } pub struct SubtensorCustom { @@ -319,6 +336,16 @@ where } } + fn get_all_submetagraphs(&self, at: Option<::Hash>) -> RpcResult> { + let api = self.client.runtime_api(); + let at = at.unwrap_or_else(|| self.client.info().best_hash); + + match api.get_all_submetagraphs(at) { + Ok(result) => Ok(result.encode()), + Err(e) => Err(Error::RuntimeError(format!("Unable to get metagraps: {e:?}")).into()), + } + } + fn get_dynamic_info( &self, netuid: NetUid, @@ -352,6 +379,23 @@ where } } + fn get_submetagraph( + &self, + netuid: NetUid, + subid: SubId, + at: Option<::Hash>, + ) -> RpcResult> { + let api = self.client.runtime_api(); + let at = at.unwrap_or_else(|| self.client.info().best_hash); + match api.get_submetagraph(at, netuid, subid) { + Ok(result) => Ok(result.encode()), + Err(e) => Err(Error::RuntimeError(format!( + "Unable to get dynamic subnets info: {e:?}" + )) + .into()), + } + } + fn get_subnet_state( &self, netuid: NetUid, @@ -427,4 +471,22 @@ where } } } + + fn get_selective_submetagraph( + &self, + netuid: NetUid, + subid: SubId, + metagraph_index: Vec, + at: Option<::Hash>, + ) -> RpcResult> { + let api = self.client.runtime_api(); + let at = at.unwrap_or_else(|| self.client.info().best_hash); + + match api.get_selective_submetagraph(at, netuid, subid, metagraph_index) { + Ok(result) => Ok(result.encode()), + Err(e) => { + Err(Error::RuntimeError(format!("Unable to get selective metagraph: {e:?}")).into()) + } + } + } } diff --git a/pallets/subtensor/runtime-api/src/lib.rs b/pallets/subtensor/runtime-api/src/lib.rs index 42d12eb686..3ec76df45f 100644 
--- a/pallets/subtensor/runtime-api/src/lib.rs +++ b/pallets/subtensor/runtime-api/src/lib.rs @@ -12,7 +12,7 @@ use pallet_subtensor::rpc_info::{ subnet_info::{SubnetHyperparams, SubnetHyperparamsV2, SubnetInfo, SubnetInfov2}, }; use sp_runtime::AccountId32; -use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, SubId, TaoCurrency}; // Here we declare the runtime API. It is implemented it the `impl` block in // src/neuron_info.rs, src/subnet_info.rs, and src/delegate_info.rs @@ -40,9 +40,12 @@ sp_api::decl_runtime_apis! { fn get_all_dynamic_info() -> Vec>>; fn get_all_metagraphs() -> Vec>>; fn get_metagraph(netuid: NetUid) -> Option>; + fn get_all_submetagraphs() -> Vec>>; + fn get_submetagraph(netuid: NetUid, subid: SubId) -> Option>; fn get_dynamic_info(netuid: NetUid) -> Option>; fn get_subnet_state(netuid: NetUid) -> Option>; fn get_selective_metagraph(netuid: NetUid, metagraph_indexes: Vec) -> Option>; + fn get_selective_submetagraph(netuid: NetUid, subid: SubId, metagraph_indexes: Vec) -> Option>; } pub trait StakeInfoRuntimeApi { diff --git a/pallets/subtensor/src/coinbase/reveal_commits.rs b/pallets/subtensor/src/coinbase/reveal_commits.rs index e7bc6dc008..d0c068303b 100644 --- a/pallets/subtensor/src/coinbase/reveal_commits.rs +++ b/pallets/subtensor/src/coinbase/reveal_commits.rs @@ -3,7 +3,7 @@ use ark_serialize::CanonicalDeserialize; use codec::Decode; use frame_support::{dispatch, traits::OriginTrait}; use scale_info::prelude::collections::VecDeque; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, SubId}; use tle::{ curves::drand::TinyBLS381, stream_ciphers::AESGCMStreamCipherProvider, @@ -44,152 +44,159 @@ impl Pallet { // Weights revealed must have been committed during epoch `cur_epoch - reveal_period`. let reveal_epoch = cur_epoch.saturating_sub(reveal_period); - // Clean expired commits - for (epoch, _) in TimelockedWeightCommits::::iter_prefix(netuid) { - if epoch < reveal_epoch { - TimelockedWeightCommits::::remove(netuid, epoch); - } - } + // All subsubnets share the same epoch, so the reveal_period/reveal_epoch are also the same + // Reveal for all subsubnets + for subid in 0..SubsubnetCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); - // No commits to reveal until at least epoch reveal_period. - if cur_epoch < reveal_period { - log::trace!("Failed to reveal commit for subnet {netuid} Too early"); - return Ok(()); - } - - let mut entries = TimelockedWeightCommits::::take(netuid, reveal_epoch); - let mut unrevealed = VecDeque::new(); - - // Keep popping items off the front of the queue until we successfully reveal a commit. - while let Some((who, commit_block, serialized_compresssed_commit, round_number)) = - entries.pop_front() - { - // Try to get the round number from pallet_drand. - let pulse = match pallet_drand::Pulses::::get(round_number) { - Some(p) => p, - None => { - // Round number used was not found on the chain. Skip this commit. - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} on block {commit_block} due to missing round number {round_number}; will retry every block in reveal epoch." 
- ); - unrevealed.push_back(( - who, - commit_block, - serialized_compresssed_commit, - round_number, - )); - continue; + // Clean expired commits + for (epoch, _) in TimelockedWeightCommits::::iter_prefix(netuid_index) { + if epoch < reveal_epoch { + TimelockedWeightCommits::::remove(netuid_index, epoch); } - }; + } - let reader = &mut &serialized_compresssed_commit[..]; - let commit = match TLECiphertext::::deserialize_compressed(reader) { - Ok(c) => c, - Err(e) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error deserializing the commit: {e:?}" - ); - continue; - } - }; - - let signature_bytes = pulse - .signature - .strip_prefix(b"0x") - .unwrap_or(&pulse.signature); - - let sig_reader = &mut &signature_bytes[..]; - let sig = match ::SignatureGroup::deserialize_compressed( - sig_reader, - ) { - Ok(s) => s, - Err(e) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error deserializing signature from drand pallet: {e:?}" - ); - continue; - } - }; + // No commits to reveal until at least epoch reveal_period. + if cur_epoch < reveal_period { + log::trace!("Failed to reveal commit for subsubnet {netuid_index} Too early"); + return Ok(()); + } - let decrypted_bytes: Vec = match tld::( - commit, sig, - ) { - Ok(d) => d, - Err(e) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error decrypting the commit: {e:?}" - ); - continue; - } - }; - - // ------------------------------------------------------------------ - // Try to decode payload with the new and legacy formats. - // ------------------------------------------------------------------ - let (uids, values, version_key) = { - let mut reader_new = &decrypted_bytes[..]; - if let Ok(payload) = WeightsTlockPayload::decode(&mut reader_new) { - // Verify hotkey matches committer - let mut hk_reader = &payload.hotkey[..]; - match T::AccountId::decode(&mut hk_reader) { - Ok(decoded_hotkey) if decoded_hotkey == who => { - (payload.uids, payload.values, payload.version_key) - } - Ok(_) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to hotkey mismatch in payload" - ); - continue; - } - Err(e) => { - let mut reader_legacy = &decrypted_bytes[..]; - match LegacyWeightsTlockPayload::decode(&mut reader_legacy) { - Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), - Err(_) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error deserializing hotkey: {e:?}" - ); - continue; + let mut entries = TimelockedWeightCommits::::take(netuid_index, reveal_epoch); + let mut unrevealed = VecDeque::new(); + + // Keep popping items off the front of the queue until we successfully reveal a commit. + while let Some((who, commit_block, serialized_compresssed_commit, round_number)) = + entries.pop_front() + { + // Try to get the round number from pallet_drand. + let pulse = match pallet_drand::Pulses::::get(round_number) { + Some(p) => p, + None => { + // Round number used was not found on the chain. Skip this commit. + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} on block {commit_block} due to missing round number {round_number}; will retry every block in reveal epoch." 
+ ); + unrevealed.push_back(( + who, + commit_block, + serialized_compresssed_commit, + round_number, + )); + continue; + } + }; + + let reader = &mut &serialized_compresssed_commit[..]; + let commit = match TLECiphertext::::deserialize_compressed(reader) { + Ok(c) => c, + Err(e) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error deserializing the commit: {e:?}" + ); + continue; + } + }; + + let signature_bytes = pulse + .signature + .strip_prefix(b"0x") + .unwrap_or(&pulse.signature); + + let sig_reader = &mut &signature_bytes[..]; + let sig = match ::SignatureGroup::deserialize_compressed( + sig_reader, + ) { + Ok(s) => s, + Err(e) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error deserializing signature from drand pallet: {e:?}" + ); + continue; + } + }; + + let decrypted_bytes: Vec = match tld::( + commit, sig, + ) { + Ok(d) => d, + Err(e) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error decrypting the commit: {e:?}" + ); + continue; + } + }; + + // ------------------------------------------------------------------ + // Try to decode payload with the new and legacy formats. + // ------------------------------------------------------------------ + let (uids, values, version_key) = { + let mut reader_new = &decrypted_bytes[..]; + if let Ok(payload) = WeightsTlockPayload::decode(&mut reader_new) { + // Verify hotkey matches committer + let mut hk_reader = &payload.hotkey[..]; + match T::AccountId::decode(&mut hk_reader) { + Ok(decoded_hotkey) if decoded_hotkey == who => { + (payload.uids, payload.values, payload.version_key) + } + Ok(_) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to hotkey mismatch in payload" + ); + continue; + } + Err(e) => { + let mut reader_legacy = &decrypted_bytes[..]; + match LegacyWeightsTlockPayload::decode(&mut reader_legacy) { + Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), + Err(_) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error deserializing hotkey: {e:?}" + ); + continue; + } } } } - } - } else { - // Fallback to legacy payload - let mut reader_legacy = &decrypted_bytes[..]; - match LegacyWeightsTlockPayload::decode(&mut reader_legacy) { - Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), - Err(e) => { - log::trace!( - "Failed to reveal commit for subnet {netuid} submitted by {who:?} due to error deserializing both payload formats: {e:?}" - ); - continue; + } else { + // Fallback to legacy payload + let mut reader_legacy = &decrypted_bytes[..]; + match LegacyWeightsTlockPayload::decode(&mut reader_legacy) { + Ok(legacy) => (legacy.uids, legacy.values, legacy.version_key), + Err(e) => { + log::trace!( + "Failed to reveal commit for subsubnet {netuid_index} submitted by {who:?} due to error deserializing both payload formats: {e:?}" + ); + continue; + } } } + }; + + // ------------------------------------------------------------------ + // Apply weights + // ------------------------------------------------------------------ + if let Err(e) = Self::do_set_sub_weights( + T::RuntimeOrigin::signed(who.clone()), + netuid, + SubId::from(subid), + uids, + values, + version_key, + ) { + log::trace!( + "Failed to `do_set_sub_weights` for subsubnet {netuid_index} submitted by {who:?}: {e:?}" + ); + continue; } - }; - - // 
------------------------------------------------------------------ - // Apply weights - // ------------------------------------------------------------------ - if let Err(e) = Self::do_set_weights( - T::RuntimeOrigin::signed(who.clone()), - netuid, - uids, - values, - version_key, - ) { - log::trace!( - "Failed to `do_set_weights` for subnet {netuid} submitted by {who:?}: {e:?}" - ); - continue; - } - Self::deposit_event(Event::TimelockedWeightsRevealed(netuid, who)); - } + Self::deposit_event(Event::TimelockedWeightsRevealed(netuid_index, who)); + } - if !unrevealed.is_empty() { - TimelockedWeightCommits::::insert(netuid, reveal_epoch, unrevealed); + if !unrevealed.is_empty() { + TimelockedWeightCommits::::insert(netuid_index, reveal_epoch, unrevealed); + } } Ok(()) diff --git a/pallets/subtensor/src/coinbase/root.rs b/pallets/subtensor/src/coinbase/root.rs index 8bb10e0b16..796cf5614b 100644 --- a/pallets/subtensor/src/coinbase/root.rs +++ b/pallets/subtensor/src/coinbase/root.rs @@ -17,12 +17,11 @@ use super::*; use frame_support::dispatch::Pays; -use frame_support::storage::IterableStorageDoubleMap; use frame_support::weights::Weight; use safe_math::*; use sp_core::Get; use substrate_fixed::types::I64F64; -use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, NetUidStorageIndex, TaoCurrency}; impl Pallet { /// Fetches the total count of root network validators @@ -410,6 +409,7 @@ impl Pallet { // --- 1. Return balance to subnet owner. let owner_coldkey: T::AccountId = SubnetOwner::::get(netuid); let reserved_amount = Self::get_subnet_locked_balance(netuid); + let subsubnets: u8 = SubsubnetCountCurrent::::get(netuid).into(); // --- 2. Remove network count. SubnetworkN::::remove(netuid); @@ -427,17 +427,19 @@ impl Pallet { let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let keys = Keys::::iter_prefix(netuid).collect::>(); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); + for subid in 0..subsubnets { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); + } // --- 7. Removes the weights for this subnet (do not remove). - let _ = Weights::::clear_prefix(netuid, u32::MAX, None); + for subid in 0..subsubnets { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); + } // --- 8. Iterate over stored weights and fill the matrix. - for (uid_i, weights_i) in - as IterableStorageDoubleMap>>::iter_prefix( - NetUid::ROOT, - ) - { + for (uid_i, weights_i) in Weights::::iter_prefix(NetUidStorageIndex::ROOT) { // Create a new vector to hold modified weights. let mut modified_weights = weights_i.clone(); // Iterate over each weight entry to potentially update it. @@ -447,7 +449,7 @@ impl Pallet { *weight = 0; // Set weight to 0 for the matching subnet_id. } } - Weights::::insert(NetUid::ROOT, uid_i, modified_weights); + Weights::::insert(NetUidStorageIndex::ROOT, uid_i, modified_weights); } // --- 9. Remove various network-related parameters. 
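[Reviewer aside, not part of the patch: the hunks above and below replace single-map removals with loops over `subid` in `0..SubsubnetCountCurrent`, deriving a `NetUidStorageIndex` per (netuid, subid) pair before clearing Weights, Bonds, Incentive and LastUpdate. A minimal, self-contained sketch of that iteration pattern follows; the bit packing in `subsubnet_storage_index` is hypothetical, since the real `get_subsubnet_storage_index` is defined elsewhere in this PR.]

fn subsubnet_storage_index(netuid: u16, subid: u8) -> u16 {
    // Assumed layout, for illustration only: upper 3 bits = subid (max 8
    // sub-subnets), lower 13 bits = netuid. Not taken from the PR.
    ((subid as u16) << 13) | (netuid & 0x1FFF)
}

fn main() {
    let netuid: u16 = 3;
    let subsubnets: u8 = 8; // stands in for SubsubnetCountCurrent::<T>::get(netuid)
    for subid in 0..subsubnets {
        let netuid_index = subsubnet_storage_index(netuid, subid);
        // In the pallet, per-sub-subnet entries stored under this index are
        // removed when the subnet is dissolved.
        println!("clearing per-sub-subnet storage under index {netuid_index}");
    }
}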
@@ -455,11 +457,17 @@ impl Pallet { Trust::::remove(netuid); Active::::remove(netuid); Emission::::remove(netuid); - Incentive::::remove(netuid); + for subid in 0..subsubnets { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + Incentive::::remove(netuid_index); + } Consensus::::remove(netuid); Dividends::::remove(netuid); PruningScores::::remove(netuid); - LastUpdate::::remove(netuid); + for subid in 0..subsubnets { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + LastUpdate::::remove(netuid_index); + } ValidatorPermit::::remove(netuid); ValidatorTrust::::remove(netuid); diff --git a/pallets/subtensor/src/coinbase/run_coinbase.rs b/pallets/subtensor/src/coinbase/run_coinbase.rs index 7651a4162f..4bc6d11618 100644 --- a/pallets/subtensor/src/coinbase/run_coinbase.rs +++ b/pallets/subtensor/src/coinbase/run_coinbase.rs @@ -657,7 +657,7 @@ impl Pallet { // Run the epoch. let hotkey_emission: Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> = - Self::epoch(netuid, pending_alpha.saturating_add(pending_swapped)); + Self::epoch_with_subsubnets(netuid, pending_alpha.saturating_add(pending_swapped)); log::debug!("hotkey_emission: {hotkey_emission:?}"); // Compute the pending validator alpha. diff --git a/pallets/subtensor/src/epoch/run_epoch.rs b/pallets/subtensor/src/epoch/run_epoch.rs index 7906064780..3dfcf0ac05 100644 --- a/pallets/subtensor/src/epoch/run_epoch.rs +++ b/pallets/subtensor/src/epoch/run_epoch.rs @@ -1,19 +1,163 @@ use super::*; use crate::epoch::math::*; +use alloc::collections::BTreeMap; use frame_support::IterableStorageDoubleMap; use safe_math::*; +use sp_std::collections::btree_map::IntoIter; use sp_std::vec; use substrate_fixed::types::{I32F32, I64F64, I96F32}; -use subtensor_runtime_common::{AlphaCurrency, NetUid}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, SubId}; + +#[derive(Debug, Default)] +pub struct EpochTerms { + pub uid: usize, + pub dividend: u16, + pub incentive: u16, + pub validator_emission: AlphaCurrency, + pub server_emission: AlphaCurrency, + pub stake_weight: u16, + pub active: bool, + pub emission: AlphaCurrency, + pub rank: u16, + pub trust: u16, + pub consensus: u16, + pub pruning_score: u16, + pub validator_trust: u16, + pub new_validator_permit: bool, + pub bond: Vec<(u16, u16)>, +} + +pub struct EpochOutput(pub BTreeMap); + +impl EpochOutput { + pub fn as_map(&self) -> &BTreeMap { + &self.0 + } +} + +impl IntoIterator for EpochOutput +where + T: frame_system::Config, + T::AccountId: Ord, +{ + type Item = (T::AccountId, EpochTerms); + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +#[macro_export] +macro_rules! 
extract_from_sorted_terms { + ($sorted:expr, $field:ident) => {{ + ($sorted) + .iter() + .copied() + .map(|t| t.$field) + .collect::>() + }}; +} impl Pallet { + /// Legacy epoch function interface (TODO: Is only used for tests, remove) + pub fn epoch( + netuid: NetUid, + rao_emission: AlphaCurrency, + ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + // Run subsubnet-style epoch + let output = Self::epoch_subsubnet(netuid, SubId::MAIN, rao_emission); + + // Persist values in legacy format + Self::persist_subsub_epoch_terms(netuid, SubId::MAIN, output.as_map()); + Self::persist_netuid_epoch_terms(netuid, output.as_map()); + + // Remap and return + output + .into_iter() + .map(|(hotkey, terms)| (hotkey, terms.server_emission, terms.validator_emission)) + .collect() + } + + /// Legacy epoch_dense function interface (TODO: Is only used for tests, remove) + pub fn epoch_dense( + netuid: NetUid, + rao_emission: AlphaCurrency, + ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + Self::epoch_dense_subsubnet(netuid, SubId::MAIN, rao_emission) + } + + /// Persists per-subsubnet epoch output in state + pub fn persist_subsub_epoch_terms( + netuid: NetUid, + subid: SubId, + output: &BTreeMap, + ) { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + let mut terms_sorted: sp_std::vec::Vec<&EpochTerms> = output.values().collect(); + terms_sorted.sort_unstable_by_key(|t| t.uid); + + let incentive = extract_from_sorted_terms!(terms_sorted, incentive); + let bonds: Vec> = terms_sorted + .iter() + .cloned() + .map(|t| t.bond.clone()) + .collect::>(); + + Incentive::::insert(netuid_index, incentive); + + let server_emission = extract_from_sorted_terms!(terms_sorted, server_emission); + Self::deposit_event(Event::IncentiveAlphaEmittedToMiners { + netuid: netuid_index, + emissions: server_emission, + }); + + bonds + .into_iter() + .enumerate() + .for_each(|(uid_usize, bond_vec)| { + let uid: u16 = uid_usize.try_into().unwrap_or_default(); + Bonds::::insert(netuid_index, uid, bond_vec); + }); + } + + /// Persists per-netuid epoch output in state + pub fn persist_netuid_epoch_terms(netuid: NetUid, output: &BTreeMap) { + let mut terms_sorted: sp_std::vec::Vec<&EpochTerms> = output.values().collect(); + terms_sorted.sort_unstable_by_key(|t| t.uid); + + let active = extract_from_sorted_terms!(terms_sorted, active); + let emission = extract_from_sorted_terms!(terms_sorted, emission); + let rank = extract_from_sorted_terms!(terms_sorted, rank); + let trust = extract_from_sorted_terms!(terms_sorted, trust); + let consensus = extract_from_sorted_terms!(terms_sorted, consensus); + let dividend = extract_from_sorted_terms!(terms_sorted, dividend); + let pruning_score = extract_from_sorted_terms!(terms_sorted, pruning_score); + let validator_trust = extract_from_sorted_terms!(terms_sorted, validator_trust); + let new_validator_permit = extract_from_sorted_terms!(terms_sorted, new_validator_permit); + + Active::::insert(netuid, active.clone()); + Emission::::insert(netuid, emission); + Rank::::insert(netuid, rank); + Trust::::insert(netuid, trust); + Consensus::::insert(netuid, consensus); + Dividends::::insert(netuid, dividend); + PruningScores::::insert(netuid, pruning_score); + ValidatorTrust::::insert(netuid, validator_trust); + ValidatorPermit::::insert(netuid, new_validator_permit); + } + /// Calculates reward consensus and returns the emissions for uids/hotkeys in a given `netuid`. /// (Dense version used only for testing purposes.) 
#[allow(clippy::indexing_slicing)] - pub fn epoch_dense( + pub fn epoch_dense_subsubnet( netuid: NetUid, + subid: SubId, rao_emission: AlphaCurrency, ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + // Calculate netuid storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + // Get subnetwork size. let n: u16 = Self::get_subnetwork_n(netuid); log::trace!("n: {n:?}"); @@ -35,7 +179,7 @@ impl Pallet { log::trace!("activity_cutoff: {activity_cutoff:?}"); // Last update vector. - let last_update: Vec = Self::get_last_update(netuid); + let last_update: Vec = Self::get_last_update(netuid_index); log::trace!("Last update: {:?}", &last_update); // Inactive mask. @@ -150,7 +294,7 @@ impl Pallet { let owner_uid: Option = Self::get_owner_uid(netuid); // Access network weights row unnormalized. - let mut weights: Vec> = Self::get_weights(netuid); + let mut weights: Vec> = Self::get_weights(netuid_index); log::trace!("W: {:?}", &weights); // Mask weights that are not from permitted validators. @@ -222,7 +366,7 @@ impl Pallet { let mut ema_bonds: Vec>; if Yuma3On::::get(netuid) { // Access network bonds. - let mut bonds: Vec> = Self::get_bonds_fixed_proportion(netuid); + let mut bonds: Vec> = Self::get_bonds_fixed_proportion(netuid_index); inplace_mask_cols(&recently_registered, &mut bonds); // mask outdated bonds log::trace!("B: {:?}", &bonds); @@ -249,7 +393,7 @@ impl Pallet { } else { // original Yuma - liquid alpha disabled // Access network bonds. - let mut bonds: Vec> = Self::get_bonds(netuid); + let mut bonds: Vec> = Self::get_bonds(netuid_index); // Remove bonds referring to neurons that have registered since last tempo. inplace_mask_cols(&recently_registered, &mut bonds); // mask recently registered bonds inplace_col_normalize(&mut bonds); // sum_i b_ij = 1 @@ -391,7 +535,7 @@ impl Pallet { Rank::::insert(netuid, cloned_ranks); Trust::::insert(netuid, cloned_trust); Consensus::::insert(netuid, cloned_consensus); - Incentive::::insert(netuid, cloned_incentive); + Incentive::::insert(NetUidStorageIndex::from(netuid), cloned_incentive); Dividends::::insert(netuid, cloned_dividends); PruningScores::::insert(netuid, cloned_pruning_scores); ValidatorTrust::::insert(netuid, cloned_validator_trust); @@ -408,11 +552,11 @@ impl Pallet { let new_bonds_row: Vec<(u16, u16)> = (0..n) .zip(vec_fixed_proportions_to_u16(ema_bond.clone())) .collect(); - Bonds::::insert(netuid, i as u16, new_bonds_row); + Bonds::::insert(netuid_index, i as u16, new_bonds_row); } else if validator_permit { // Only overwrite the intersection. let new_empty_bonds_row: Vec<(u16, u16)> = vec![]; - Bonds::::insert(netuid, i as u16, new_empty_bonds_row); + Bonds::::insert(netuid_index, i as u16, new_empty_bonds_row); } }); @@ -441,11 +585,27 @@ impl Pallet { /// * 'debug' ( bool ): /// - Print debugging outputs. /// - #[allow(clippy::indexing_slicing)] - pub fn epoch( + pub fn epoch_subsubnet( netuid: NetUid, + subid: SubId, rao_emission: AlphaCurrency, - ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + ) -> EpochOutput { + // Calculate netuid storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + + // Initialize output keys (neuron hotkeys) and UIDs + let mut terms_map: BTreeMap = Keys::::iter_prefix(netuid) + .map(|(uid, hotkey)| { + ( + hotkey, + EpochTerms { + uid: uid as usize, + ..Default::default() + }, + ) + }) + .collect(); + // Get subnetwork size. 
let n = Self::get_subnetwork_n(netuid); log::trace!("Number of Neurons in Network: {n:?}"); @@ -467,7 +627,7 @@ impl Pallet { log::trace!("activity_cutoff: {activity_cutoff:?}"); // Last update vector. - let last_update: Vec = Self::get_last_update(netuid); + let last_update: Vec = Self::get_last_update(netuid_index); log::trace!("Last update: {:?}", &last_update); // Inactive mask. @@ -488,11 +648,6 @@ impl Pallet { // == Stake == // =========== - let hotkeys: Vec<(u16, T::AccountId)> = - as IterableStorageDoubleMap>::iter_prefix(netuid) - .collect(); - log::debug!("hotkeys: {:?}", &hotkeys); - // Access network stake as normalized vector. let (total_stake, _alpha_stake, _tao_stake): (Vec, Vec, Vec) = Self::get_stake_weights_for_network(netuid); @@ -559,7 +714,7 @@ impl Pallet { let owner_uid: Option = Self::get_owner_uid(netuid); // Access network weights row unnormalized. - let mut weights: Vec> = Self::get_weights_sparse(netuid); + let mut weights: Vec> = Self::get_weights_sparse(netuid_index); log::trace!("Weights: {:?}", &weights); // Mask weights that are not from permitted validators. @@ -587,19 +742,14 @@ impl Pallet { let mut commit_blocks: Vec = vec![u64::MAX; n as usize]; // MAX ⇒ “no active commit” // helper: hotkey → uid - let uid_of = |acct: &T::AccountId| -> Option { - hotkeys - .iter() - .find(|(_, a)| a == acct) - .map(|(uid, _)| *uid as usize) - }; + let uid_of = |acct: &T::AccountId| terms_map.get(acct).map(|t| t.uid); // ---------- v2 ------------------------------------------------------ - for (who, q) in WeightCommits::::iter_prefix(netuid) { + for (who, q) in WeightCommits::::iter_prefix(netuid_index) { for (_, cb, _, _) in q.iter() { if !Self::is_commit_expired(netuid, *cb) { - if let Some(i) = uid_of(&who) { - commit_blocks[i] = commit_blocks[i].min(*cb); + if let Some(cell) = uid_of(&who).and_then(|i| commit_blocks.get_mut(i)) { + *cell = (*cell).min(*cb); } break; // earliest active found } @@ -607,11 +757,11 @@ impl Pallet { } // ---------- v3 ------------------------------------------------------ - for (_epoch, q) in TimelockedWeightCommits::::iter_prefix(netuid) { + for (_epoch, q) in TimelockedWeightCommits::::iter_prefix(netuid_index) { for (who, cb, ..) in q.iter() { if !Self::is_commit_expired(netuid, *cb) { - if let Some(i) = uid_of(who) { - commit_blocks[i] = commit_blocks[i].min(*cb); + if let Some(cell) = uid_of(who).and_then(|i| commit_blocks.get_mut(i)) { + *cell = (*cell).min(*cb); } } } @@ -688,7 +838,7 @@ impl Pallet { let mut ema_bonds: Vec>; if Yuma3On::::get(netuid) { // Access network bonds. - let mut bonds = Self::get_bonds_sparse_fixed_proportion(netuid); + let mut bonds = Self::get_bonds_sparse_fixed_proportion(netuid_index); log::trace!("Bonds: {:?}", &bonds); // Remove bonds referring to neurons that have registered since last tempo. @@ -705,7 +855,8 @@ impl Pallet { // Compute the Exponential Moving Average (EMA) of bonds. log::trace!("weights_for_bonds: {:?}", &weights_for_bonds); - ema_bonds = Self::compute_bonds_sparse(netuid, &weights_for_bonds, &bonds, &consensus); + ema_bonds = + Self::compute_bonds_sparse(netuid_index, &weights_for_bonds, &bonds, &consensus); log::trace!("emaB: {:?}", &ema_bonds); // Normalize EMA bonds. @@ -727,7 +878,7 @@ impl Pallet { } else { // original Yuma - liquid alpha disabled // Access network bonds. 
- let mut bonds: Vec> = Self::get_bonds_sparse(netuid); + let mut bonds: Vec> = Self::get_bonds_sparse(netuid_index); log::trace!("B: {:?}", &bonds); // Remove bonds referring to neurons that have registered since last tempo. @@ -756,7 +907,7 @@ impl Pallet { log::trace!("ΔB (norm): {:?}", &bonds_delta); // Compute the Exponential Moving Average (EMA) of bonds. - ema_bonds = Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid); + ema_bonds = Self::compute_ema_bonds_normal_sparse(&bonds_delta, &bonds, netuid_index); // Normalize EMA bonds. inplace_col_normalize_sparse(&mut ema_bonds, n); // sum_i b_ij = 1 log::trace!("Exponential Moving Average Bonds: {:?}", &ema_bonds); @@ -855,9 +1006,9 @@ impl Pallet { let pruning_scores: Vec = normalized_combined_emission.clone(); log::trace!("Pruning Scores: {:?}", &pruning_scores); - // =================== - // == Value storage == - // =================== + // =========================== + // == Populate epoch output == + // =========================== let cloned_stake_weight: Vec = stake .iter() .map(|xi| fixed_proportion_to_u16(*xi)) @@ -888,54 +1039,55 @@ impl Pallet { .iter() .map(|xi| fixed_proportion_to_u16(*xi)) .collect::>(); - StakeWeight::::insert(netuid, cloned_stake_weight.clone()); - Active::::insert(netuid, active.clone()); - Emission::::insert(netuid, cloned_emission); - Rank::::insert(netuid, cloned_ranks); - Trust::::insert(netuid, cloned_trust); - Consensus::::insert(netuid, cloned_consensus); - Incentive::::insert(netuid, cloned_incentive); - Dividends::::insert(netuid, cloned_dividends); - PruningScores::::insert(netuid, cloned_pruning_scores); - ValidatorTrust::::insert(netuid, cloned_validator_trust); - ValidatorPermit::::insert(netuid, new_validator_permits.clone()); - - new_validator_permits - .iter() - .zip(validator_permits) - .zip(ema_bonds) - .enumerate() - .for_each(|(i, ((new_permit, validator_permit), ema_bond))| { - // Set bonds only if uid retains validator permit, otherwise clear bonds. - if *new_permit { - let new_bonds_row: Vec<(u16, u16)> = ema_bond - .iter() - .map(|(j, value)| (*j, fixed_proportion_to_u16(*value))) - .collect(); - Bonds::::insert(netuid, i as u16, new_bonds_row); - } else if validator_permit { - // Only overwrite the intersection. 
- let new_empty_bonds_row: Vec<(u16, u16)> = vec![]; - Bonds::::insert(netuid, i as u16, new_empty_bonds_row); - } - }); - Self::deposit_event(Event::IncentiveAlphaEmittedToMiners { - netuid, - emissions: server_emission.clone(), - }); + for (_hotkey, terms) in terms_map.iter_mut() { + terms.dividend = cloned_dividends.get(terms.uid).copied().unwrap_or_default(); + terms.incentive = cloned_incentive.get(terms.uid).copied().unwrap_or_default(); + terms.validator_emission = validator_emission + .get(terms.uid) + .copied() + .unwrap_or_default(); + terms.server_emission = server_emission.get(terms.uid).copied().unwrap_or_default(); + terms.stake_weight = cloned_stake_weight + .get(terms.uid) + .copied() + .unwrap_or_default(); + terms.active = active.get(terms.uid).copied().unwrap_or_default(); + terms.emission = cloned_emission.get(terms.uid).copied().unwrap_or_default(); + terms.rank = cloned_ranks.get(terms.uid).copied().unwrap_or_default(); + terms.trust = cloned_trust.get(terms.uid).copied().unwrap_or_default(); + terms.consensus = cloned_consensus.get(terms.uid).copied().unwrap_or_default(); + terms.pruning_score = cloned_pruning_scores + .get(terms.uid) + .copied() + .unwrap_or_default(); + terms.validator_trust = cloned_validator_trust + .get(terms.uid) + .copied() + .unwrap_or_default(); + terms.new_validator_permit = new_validator_permits + .get(terms.uid) + .copied() + .unwrap_or_default(); + let old_validator_permit = validator_permits + .get(terms.uid) + .copied() + .unwrap_or_default(); + + // Bonds + if terms.new_validator_permit { + let ema_bond = ema_bonds.get(terms.uid).cloned().unwrap_or_default(); + terms.bond = ema_bond + .iter() + .map(|(j, value)| (*j, fixed_proportion_to_u16(*value))) + .collect(); + } else if old_validator_permit { + // Only overwrite the intersection. + terms.bond = vec![]; + } + } - // Emission tuples ( hotkeys, server_emission, validator_emission ) - hotkeys - .into_iter() - .map(|(uid_i, hotkey)| { - ( - hotkey, - server_emission[uid_i as usize], - validator_emission[uid_i as usize], - ) - }) - .collect() + EpochOutput(terms_map) } pub fn get_float_rho(netuid: NetUid) -> I32F32 { @@ -965,59 +1117,54 @@ impl Pallet { } /// Output unnormalized sparse weights, input weights are assumed to be row max-upscaled in u16. - pub fn get_weights_sparse(netuid: NetUid) -> Vec> { + pub fn get_weights_sparse(netuid_index: NetUidStorageIndex) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![]; n]; for (uid_i, weights_i) in - as IterableStorageDoubleMap>>::iter_prefix( - netuid, - ) - .filter(|(uid_i, _)| *uid_i < n as u16) + Weights::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, weight_ij) in weights_i.iter().filter(|(uid_j, _)| *uid_j < n as u16) { - weights - .get_mut(uid_i as usize) - .expect("uid_i is filtered to be less than n; qed") - .push((*uid_j, I32F32::saturating_from_num(*weight_ij))); + if let Some(row) = weights.get_mut(uid_i as usize) { + row.push((*uid_j, I32F32::saturating_from_num(*weight_ij))); + } else { + log::error!("uid_i {uid_i:?} is filtered to be less than n"); + } } } weights } /// Output unnormalized weights in [n, n] matrix, input weights are assumed to be row max-upscaled in u16. 
- pub fn get_weights(netuid: NetUid) -> Vec> { + pub fn get_weights(netuid_index: NetUidStorageIndex) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n = Self::get_subnetwork_n(netuid) as usize; let mut weights: Vec> = vec![vec![I32F32::saturating_from_num(0.0); n]; n]; for (uid_i, weights_vec) in - as IterableStorageDoubleMap>>::iter_prefix( - netuid, - ) - .filter(|(uid_i, _)| *uid_i < n as u16) + Weights::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, weight_ij) in weights_vec .into_iter() .filter(|(uid_j, _)| *uid_j < n as u16) { - *weights + if let Some(cell) = weights .get_mut(uid_i as usize) - .expect("uid_i is filtered to be less than n; qed") - .get_mut(uid_j as usize) - .expect("uid_j is filtered to be less than n; qed") = - I32F32::saturating_from_num(weight_ij); + .and_then(|row| row.get_mut(uid_j as usize)) + { + *cell = I32F32::saturating_from_num(weight_ij); + } } } weights } /// Output unnormalized sparse bonds, input bonds are assumed to be column max-upscaled in u16. - pub fn get_bonds_sparse(netuid: NetUid) -> Vec> { + pub fn get_bonds_sparse(netuid_index: NetUidStorageIndex) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n = Self::get_subnetwork_n(netuid) as usize; let mut bonds: Vec> = vec![vec![]; n]; for (uid_i, bonds_vec) in - as IterableStorageDoubleMap>>::iter_prefix( - netuid, - ) - .filter(|(uid_i, _)| *uid_i < n as u16) + Bonds::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, bonds_ij) in bonds_vec { bonds @@ -1030,14 +1177,12 @@ impl Pallet { } /// Output unnormalized bonds in [n, n] matrix, input bonds are assumed to be column max-upscaled in u16. - pub fn get_bonds(netuid: NetUid) -> Vec> { + pub fn get_bonds(netuid_index: NetUidStorageIndex) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); let n: usize = Self::get_subnetwork_n(netuid) as usize; let mut bonds: Vec> = vec![vec![I32F32::saturating_from_num(0.0); n]; n]; for (uid_i, bonds_vec) in - as IterableStorageDoubleMap>>::iter_prefix( - netuid, - ) - .filter(|(uid_i, _)| *uid_i < n as u16) + Bonds::::iter_prefix(netuid_index).filter(|(uid_i, _)| *uid_i < n as u16) { for (uid_j, bonds_ij) in bonds_vec.into_iter().filter(|(uid_j, _)| *uid_j < n as u16) { *bonds @@ -1051,7 +1196,7 @@ impl Pallet { bonds } - pub fn get_bonds_fixed_proportion(netuid: NetUid) -> Vec> { + pub fn get_bonds_fixed_proportion(netuid: NetUidStorageIndex) -> Vec> { let mut bonds = Self::get_bonds(netuid); bonds.iter_mut().for_each(|bonds_row| { bonds_row @@ -1061,7 +1206,9 @@ impl Pallet { bonds } - pub fn get_bonds_sparse_fixed_proportion(netuid: NetUid) -> Vec> { + pub fn get_bonds_sparse_fixed_proportion( + netuid: NetUidStorageIndex, + ) -> Vec> { let mut bonds = Self::get_bonds_sparse(netuid); bonds.iter_mut().for_each(|bonds_row| { bonds_row @@ -1083,8 +1230,10 @@ impl Pallet { pub fn compute_ema_bonds_normal_sparse( bonds_delta: &[Vec<(u16, I32F32)>], bonds: &[Vec<(u16, I32F32)>], - netuid: NetUid, + netuid_index: NetUidStorageIndex, ) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); + // Retrieve the bonds moving average for the given network ID and scale it down. let bonds_moving_average: I64F64 = I64F64::saturating_from_num(Self::get_bonds_moving_average(netuid)) @@ -1191,11 +1340,13 @@ impl Pallet { /// # Returns: /// A vector of EMA bonds. 
pub fn compute_bonds_sparse( - netuid: NetUid, + netuid_index: NetUidStorageIndex, weights: &[Vec<(u16, I32F32)>], bonds: &[Vec<(u16, I32F32)>], consensus: &[I32F32], ) -> Vec> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); + // Check if Liquid Alpha is enabled, consensus is not empty, and contains non-zero values. if LiquidAlphaOn::::get(netuid) && !consensus.is_empty() @@ -1418,7 +1569,12 @@ impl Pallet { Ok(()) } - pub fn do_reset_bonds(netuid: NetUid, account_id: &T::AccountId) -> Result<(), DispatchError> { + pub fn do_reset_bonds( + netuid_index: NetUidStorageIndex, + account_id: &T::AccountId, + ) -> Result<(), DispatchError> { + let (netuid, _) = Self::get_netuid_and_subid(netuid_index).unwrap_or_default(); + // check bonds reset enabled for this subnet let bonds_reset_enabled: bool = Self::get_bonds_reset(netuid); if !bonds_reset_enabled { @@ -1426,9 +1582,9 @@ impl Pallet { } if let Ok(uid) = Self::get_uid_for_net_and_hotkey(netuid, account_id) { - for (i, bonds_vec) in Bonds::::iter_prefix(netuid) { + for (i, bonds_vec) in Bonds::::iter_prefix(netuid_index) { Bonds::::insert( - netuid, + netuid_index, i, bonds_vec .clone() diff --git a/pallets/subtensor/src/lib.rs b/pallets/subtensor/src/lib.rs index c275521753..dd12a9b76b 100644 --- a/pallets/subtensor/src/lib.rs +++ b/pallets/subtensor/src/lib.rs @@ -86,7 +86,9 @@ pub mod pallet { use sp_std::vec::Vec; use substrate_fixed::types::{I96F32, U64F64}; use subtensor_macros::freeze_struct; - use subtensor_runtime_common::{AlphaCurrency, Currency, NetUid, TaoCurrency}; + use subtensor_runtime_common::{ + AlphaCurrency, Currency, NetUid, NetUidStorageIndex, SubId, TaoCurrency, + }; #[cfg(not(feature = "std"))] use alloc::boxed::Box; @@ -1544,7 +1546,7 @@ pub mod pallet { #[pallet::storage] /// --- MAP ( netuid ) --> incentive pub type Incentive = - StorageMap<_, Identity, NetUid, Vec, ValueQuery, EmptyU16Vec>; + StorageMap<_, Identity, NetUidStorageIndex, Vec, ValueQuery, EmptyU16Vec>; #[pallet::storage] /// --- MAP ( netuid ) --> dividends pub type Dividends = @@ -1555,7 +1557,7 @@ pub mod pallet { #[pallet::storage] /// --- MAP ( netuid ) --> last_update pub type LastUpdate = - StorageMap<_, Identity, NetUid, Vec, ValueQuery, EmptyU64Vec>; + StorageMap<_, Identity, NetUidStorageIndex, Vec, ValueQuery, EmptyU64Vec>; #[pallet::storage] /// --- MAP ( netuid ) --> validator_trust pub type ValidatorTrust = @@ -1573,7 +1575,7 @@ pub mod pallet { pub type Weights = StorageDoubleMap< _, Identity, - NetUid, + NetUidStorageIndex, Identity, u16, Vec<(u16, u16)>, @@ -1585,7 +1587,7 @@ pub mod pallet { pub type Bonds = StorageDoubleMap< _, Identity, - NetUid, + NetUidStorageIndex, Identity, u16, Vec<(u16, u16)>, @@ -1695,7 +1697,7 @@ pub mod pallet { pub type WeightCommits = StorageDoubleMap< _, Twox64Concat, - NetUid, + NetUidStorageIndex, Twox64Concat, T::AccountId, VecDeque<(H256, u64, u64, u64)>, @@ -1707,7 +1709,7 @@ pub mod pallet { pub type TimelockedWeightCommits = StorageDoubleMap< _, Twox64Concat, - NetUid, + NetUidStorageIndex, Twox64Concat, u64, // epoch key VecDeque<( @@ -1724,7 +1726,7 @@ pub mod pallet { pub type CRV3WeightCommits = StorageDoubleMap< _, Twox64Concat, - NetUid, + NetUidStorageIndex, Twox64Concat, u64, // epoch key VecDeque<( @@ -1740,7 +1742,7 @@ pub mod pallet { pub type CRV3WeightCommitsV2 = StorageDoubleMap< _, Twox64Concat, - NetUid, + NetUidStorageIndex, Twox64Concat, u64, // epoch key VecDeque<( @@ -1822,6 +1824,33 @@ pub mod pallet { pub type CommitRevealWeightsVersion = 
StorageValue<_, u16, ValueQuery, DefaultCommitRevealWeightsVersion>; + /// ====================== + /// ==== Sub-subnets ===== + /// ====================== + #[pallet::type_value] + /// -- ITEM (Default number of sub-subnets) + pub fn DefaultSubsubnetCount() -> SubId { + SubId::from(1) + } + #[pallet::type_value] + /// -- ITEM (Maximum number of sub-subnets) + pub fn MaxSubsubnetCount() -> SubId { + SubId::from(8) + } + #[pallet::type_value] + /// -- ITEM (Rate limit for subsubnet count updates) + pub fn SubsubnetCountSetRateLimit() -> u64 { + prod_or_fast!(7_200, 0) + } + #[pallet::storage] + /// --- MAP ( netuid ) --> Current number of sub-subnets + pub type SubsubnetCountCurrent = + StorageMap<_, Twox64Concat, NetUid, SubId, ValueQuery, DefaultSubsubnetCount>; + #[pallet::storage] + /// --- MAP ( netuid ) --> Normalized vector of emission split proportion between subsubnets + pub type SubsubnetEmissionSplit = + StorageMap<_, Twox64Concat, NetUid, Vec, OptionQuery>; + /// ================== /// ==== Genesis ===== /// ================== diff --git a/pallets/subtensor/src/macros/dispatches.rs b/pallets/subtensor/src/macros/dispatches.rs index f069ff0aa5..28f7ff384d 100644 --- a/pallets/subtensor/src/macros/dispatches.rs +++ b/pallets/subtensor/src/macros/dispatches.rs @@ -78,7 +78,7 @@ mod dispatches { /// - Attempting to set weights with max value exceeding limit. #[pallet::call_index(0)] #[pallet::weight((Weight::from_parts(15_540_000_000, 0) - .saturating_add(T::DbWeight::get().reads(4111)) + .saturating_add(T::DbWeight::get().reads(4112_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn set_weights( origin: OriginFor, @@ -94,6 +94,86 @@ mod dispatches { } } + /// --- Sets the caller weights for the incentive mechanism for subsubnets. The call + /// can be made from the hotkey account so is potentially insecure, however, the damage + /// of changing weights is minimal if caught early. This function includes all the + /// checks that the passed weights meet the requirements. Stored as u16s they represent + /// rational values in the range [0,1] which sum to 1 and can be interpreted as + /// probabilities. The specific weights determine how inflation propagates outward + /// from this peer. + /// + /// Note: The 16 bit integers weights should represent 1.0 as the max u16. + /// However, the function normalizes all integers to u16_max anyway. This means that if the sum of all + /// elements is larger or smaller than the amount of elements * u16_max, all elements + /// will be corrected for this deviation. + /// + /// # Args: + /// * `origin`: (Origin): + /// - The caller, a hotkey who wishes to set their weights. + /// + /// * `netuid` (u16): + /// - The network uid we are setting these weights on. + /// + /// * `subid` (`u8`): + /// - The u8 subsubnet identifier. + /// + /// * `dests` (Vec): + /// - The edge endpoint for the weight, i.e. j for w_ij. + /// + /// * 'weights' (Vec): + /// - The u16 integer encoded weights. Interpreted as rational + /// values in the range [0,1]. They must sum to in32::MAX. + /// + /// * 'version_key' ( u64 ): + /// - The network version key to check if the validator is up to date. + /// + /// # Event: + /// * WeightsSet; + /// - On successfully setting the weights on chain. + /// + /// # Raises: + /// * 'SubNetworkDoesNotExist': + /// - Attempting to set weights on a non-existent network. + /// + /// * 'NotRegistered': + /// - Attempting to set weights from a non registered account. 
+ /// + /// * 'WeightVecNotEqualSize': + /// - Attempting to set weights with uids not of same length. + /// + /// * 'DuplicateUids': + /// - Attempting to set weights with duplicate uids. + /// + /// * 'UidsLengthExceedUidsInSubNet': + /// - Attempting to set weights above the max allowed uids. + /// + /// * 'UidVecContainInvalidOne': + /// - Attempting to set weights with invalid uids. + /// + /// * 'WeightVecLengthIsLow': + /// - Attempting to set weights with fewer weights than min. + /// + /// * 'MaxWeightExceeded': + /// - Attempting to set weights with max value exceeding limit. + #[pallet::call_index(119)] + #[pallet::weight((Weight::from_parts(15_540_000_000, 0) + .saturating_add(T::DbWeight::get().reads(4111)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn set_sub_weights( + origin: OriginFor, + netuid: NetUid, + subid: SubId, + dests: Vec, + weights: Vec, + version_key: u64, + ) -> DispatchResult { + if Self::get_commit_reveal_weights_enabled(netuid) { + Err(Error::::CommitRevealEnabled.into()) + } else { + Self::do_set_sub_weights(origin, netuid, subid, dests, weights, version_key) + } + } + /// --- Allows a hotkey to set weights for multiple netuids as a batch. /// /// # Args: @@ -121,7 +201,7 @@ mod dispatches { /// #[pallet::call_index(80)] #[pallet::weight((Weight::from_parts(95_460_000, 0) - .saturating_add(T::DbWeight::get().reads(14_u64)) + .saturating_add(T::DbWeight::get().reads(15_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)), DispatchClass::Normal, Pays::No))] pub fn batch_set_weights( origin: OriginFor, @@ -152,8 +232,8 @@ mod dispatches { /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. /// #[pallet::call_index(96)] - #[pallet::weight((Weight::from_parts(55_130_000, 0) - .saturating_add(T::DbWeight::get().reads(7)) + #[pallet::weight((Weight::from_parts(67_770_000, 0) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_weights( origin: T::RuntimeOrigin, @@ -163,6 +243,41 @@ mod dispatches { Self::do_commit_weights(origin, netuid, commit_hash) } + /// ---- Used to commit a hash of your weight values to later be revealed for subsubnets. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The signature of the committing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `subid` (`u8`): + /// - The u8 subsubnet identifier. + /// + /// * `commit_hash` (`H256`): + /// - The hash representing the committed weights. + /// + /// # Raises: + /// * `CommitRevealDisabled`: + /// - Attempting to commit when the commit-reveal mechanism is disabled. + /// + /// * `TooManyUnrevealedCommits`: + /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. + /// + #[pallet::call_index(115)] + #[pallet::weight((Weight::from_parts(55_130_000, 0) + .saturating_add(T::DbWeight::get().reads(7)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn commit_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit_hash: H256, + ) -> DispatchResult { + Self::do_commit_sub_weights(origin, netuid, subid, commit_hash) + } + /// --- Allows a hotkey to commit weight hashes for multiple netuids as a batch. 
/// /// # Args: @@ -236,7 +351,7 @@ mod dispatches { /// #[pallet::call_index(97)] #[pallet::weight((Weight::from_parts(122_000_000, 0) - .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().reads(17_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn reveal_weights( origin: T::RuntimeOrigin, @@ -249,6 +364,150 @@ mod dispatches { Self::do_reveal_weights(origin, netuid, uids, values, salt, version_key) } + /// ---- Used to reveal the weights for a previously committed hash for subsubnets. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The signature of the revealing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `subid` (`u8`): + /// - The u8 subsubnet identifier. + /// + /// * `uids` (`Vec`): + /// - The uids for the weights being revealed. + /// + /// * `values` (`Vec`): + /// - The values of the weights being revealed. + /// + /// * `salt` (`Vec`): + /// - The salt used to generate the commit hash. + /// + /// * `version_key` (`u64`): + /// - The network version key. + /// + /// # Raises: + /// * `CommitRevealDisabled`: + /// - Attempting to reveal weights when the commit-reveal mechanism is disabled. + /// + /// * `NoWeightsCommitFound`: + /// - Attempting to reveal weights without an existing commit. + /// + /// * `ExpiredWeightCommit`: + /// - Attempting to reveal a weight commit that has expired. + /// + /// * `RevealTooEarly`: + /// - Attempting to reveal weights outside the valid reveal period. + /// + /// * `InvalidRevealCommitHashNotMatch`: + /// - The revealed hash does not match any committed hash. + /// + #[pallet::call_index(116)] + #[pallet::weight((Weight::from_parts(122_000_000, 0) + .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn reveal_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + uids: Vec, + values: Vec, + salt: Vec, + version_key: u64, + ) -> DispatchResult { + Self::do_reveal_sub_weights(origin, netuid, subid, uids, values, salt, version_key) + } + + /// ---- Used to commit encrypted commit-reveal v3 weight values to later be revealed. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The committing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `commit` (`Vec`): + /// - The encrypted compressed commit. + /// The steps for this are: + /// 1. Instantiate [`WeightsTlockPayload`] + /// 2. Serialize it using the `parity_scale_codec::Encode` trait + /// 3. Encrypt it following the steps (here)[https://github.com/ideal-lab5/tle/blob/f8e6019f0fb02c380ebfa6b30efb61786dede07b/timelock/src/tlock.rs#L283-L336] + /// to produce a [`TLECiphertext`] type. + /// 4. Serialize and compress using the `ark-serialize` `CanonicalSerialize` trait. + /// + /// * reveal_round (`u64`): + /// - The drand reveal round which will be avaliable during epoch `n+1` from the current + /// epoch. + /// + /// # Raises: + /// * `CommitRevealV3Disabled`: + /// - Attempting to commit when the commit-reveal mechanism is disabled. + /// + /// * `TooManyUnrevealedCommits`: + /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. 
+ /// + #[pallet::call_index(99)] + #[pallet::weight((Weight::from_parts(77_750_000, 0) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn commit_crv3_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + commit: BoundedVec>, + reveal_round: u64, + ) -> DispatchResult { + Self::do_commit_timelocked_weights(origin, netuid, commit, reveal_round, 4) + } + + /// ---- Used to commit encrypted commit-reveal v3 weight values to later be revealed for subsubnets. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The committing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `subid` (`u8`): + /// - The u8 subsubnet identifier. + /// + /// * `commit` (`Vec`): + /// - The encrypted compressed commit. + /// The steps for this are: + /// 1. Instantiate [`WeightsTlockPayload`] + /// 2. Serialize it using the `parity_scale_codec::Encode` trait + /// 3. Encrypt it following the steps (here)[https://github.com/ideal-lab5/tle/blob/f8e6019f0fb02c380ebfa6b30efb61786dede07b/timelock/src/tlock.rs#L283-L336] + /// to produce a [`TLECiphertext`] type. + /// 4. Serialize and compress using the `ark-serialize` `CanonicalSerialize` trait. + /// + /// * reveal_round (`u64`): + /// - The drand reveal round which will be avaliable during epoch `n+1` from the current + /// epoch. + /// + /// # Raises: + /// * `CommitRevealV3Disabled`: + /// - Attempting to commit when the commit-reveal mechanism is disabled. + /// + /// * `TooManyUnrevealedCommits`: + /// - Attempting to commit when the user has more than the allowed limit of unrevealed commits. + /// + #[pallet::call_index(117)] + #[pallet::weight((Weight::from_parts(77_750_000, 0) + .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn commit_crv3_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit: BoundedVec>, + reveal_round: u64, + ) -> DispatchResult { + Self::do_commit_timelocked_sub_weights(origin, netuid, subid, commit, reveal_round, 4) + } + /// ---- The implementation for batch revealing committed weights. /// /// # Args: @@ -290,7 +549,7 @@ mod dispatches { /// - The input vectors are of mismatched lengths. #[pallet::call_index(98)] #[pallet::weight((Weight::from_parts(412_000_000, 0) - .saturating_add(T::DbWeight::get().reads(16)) + .saturating_add(T::DbWeight::get().reads(17_u64)) .saturating_add(T::DbWeight::get().writes(2_u64)), DispatchClass::Normal, Pays::No))] pub fn batch_reveal_weights( origin: T::RuntimeOrigin, @@ -431,7 +690,7 @@ mod dispatches { /// - Errors stemming from transaction pallet. /// #[pallet::call_index(2)] - #[pallet::weight((Weight::from_parts(340_400_000, 0) + #[pallet::weight((Weight::from_parts(340_800_000, 0) .saturating_add(T::DbWeight::get().reads(26)) .saturating_add(T::DbWeight::get().writes(15)), DispatchClass::Normal, Pays::Yes))] pub fn add_stake( @@ -673,7 +932,7 @@ mod dispatches { /// - The ip type v4 or v6. 
/// #[pallet::call_index(5)] - #[pallet::weight((Weight::from_parts(28_660_000, 0) + #[pallet::weight((Weight::from_parts(42_000_000, 0) .saturating_add(T::DbWeight::get().reads(4)) .saturating_add(T::DbWeight::get().writes(1)), DispatchClass::Normal, Pays::No))] pub fn serve_prometheus( @@ -736,7 +995,7 @@ mod dispatches { /// #[pallet::call_index(6)] #[pallet::weight((Weight::from_parts(197_900_000, 0) - .saturating_add(T::DbWeight::get().reads(26)) + .saturating_add(T::DbWeight::get().reads(27_u64)) .saturating_add(T::DbWeight::get().writes(23)), DispatchClass::Normal, Pays::Yes))] pub fn register( origin: OriginFor, @@ -752,8 +1011,8 @@ mod dispatches { /// Register the hotkey to root network #[pallet::call_index(62)] - #[pallet::weight((Weight::from_parts(111_700_000, 0) - .saturating_add(T::DbWeight::get().reads(23)) + #[pallet::weight((Weight::from_parts(135_900_000, 0) + .saturating_add(T::DbWeight::get().reads(24_u64)) .saturating_add(T::DbWeight::get().writes(20)), DispatchClass::Normal, Pays::No))] pub fn root_register(origin: OriginFor, hotkey: T::AccountId) -> DispatchResult { Self::do_root_register(origin, hotkey) @@ -771,7 +1030,7 @@ mod dispatches { /// User register a new subnetwork via burning token #[pallet::call_index(7)] #[pallet::weight((Weight::from_parts(354_200_000, 0) - .saturating_add(T::DbWeight::get().reads(49)) + .saturating_add(T::DbWeight::get().reads(50_u64)) .saturating_add(T::DbWeight::get().writes(43)), DispatchClass::Normal, Pays::Yes))] pub fn burned_register( origin: OriginFor, @@ -1327,8 +1586,8 @@ mod dispatches { /// User register a new subnetwork #[pallet::call_index(79)] #[pallet::weight((Weight::from_parts(234_200_000, 0) - .saturating_add(T::DbWeight::get().reads(35)) - .saturating_add(T::DbWeight::get().writes(51)), DispatchClass::Normal, Pays::Yes))] + .saturating_add(T::DbWeight::get().reads(35)) + .saturating_add(T::DbWeight::get().writes(51)), DispatchClass::Normal, Pays::Yes))] pub fn register_network_with_identity( origin: OriginFor, hotkey: T::AccountId, @@ -1990,7 +2249,7 @@ mod dispatches { /// - The client (bittensor-drand) version #[pallet::call_index(113)] #[pallet::weight((Weight::from_parts(80_690_000, 0) - .saturating_add(T::DbWeight::get().reads(7_u64)) + .saturating_add(T::DbWeight::get().reads(9_u64)) .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] pub fn commit_timelocked_weights( origin: T::RuntimeOrigin, @@ -2033,5 +2292,55 @@ mod dispatches { Ok(()) } + + /// ---- Used to commit timelock encrypted commit-reveal weight values to later be revealed for + /// a subsubnet. + /// + /// # Args: + /// * `origin`: (`::RuntimeOrigin`): + /// - The committing hotkey. + /// + /// * `netuid` (`u16`): + /// - The u16 network identifier. + /// + /// * `subid` (`u8`): + /// - The u8 subsubnet identifier. + /// + /// * `commit` (`Vec`): + /// - The encrypted compressed commit. + /// The steps for this are: + /// 1. Instantiate [`WeightsTlockPayload`] + /// 2. Serialize it using the `parity_scale_codec::Encode` trait + /// 3. Encrypt it following the steps (here)[https://github.com/ideal-lab5/tle/blob/f8e6019f0fb02c380ebfa6b30efb61786dede07b/timelock/src/tlock.rs#L283-L336] + /// to produce a [`TLECiphertext`] type. + /// 4. Serialize and compress using the `ark-serialize` `CanonicalSerialize` trait. + /// + /// * reveal_round (`u64`): + /// - The drand reveal round which will be avaliable during epoch `n+1` from the current + /// epoch. 
+ /// + /// * commit_reveal_version (`u16`): + /// - The client (bittensor-drand) version + #[pallet::call_index(118)] + #[pallet::weight((Weight::from_parts(84_020_000, 0) + .saturating_add(T::DbWeight::get().reads(9_u64)) + .saturating_add(T::DbWeight::get().writes(2)), DispatchClass::Normal, Pays::No))] + pub fn commit_timelocked_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit: BoundedVec>, + reveal_round: u64, + commit_reveal_version: u16, + ) -> DispatchResult { + Self::do_commit_timelocked_sub_weights( + origin, + netuid, + subid, + commit, + reveal_round, + commit_reveal_version, + ) + } } } diff --git a/pallets/subtensor/src/macros/events.rs b/pallets/subtensor/src/macros/events.rs index 03da588630..a0779cd8b1 100644 --- a/pallets/subtensor/src/macros/events.rs +++ b/pallets/subtensor/src/macros/events.rs @@ -41,7 +41,7 @@ mod events { TaoCurrency, ), /// a caller successfully sets their weights on a subnetwork. - WeightsSet(NetUid, u16), + WeightsSet(NetUidStorageIndex, u16), /// a new neuron account has been registered to the chain. NeuronRegistered(NetUid, u16, T::AccountId), /// multiple uids have been concurrently registered. @@ -246,20 +246,20 @@ mod events { /// - **who**: The account ID of the user committing the weights. /// - **netuid**: The network identifier. /// - **commit_hash**: The hash representing the committed weights. - CRV3WeightsCommitted(T::AccountId, NetUid, H256), + CRV3WeightsCommitted(T::AccountId, NetUidStorageIndex, H256), /// Weights have been successfully committed. /// /// - **who**: The account ID of the user committing the weights. /// - **netuid**: The network identifier. /// - **commit_hash**: The hash representing the committed weights. - WeightsCommitted(T::AccountId, NetUid, H256), + WeightsCommitted(T::AccountId, NetUidStorageIndex, H256), /// Weights have been successfully revealed. /// /// - **who**: The account ID of the user revealing the weights. /// - **netuid**: The network identifier. /// - **commit_hash**: The hash of the revealed weights. - WeightsRevealed(T::AccountId, NetUid, H256), + WeightsRevealed(T::AccountId, NetUidStorageIndex, H256), /// Weights have been successfully batch revealed. /// @@ -410,13 +410,13 @@ mod events { /// - **netuid**: The network identifier. /// - **commit_hash**: The hash representing the committed weights. /// - **reveal_round**: The round at which weights can be revealed. - TimelockedWeightsCommitted(T::AccountId, NetUid, H256, u64), + TimelockedWeightsCommitted(T::AccountId, NetUidStorageIndex, H256, u64), /// Timelocked Weights have been successfully revealed. /// /// - **netuid**: The network identifier. /// - **who**: The account ID of the user revealing the weights. - TimelockedWeightsRevealed(NetUid, T::AccountId), + TimelockedWeightsRevealed(NetUidStorageIndex, T::AccountId), /// Auto-staking hotkey received stake AutoStakeAdded { @@ -435,7 +435,7 @@ mod events { /// End-of-epoch miner incentive alpha by UID IncentiveAlphaEmittedToMiners { /// Subnet identifier. - netuid: NetUid, + netuid: NetUidStorageIndex, /// UID-indexed array of miner incentive alpha; index equals UID. 
emissions: Vec, }, diff --git a/pallets/subtensor/src/macros/genesis.rs b/pallets/subtensor/src/macros/genesis.rs index e50bf01d7d..b9378e38f6 100644 --- a/pallets/subtensor/src/macros/genesis.rs +++ b/pallets/subtensor/src/macros/genesis.rs @@ -96,9 +96,9 @@ mod genesis { Active::::mutate(netuid, |v| v.push(true)); Emission::::mutate(netuid, |v| v.push(0.into())); Consensus::::mutate(netuid, |v| v.push(0)); - Incentive::::mutate(netuid, |v| v.push(0)); + Incentive::::mutate(NetUidStorageIndex::from(netuid), |v| v.push(0)); Dividends::::mutate(netuid, |v| v.push(0)); - LastUpdate::::mutate(netuid, |v| v.push(block_number)); + LastUpdate::::mutate(NetUidStorageIndex::from(netuid), |v| v.push(block_number)); PruningScores::::mutate(netuid, |v| v.push(0)); ValidatorTrust::::mutate(netuid, |v| v.push(0)); ValidatorPermit::::mutate(netuid, |v| v.push(false)); diff --git a/pallets/subtensor/src/migrations/migrate_crv3_commits_add_block.rs b/pallets/subtensor/src/migrations/migrate_crv3_commits_add_block.rs index 27f2fe6d65..bf5a0bb2b5 100644 --- a/pallets/subtensor/src/migrations/migrate_crv3_commits_add_block.rs +++ b/pallets/subtensor/src/migrations/migrate_crv3_commits_add_block.rs @@ -22,9 +22,10 @@ pub fn migrate_crv3_commits_add_block() -> Weight { log::info!("Running migration '{}'", String::from_utf8_lossy(&mig_name)); // iterate over *all* (netuid, epoch, queue) triples - for (netuid, epoch, old_q) in CRV3WeightCommits::::drain() { + for (netuid_index, epoch, old_q) in CRV3WeightCommits::::drain() { total_weight = total_weight.saturating_add(T::DbWeight::get().reads_writes(1, 1)); + let (netuid, _) = Pallet::::get_netuid_and_subid(netuid_index).unwrap_or_default(); let commit_block = Pallet::::get_first_block_of_epoch(netuid, epoch); // convert VecDeque<(who,cipher,rnd)> → VecDeque<(who,cb,cipher,rnd)> @@ -34,7 +35,7 @@ pub fn migrate_crv3_commits_add_block() -> Weight { .collect(); // write back under *new* storage definition - CRV3WeightCommitsV2::::insert(netuid, epoch, new_q); + CRV3WeightCommitsV2::::insert(netuid_index, epoch, new_q); } // mark as done diff --git a/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs b/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs index b8b1138b2e..c4e79692ac 100644 --- a/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs +++ b/pallets/subtensor/src/migrations/migrate_delete_subnet_21.rs @@ -6,7 +6,7 @@ use frame_support::{ }; use log::info; use sp_std::vec::Vec; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex}; /// Constant for logging purposes const LOG_TARGET: &str = "migrate_delete_subnet_21"; @@ -73,8 +73,8 @@ pub fn migrate_delete_subnet_21() -> Weight { // Remove incentive mechanism memory let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); - let _ = Weights::::clear_prefix(netuid, u32::MAX, None); + let _ = Bonds::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); + let _ = Weights::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); weight.saturating_accrue(T::DbWeight::get().writes(4)); @@ -83,11 +83,11 @@ pub fn migrate_delete_subnet_21() -> Weight { Trust::::remove(netuid); Active::::remove(netuid); Emission::::remove(netuid); - Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Consensus::::remove(netuid); Dividends::::remove(netuid); PruningScores::::remove(netuid); - 
LastUpdate::::remove(netuid); + LastUpdate::::remove(NetUidStorageIndex::from(netuid)); ValidatorPermit::::remove(netuid); ValidatorTrust::::remove(netuid); diff --git a/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs b/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs index 289ce6cb36..3470004362 100644 --- a/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs +++ b/pallets/subtensor/src/migrations/migrate_delete_subnet_3.rs @@ -6,7 +6,7 @@ use frame_support::{ }; use log::info; use sp_std::vec::Vec; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex}; /// Constant for logging purposes const LOG_TARGET: &str = "migrate_delete_subnet_3"; @@ -75,8 +75,8 @@ pub fn migrate_delete_subnet_3() -> Weight { // Remove incentive mechanism memory let _ = Uids::::clear_prefix(netuid, u32::MAX, None); let _ = Keys::::clear_prefix(netuid, u32::MAX, None); - let _ = Bonds::::clear_prefix(netuid, u32::MAX, None); - let _ = Weights::::clear_prefix(netuid, u32::MAX, None); + let _ = Bonds::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); + let _ = Weights::::clear_prefix(NetUidStorageIndex::from(netuid), u32::MAX, None); weight.saturating_accrue(T::DbWeight::get().writes(4)); @@ -85,11 +85,11 @@ pub fn migrate_delete_subnet_3() -> Weight { Trust::::remove(netuid); Active::::remove(netuid); Emission::::remove(netuid); - Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Consensus::::remove(netuid); Dividends::::remove(netuid); PruningScores::::remove(netuid); - LastUpdate::::remove(netuid); + LastUpdate::::remove(NetUidStorageIndex::from(netuid)); ValidatorPermit::::remove(netuid); ValidatorTrust::::remove(netuid); diff --git a/pallets/subtensor/src/rpc_info/metagraph.rs b/pallets/subtensor/src/rpc_info/metagraph.rs index 1ad09d4bbb..2d3ef32509 100644 --- a/pallets/subtensor/src/rpc_info/metagraph.rs +++ b/pallets/subtensor/src/rpc_info/metagraph.rs @@ -8,7 +8,7 @@ use pallet_commitments::GetCommitments; use substrate_fixed::types::I64F64; use substrate_fixed::types::I96F32; use subtensor_macros::freeze_struct; -use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, SubId, TaoCurrency}; #[freeze_struct("6fc49d5a7dc0e339")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -749,7 +749,7 @@ impl Pallet { .into_iter() .map(Compact::from) .collect(), // Pruning per UID - last_update: LastUpdate::::get(netuid) + last_update: LastUpdate::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(), // Last update per UID @@ -761,7 +761,7 @@ impl Pallet { .into_iter() .map(Compact::from) .collect(), // Dividends per UID - incentives: Incentive::::get(netuid) + incentives: Incentive::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(), // Mining incentives per UID @@ -805,6 +805,45 @@ impl Pallet { metagraphs } + pub fn get_submetagraph(netuid: NetUid, subid: SubId) -> Option> { + if Self::ensure_subsubnet_exists(netuid, subid).is_err() { + return None; + } + + // Get netuid metagraph + let maybe_meta = Self::get_metagraph(netuid); + if let Some(mut meta) = maybe_meta { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + + // Update with subsubnet information + meta.netuid = NetUid::from(u16::from(netuid_index)).into(); + meta.last_update = LastUpdate::::get(netuid_index) + .into_iter() + .map(Compact::from) + 
.collect(); + meta.incentives = Incentive::::get(netuid_index) + .into_iter() + .map(Compact::from) + .collect(); + + Some(meta) + } else { + None + } + } + + pub fn get_all_submetagraphs() -> Vec>> { + let netuids = Self::get_all_subnet_netuids(); + let mut metagraphs = Vec::>>::new(); + for netuid in netuids.clone().iter() { + let subsub_count = u8::from(SubsubnetCountCurrent::::get(netuid)); + for subid in 0..subsub_count { + metagraphs.push(Self::get_submetagraph(*netuid, SubId::from(subid))); + } + } + metagraphs + } + pub fn get_selective_metagraph( netuid: NetUid, metagraph_indexes: Vec, @@ -821,6 +860,23 @@ impl Pallet { } } + pub fn get_selective_submetagraph( + netuid: NetUid, + subid: SubId, + metagraph_indexes: Vec, + ) -> Option> { + if !Self::if_subnet_exist(netuid) { + None + } else { + let mut result = SelectiveMetagraph::default(); + for index in metagraph_indexes.iter() { + let value = Self::get_single_selective_submetagraph(netuid, subid, *index); + result.merge_value(&value, *index as usize); + } + Some(result) + } + } + fn get_single_selective_metagraph( netuid: NetUid, metagraph_index: u16, @@ -1207,7 +1263,7 @@ impl Pallet { Some(SelectiveMetagraphIndex::LastUpdate) => SelectiveMetagraph { netuid: netuid.into(), last_update: Some( - LastUpdate::::get(netuid) + LastUpdate::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(), @@ -1240,7 +1296,7 @@ impl Pallet { Some(SelectiveMetagraphIndex::Incentives) => SelectiveMetagraph { netuid: netuid.into(), incentives: Some( - Incentive::::get(netuid) + Incentive::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(), @@ -1385,6 +1441,46 @@ impl Pallet { } } + fn get_single_selective_submetagraph( + netuid: NetUid, + subid: SubId, + metagraph_index: u16, + ) -> SelectiveMetagraph { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + + // Default to netuid, replace as needed for subid + match SelectiveMetagraphIndex::from_index(metagraph_index as usize) { + Some(SelectiveMetagraphIndex::Incentives) => SelectiveMetagraph { + netuid: netuid.into(), + incentives: Some( + Incentive::::get(NetUidStorageIndex::from(netuid)) + .into_iter() + .map(Compact::from) + .collect(), + ), + ..Default::default() + }, + + Some(SelectiveMetagraphIndex::LastUpdate) => SelectiveMetagraph { + netuid: netuid.into(), + last_update: Some( + LastUpdate::::get(NetUidStorageIndex::from(netuid)) + .into_iter() + .map(Compact::from) + .collect(), + ), + ..Default::default() + }, + + _ => { + let mut meta = Self::get_single_selective_metagraph(netuid, metagraph_index); + // Replace netuid with index + meta.netuid = NetUid::from(u16::from(netuid_index)).into(); + meta + } + } + } + fn get_validators(netuid: NetUid) -> SelectiveMetagraph { let stake_threshold = Self::get_stake_threshold(); let hotkeys: Vec<(u16, T::AccountId)> = diff --git a/pallets/subtensor/src/rpc_info/neuron_info.rs b/pallets/subtensor/src/rpc_info/neuron_info.rs index 8eae264c6e..6e29a51ef5 100644 --- a/pallets/subtensor/src/rpc_info/neuron_info.rs +++ b/pallets/subtensor/src/rpc_info/neuron_info.rs @@ -2,7 +2,7 @@ use super::*; use frame_support::pallet_prelude::{Decode, Encode}; extern crate alloc; use codec::Compact; -use subtensor_runtime_common::{AlphaCurrency, NetUid}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex}; #[freeze_struct("9e5a291e7e71482d")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -87,16 +87,16 @@ impl Pallet { let active = 
Self::get_active_for_uid(netuid, uid); let rank = Self::get_rank_for_uid(netuid, uid); let emission = Self::get_emission_for_uid(netuid, uid); - let incentive = Self::get_incentive_for_uid(netuid, uid); + let incentive = Self::get_incentive_for_uid(netuid.into(), uid); let consensus = Self::get_consensus_for_uid(netuid, uid); let trust = Self::get_trust_for_uid(netuid, uid); let validator_trust = Self::get_validator_trust_for_uid(netuid, uid); let dividends = Self::get_dividends_for_uid(netuid, uid); let pruning_score = Self::get_pruning_score_for_uid(netuid, uid); - let last_update = Self::get_last_update_for_uid(netuid, uid); + let last_update = Self::get_last_update_for_uid(NetUidStorageIndex::from(netuid), uid); let validator_permit = Self::get_validator_permit_for_uid(netuid, uid); - let weights = Weights::::get(netuid, uid) + let weights = Weights::::get(NetUidStorageIndex::from(netuid), uid) .into_iter() .filter_map(|(i, w)| { if w > 0 { @@ -107,7 +107,7 @@ impl Pallet { }) .collect::, Compact)>>(); - let bonds = >::get(netuid, uid) + let bonds = Bonds::::get(NetUidStorageIndex::from(netuid), uid) .iter() .filter_map(|(i, b)| { if *b > 0 { @@ -173,13 +173,13 @@ impl Pallet { let active = Self::get_active_for_uid(netuid, uid); let rank = Self::get_rank_for_uid(netuid, uid); let emission = Self::get_emission_for_uid(netuid, uid); - let incentive = Self::get_incentive_for_uid(netuid, uid); + let incentive = Self::get_incentive_for_uid(netuid.into(), uid); let consensus = Self::get_consensus_for_uid(netuid, uid); let trust = Self::get_trust_for_uid(netuid, uid); let validator_trust = Self::get_validator_trust_for_uid(netuid, uid); let dividends = Self::get_dividends_for_uid(netuid, uid); let pruning_score = Self::get_pruning_score_for_uid(netuid, uid); - let last_update = Self::get_last_update_for_uid(netuid, uid); + let last_update = Self::get_last_update_for_uid(NetUidStorageIndex::from(netuid), uid); let validator_permit = Self::get_validator_permit_for_uid(netuid, uid); let stake: Vec<(T::AccountId, Compact)> = vec![( diff --git a/pallets/subtensor/src/rpc_info/show_subnet.rs b/pallets/subtensor/src/rpc_info/show_subnet.rs index 2123345a4e..abd9670bb8 100644 --- a/pallets/subtensor/src/rpc_info/show_subnet.rs +++ b/pallets/subtensor/src/rpc_info/show_subnet.rs @@ -4,7 +4,7 @@ use crate::epoch::math::*; use codec::Compact; use frame_support::pallet_prelude::{Decode, Encode}; use substrate_fixed::types::I64F64; -use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, TaoCurrency}; #[freeze_struct("9354762261420485")] #[derive(Decode, Encode, PartialEq, Eq, Clone, Debug, TypeInfo)] @@ -103,7 +103,7 @@ impl Pallet { .into_iter() .map(Compact::from) .collect(); - let last_update: Vec> = LastUpdate::::get(netuid) + let last_update: Vec> = LastUpdate::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(); @@ -115,7 +115,7 @@ impl Pallet { .into_iter() .map(Compact::from) .collect(); - let incentives: Vec> = Incentive::::get(netuid) + let incentives: Vec> = Incentive::::get(NetUidStorageIndex::from(netuid)) .into_iter() .map(Compact::from) .collect(); diff --git a/pallets/subtensor/src/subnets/mod.rs b/pallets/subtensor/src/subnets/mod.rs index a823773395..a3705af084 100644 --- a/pallets/subtensor/src/subnets/mod.rs +++ b/pallets/subtensor/src/subnets/mod.rs @@ -3,6 +3,7 @@ pub mod leasing; pub mod registration; pub mod serving; pub mod subnet; +pub mod subsubnet; pub mod 
symbols; pub mod uids; pub mod weights; diff --git a/pallets/subtensor/src/subnets/subsubnet.rs b/pallets/subtensor/src/subnets/subsubnet.rs new file mode 100644 index 0000000000..904c380463 --- /dev/null +++ b/pallets/subtensor/src/subnets/subsubnet.rs @@ -0,0 +1,376 @@ +//! This file contains all tooling to work with sub-subnets +//! + +use super::*; +use crate::epoch::run_epoch::EpochTerms; +use alloc::collections::BTreeMap; +use safe_math::*; +use substrate_fixed::types::U64F64; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, SubId}; + +pub type LeaseId = u32; + +pub type CurrencyOf = ::Currency; + +pub type BalanceOf = + as fungible::Inspect<::AccountId>>::Balance; + +/// Theoretical maximum of subnets on bittensor. This value is used in indexed +/// storage of epoch values for sub-subnets as +/// +/// `storage_index = netuid + sub_id * GLOBAL_MAX_SUBNET_COUNT` +/// +/// For sub_id = 0 this index results in netuid and provides backward compatibility +/// for subnets with default sub-subnet count of 1. +/// +/// Changing this value will require a migration of all epoch maps. +/// +pub const GLOBAL_MAX_SUBNET_COUNT: u16 = 4096; + +// Theoretical maximum number of subsubnets per subnet +// GLOBAL_MAX_SUBNET_COUNT * MAX_SUBSUBNET_COUNT_PER_SUBNET should be 0x10000 +pub const MAX_SUBSUBNET_COUNT_PER_SUBNET: u8 = 16; + +impl Pallet { + pub fn get_subsubnet_storage_index(netuid: NetUid, sub_id: SubId) -> NetUidStorageIndex { + u16::from(sub_id) + .saturating_mul(GLOBAL_MAX_SUBNET_COUNT) + .saturating_add(u16::from(netuid)) + .into() + } + + pub fn get_netuid_and_subid( + netuid_index: NetUidStorageIndex, + ) -> Result<(NetUid, SubId), Error> { + let maybe_netuid = u16::from(netuid_index).checked_rem(GLOBAL_MAX_SUBNET_COUNT); + if let Some(netuid_u16) = maybe_netuid { + let netuid = NetUid::from(netuid_u16); + + // Make sure the base subnet exists + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + // Extract sub_id + let sub_id_u8 = u8::try_from(u16::from(netuid_index).safe_div(GLOBAL_MAX_SUBNET_COUNT)) + .map_err(|_| Error::::SubNetworkDoesNotExist)?; + let sub_id = SubId::from(sub_id_u8); + + if SubsubnetCountCurrent::::get(netuid) > sub_id { + Ok((netuid, sub_id)) + } else { + Err(Error::::SubNetworkDoesNotExist.into()) + } + } else { + Err(Error::::SubNetworkDoesNotExist.into()) + } + } + + pub fn get_current_subsubnet_count(netuid: NetUid) -> SubId { + SubsubnetCountCurrent::::get(netuid) + } + + pub fn ensure_subsubnet_exists(netuid: NetUid, sub_id: SubId) -> DispatchResult { + // Make sure the base subnet exists + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + // Make sure the subsub limit is not exceeded + ensure!( + SubsubnetCountCurrent::::get(netuid) > sub_id, + Error::::SubNetworkDoesNotExist + ); + Ok(()) + } + + /// Set the desired valus of sub-subnet count for a subnet identified + /// by netuid + pub fn do_set_subsubnet_count(netuid: NetUid, subsubnet_count: SubId) -> DispatchResult { + // Make sure the subnet exists + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + // Count cannot be zero + ensure!(subsubnet_count > 0.into(), Error::::InvalidValue); + + // Make sure we are not exceeding the max sub-subnet count + ensure!( + subsubnet_count <= MaxSubsubnetCount::::get(), + Error::::InvalidValue + ); + + // Make sure we are not allowing numbers that will break the math + ensure!( + subsubnet_count <= 
SubId::from(MAX_SUBSUBNET_COUNT_PER_SUBNET), + Error::::InvalidValue + ); + + Self::update_subsubnet_counts_if_needed(netuid, subsubnet_count); + + Ok(()) + } + + /// Update current count for a subnet identified by netuid + /// - Cleans up all sub-subnet maps if count is reduced + /// + pub fn update_subsubnet_counts_if_needed(netuid: NetUid, new_count: SubId) { + let old_count = u8::from(SubsubnetCountCurrent::::get(netuid)); + let new_count_u8 = u8::from(new_count); + if old_count != new_count_u8 { + if old_count > new_count_u8 { + for subid in new_count_u8..old_count { + let netuid_index = + Self::get_subsubnet_storage_index(netuid, SubId::from(subid)); + + // Cleanup Weights + let _ = Weights::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup Incentive + Incentive::::remove(netuid_index); + + // Cleanup LastUpdate + LastUpdate::::remove(netuid_index); + + // Cleanup Bonds + let _ = Bonds::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup WeightCommits + let _ = WeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + + // Cleanup TimelockedWeightCommits + let _ = + TimelockedWeightCommits::::clear_prefix(netuid_index, u32::MAX, None); + } + } + + SubsubnetCountCurrent::::insert(netuid, SubId::from(new_count)); + + // Reset split back to even + SubsubnetEmissionSplit::::remove(netuid); + } + } + + pub fn do_set_emission_split(netuid: NetUid, maybe_split: Option>) -> DispatchResult { + // Make sure the subnet exists + ensure!( + Self::if_subnet_exist(netuid), + Error::::SubNetworkDoesNotExist + ); + + if let Some(split) = maybe_split { + // Check the length + ensure!(!split.is_empty(), Error::::InvalidValue); + ensure!( + split.len() <= u8::from(SubsubnetCountCurrent::::get(netuid)) as usize, + Error::::InvalidValue + ); + + // Check that values add up to 65535 + let total: u64 = split.iter().map(|s| *s as u64).sum(); + ensure!(total <= u16::MAX as u64, Error::::InvalidValue); + + SubsubnetEmissionSplit::::insert(netuid, split); + } else { + SubsubnetEmissionSplit::::remove(netuid); + } + + Ok(()) + } + + /// Split alpha emission in sub-subnet proportions + /// stored in SubsubnetEmissionSplit + /// + pub fn split_emissions(netuid: NetUid, alpha: AlphaCurrency) -> Vec { + let subsubnet_count = u64::from(SubsubnetCountCurrent::::get(netuid)); + let maybe_split = SubsubnetEmissionSplit::::get(netuid); + + // Unset split means even distribution + let mut result: Vec = if let Some(split) = maybe_split { + split + .iter() + .map(|s| { + AlphaCurrency::from( + (u64::from(alpha) as u128) + .saturating_mul(*s as u128) + .safe_div(u16::MAX as u128) as u64, + ) + }) + .collect() + } else { + let per_subsubnet = u64::from(alpha).safe_div(subsubnet_count); + vec![AlphaCurrency::from(per_subsubnet); subsubnet_count as usize] + }; + + // Trim / extend and pad with zeroes if result is shorter than subsubnet_count + if result.len() != subsubnet_count as usize { + result.resize(subsubnet_count as usize, 0u64.into()); // pad with AlphaCurrency::from(0) + } + + // If there's any rounding error or lost due to truncation emission, credit it to subsubnet 0 + let rounding_err = + u64::from(alpha).saturating_sub(result.iter().map(|s| u64::from(*s)).sum()); + if let Some(cell) = result.first_mut() { + *cell = cell.saturating_add(AlphaCurrency::from(rounding_err)); + } + result + } + + fn weighted_acc_u16(existing: u16, added: u16, weight: U64F64) -> u16 { + U64F64::saturating_from_num(existing) + .saturating_add(U64F64::saturating_from_num(added).saturating_mul(weight)) + 
.saturating_to_num::() + } + + fn weighted_acc_alpha( + existing: AlphaCurrency, + added: AlphaCurrency, + weight: U64F64, + ) -> AlphaCurrency { + U64F64::saturating_from_num(existing) + .saturating_add(U64F64::saturating_from_num(added).saturating_mul(weight)) + .saturating_to_num::() + .into() + } + + /// Splits rao_emission between different sub-subnets using `split_emissions` function. + /// + /// Runs the epoch function for each sub-subnet and consolidates hotkey_emission + /// into a single vector. + /// + pub fn epoch_with_subsubnets( + netuid: NetUid, + rao_emission: AlphaCurrency, + ) -> Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> { + let aggregated: BTreeMap = + Self::split_emissions(netuid, rao_emission) + .into_iter() + .enumerate() + // Run epoch function for each subsubnet to distribute its portion of emissions + .flat_map(|(sub_id_usize, sub_emission)| { + let sub_id_u8: u8 = sub_id_usize.try_into().unwrap_or_default(); + let sub_id = SubId::from(sub_id_u8); + + // Run epoch function on the subsubnet emission + let epoch_output = Self::epoch_subsubnet(netuid, sub_id, sub_emission); + Self::persist_subsub_epoch_terms(netuid, sub_id, epoch_output.as_map()); + + // Calculate subsubnet weight from the split emission (not the other way because preserving + // emission accuracy is the priority) + // For zero emission the first subsubnet gets full weight + let sub_weight = U64F64::saturating_from_num(sub_emission).safe_div_or( + U64F64::saturating_from_num(rao_emission), + U64F64::saturating_from_num(if sub_id_u8 == 0 { 1 } else { 0 }), + ); + + // Produce an iterator of (hotkey, (terms, sub_weight)) tuples + epoch_output + .0 + .into_iter() + .map(move |(hotkey, terms)| (hotkey, (terms, sub_weight))) + }) + // Consolidate the hotkey emissions into a single BTreeMap + .fold(BTreeMap::new(), |mut acc, (hotkey, (terms, sub_weight))| { + acc.entry(hotkey) + .and_modify(|acc_terms| { + // Server and validator emission come from subsubnet emission and need to be added up + acc_terms.validator_emission = acc_terms + .validator_emission + .saturating_add(terms.validator_emission); + acc_terms.server_emission = acc_terms + .server_emission + .saturating_add(terms.server_emission); + + // The rest of the terms need to be aggregated as weighted sum + acc_terms.dividend = Self::weighted_acc_u16( + acc_terms.dividend, + terms.dividend, + sub_weight, + ); + acc_terms.stake_weight = Self::weighted_acc_u16( + acc_terms.stake_weight, + terms.stake_weight, + sub_weight, + ); + acc_terms.active |= terms.active; + acc_terms.emission = Self::weighted_acc_alpha( + acc_terms.emission, + terms.emission, + sub_weight, + ); + acc_terms.rank = + Self::weighted_acc_u16(acc_terms.rank, terms.rank, sub_weight); + acc_terms.trust = + Self::weighted_acc_u16(acc_terms.trust, terms.trust, sub_weight); + acc_terms.consensus = Self::weighted_acc_u16( + acc_terms.consensus, + terms.consensus, + sub_weight, + ); + acc_terms.pruning_score = Self::weighted_acc_u16( + acc_terms.pruning_score, + terms.pruning_score, + sub_weight, + ); + acc_terms.validator_trust = Self::weighted_acc_u16( + acc_terms.validator_trust, + terms.validator_trust, + sub_weight, + ); + acc_terms.new_validator_permit |= terms.new_validator_permit; + }) + .or_insert_with(|| { + // weighted insert for the first sub-subnet seen for this hotkey + EpochTerms { + uid: terms.uid, + dividend: Self::weighted_acc_u16(0, terms.dividend, sub_weight), + incentive: Self::weighted_acc_u16(0, terms.incentive, sub_weight), + validator_emission: 
terms.validator_emission, + server_emission: terms.server_emission, + stake_weight: Self::weighted_acc_u16( + 0, + terms.stake_weight, + sub_weight, + ), + active: terms.active, // booleans are ORed across subs + emission: Self::weighted_acc_alpha( + 0u64.into(), + terms.emission, + sub_weight, + ), + rank: Self::weighted_acc_u16(0, terms.rank, sub_weight), + trust: Self::weighted_acc_u16(0, terms.trust, sub_weight), + consensus: Self::weighted_acc_u16(0, terms.consensus, sub_weight), + pruning_score: Self::weighted_acc_u16( + 0, + terms.pruning_score, + sub_weight, + ), + validator_trust: Self::weighted_acc_u16( + 0, + terms.validator_trust, + sub_weight, + ), + new_validator_permit: terms.new_validator_permit, + bond: Vec::new(), // aggregated map doesn’t use bonds; keep empty + } + }); + acc + }); + + // State updates from epoch function + Self::persist_netuid_epoch_terms(netuid, &aggregated); + + // Remap BTreeMap back to Vec<(T::AccountId, AlphaCurrency, AlphaCurrency)> format + // for processing emissions in run_coinbase + // Emission tuples ( hotkeys, server_emission, validator_emission ) + aggregated + .into_iter() + .map(|(hotkey, terms)| (hotkey, terms.server_emission, terms.validator_emission)) + .collect() + } +} diff --git a/pallets/subtensor/src/subnets/uids.rs b/pallets/subtensor/src/subnets/uids.rs index d6b776252e..2ec6869bad 100644 --- a/pallets/subtensor/src/subnets/uids.rs +++ b/pallets/subtensor/src/subnets/uids.rs @@ -16,15 +16,34 @@ impl Pallet { } } - /// Resets the trust, emission, consensus, incentive, dividends of the neuron to default + /// Resets the trust, emission, consensus, incentive, dividends, bonds, and weights of + /// the neuron to default pub fn clear_neuron(netuid: NetUid, neuron_uid: u16) { let neuron_index: usize = neuron_uid.into(); Emission::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0.into())); Trust::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); Consensus::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Incentive::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); + for subid in 0..SubsubnetCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + Incentive::::mutate(netuid_index, |v| Self::set_element_at(v, neuron_index, 0)); + Bonds::::remove(netuid_index, neuron_uid); // Remove bonds for Validator. + + // Clear weights set BY the neuron_uid + Weights::::remove(netuid_index, neuron_uid); + + // Set weights FOR the neuron_uid to 0 + let all_uids: Vec = Weights::::iter_key_prefix(netuid_index).collect(); + for uid in all_uids { + Weights::::mutate(netuid_index, uid, |weight_vec: &mut Vec<(u16, u16)>| { + for (weight_uid, w) in weight_vec.iter_mut() { + if *weight_uid == neuron_uid { + *w = 0; + } + } + }); + } + } Dividends::::mutate(netuid, |v| Self::set_element_at(v, neuron_index, 0)); - Bonds::::remove(netuid, neuron_uid); // Remove bonds for Validator. } /// Replace the neuron under this uid. 
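For reference, a minimal dependency-free sketch of the index arithmetic behind `get_subsubnet_storage_index` and `get_netuid_and_subid` above. Plain `u16`/`u8` stand in for the `NetUid`/`SubId`/`NetUidStorageIndex` newtypes, and the existence checks against `SubsubnetCountCurrent` are omitted.

const GLOBAL_MAX_SUBNET_COUNT: u16 = 4096;
const MAX_SUBSUBNET_COUNT_PER_SUBNET: u8 = 16;

// storage_index = netuid + sub_id * GLOBAL_MAX_SUBNET_COUNT.
// For sub_id == 0 the index equals the netuid, so pre-existing per-subnet keys stay valid.
fn storage_index(netuid: u16, sub_id: u8) -> u16 {
    u16::from(sub_id)
        .saturating_mul(GLOBAL_MAX_SUBNET_COUNT)
        .saturating_add(netuid)
}

// Inverse mapping: netuid is the remainder, sub_id the quotient
// (well-defined as long as netuid < GLOBAL_MAX_SUBNET_COUNT).
fn netuid_and_subid(index: u16) -> (u16, u8) {
    (
        index % GLOBAL_MAX_SUBNET_COUNT,
        (index / GLOBAL_MAX_SUBNET_COUNT) as u8,
    )
}

fn main() {
    assert_eq!(storage_index(1, 0), 1); // SubId::MAIN maps to the plain netuid
    assert_eq!(storage_index(1, 3), 12_289); // 1 + 3 * 4096
    assert_eq!(netuid_and_subid(12_289), (1, 3)); // round-trips
    // 4096 subnets * 16 sub-subnets per subnet covers the u16 index space exactly.
    assert_eq!(
        u32::from(GLOBAL_MAX_SUBNET_COUNT) * u32::from(MAX_SUBSUBNET_COUNT_PER_SUBNET),
        0x1_0000
    );
}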
@@ -93,9 +112,12 @@ impl Pallet { Active::::mutate(netuid, |v| v.push(true)); Emission::::mutate(netuid, |v| v.push(0.into())); Consensus::::mutate(netuid, |v| v.push(0)); - Incentive::::mutate(netuid, |v| v.push(0)); + for subid in 0..SubsubnetCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid.into()); + Incentive::::mutate(netuid_index, |v| v.push(0)); + LastUpdate::::mutate(netuid_index, |v| v.push(block_number)); + } Dividends::::mutate(netuid, |v| v.push(0)); - LastUpdate::::mutate(netuid, |v| v.push(block_number)); PruningScores::::mutate(netuid, |v| v.push(0)); ValidatorTrust::::mutate(netuid, |v| v.push(0)); ValidatorPermit::::mutate(netuid, |v| v.push(false)); diff --git a/pallets/subtensor/src/subnets/weights.rs b/pallets/subtensor/src/subnets/weights.rs index 737329ccb7..b751630d85 100644 --- a/pallets/subtensor/src/subnets/weights.rs +++ b/pallets/subtensor/src/subnets/weights.rs @@ -10,7 +10,8 @@ use sp_runtime::{ traits::{BlakeTwo256, Hash}, }; use sp_std::{collections::vec_deque::VecDeque, vec}; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; + impl Pallet { /// ---- The implementation for committing weight hashes. /// @@ -45,6 +46,30 @@ impl Pallet { netuid: NetUid, commit_hash: H256, ) -> DispatchResult { + Self::internal_commit_weights(origin, netuid, SubId::MAIN, commit_hash) + } + + pub fn do_commit_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit_hash: H256, + ) -> DispatchResult { + Self::internal_commit_weights(origin, netuid, subid, commit_hash) + } + + fn internal_commit_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit_hash: H256, + ) -> DispatchResult { + // Ensure netuid and subid exist + Self::ensure_subsubnet_exists(netuid, subid)?; + + // Calculate subnet storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + // 1. Verify the caller's signature (hotkey). let who = ensure_signed(origin)?; @@ -66,7 +91,8 @@ impl Pallet { let commit_block = Self::get_current_block_as_u64(); let neuron_uid = Self::get_uid_for_net_and_hotkey(netuid, &who)?; ensure!( - Self::check_rate_limit(netuid, neuron_uid, commit_block), + // Rate limiting should happen per sub-subnet, so use netuid_index here + Self::check_rate_limit(netuid_index, neuron_uid, commit_block), Error::::CommittingWeightsTooFast ); @@ -74,7 +100,7 @@ impl Pallet { let (first_reveal_block, last_reveal_block) = Self::get_reveal_blocks(netuid, commit_block); // 6. Retrieve or initialize the VecDeque of commits for the hotkey. - WeightCommits::::try_mutate(netuid, &who, |maybe_commits| -> DispatchResult { + WeightCommits::::try_mutate(netuid_index, &who, |maybe_commits| -> DispatchResult { let mut commits: VecDeque<(H256, u64, u64, u64)> = maybe_commits.take().unwrap_or_default(); @@ -102,10 +128,14 @@ impl Pallet { *maybe_commits = Some(commits); // 11. Emit the WeightsCommitted event - Self::deposit_event(Event::WeightsCommitted(who.clone(), netuid, commit_hash)); + Self::deposit_event(Event::WeightsCommitted( + who.clone(), + netuid_index, + commit_hash, + )); // 12. Update the last commit block for the hotkey's UID. - Self::set_last_update_for_uid(netuid, neuron_uid, commit_block); + Self::set_last_update_for_uid(netuid_index, neuron_uid, commit_block); // 13. Return success. 
Ok(()) @@ -234,6 +264,48 @@ impl Pallet { reveal_round: u64, commit_reveal_version: u16, ) -> DispatchResult { + Self::internal_commit_timelocked_weights( + origin, + netuid, + SubId::MAIN, + commit, + reveal_round, + commit_reveal_version, + ) + } + + pub fn do_commit_timelocked_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit: BoundedVec>, + reveal_round: u64, + commit_reveal_version: u16, + ) -> DispatchResult { + Self::internal_commit_timelocked_weights( + origin, + netuid, + subid, + commit, + reveal_round, + commit_reveal_version, + ) + } + + pub fn internal_commit_timelocked_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + commit: BoundedVec>, + reveal_round: u64, + commit_reveal_version: u16, + ) -> DispatchResult { + // Ensure netuid and subid exist + Self::ensure_subsubnet_exists(netuid, subid)?; + + // Calculate netuid storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + // 1. Verify the caller's signature (hotkey). let who = ensure_signed(origin)?; @@ -261,7 +333,7 @@ impl Pallet { let commit_block = Self::get_current_block_as_u64(); let neuron_uid = Self::get_uid_for_net_and_hotkey(netuid, &who)?; ensure!( - Self::check_rate_limit(netuid, neuron_uid, commit_block), + Self::check_rate_limit(netuid_index, neuron_uid, commit_block), Error::::CommittingWeightsTooFast ); @@ -272,37 +344,41 @@ impl Pallet { false => Self::get_epoch_index(netuid, cur_block), }; - TimelockedWeightCommits::::try_mutate(netuid, cur_epoch, |commits| -> DispatchResult { - // 7. Verify that the number of unrevealed commits is within the allowed limit. + TimelockedWeightCommits::::try_mutate( + netuid_index, + cur_epoch, + |commits| -> DispatchResult { + // 7. Verify that the number of unrevealed commits is within the allowed limit. - let unrevealed_commits_for_who = commits - .iter() - .filter(|(account, _, _, _)| account == &who) - .count(); - ensure!( - unrevealed_commits_for_who < 10, - Error::::TooManyUnrevealedCommits - ); + let unrevealed_commits_for_who = commits + .iter() + .filter(|(account, _, _, _)| account == &who) + .count(); + ensure!( + unrevealed_commits_for_who < 10, + Error::::TooManyUnrevealedCommits + ); - // 8. Append the new commit with calculated reveal blocks. - // Hash the commit before it is moved, for the event - let commit_hash = BlakeTwo256::hash(&commit); - commits.push_back((who.clone(), cur_block, commit, reveal_round)); + // 8. Append the new commit with calculated reveal blocks. + // Hash the commit before it is moved, for the event + let commit_hash = BlakeTwo256::hash(&commit); + commits.push_back((who.clone(), cur_block, commit, reveal_round)); - // 9. Emit the WeightsCommitted event - Self::deposit_event(Event::TimelockedWeightsCommitted( - who.clone(), - netuid, - commit_hash, - reveal_round, - )); + // 9. Emit the WeightsCommitted event + Self::deposit_event(Event::TimelockedWeightsCommitted( + who.clone(), + netuid_index, + commit_hash, + reveal_round, + )); - // 10. Update the last commit block for the hotkey's UID. - Self::set_last_update_for_uid(netuid, neuron_uid, commit_block); + // 10. Update the last commit block for the hotkey's UID. + Self::set_last_update_for_uid(netuid_index, neuron_uid, commit_block); - // 11. Return success. - Ok(()) - }) + // 11. Return success. + Ok(()) + }, + ) } /// ---- The implementation for revealing committed weights. 
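For reference, a minimal sketch of the proportional split performed by the `split_emissions` helper introduced in `pallets/subtensor/src/subnets/subsubnet.rs` above, assuming plain `u64` arithmetic in place of `AlphaCurrency` and a non-zero sub-subnet count; storage reads and the `safe_math` helpers are omitted.

// `shares` are u16 proportions that sum to at most u16::MAX; `None` means an even split.
// Any amount lost to integer truncation is credited to sub-subnet 0, so the parts
// always sum back to `alpha`, mirroring the pallet's rounding-error handling.
fn split_emissions(alpha: u64, count: usize, shares: Option<Vec<u16>>) -> Vec<u64> {
    let mut parts: Vec<u64> = match shares {
        Some(s) => s
            .iter()
            .map(|share| ((alpha as u128 * *share as u128) / u16::MAX as u128) as u64)
            .collect(),
        None => vec![alpha / count as u64; count],
    };
    // Trim or zero-pad to exactly `count` entries, as the pallet does.
    parts.resize(count, 0);
    // Credit the rounding/truncation remainder to sub-subnet 0.
    let assigned: u64 = parts.iter().sum();
    if let Some(first) = parts.first_mut() {
        *first += alpha.saturating_sub(assigned);
    }
    parts
}

fn main() {
    // Even split of 1_000 alpha across 3 sub-subnets: 334 + 333 + 333.
    assert_eq!(split_emissions(1_000, 3, None), vec![334, 333, 333]);
    // Explicit ~75% / ~25% split expressed as u16 shares; nothing is lost.
    let parts = split_emissions(1_000, 2, Some(vec![49_151, 16_384]));
    assert_eq!(parts.iter().sum::<u64>(), 1_000);
}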
@@ -349,6 +425,33 @@ impl Pallet { salt: Vec, version_key: u64, ) -> DispatchResult { + Self::internal_reveal_weights(origin, netuid, SubId::MAIN, uids, values, salt, version_key) + } + + pub fn do_reveal_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + uids: Vec, + values: Vec, + salt: Vec, + version_key: u64, + ) -> DispatchResult { + Self::internal_reveal_weights(origin, netuid, subid, uids, values, salt, version_key) + } + + fn internal_reveal_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + uids: Vec, + values: Vec, + salt: Vec, + version_key: u64, + ) -> DispatchResult { + // Calculate netuid storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + // --- 1. Check the caller's signature (hotkey). let who = ensure_signed(origin.clone())?; @@ -361,80 +464,95 @@ impl Pallet { ); // --- 3. Mutate the WeightCommits to retrieve existing commits for the user. - WeightCommits::::try_mutate_exists(netuid, &who, |maybe_commits| -> DispatchResult { - let commits = maybe_commits - .as_mut() - .ok_or(Error::::NoWeightsCommitFound)?; - - // --- 4. Remove any expired commits from the front of the queue, collecting their hashes. - let mut expired_hashes = Vec::new(); - while let Some((hash, commit_block, _, _)) = commits.front() { - if Self::is_commit_expired(netuid, *commit_block) { - // Collect the expired commit hash - expired_hashes.push(*hash); - commits.pop_front(); - } else { - break; - } - } - - // --- 5. Hash the provided data. - let provided_hash: H256 = - Self::get_commit_hash(&who, netuid, &uids, &values, &salt, version_key); - - // --- 6. After removing expired commits, check if any commits are left. - if commits.is_empty() { - // Check if provided_hash matches any expired commits - if expired_hashes.contains(&provided_hash) { - return Err(Error::::ExpiredWeightCommit.into()); - } else { - return Err(Error::::NoWeightsCommitFound.into()); - } - } - - // --- 7. Search for the provided_hash in the non-expired commits. - if let Some(position) = commits - .iter() - .position(|(hash, _, _, _)| *hash == provided_hash) - { - // --- 8. Get the commit block for the commit being revealed. - let (_, commit_block, _, _) = commits - .get(position) + WeightCommits::::try_mutate_exists( + netuid_index, + &who, + |maybe_commits| -> DispatchResult { + let commits = maybe_commits + .as_mut() .ok_or(Error::::NoWeightsCommitFound)?; - // --- 9. Ensure the commit is ready to be revealed in the current block range. - ensure!( - Self::is_reveal_block_range(netuid, *commit_block), - Error::::RevealTooEarly - ); - - // --- 10. Remove all commits up to and including the one being revealed. - for _ in 0..=position { - commits.pop_front(); + // --- 4. Remove any expired commits from the front of the queue, collecting their hashes. + let mut expired_hashes = Vec::new(); + while let Some((hash, commit_block, _, _)) = commits.front() { + if Self::is_commit_expired(netuid, *commit_block) { + // Collect the expired commit hash + expired_hashes.push(*hash); + commits.pop_front(); + } else { + break; + } } - // --- 11. If the queue is now empty, remove the storage entry for the user. + // --- 5. Hash the provided data. + let provided_hash: H256 = + Self::get_commit_hash(&who, netuid_index, &uids, &values, &salt, version_key); + + // --- 6. After removing expired commits, check if any commits are left. 
if commits.is_empty() { - *maybe_commits = None; + // Check if provided_hash matches any expired commits + if expired_hashes.contains(&provided_hash) { + return Err(Error::::ExpiredWeightCommit.into()); + } else { + return Err(Error::::NoWeightsCommitFound.into()); + } } - // --- 12. Proceed to set the revealed weights. - Self::do_set_weights(origin, netuid, uids.clone(), values.clone(), version_key)?; + // --- 7. Search for the provided_hash in the non-expired commits. + if let Some(position) = commits + .iter() + .position(|(hash, _, _, _)| *hash == provided_hash) + { + // --- 8. Get the commit block for the commit being revealed. + let (_, commit_block, _, _) = commits + .get(position) + .ok_or(Error::::NoWeightsCommitFound)?; + + // --- 9. Ensure the commit is ready to be revealed in the current block range. + ensure!( + Self::is_reveal_block_range(netuid, *commit_block), + Error::::RevealTooEarly + ); + + // --- 10. Remove all commits up to and including the one being revealed. + for _ in 0..=position { + commits.pop_front(); + } - // --- 13. Emit the WeightsRevealed event. - Self::deposit_event(Event::WeightsRevealed(who.clone(), netuid, provided_hash)); + // --- 11. If the queue is now empty, remove the storage entry for the user. + if commits.is_empty() { + *maybe_commits = None; + } - // --- 14. Return ok. - Ok(()) - } else { - // --- 15. The provided_hash does not match any non-expired commits. - if expired_hashes.contains(&provided_hash) { - Err(Error::::ExpiredWeightCommit.into()) + // --- 12. Proceed to set the revealed weights. + Self::do_set_sub_weights( + origin, + netuid, + subid, + uids.clone(), + values.clone(), + version_key, + )?; + + // --- 13. Emit the WeightsRevealed event. + Self::deposit_event(Event::WeightsRevealed( + who.clone(), + netuid_index, + provided_hash, + )); + + // --- 14. Return ok. + Ok(()) } else { - Err(Error::::InvalidRevealCommitHashNotMatch.into()) + // --- 15. The provided_hash does not match any non-expired commits. + if expired_hashes.contains(&provided_hash) { + Err(Error::::ExpiredWeightCommit.into()) + } else { + Err(Error::::InvalidRevealCommitHashNotMatch.into()) + } } - } - }) + }, + ) } /// ---- The implementation for batch revealing committed weights. @@ -484,6 +602,9 @@ impl Pallet { salts_list: Vec>, version_keys: Vec, ) -> DispatchResult { + // Calculate netuid storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, SubId::MAIN); + // --- 1. Check that the input lists are of the same length. let num_reveals = uids_list.len(); ensure!( @@ -505,176 +626,128 @@ impl Pallet { ); // --- 4. Mutate the WeightCommits to retrieve existing commits for the user. - WeightCommits::::try_mutate_exists(netuid, &who, |maybe_commits| -> DispatchResult { - let commits = maybe_commits - .as_mut() - .ok_or(Error::::NoWeightsCommitFound)?; - - // --- 5. Remove any expired commits from the front of the queue, collecting their hashes. - let mut expired_hashes = Vec::new(); - while let Some((hash, commit_block, _, _)) = commits.front() { - if Self::is_commit_expired(netuid, *commit_block) { - // Collect the expired commit hash - expired_hashes.push(*hash); - commits.pop_front(); - } else { - break; - } - } - - // --- 6. Prepare to collect all provided hashes and their corresponding reveals. 
- let mut provided_hashes = Vec::new(); - let mut reveals = Vec::new(); - let mut revealed_hashes: Vec = Vec::with_capacity(num_reveals); - - for ((uids, values), (salt, version_key)) in uids_list - .into_iter() - .zip(values_list) - .zip(salts_list.into_iter().zip(version_keys)) - { - // --- 6a. Hash the provided data. - let provided_hash: H256 = BlakeTwo256::hash_of(&( - who.clone(), - netuid, - uids.clone(), - values.clone(), - salt.clone(), - version_key, - )); - provided_hashes.push(provided_hash); - reveals.push((uids, values, version_key, provided_hash)); - } + WeightCommits::::try_mutate_exists( + netuid_index, + &who, + |maybe_commits| -> DispatchResult { + let commits = maybe_commits + .as_mut() + .ok_or(Error::::NoWeightsCommitFound)?; - // --- 7. Validate all reveals first to ensure atomicity. - for (_uids, _values, _version_key, provided_hash) in &reveals { - // --- 7a. Check if the provided_hash is in the non-expired commits. - if !commits - .iter() - .any(|(hash, _, _, _)| *hash == *provided_hash) - { - // --- 7b. If not found, check if it matches any expired commits. - if expired_hashes.contains(provided_hash) { - return Err(Error::::ExpiredWeightCommit.into()); + // --- 5. Remove any expired commits from the front of the queue, collecting their hashes. + let mut expired_hashes = Vec::new(); + while let Some((hash, commit_block, _, _)) = commits.front() { + if Self::is_commit_expired(netuid, *commit_block) { + // Collect the expired commit hash + expired_hashes.push(*hash); + commits.pop_front(); } else { - return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + break; } } - // --- 7c. Find the commit corresponding to the provided_hash. - let commit = commits - .iter() - .find(|(hash, _, _, _)| *hash == *provided_hash) - .ok_or(Error::::NoWeightsCommitFound)?; - - // --- 7d. Check if the commit is within the reveal window. - ensure!( - Self::is_reveal_block_range(netuid, commit.1), - Error::::RevealTooEarly - ); - } + // --- 6. Prepare to collect all provided hashes and their corresponding reveals. + let mut provided_hashes = Vec::new(); + let mut reveals = Vec::new(); + let mut revealed_hashes: Vec = Vec::with_capacity(num_reveals); - // --- 8. All reveals are valid. Proceed to remove and process each reveal. - for (uids, values, version_key, provided_hash) in reveals { - // --- 8a. Find the position of the provided_hash. - if let Some(position) = commits - .iter() - .position(|(hash, _, _, _)| *hash == provided_hash) + for ((uids, values), (salt, version_key)) in uids_list + .into_iter() + .zip(values_list) + .zip(salts_list.into_iter().zip(version_keys)) { - // --- 8b. Remove the commit from the queue. - commits.remove(position); + // --- 6a. Hash the provided data. + let provided_hash: H256 = BlakeTwo256::hash_of(&( + who.clone(), + netuid, + uids.clone(), + values.clone(), + salt.clone(), + version_key, + )); + provided_hashes.push(provided_hash); + reveals.push((uids, values, version_key, provided_hash)); + } - // --- 8c. Proceed to set the revealed weights. - Self::do_set_weights(origin.clone(), netuid, uids, values, version_key)?; + // --- 7. Validate all reveals first to ensure atomicity. + for (_uids, _values, _version_key, provided_hash) in &reveals { + // --- 7a. Check if the provided_hash is in the non-expired commits. + if !commits + .iter() + .any(|(hash, _, _, _)| *hash == *provided_hash) + { + // --- 7b. If not found, check if it matches any expired commits. 
+ if expired_hashes.contains(provided_hash) { + return Err(Error::::ExpiredWeightCommit.into()); + } else { + return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + } + } - // --- 8d. Collect the revealed hash. - revealed_hashes.push(provided_hash); - } else if expired_hashes.contains(&provided_hash) { - return Err(Error::::ExpiredWeightCommit.into()); - } else { - return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + // --- 7c. Find the commit corresponding to the provided_hash. + let commit = commits + .iter() + .find(|(hash, _, _, _)| *hash == *provided_hash) + .ok_or(Error::::NoWeightsCommitFound)?; + + // --- 7d. Check if the commit is within the reveal window. + ensure!( + Self::is_reveal_block_range(netuid, commit.1), + Error::::RevealTooEarly + ); } - } - // --- 9. If the queue is now empty, remove the storage entry for the user. - if commits.is_empty() { - *maybe_commits = None; - } + // --- 8. All reveals are valid. Proceed to remove and process each reveal. + for (uids, values, version_key, provided_hash) in reveals { + // --- 8a. Find the position of the provided_hash. + if let Some(position) = commits + .iter() + .position(|(hash, _, _, _)| *hash == provided_hash) + { + // --- 8b. Remove the commit from the queue. + commits.remove(position); + + // --- 8c. Proceed to set the revealed weights. + Self::do_set_weights(origin.clone(), netuid, uids, values, version_key)?; + + // --- 8d. Collect the revealed hash. + revealed_hashes.push(provided_hash); + } else if expired_hashes.contains(&provided_hash) { + return Err(Error::::ExpiredWeightCommit.into()); + } else { + return Err(Error::::InvalidRevealCommitHashNotMatch.into()); + } + } - // --- 10. Emit the WeightsBatchRevealed event with all revealed hashes. - Self::deposit_event(Event::WeightsBatchRevealed( - who.clone(), - netuid, - revealed_hashes, - )); + // --- 9. If the queue is now empty, remove the storage entry for the user. + if commits.is_empty() { + *maybe_commits = None; + } - // --- 11. Return ok. - Ok(()) - }) + // --- 10. Emit the WeightsBatchRevealed event with all revealed hashes. + Self::deposit_event(Event::WeightsBatchRevealed( + who.clone(), + netuid, + revealed_hashes, + )); + + // --- 11. Return ok. + Ok(()) + }, + ) } - /// ---- The implementation for the extrinsic set_weights. - /// - /// # Args: - /// * 'origin': (RuntimeOrigin): - /// - The signature of the calling hotkey. - /// - /// * 'netuid' (u16): - /// - The u16 network identifier. - /// - /// * 'uids' ( Vec ): - /// - The uids of the weights to be set on the chain. - /// - /// * 'values' ( Vec ): - /// - The values of the weights to set on the chain. - /// - /// * 'version_key' ( u64 ): - /// - The network version key. - /// - /// # Event: - /// * WeightsSet; - /// - On successfully setting the weights on chain. - /// - /// # Raises: - /// * 'SubNetworkDoesNotExist': - /// - Attempting to set weights on a non-existent network. - /// - /// * 'NotRegistered': - /// - Attempting to set weights from a non registered account. - /// - /// * 'IncorrectWeightVersionKey': - /// - Attempting to set weights without having an up-to-date version_key. - /// - /// * 'SettingWeightsTooFast': - /// - Attempting to set weights faster than the weights_set_rate_limit. - /// - /// * 'NeuronNoValidatorPermit': - /// - Attempting to set non-self weights without a validator permit. - /// - /// * 'WeightVecNotEqualSize': - /// - Attempting to set weights with uids not of same length. 
- /// - /// * 'DuplicateUids': - /// - Attempting to set weights with duplicate uids. - /// - /// * 'UidsLengthExceedUidsInSubNet': - /// - Attempting to set weights above the max allowed uids. - /// - /// * 'UidVecContainInvalidOne': - /// - Attempting to set weights with invalid uids. - /// - /// * 'WeightVecLengthIsLow': - /// - Attempting to set weights with fewer weights than min. - /// - /// * 'MaxWeightExceeded': - /// - Attempting to set weights with max value exceeding limit. - /// - pub fn do_set_weights( + fn internal_set_weights( origin: T::RuntimeOrigin, netuid: NetUid, + subid: SubId, uids: Vec, values: Vec, version_key: u64, ) -> dispatch::DispatchResult { + // Calculate subnet storage index + let netuid_index = Self::get_subsubnet_storage_index(netuid, subid); + // --- 1. Check the caller's signature. This is the hotkey of a registered account. let hotkey = ensure_signed(origin)?; log::debug!( @@ -690,11 +763,8 @@ impl Pallet { Error::::WeightVecNotEqualSize ); - // --- 3. Check to see if this is a valid network. - ensure!( - Self::if_subnet_exist(netuid), - Error::::SubNetworkDoesNotExist - ); + // --- 3. Check to see if this is a valid network and sub-subnet. + Self::ensure_subsubnet_exists(netuid, subid)?; // --- 4. Check to see if the number of uids is within the max allowed uids for this network. ensure!( @@ -725,7 +795,8 @@ impl Pallet { let current_block: u64 = Self::get_current_block_as_u64(); if !Self::get_commit_reveal_weights_enabled(netuid) { ensure!( - Self::check_rate_limit(netuid, neuron_uid, current_block), + // Rate limit should apply per sub-subnet, so use netuid_index here + Self::check_rate_limit(netuid_index, neuron_uid, current_block), Error::::SettingWeightsTooFast ); } @@ -765,22 +836,158 @@ impl Pallet { zipped_weights.push((*uid, *val)) } - // --- 17. Set weights under netuid, uid double map entry. - Weights::::insert(netuid, neuron_uid, zipped_weights); + // --- 17. Set weights under netuid_index (sub-subnet), uid double map entry. + Weights::::insert(netuid_index, neuron_uid, zipped_weights); // --- 18. Set the activity for the weights on this network. if !Self::get_commit_reveal_weights_enabled(netuid) { - Self::set_last_update_for_uid(netuid, neuron_uid, current_block); + Self::set_last_update_for_uid(netuid_index, neuron_uid, current_block); } // --- 19. Emit the tracking event. - log::debug!("WeightsSet( netuid:{netuid:?}, neuron_uid:{neuron_uid:?} )"); - Self::deposit_event(Event::WeightsSet(netuid, neuron_uid)); + log::debug!("WeightsSet( netuid:{netuid_index:?}, neuron_uid:{neuron_uid:?} )"); + Self::deposit_event(Event::WeightsSet(netuid_index, neuron_uid)); // --- 20. Return ok. Ok(()) } + /// ---- The implementation for the extrinsic set_weights. + /// + /// # Args: + /// * 'origin': (RuntimeOrigin): + /// - The signature of the calling hotkey. + /// + /// * 'netuid' (u16): + /// - The u16 network identifier. + /// + /// * 'uids' ( Vec ): + /// - The uids of the weights to be set on the chain. + /// + /// * 'values' ( Vec ): + /// - The values of the weights to set on the chain. + /// + /// * 'version_key' ( u64 ): + /// - The network version key. + /// + /// # Event: + /// * WeightsSet; + /// - On successfully setting the weights on chain. + /// + /// # Raises: + /// * 'SubNetworkDoesNotExist': + /// - Attempting to set weights on a non-existent network. + /// + /// * 'NotRegistered': + /// - Attempting to set weights from a non registered account. 
+ /// + /// * 'IncorrectWeightVersionKey': + /// - Attempting to set weights without having an up-to-date version_key. + /// + /// * 'SettingWeightsTooFast': + /// - Attempting to set weights faster than the weights_set_rate_limit. + /// + /// * 'NeuronNoValidatorPermit': + /// - Attempting to set non-self weights without a validator permit. + /// + /// * 'WeightVecNotEqualSize': + /// - Attempting to set weights with uids not of same length. + /// + /// * 'DuplicateUids': + /// - Attempting to set weights with duplicate uids. + /// + /// * 'UidsLengthExceedUidsInSubNet': + /// - Attempting to set weights above the max allowed uids. + /// + /// * 'UidVecContainInvalidOne': + /// - Attempting to set weights with invalid uids. + /// + /// * 'WeightVecLengthIsLow': + /// - Attempting to set weights with fewer weights than min. + /// + /// * 'MaxWeightExceeded': + /// - Attempting to set weights with max value exceeding limit. + /// + pub fn do_set_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + uids: Vec, + values: Vec, + version_key: u64, + ) -> dispatch::DispatchResult { + Self::internal_set_weights(origin, netuid, SubId::MAIN, uids, values, version_key) + } + + /// ---- The implementation for the extrinsic set_weights. + /// + /// # Args: + /// * 'origin': (RuntimeOrigin): + /// - The signature of the calling hotkey. + /// + /// * 'netuid' (u16): + /// - The u16 network identifier. + /// + /// * 'subid' (u8): + /// - The u8 identifier of sub-subnet. + /// + /// * 'uids' ( Vec ): + /// - The uids of the weights to be set on the chain. + /// + /// * 'values' ( Vec ): + /// - The values of the weights to set on the chain. + /// + /// * 'version_key' ( u64 ): + /// - The network version key. + /// + /// # Event: + /// * WeightsSet; + /// - On successfully setting the weights on chain. + /// + /// # Raises: + /// * 'SubNetworkDoesNotExist': + /// - Attempting to set weights on a non-existent network. + /// + /// * 'NotRegistered': + /// - Attempting to set weights from a non registered account. + /// + /// * 'IncorrectWeightVersionKey': + /// - Attempting to set weights without having an up-to-date version_key. + /// + /// * 'SettingWeightsTooFast': + /// - Attempting to set weights faster than the weights_set_rate_limit. + /// + /// * 'NeuronNoValidatorPermit': + /// - Attempting to set non-self weights without a validator permit. + /// + /// * 'WeightVecNotEqualSize': + /// - Attempting to set weights with uids not of same length. + /// + /// * 'DuplicateUids': + /// - Attempting to set weights with duplicate uids. + /// + /// * 'UidsLengthExceedUidsInSubNet': + /// - Attempting to set weights above the max allowed uids. + /// + /// * 'UidVecContainInvalidOne': + /// - Attempting to set weights with invalid uids. + /// + /// * 'WeightVecLengthIsLow': + /// - Attempting to set weights with fewer weights than min. + /// + /// * 'MaxWeightExceeded': + /// - Attempting to set weights with max value exceeding limit. + /// + pub fn do_set_sub_weights( + origin: T::RuntimeOrigin, + netuid: NetUid, + subid: SubId, + uids: Vec, + values: Vec, + version_key: u64, + ) -> dispatch::DispatchResult { + Self::internal_set_weights(origin, netuid, subid, uids, values, version_key) + } + /// ---- The implementation for the extrinsic batch_set_weights. /// /// This call runs a batch of set weights calls, continuing on errors. @@ -888,17 +1095,25 @@ impl Pallet { /// Checks if the neuron has set weights within the weights_set_rate_limit. 
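+ /// Note (per this diff): the last-update block is tracked per sub-subnet, i.e. under a
+ /// `NetUidStorageIndex` (netuid, subid) pair, while the rate-limit value itself is still
+ /// read per `netuid`, so each sub-subnet gets its own independent rate-limit window.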
/// - pub fn check_rate_limit(netuid: NetUid, neuron_uid: u16, current_block: u64) -> bool { - if Self::is_uid_exist_on_network(netuid, neuron_uid) { - // --- 1. Ensure that the diff between current and last_set weights is greater than limit. - let last_set_weights: u64 = Self::get_last_update_for_uid(netuid, neuron_uid); - if last_set_weights == 0 { - return true; - } // (Storage default) Never set weights. - return current_block.saturating_sub(last_set_weights) - >= Self::get_weights_set_rate_limit(netuid); + pub fn check_rate_limit( + netuid_index: NetUidStorageIndex, + neuron_uid: u16, + current_block: u64, + ) -> bool { + let maybe_netuid_and_subid = Self::get_netuid_and_subid(netuid_index); + if let Ok((netuid, _)) = maybe_netuid_and_subid { + if Self::is_uid_exist_on_network(netuid, neuron_uid) { + // --- 1. Ensure that the diff between current and last_set weights is greater than limit. + let last_set_weights: u64 = Self::get_last_update_for_uid(netuid_index, neuron_uid); + if last_set_weights == 0 { + return true; + } // (Storage default) Never set weights. + return current_block.saturating_sub(last_set_weights) + >= Self::get_weights_set_rate_limit(netuid); + } } - // --- 3. Non registered peers cant pass. + + // --- 3. Non registered peers cant pass. Neither can non-existing subid false } @@ -1094,13 +1309,13 @@ impl Pallet { pub fn get_commit_hash( who: &T::AccountId, - netuid: NetUid, + netuid_index: NetUidStorageIndex, uids: &[u16], values: &[u16], salt: &[u16], version_key: u64, ) -> H256 { - BlakeTwo256::hash_of(&(who.clone(), netuid, uids, values, salt, version_key)) + BlakeTwo256::hash_of(&(who.clone(), netuid_index, uids, values, salt, version_key)) } pub fn find_commit_block_via_hash(hash: H256) -> Option { diff --git a/pallets/subtensor/src/swap/swap_hotkey.rs b/pallets/subtensor/src/swap/swap_hotkey.rs index 0454b1dd16..19737f765c 100644 --- a/pallets/subtensor/src/swap/swap_hotkey.rs +++ b/pallets/subtensor/src/swap/swap_hotkey.rs @@ -2,7 +2,7 @@ use super::*; use frame_support::weights::Weight; use sp_core::Get; use substrate_fixed::types::U64F64; -use subtensor_runtime_common::{Currency, NetUid}; +use subtensor_runtime_common::{Currency, NetUid, SubId}; impl Pallet { /// Swaps the hotkey of a coldkey account. @@ -411,10 +411,15 @@ impl Pallet { // 3.5 Swap WeightCommits // WeightCommits( hotkey ) --> Vec -- the weight commits for the hotkey. 
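+ // Weight commits are keyed by NetUidStorageIndex in this change, so the swap below
+ // walks every sub-subnet of the netuid. Per the subsubnet tests, the index layout is
+ // index = u16::from(subid) * GLOBAL_MAX_SUBNET_COUNT + u16::from(netuid), which makes
+ // sub-subnet 0 alias the plain netuid.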
if is_network_member { - if let Ok(old_weight_commits) = WeightCommits::::try_get(netuid, old_hotkey) { - WeightCommits::::remove(netuid, old_hotkey); - WeightCommits::::insert(netuid, new_hotkey, old_weight_commits); - weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + for subid in 0..SubsubnetCountCurrent::::get(netuid).into() { + let netuid_index = Self::get_subsubnet_storage_index(netuid, SubId::from(subid)); + if let Ok(old_weight_commits) = + WeightCommits::::try_get(netuid_index, old_hotkey) + { + WeightCommits::::remove(netuid_index, old_hotkey); + WeightCommits::::insert(netuid_index, new_hotkey, old_weight_commits); + weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 2)); + } } } diff --git a/pallets/subtensor/src/tests/children.rs b/pallets/subtensor/src/tests/children.rs index a0505fa9f3..1208add954 100644 --- a/pallets/subtensor/src/tests/children.rs +++ b/pallets/subtensor/src/tests/children.rs @@ -6,7 +6,7 @@ use super::mock::*; use approx::assert_abs_diff_eq; use frame_support::{assert_err, assert_noop, assert_ok}; use substrate_fixed::types::{I64F64, I96F32, U96F32}; -use subtensor_runtime_common::{AlphaCurrency, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::SwapHandler; use crate::{utils::rate_limiting::TransactionType, *}; @@ -2841,6 +2841,7 @@ fn test_set_weights_no_parent() { /// Test that drain_pending_emission sends childkey take fully to the nominators if childkey /// doesn't have its own stake, independently of parent hotkey take. +/// cargo test --package pallet-subtensor --lib -- tests::children::test_childkey_take_drain --exact --show-output #[allow(clippy::assertions_on_constants)] #[test] fn test_childkey_take_drain() { @@ -2917,12 +2918,12 @@ fn test_childkey_take_drain() { )); // Setup YUMA so that it creates emissions - Weights::::insert(netuid, 0, vec![(2, 0xFFFF)]); - Weights::::insert(netuid, 1, vec![(2, 0xFFFF)]); + Weights::::insert(NetUidStorageIndex::from(netuid), 0, vec![(2, 0xFFFF)]); + Weights::::insert(NetUidStorageIndex::from(netuid), 1, vec![(2, 0xFFFF)]); BlockAtRegistration::::set(netuid, 0, 1); BlockAtRegistration::::set(netuid, 1, 1); BlockAtRegistration::::set(netuid, 2, 1); - LastUpdate::::set(netuid, vec![2, 2, 2]); + LastUpdate::::set(NetUidStorageIndex::from(netuid), vec![2, 2, 2]); Kappa::::set(netuid, u16::MAX / 5); ActivityCutoff::::set(netuid, u16::MAX); // makes all stake active ValidatorPermit::::insert(netuid, vec![true, true, false]); diff --git a/pallets/subtensor/src/tests/coinbase.rs b/pallets/subtensor/src/tests/coinbase.rs index 30cef8556f..c772a4ac3e 100644 --- a/pallets/subtensor/src/tests/coinbase.rs +++ b/pallets/subtensor/src/tests/coinbase.rs @@ -9,7 +9,7 @@ use frame_support::assert_ok; use pallet_subtensor_swap::position::PositionId; use sp_core::U256; use substrate_fixed::types::{I64F64, I96F32, U96F32}; -use subtensor_runtime_common::AlphaCurrency; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex}; use subtensor_swap_interface::SwapHandler; #[allow(clippy::arithmetic_side_effects)] @@ -2445,6 +2445,7 @@ fn test_drain_pending_emission_no_miners_all_drained() { }); } +// cargo test --package pallet-subtensor --lib -- tests::coinbase::test_drain_pending_emission_zero_emission --exact --show-output #[test] fn test_drain_pending_emission_zero_emission() { new_test_ext(1).execute_with(|| { @@ -2493,7 +2494,7 @@ fn test_drain_pending_emission_zero_emission() { run_to_block_no_epoch(netuid, 50); // 
Clear incentive and dividends. - Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Dividends::::remove(netuid); // Set the emission to be ZERO. @@ -2511,7 +2512,12 @@ fn test_drain_pending_emission_zero_emission() { assert_eq!(new_stake, init_stake.into()); // Check that the incentive and dividends are set by epoch. - assert!(Incentive::::get(netuid).iter().sum::() > 0); + assert!( + Incentive::::get(NetUidStorageIndex::from(netuid)) + .iter() + .sum::() + > 0 + ); assert!(Dividends::::get(netuid).iter().sum::() > 0); }); } @@ -2578,7 +2584,7 @@ fn test_run_coinbase_not_started() { )); // Clear incentive and dividends. - Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Dividends::::remove(netuid); // Step so tempo should run. @@ -2600,7 +2606,12 @@ fn test_run_coinbase_not_started() { assert_eq!(new_stake, init_stake.into()); // Check that the incentive and dividends are set. - assert!(Incentive::::get(netuid).iter().sum::() > 0); + assert!( + Incentive::::get(NetUidStorageIndex::from(netuid)) + .iter() + .sum::() + > 0 + ); assert!(Dividends::::get(netuid).iter().sum::() > 0); }); } @@ -2664,7 +2675,7 @@ fn test_run_coinbase_not_started_start_after() { )); // Clear incentive and dividends. - Incentive::::remove(netuid); + Incentive::::remove(NetUidStorageIndex::from(netuid)); Dividends::::remove(netuid); // Step so tempo should run. diff --git a/pallets/subtensor/src/tests/consensus.rs b/pallets/subtensor/src/tests/consensus.rs index 6a7aa7d467..7eb65c3fc0 100644 --- a/pallets/subtensor/src/tests/consensus.rs +++ b/pallets/subtensor/src/tests/consensus.rs @@ -13,6 +13,7 @@ use sp_core::U256; use std::time::Instant; use substrate_fixed::transcendental::{PI, cos, ln, sqrt}; use substrate_fixed::types::{I32F32, I64F64}; +use subtensor_runtime_common::NetUidStorageIndex; pub fn fixed(val: f32) -> I32F32 { I32F32::from_num(val) @@ -134,7 +135,7 @@ fn uid_stats(netuid: NetUid, uid: u16) { ); log::info!( "incentive: {:?}", - SubtensorModule::get_incentive_for_uid(netuid, uid) + SubtensorModule::get_incentive_for_uid(NetUidStorageIndex::from(netuid), uid) ); log::info!( "dividend: {:?}", diff --git a/pallets/subtensor/src/tests/epoch.rs b/pallets/subtensor/src/tests/epoch.rs index 25b4c48781..fec978a51d 100644 --- a/pallets/subtensor/src/tests/epoch.rs +++ b/pallets/subtensor/src/tests/epoch.rs @@ -11,7 +11,7 @@ use frame_support::{assert_err, assert_ok}; use rand::{Rng, SeedableRng, distributions::Uniform, rngs::StdRng, seq::SliceRandom, thread_rng}; use sp_core::{Get, U256}; use substrate_fixed::types::I32F32; -use subtensor_runtime_common::{AlphaCurrency, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::SwapHandler; use super::mock::*; @@ -128,7 +128,7 @@ fn uid_stats(netuid: NetUid, uid: u16) { ); log::info!( "incentive: {:?}", - SubtensorModule::get_incentive_for_uid(netuid, uid) + SubtensorModule::get_incentive_for_uid(NetUidStorageIndex::from(netuid), uid) ); log::info!( "dividend: {:?}", @@ -595,7 +595,10 @@ fn test_1_graph() { assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 0); - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, uid), 0); + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), + 0 + ); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 0); }); 
} @@ -657,7 +660,10 @@ fn test_10_graph() { assert_eq!(SubtensorModule::get_rank_for_uid(netuid, i as u16), 0); assert_eq!(SubtensorModule::get_trust_for_uid(netuid, i as u16), 0); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, i as u16), 0); - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, i as u16), 0); + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), i as u16), + 0 + ); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, i as u16), 0); assert_eq!( SubtensorModule::get_emission_for_uid(netuid, i as u16), @@ -705,7 +711,7 @@ fn test_512_graph() { false, u16::MAX, ); - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); for uid in validators { assert_eq!( SubtensorModule::get_total_stake_for_hotkey(&(U256::from(uid))), @@ -714,7 +720,10 @@ fn test_512_graph() { assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 0); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 0); - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, uid), 0); + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), + 0 + ); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 1023); // Note D = floor(1 / 64 * 65_535) = 1023 assert_eq!( SubtensorModule::get_emission_for_uid(netuid, uid), @@ -732,7 +741,10 @@ fn test_512_graph() { assert_eq!(SubtensorModule::get_rank_for_uid(netuid, uid), 146); // Note R = floor(1 / (512 - 64) * 65_535) = 146 assert_eq!(SubtensorModule::get_trust_for_uid(netuid, uid), 65535); assert_eq!(SubtensorModule::get_consensus_for_uid(netuid, uid), 146); // Note C = floor(1 / (512 - 64) * 65_535) = 146 - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, uid), 146); // Note I = floor(1 / (512 - 64) * 65_535) = 146 + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), + 146 + ); // Note I = floor(1 / (512 - 64) * 65_535) = 146 assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, uid), 0); assert_eq!( SubtensorModule::get_emission_for_uid(netuid, uid), @@ -795,10 +807,10 @@ fn test_512_graph_random_weights() { bonds_penalty, ); - let bond = SubtensorModule::get_bonds(netuid); + let bond = SubtensorModule::get_bonds(netuid.into()); for uid in 0..network_n { rank.push(SubtensorModule::get_rank_for_uid(netuid, uid)); - incentive.push(SubtensorModule::get_incentive_for_uid(netuid, uid)); + incentive.push(SubtensorModule::get_incentive_for_uid(netuid.into(), uid)); dividend.push(SubtensorModule::get_dividends_for_uid(netuid, uid)); emission.push(SubtensorModule::get_emission_for_uid(netuid, uid)); bondv.push(bond[uid as usize][validator]); @@ -826,14 +838,14 @@ fn test_512_graph_random_weights() { bonds_penalty, ); // Assert that dense and sparse epoch results are equal - let bond = SubtensorModule::get_bonds(netuid); + let bond = SubtensorModule::get_bonds(netuid.into()); for uid in 0..network_n { assert_eq!( SubtensorModule::get_rank_for_uid(netuid, uid), rank[uid as usize] ); assert_eq!( - SubtensorModule::get_incentive_for_uid(netuid, uid), + SubtensorModule::get_incentive_for_uid(netuid.into(), uid), incentive[uid as usize] ); assert_eq!( @@ -1070,7 +1082,7 @@ fn test_bonds() { E: [49999998, 99999999, 150000000, 200000001, 49998779, 100000610, 149996337, 200004272] P: [0.0499999989, 0.0999999992, 0.1500000006, 0.2000000011, 0.049998779, 0.1000006103, 0.1499963375, 0.2000042726] emaB: [[(4, 0.2499999937), (5, 0.2499999953), (6, 0.2499999937), (7, 
0.2499999937)], [(4, 0.4999999942), (5, 0.499999997), (6, 0.4999999942), (7, 0.4999999942)], [(4, 0.7499999937), (5, 0.7499999981), (6, 0.7499999995), (7, 0.7499999995)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][4], 16383); assert_eq!(bonds[1][4], 32767); assert_eq!(bonds[2][4], 49151); @@ -1118,7 +1130,7 @@ fn test_bonds() { E: [44998351, 101110561, 151667215, 202223870, 49998779, 100000610, 149996337, 200004272] P: [0.0449983515, 0.1011105615, 0.1516672159, 0.2022238704, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] emaB: [[(4, 0.2225175085), (5, 0.2225175085), (6, 0.2225175085), (7, 0.2225175085)], [(4, 0.499993208), (5, 0.4999932083), (6, 0.4999932083), (7, 0.4999932083)], [(4, 0.7499966028), (5, 0.7499966032), (6, 0.7499966032), (7, 0.7499966032)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][4], 14582); assert_eq!(bonds[1][4], 32767); assert_eq!(bonds[2][4], 49151); @@ -1155,7 +1167,7 @@ fn test_bonds() { E: [40496805, 90999783, 157929636, 210573773, 49998779, 100000610, 149996337, 200004272] P: [0.040496806, 0.0909997837, 0.157929636, 0.2105737738, 0.049998779, 0.1000006103, 0.1499963377, 0.2000042726] emaB: [[(4, 0.192316476), (5, 0.192316476), (6, 0.192316476), (7, 0.192316476)], [(4, 0.4321515555), (5, 0.4321515558), (6, 0.4321515558), (7, 0.4321515558)], [(4, 0.7499967015), (5, 0.7499967027), (6, 0.7499967027), (7, 0.7499967027)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][4], 12603); assert_eq!(bonds[1][4], 28321); assert_eq!(bonds[2][4], 49151); @@ -1192,7 +1204,7 @@ fn test_bonds() { E: [99999999, 199999999, 299999999, 399999999, 0, 0, 0, 0] P: [0.0999999999, 0.2, 0.2999999998, 0.4, 0, 0, 0, 0] emaB: [[(4, 0.1923094518), (5, 0.1923094518), (6, 0.1923094518), (7, 0.1923094518)], [(4, 0.4321507583), (5, 0.4321507583), (6, 0.4321507583), (7, 0.4321507583)], [(4, 0.7499961846), (5, 0.7499961846), (6, 0.7499961846), (7, 0.7499961846)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][7], 12602); assert_eq!(bonds[1][7], 28320); assert_eq!(bonds[2][7], 49150); @@ -1228,7 +1240,7 @@ fn test_bonds() { E: [36443733, 81898628, 163565493, 218092144, 0, 0, 0, 500000000] P: [0.0364437331, 0.081898629, 0.1635654932, 0.2180921442, 0, 0, 0, 0.5] emaB: [[(4, 0.1922941932), (5, 0.1922941932), (6, 0.1922941932), (7, 0.1671024568)], [(4, 0.4321354993), (5, 0.4321354993), (6, 0.4321354993), (7, 0.3755230587)], [(4, 0.7499809256), (5, 0.7499809256), (6, 0.7499809256), (7, 0.749983425)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][7], 10951); assert_eq!(bonds[1][7], 24609); assert_eq!(bonds[2][7], 49150); @@ -1250,7 +1262,7 @@ fn test_bonds() { E: [32799427, 73706612, 168638129, 224855830, 0, 0, 0, 500000000] P: [0.0327994274, 0.0737066122, 0.1686381293, 0.2248558307, 0, 0, 0, 0.5] emaB: [[(4, 0.1922789337), (5, 0.1922789337), (6, 0.1922789337), (7, 0.1458686984)], [(4, 0.4321202405), (5, 0.4321202405), 
(6, 0.4321202405), (7, 0.3277949789)], [(4, 0.749965667), (5, 0.749965667), (6, 0.749965667), (7, 0.74998335)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][7], 9559); assert_eq!(bonds[1][7], 21482); assert_eq!(bonds[2][7], 49150); @@ -1272,7 +1284,7 @@ fn test_bonds() { E: [29518068, 66336137, 173203134, 230942659, 0, 0, 0, 500000000] P: [0.029518068, 0.0663361375, 0.1732031347, 0.2309426593, 0, 0, 0, 0.5] emaB: [[(4, 0.192263675), (5, 0.192263675), (6, 0.192263675), (7, 0.1278155716)], [(4, 0.4321049813), (5, 0.4321049813), (6, 0.4321049813), (7, 0.2872407278)], [(4, 0.7499504078), (5, 0.7499504078), (6, 0.7499504078), (7, 0.7499832863)], [(4, 1), (5, 1), (6, 1), (7, 1)], [], [], [], []] */ - let bonds = SubtensorModule::get_bonds( netuid ); + let bonds = SubtensorModule::get_bonds( netuid.into() ); assert_eq!(bonds[0][7], 8376); assert_eq!(bonds[1][7], 18824); assert_eq!(bonds[2][7], 49150); @@ -1408,7 +1420,7 @@ fn test_active_stake() { } else { SubtensorModule::epoch_dense(netuid, 1_000_000_000.into()); } - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); for uid in 0..n { // log::info!("\n{uid}" ); // uid_stats(netuid, uid); @@ -1473,7 +1485,7 @@ fn test_active_stake() { E: [274999999, 224999999, 250000000, 250000000] P: [0.275, 0.2249999999, 0.25, 0.25] P (u16): [65535, 53619, 59577, 59577] */ - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, 0), 36044); // Note D = floor((0.5 * 0.9 + 0.1) * 65_535) assert_eq!( SubtensorModule::get_emission_for_uid(netuid, 0), @@ -1537,7 +1549,7 @@ fn test_active_stake() { E: [272501132, 227498866, 250000000, 250000000] P: [0.272501133, 0.2274988669, 0.25, 0.25] P (u16): [65535, 54711, 60123, 60123] */ - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, 0), 35716); // Note D = floor((0.55 * 0.9 + 0.5 * 0.1) * 65_535) assert_eq!( SubtensorModule::get_emission_for_uid(netuid, 0), @@ -1558,6 +1570,7 @@ fn test_active_stake() { } // Test that epoch masks out outdated weights and bonds of validators on deregistered servers. 
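+// Note: get_bonds, get_bonds_fixed_proportion, get_weights_sparse and get_incentive_for_uid
+// take a NetUidStorageIndex after this change, hence the `netuid.into()` conversions in these
+// tests; converting a bare NetUid addresses sub-subnet 0, which shares the same numeric index.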
+// #[test] fn test_outdated_weights() { new_test_ext(1).execute_with(|| { @@ -1736,7 +1749,7 @@ fn test_outdated_weights() { E: [250000000, 250000000, 500000000, 0] P: [0.25, 0.25, 0.5, 0] P (u16): [32767, 32767, 65535, 0] */ - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); assert_eq!(SubtensorModule::get_dividends_for_uid(netuid, 0), 32767); // Note D = floor(0.5 * 65_535) assert_eq!( SubtensorModule::get_emission_for_uid(netuid, 0), @@ -2035,7 +2048,7 @@ fn test_deregistered_miner_bonds() { } // Check the bond values for the servers - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); let bond_0_2 = bonds[0][2]; let bond_0_3 = bonds[0][3]; @@ -2107,7 +2120,7 @@ fn test_deregistered_miner_bonds() { } // Check the bond values for the servers - let bonds = SubtensorModule::get_bonds(netuid); + let bonds = SubtensorModule::get_bonds(netuid.into()); let bond_0_2_new = bonds[0][2]; let bond_0_3_new = bonds[0][3]; @@ -2452,6 +2465,7 @@ fn test_blocks_since_last_step() { }); } +/// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::epoch::test_can_set_self_weight_as_subnet_owner --exact --show-output #[test] fn test_can_set_self_weight_as_subnet_owner() { new_test_ext(1).execute_with(|| { @@ -2483,19 +2497,27 @@ fn test_can_set_self_weight_as_subnet_owner() { // Set weight of 50% to each hotkey. // This includes a self-weight let fifty_percent: u16 = u16::MAX / 2; - Weights::::insert(netuid, 0, vec![(0, fifty_percent), (1, fifty_percent)]); + Weights::::insert( + NetUidStorageIndex::from(netuid), + 0, + vec![(0, fifty_percent), (1, fifty_percent)], + ); step_block(1); // Set updated so weights are valid - LastUpdate::::insert(netuid, vec![2, 0]); + LastUpdate::::insert(NetUidStorageIndex::from(netuid), vec![2, 0]); // Run epoch let hotkey_emission = SubtensorModule::epoch(netuid, to_emit.into()); // hotkey_emission is [(hotkey, incentive, dividend)] assert_eq!(hotkey_emission.len(), 2); - assert_eq!(hotkey_emission[0].0, subnet_owner_hotkey); - assert_eq!(hotkey_emission[1].0, other_hotkey); + assert!( + hotkey_emission + .iter() + .any(|(hk, _, _)| *hk == subnet_owner_hotkey) + ); + assert!(hotkey_emission.iter().any(|(hk, _, _)| *hk == other_hotkey)); log::debug!("hotkey_emission: {hotkey_emission:?}"); // Both should have received incentive emission @@ -2742,7 +2764,7 @@ fn run_epoch_and_check_bonds_dividends( target_dividends: &[f32], ) { run_epoch(netuid, sparse); - let bonds = SubtensorModule::get_bonds_fixed_proportion(netuid); + let bonds = SubtensorModule::get_bonds_fixed_proportion(netuid.into()); let dividends = SubtensorModule::get_dividends(netuid); let epsilon = I32F32::from_num(1e-3); @@ -3485,7 +3507,7 @@ fn test_yuma_3_bonds_reset() { if epoch == 20 { let hotkey = SubtensorModule::get_hotkey_for_net_and_uid(netuid, 3) .expect("Hotkey not found"); - let _ = SubtensorModule::do_reset_bonds(netuid, &hotkey); + let _ = SubtensorModule::do_reset_bonds(netuid.into(), &hotkey); } } 21 => { @@ -3650,7 +3672,10 @@ fn test_epoch_masks_incoming_to_sniped_uid_prevents_inheritance() { SubtensorModule::epoch(netuid, 1_000.into()); assert_eq!(SubtensorModule::get_rank_for_uid(netuid, new_uid), 0); - assert_eq!(SubtensorModule::get_incentive_for_uid(netuid, new_uid), 0); + assert_eq!( + SubtensorModule::get_incentive_for_uid(netuid.into(), new_uid), + 0 + ); }); } @@ -3693,7 +3718,7 @@ fn test_epoch_no_mask_when_commit_reveal_disabled() { for _ in 
0..3 { SubtensorModule::epoch(netuid, 1.into()); assert!( - !SubtensorModule::get_weights_sparse(netuid)[0].is_empty(), + !SubtensorModule::get_weights_sparse(netuid.into())[0].is_empty(), "row visible when CR disabled" ); run_to_block(System::block_number() + tempo as u64 + 1); diff --git a/pallets/subtensor/src/tests/migration.rs b/pallets/subtensor/src/tests/migration.rs index d67b86e42d..1dcc911f4b 100644 --- a/pallets/subtensor/src/tests/migration.rs +++ b/pallets/subtensor/src/tests/migration.rs @@ -22,7 +22,7 @@ use sp_io::hashing::twox_128; use sp_runtime::traits::Zero; use substrate_fixed::types::I96F32; use substrate_fixed::types::extra::U2; -use subtensor_runtime_common::TaoCurrency; +use subtensor_runtime_common::{NetUidStorageIndex, TaoCurrency}; #[allow(clippy::arithmetic_side_effects)] fn close(value: u64, target: u64, eps: u64) { @@ -1263,10 +1263,17 @@ fn test_migrate_crv3_commits_add_block() { let old_queue: VecDeque<_> = VecDeque::from(vec![(who, ciphertext.clone(), round)]); - CRV3WeightCommits::::insert(netuid, epoch, old_queue.clone()); + CRV3WeightCommits::::insert( + NetUidStorageIndex::from(netuid), + epoch, + old_queue.clone(), + ); // Sanity: entry decodes under old alias - assert_eq!(CRV3WeightCommits::::get(netuid, epoch), old_queue); + assert_eq!( + CRV3WeightCommits::::get(NetUidStorageIndex::from(netuid), epoch), + old_queue + ); assert!( !HasMigrationRun::::get(MIG_NAME.to_vec()), @@ -1291,11 +1298,11 @@ fn test_migrate_crv3_commits_add_block() { // Old storage must be empty (drained) assert!( - CRV3WeightCommits::::get(netuid, epoch).is_empty(), + CRV3WeightCommits::::get(NetUidStorageIndex::from(netuid), epoch).is_empty(), "old queue should have been drained" ); - let new_q = CRV3WeightCommitsV2::::get(netuid, epoch); + let new_q = CRV3WeightCommitsV2::::get(NetUidStorageIndex::from(netuid), epoch); assert_eq!(new_q.len(), 1, "exactly one migrated element expected"); let (who2, commit_block, cipher2, round2) = new_q.front().cloned().unwrap(); @@ -1518,18 +1525,23 @@ fn test_migrate_crv3_v2_to_timelocked() { VecDeque::from(vec![(who, commit_block, ciphertext.clone(), round)]); // Insert under the deprecated alias - CRV3WeightCommitsV2::::insert(netuid, epoch, old_queue.clone()); + CRV3WeightCommitsV2::::insert( + NetUidStorageIndex::from(netuid), + epoch, + old_queue.clone(), + ); // Sanity: entry decodes under old alias assert_eq!( - CRV3WeightCommitsV2::::get(netuid, epoch), + CRV3WeightCommitsV2::::get(NetUidStorageIndex::from(netuid), epoch), old_queue, "pre-migration: old queue should be present" ); // Destination should be empty pre-migration assert!( - TimelockedWeightCommits::::get(netuid, epoch).is_empty(), + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), epoch) + .is_empty(), "pre-migration: destination should be empty" ); @@ -1556,12 +1568,12 @@ fn test_migrate_crv3_v2_to_timelocked() { // Old storage must be empty (drained) assert!( - CRV3WeightCommitsV2::::get(netuid, epoch).is_empty(), + CRV3WeightCommitsV2::::get(NetUidStorageIndex::from(netuid), epoch).is_empty(), "old queue should have been drained" ); // New storage must match exactly - let new_q = TimelockedWeightCommits::::get(netuid, epoch); + let new_q = TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), epoch); assert_eq!( new_q, old_queue, "migrated queue must exactly match the old queue" diff --git a/pallets/subtensor/src/tests/mod.rs b/pallets/subtensor/src/tests/mod.rs index 1f4aa71363..205b4977cd 100644 --- a/pallets/subtensor/src/tests/mod.rs +++ 
b/pallets/subtensor/src/tests/mod.rs @@ -22,6 +22,7 @@ mod serving; mod staking; mod staking2; mod subnet; +mod subsubnet; mod swap_coldkey; mod swap_hotkey; mod swap_hotkey_with_subnet; diff --git a/pallets/subtensor/src/tests/staking.rs b/pallets/subtensor/src/tests/staking.rs index 38895348cc..b36788fa31 100644 --- a/pallets/subtensor/src/tests/staking.rs +++ b/pallets/subtensor/src/tests/staking.rs @@ -13,7 +13,9 @@ use sp_core::{Get, H256, U256}; use sp_runtime::traits::Dispatchable; use substrate_fixed::traits::FromFixed; use substrate_fixed::types::{I96F32, I110F18, U64F64, U96F32}; -use subtensor_runtime_common::{AlphaCurrency, Currency as CurrencyT, NetUid, TaoCurrency}; +use subtensor_runtime_common::{ + AlphaCurrency, Currency as CurrencyT, NetUid, NetUidStorageIndex, TaoCurrency, +}; use subtensor_swap_interface::{OrderType, SwapHandler}; use super::mock; @@ -2440,12 +2442,12 @@ fn test_mining_emission_distribution_validator_valiminer_miner() { )); // Setup YUMA so that it creates emissions - Weights::::insert(netuid, 0, vec![(1, 0xFFFF)]); - Weights::::insert(netuid, 1, vec![(2, 0xFFFF)]); + Weights::::insert(NetUidStorageIndex::from(netuid), 0, vec![(1, 0xFFFF)]); + Weights::::insert(NetUidStorageIndex::from(netuid), 1, vec![(2, 0xFFFF)]); BlockAtRegistration::::set(netuid, 0, 1); BlockAtRegistration::::set(netuid, 1, 1); BlockAtRegistration::::set(netuid, 2, 1); - LastUpdate::::set(netuid, vec![2, 2, 2]); + LastUpdate::::set(NetUidStorageIndex::from(netuid), vec![2, 2, 2]); Kappa::::set(netuid, u16::MAX / 5); ActivityCutoff::::set(netuid, u16::MAX); // makes all stake active ValidatorPermit::::insert(netuid, vec![true, true, false]); diff --git a/pallets/subtensor/src/tests/subsubnet.rs b/pallets/subtensor/src/tests/subsubnet.rs new file mode 100644 index 0000000000..8b128a7241 --- /dev/null +++ b/pallets/subtensor/src/tests/subsubnet.rs @@ -0,0 +1,1596 @@ +#![allow( + clippy::arithmetic_side_effects, + clippy::indexing_slicing, + clippy::unwrap_used +)] + +// Run all tests +// SKIP_WASM_BUILD=1 RUST_LOG=debug cargo test --package pallet-subtensor --lib -- tests::subsubnet --show-output + +// Test plan: +// - [x] Netuid index math (with SubsubnetCountCurrent limiting) +// - [x] Sub-subnet validity tests +// - [x] do_set_desired tests +// - [x] Emissions are split proportionally +// - [x] Sum of split emissions is equal to rao_emission passed to epoch +// - [x] Only subnet owner or root can set desired subsubnet count (pallet admin test) +// - [x] Weights can be set by subsubnet +// - [x] Weights can be commited/revealed by subsubnet +// - [x] Weights can be commited/revealed in crv3 by subsubnet +// - [x] Prevent weight setting/commitment/revealing above subsubnet_limit_in_force +// - [x] Prevent weight commitment/revealing above subsubnet_limit_in_force +// - [x] Prevent weight commitment/revealing in crv3 above subsubnet_limit_in_force +// - [x] When a miner is deregistered, their weights are cleaned across all subsubnets +// - [x] Weight setting rate limiting is enforced by subsubnet +// - [x] Bonds are applied per subsubnet +// - [x] Incentives are per subsubnet +// - [x] Per-subsubnet incentives are distributed proportionally to miner weights +// - [x] Subsubnet limit can be set up to 8 (with admin pallet) +// - [x] When reduction of subsubnet limit occurs, Weights, Incentive, LastUpdate, Bonds, and WeightCommits are cleared +// - [x] Epoch terms of subnet are weighted sum (or logical OR) of all subsubnet epoch terms +// - [x] Subnet epoch terms persist in state +// - 
[x] Subsubnet epoch terms persist in state +// - [x] "Yuma Emergency Mode" (consensus sum is 0 for a subsubnet), emission distributed by stake +// - [x] Miner with no weights on any subsubnet receives no reward +// - [x] SubsubnetEmissionSplit is reset on subsubnet count increase +// - [x] SubsubnetEmissionSplit is reset on subsubnet count decrease + +use super::mock::*; +use crate::coinbase::reveal_commits::WeightsTlockPayload; +use crate::subnets::subsubnet::{GLOBAL_MAX_SUBNET_COUNT, MAX_SUBSUBNET_COUNT_PER_SUBNET}; +use crate::*; +use alloc::collections::BTreeMap; +use approx::assert_abs_diff_eq; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use codec::Encode; +use frame_support::{assert_noop, assert_ok}; +use frame_system::RawOrigin; +use pallet_drand::types::Pulse; +use rand_chacha::{ChaCha20Rng, rand_core::SeedableRng}; +use sha2::Digest; +use sp_core::{H256, U256}; +use sp_runtime::traits::{BlakeTwo256, Hash}; +use sp_std::collections::vec_deque::VecDeque; +use substrate_fixed::types::{I32F32, U64F64}; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex, SubId}; +use tle::{ + curves::drand::TinyBLS381, ibe::fullident::Identity, + stream_ciphers::AESGCMStreamCipherProvider, tlock::tle, +}; +use w3f_bls::EngineBLS; + +#[test] +fn test_index_from_netuid_and_subnet() { + new_test_ext(1).execute_with(|| { + [ + (0_u16, 0_u8), + (GLOBAL_MAX_SUBNET_COUNT / 2, 1), + (GLOBAL_MAX_SUBNET_COUNT / 2, 7), + (GLOBAL_MAX_SUBNET_COUNT / 2, 14), + (GLOBAL_MAX_SUBNET_COUNT / 2, 15), + (GLOBAL_MAX_SUBNET_COUNT - 1, 1), + (GLOBAL_MAX_SUBNET_COUNT - 1, 7), + (GLOBAL_MAX_SUBNET_COUNT - 1, 14), + (GLOBAL_MAX_SUBNET_COUNT - 1, 15), + ] + .iter() + .for_each(|(netuid, sub_id)| { + let idx = SubtensorModule::get_subsubnet_storage_index( + NetUid::from(*netuid), + SubId::from(*sub_id), + ); + let expected = *sub_id as u64 * GLOBAL_MAX_SUBNET_COUNT as u64 + *netuid as u64; + assert_eq!(idx, NetUidStorageIndex::from(expected as u16)); + }); + }); +} + +#[test] +fn test_netuid_and_subnet_from_index() { + new_test_ext(1).execute_with(|| { + [ + 0_u16, + 1, + 14, + 15, + 16, + 17, + GLOBAL_MAX_SUBNET_COUNT - 1, + GLOBAL_MAX_SUBNET_COUNT, + GLOBAL_MAX_SUBNET_COUNT + 1, + 0xFFFE / 2, + 0xFFFE, + 0xFFFF, + ] + .iter() + .for_each(|netuid_index| { + let expected_netuid = (*netuid_index as u64 % GLOBAL_MAX_SUBNET_COUNT as u64) as u16; + let expected_subid = (*netuid_index as u64 / GLOBAL_MAX_SUBNET_COUNT as u64) as u8; + + // Allow subnet ID + NetworksAdded::::insert(NetUid::from(expected_netuid), true); + SubsubnetCountCurrent::::insert( + NetUid::from(expected_netuid), + SubId::from(expected_subid + 1), + ); + + let (netuid, subid) = + SubtensorModule::get_netuid_and_subid(NetUidStorageIndex::from(*netuid_index)) + .unwrap(); + assert_eq!(netuid, NetUid::from(expected_netuid)); + assert_eq!(subid, SubId::from(expected_subid)); + }); + }); +} + +#[test] +fn test_netuid_index_math_constants() { + assert_eq!( + GLOBAL_MAX_SUBNET_COUNT as u64 * MAX_SUBSUBNET_COUNT_PER_SUBNET as u64, + 0x10000 + ); +} + +#[test] +fn ensure_subsubnet_exists_ok() { + new_test_ext(1).execute_with(|| { + let netuid: NetUid = 3u16.into(); + let sub_id = SubId::from(1u8); + + // ensure base subnet exists + NetworksAdded::::insert(NetUid::from(netuid), true); + + // Allow at least 2 sub-subnets (so sub_id = 1 is valid) + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + assert_ok!(SubtensorModule::ensure_subsubnet_exists(netuid, sub_id)); + }); +} + +#[test] +fn 
ensure_subsubnet_fails_when_base_subnet_missing() { + new_test_ext(1).execute_with(|| { + let netuid: NetUid = 7u16.into(); + let sub_id = SubId::from(0u8); + + // Intentionally DO NOT create the base subnet + + assert_noop!( + SubtensorModule::ensure_subsubnet_exists(netuid, sub_id), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn ensure_subsubnet_fails_when_subid_out_of_range() { + new_test_ext(1).execute_with(|| { + let netuid: NetUid = 9u16.into(); + NetworksAdded::::insert(NetUid::from(netuid), true); + + // Current allowed sub-subnet count is 2 => valid sub_ids: {0, 1} + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + + // sub_id == 2 is out of range (must be < 2) + let sub_id_eq = SubId::from(2u8); + assert_noop!( + SubtensorModule::ensure_subsubnet_exists(netuid, sub_id_eq), + Error::::SubNetworkDoesNotExist + ); + + // sub_id > 2 is also out of range + let sub_id_gt = SubId::from(3u8); + assert_noop!( + SubtensorModule::ensure_subsubnet_exists(netuid, sub_id_gt), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn do_set_subsubnet_count_ok_minimal() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(3u16); + NetworksAdded::::insert(NetUid::from(3u16), true); // base subnet exists + + assert_ok!(SubtensorModule::do_set_subsubnet_count( + netuid, + SubId::from(1u8) + )); + + assert_eq!(SubsubnetCountCurrent::::get(netuid), SubId::from(1u8)); + }); +} + +#[test] +fn do_set_subsubnet_count_ok_at_effective_cap() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(4u16); + NetworksAdded::::insert(NetUid::from(4u16), true); // base subnet exists + + // Effective bound is min(runtime cap, compile-time cap) + let runtime_cap = MaxSubsubnetCount::::get(); // e.g., SubId::from(8) + let compile_cap = SubId::from(MAX_SUBSUBNET_COUNT_PER_SUBNET); + let bound = if runtime_cap <= compile_cap { + runtime_cap + } else { + compile_cap + }; + + assert_ok!(SubtensorModule::do_set_subsubnet_count(netuid, bound)); + assert_eq!(SubsubnetCountCurrent::::get(netuid), bound); + }); +} + +#[test] +fn do_set_fails_when_base_subnet_missing() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(7u16); + // No NetworksAdded insert => base subnet absent + + assert_noop!( + SubtensorModule::do_set_subsubnet_count(netuid, SubId::from(1u8)), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn do_set_fails_for_zero() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(9u16); + NetworksAdded::::insert(NetUid::from(9u16), true); // base subnet exists + + assert_noop!( + SubtensorModule::do_set_subsubnet_count(netuid, SubId::from(0u8)), + Error::::InvalidValue + ); + }); +} + +#[test] +fn do_set_fails_when_over_runtime_cap() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(11u16); + NetworksAdded::::insert(NetUid::from(11u16), true); // base subnet exists + + // Runtime cap is 8 (per function), so 9 must fail + assert_noop!( + SubtensorModule::do_set_subsubnet_count(netuid, SubId::from(9u8)), + Error::::InvalidValue + ); + }); +} + +#[test] +fn do_set_fails_when_over_compile_time_cap() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(12u16); + NetworksAdded::::insert(NetUid::from(12u16), true); // base subnet exists + + let too_big = SubId::from(MAX_SUBSUBNET_COUNT_PER_SUBNET + 1); + assert_noop!( + SubtensorModule::do_set_subsubnet_count(netuid, too_big), + Error::::InvalidValue + ); + }); +} + +#[test] +fn update_subsubnet_counts_decreases_and_cleans() { + 
new_test_ext(1).execute_with(|| { + let hotkey = U256::from(1); + + // Base subnet exists + let netuid = NetUid::from(42u16); + NetworksAdded::::insert(NetUid::from(42u16), true); + + // Choose counts so result is deterministic. + let old = SubId::from(3); + let desired = SubId::from(2u8); + SubsubnetCountCurrent::::insert(netuid, old); + + // Set non-default subnet emission split + SubsubnetEmissionSplit::::insert(netuid, vec![123u16, 234u16, 345u16]); + + // Seed data at a kept subid (1) and a removed subid (2) + let idx_keep = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1u8)); + let idx_rm3 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(2u8)); + + Weights::::insert(idx_keep, 0u16, vec![(1u16, 1u16)]); + Incentive::::insert(idx_keep, vec![1u16]); + LastUpdate::::insert(idx_keep, vec![123u64]); + Bonds::::insert(idx_keep, 0u16, vec![(1u16, 2u16)]); + WeightCommits::::insert( + idx_keep, + hotkey, + VecDeque::from([(sp_core::H256::zero(), 1u64, 2u64, 3u64)]), + ); + TimelockedWeightCommits::::insert( + idx_keep, + 1u64, + VecDeque::from([(hotkey, 1u64, Default::default(), Default::default())]), + ); + + Weights::::insert(idx_rm3, 0u16, vec![(9u16, 9u16)]); + Incentive::::insert(idx_rm3, vec![9u16]); + LastUpdate::::insert(idx_rm3, vec![999u64]); + Bonds::::insert(idx_rm3, 0u16, vec![(9u16, 9u16)]); + WeightCommits::::insert( + idx_rm3, + hotkey, + VecDeque::from([(sp_core::H256::zero(), 1u64, 2u64, 3u64)]), + ); + TimelockedWeightCommits::::insert( + idx_rm3, + 1u64, + VecDeque::from([(hotkey, 1u64, Default::default(), Default::default())]), + ); + + // Act + SubtensorModule::update_subsubnet_counts_if_needed(netuid, desired); + + // New count is as desired + assert_eq!(SubsubnetCountCurrent::::get(netuid), desired); + + // Kept prefix intact + assert_eq!(Incentive::::get(idx_keep), vec![1u16]); + assert!(Weights::::iter_prefix(idx_keep).next().is_some()); + assert!(LastUpdate::::contains_key(idx_keep)); + assert!(Bonds::::iter_prefix(idx_keep).next().is_some()); + assert!(WeightCommits::::contains_key(idx_keep, hotkey)); + assert!(TimelockedWeightCommits::::contains_key( + idx_keep, 1u64 + )); + + // Removed prefix (subid 3) cleared + assert!(Weights::::iter_prefix(idx_rm3).next().is_none()); + assert_eq!(Incentive::::get(idx_rm3), Vec::::new()); + assert!(!LastUpdate::::contains_key(idx_rm3)); + assert!(Bonds::::iter_prefix(idx_rm3).next().is_none()); + assert!(!WeightCommits::::contains_key(idx_rm3, hotkey)); + assert!(!TimelockedWeightCommits::::contains_key( + idx_rm3, 1u64 + )); + + // SubsubnetEmissionSplit is reset + assert!(SubsubnetEmissionSplit::::get(netuid).is_none()); + }); +} + +#[test] +fn update_subsubnet_counts_increases() { + new_test_ext(1).execute_with(|| { + // Base subnet exists + let netuid = NetUid::from(42u16); + NetworksAdded::::insert(NetUid::from(42u16), true); + + // Choose counts + let old = SubId::from(1u8); + let desired = SubId::from(2u8); + SubsubnetCountCurrent::::insert(netuid, old); + + // Set non-default subnet emission split + SubsubnetEmissionSplit::::insert(netuid, vec![123u16, 234u16, 345u16]); + + // Act + SubtensorModule::update_subsubnet_counts_if_needed(netuid, desired); + + // New count is as desired + assert_eq!(SubsubnetCountCurrent::::get(netuid), desired); + + // SubsubnetEmissionSplit is reset + assert!(SubsubnetEmissionSplit::::get(netuid).is_none()); + }); +} + +#[test] +fn split_emissions_even_division() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(5u16); + 
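+        // With no SubsubnetEmissionSplit set, split_emissions divides the emission evenly
+        // across the current sub-subnet count; any integer remainder is credited to
+        // sub-subnet 0 (see the rounding test below).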
SubsubnetCountCurrent::::insert(netuid, SubId::from(5u8)); // 5 sub-subnets + let out = SubtensorModule::split_emissions(netuid, AlphaCurrency::from(25u64)); + assert_eq!(out, vec![AlphaCurrency::from(5u64); 5]); + }); +} + +#[test] +fn split_emissions_rounding_to_first() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(6u16); + SubsubnetCountCurrent::::insert(netuid, SubId::from(4u8)); // 4 sub-subnets + let out = SubtensorModule::split_emissions(netuid, AlphaCurrency::from(10u64)); // 10 / 4 = 2, rem=2 + assert_eq!( + out, + vec![ + AlphaCurrency::from(4u64), // 2 + remainder(2) + AlphaCurrency::from(2u64), + AlphaCurrency::from(2u64), + AlphaCurrency::from(2u64), + ] + ); + }); +} + +#[test] +fn split_emissions_fibbonacci() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(5u16); + SubsubnetCountCurrent::::insert(netuid, SubId::from(5u8)); // 5 sub-subnets + SubsubnetEmissionSplit::::insert(netuid, vec![3450, 6899, 10348, 17247, 27594]); + let out = SubtensorModule::split_emissions(netuid, AlphaCurrency::from(19u64)); + assert_eq!( + out, + vec![ + AlphaCurrency::from(1u64), + AlphaCurrency::from(2u64), + AlphaCurrency::from(3u64), + AlphaCurrency::from(5u64), + AlphaCurrency::from(8u64), + ] + ); + }); +} + +/// Seeds a 2-neuron and 2-subsubnet subnet so `epoch_subsubnet` produces non-zero +/// incentives & dividends. +/// Returns the sub-subnet storage index. +pub fn mock_epoch_state(netuid: NetUid, ck0: U256, hk0: U256, ck1: U256, hk1: U256) { + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + + // Base subnet exists; 2 neurons. + NetworksAdded::::insert(NetUid::from(u16::from(netuid)), true); + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + SubnetworkN::::insert(netuid, 2); + + // Register two neurons (UID 0,1) → keys drive `get_subnetwork_n`. + Keys::::insert(netuid, 0u16, hk0); + Keys::::insert(netuid, 1u16, hk1); + + // Make both ACTIVE: recent updates & old registrations. + Tempo::::insert(netuid, 1u16); + ActivityCutoff::::insert(netuid, u16::MAX); // large cutoff keeps them active + LastUpdate::::insert(idx0, vec![2, 2]); + LastUpdate::::insert(idx1, vec![2, 2]); + BlockAtRegistration::::insert(netuid, 0, 1u64); // registered long ago + BlockAtRegistration::::insert(netuid, 1, 1u64); + + // Add stake + let stake_amount = AlphaCurrency::from(1_000_000_000); // 1 Alpha + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk0, + &ck0, + netuid, + stake_amount, + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + stake_amount, + ); + + // Non-zero stake above threshold; permit both as validators. + StakeThreshold::::put(0u64); + ValidatorPermit::::insert(netuid, vec![true, true]); + + // Simple weights, setting for each other on both subsubnets + Weights::::insert(idx0, 0, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); + Weights::::insert(idx0, 1, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); + Weights::::insert(idx1, 0, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); + Weights::::insert(idx1, 1, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); + + // Keep weight masking off for simplicity. 
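+    // (Yuma3On is also set to false below, which presumably keeps bonds on the classic,
+    // non-Yuma3 update path for these fixtures.)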
+ CommitRevealWeightsEnabled::::insert(netuid, false); + Yuma3On::::insert(netuid, false); +} + +pub fn mock_3_neurons(netuid: NetUid, hk: U256) { + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + + SubnetworkN::::insert(netuid, 3); + Keys::::insert(netuid, 2u16, hk); + LastUpdate::::insert(idx0, vec![2, 2, 2]); + LastUpdate::::insert(idx1, vec![2, 2, 2]); + BlockAtRegistration::::insert(netuid, 2, 1u64); +} + +#[test] +fn epoch_with_subsubnets_produces_per_subsubnet_incentive() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1u16); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let ck0 = U256::from(1); + let hk0 = U256::from(2); + let ck1 = U256::from(3); + let hk1 = U256::from(4); + let emission = AlphaCurrency::from(1_000_000_000); + + mock_epoch_state(netuid, ck0, hk0, ck1, hk1); + SubtensorModule::epoch_with_subsubnets(netuid, emission); + + let actual_incentive_sub0 = Incentive::::get(idx0); + let actual_incentive_sub1 = Incentive::::get(idx1); + let expected_incentive = 0xFFFF / 2; + assert_eq!(actual_incentive_sub0[0], expected_incentive); + assert_eq!(actual_incentive_sub0[1], expected_incentive); + assert_eq!(actual_incentive_sub1[0], expected_incentive); + assert_eq!(actual_incentive_sub1[1], expected_incentive); + }); +} + +#[test] +fn epoch_with_subsubnets_updates_bonds() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1u16); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let ck0 = U256::from(1); + let hk0 = U256::from(2); + let ck1 = U256::from(3); + let hk1 = U256::from(4); + let emission = AlphaCurrency::from(1_000_000_000); + + mock_epoch_state(netuid, ck0, hk0, ck1, hk1); + + // Cause bonds to be asymmetric on diff subsubnets + Weights::::insert(idx1, 0, vec![(0u16, 0xFFFF), (1u16, 0)]); + Weights::::insert(idx1, 1, vec![(0u16, 0xFFFF), (1u16, 0xFFFF)]); + + SubtensorModule::epoch_with_subsubnets(netuid, emission); + + let bonds_uid0_sub0 = Bonds::::get(idx0, 0); + let bonds_uid1_sub0 = Bonds::::get(idx0, 1); + let bonds_uid0_sub1 = Bonds::::get(idx1, 0); + let bonds_uid1_sub1 = Bonds::::get(idx1, 1); + + // Subsubnet 0: UID0 fully bonds to UID1, UID1 fully bonds to UID0 + assert_eq!(bonds_uid0_sub0, vec![(1, 65535)]); + assert_eq!(bonds_uid1_sub0, vec![(0, 65535)]); + + // Subsubnet 1: UID0 no bond to UID1, UID1 fully bonds to UID0 + assert_eq!(bonds_uid0_sub1, vec![]); + assert_eq!(bonds_uid1_sub1, vec![(0, 65535)]); + }); +} + +#[test] +fn epoch_with_subsubnets_incentives_proportional_to_weights() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1u16); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let ck0 = U256::from(1); + let hk0 = U256::from(2); + let ck1 = U256::from(3); + let hk1 = U256::from(4); + let hk2 = U256::from(6); + let emission = AlphaCurrency::from(1_000_000_000); + + mock_epoch_state(netuid, ck0, hk0, ck1, hk1); + mock_3_neurons(netuid, hk2); + + // Need 3 neurons for this: One validator that will be setting weights to 2 miners + ValidatorPermit::::insert(netuid, vec![true, false, false]); + + // Set greater weight to uid1 on 
sub-subnet 0 and to uid2 on subsubnet 1 + Weights::::insert(idx0, 0, vec![(1u16, 0xFFFF / 5 * 4), (2u16, 0xFFFF / 5)]); + Weights::::insert(idx1, 0, vec![(1u16, 0xFFFF / 5), (2u16, 0xFFFF / 5 * 4)]); + + SubtensorModule::epoch_with_subsubnets(netuid, emission); + + let actual_incentive_sub0 = Incentive::::get(idx0); + let actual_incentive_sub1 = Incentive::::get(idx1); + + let expected_incentive_high = 0xFFFF / 5 * 4; + let expected_incentive_low = 0xFFFF / 5; + assert_abs_diff_eq!( + actual_incentive_sub0[1], + expected_incentive_high, + epsilon = 1 + ); + assert_abs_diff_eq!( + actual_incentive_sub0[2], + expected_incentive_low, + epsilon = 1 + ); + assert_abs_diff_eq!( + actual_incentive_sub1[1], + expected_incentive_low, + epsilon = 1 + ); + assert_abs_diff_eq!( + actual_incentive_sub1[2], + expected_incentive_high, + epsilon = 1 + ); + }); +} + +#[test] +fn epoch_with_subsubnets_persists_and_aggregates_all_terms() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1u16); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + + // Three neurons: validator (uid=0) + two miners (uid=1,2) + let ck0 = U256::from(1); + let hk0 = U256::from(2); + let ck1 = U256::from(3); + let hk1 = U256::from(4); + let hk2 = U256::from(6); + let emission = AlphaCurrency::from(1_000_000_000u64); + + // Healthy minimal state and 3rd neuron + mock_epoch_state(netuid, ck0, hk0, ck1, hk1); + mock_3_neurons(netuid, hk2); + let uid0 = 0_usize; + let uid1 = 1_usize; + let uid2 = 2_usize; + + // Two sub-subnets with non-equal split (~25% / 75%) + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + let split0 = u16::MAX / 4; + let split1 = u16::MAX - split0; + SubsubnetEmissionSplit::::insert(netuid, vec![split0, split1]); + + // One validator; skew weights differently per sub-subnet + ValidatorPermit::::insert(netuid, vec![true, false, false]); + // sub 0: uid1 heavy, uid2 light + Weights::::insert( + idx0, + 0, + vec![(1u16, 0xFFFF / 5 * 3), (2u16, 0xFFFF / 5 * 2)], + ); + // sub 1: uid1 light, uid2 heavy + Weights::::insert(idx1, 0, vec![(1u16, 0xFFFF / 5), (2u16, 0xFFFF / 5 * 4)]); + + // Per-sub emissions (and weights used for aggregation) + let subsubnet_emissions = SubtensorModule::split_emissions(netuid, emission); + let w0 = U64F64::from_num(u64::from(subsubnet_emissions[0])) + / U64F64::from_num(u64::from(emission)); + let w1 = U64F64::from_num(u64::from(subsubnet_emissions[1])) + / U64F64::from_num(u64::from(emission)); + assert_abs_diff_eq!(w0.to_num::(), 0.25, epsilon = 0.0001); + assert_abs_diff_eq!(w1.to_num::(), 0.75, epsilon = 0.0001); + + // Get per-subsubnet epoch outputs to build expectations + let out0 = SubtensorModule::epoch_subsubnet(netuid, SubId::from(0), subsubnet_emissions[0]); + let out1 = SubtensorModule::epoch_subsubnet(netuid, SubId::from(1), subsubnet_emissions[1]); + + // Now run the real aggregated path (also persists terms) + let agg = SubtensorModule::epoch_with_subsubnets(netuid, emission); + + // hotkey -> (server_emission_u64, validator_emission_u64) + let agg_map: BTreeMap = agg + .into_iter() + .map(|(hk, se, ve)| (hk, (u64::from(se), u64::from(ve)))) + .collect(); + + // Helper to fetch per-sub terms by hotkey + let terms0 = |hk: &U256| out0.0.get(hk).unwrap(); + let terms1 = |hk: &U256| out1.0.get(hk).unwrap(); + + // Returned aggregated emissions match plain sums of subsubnet emissions + for hk in [&hk1, &hk2] { + let (got_se, got_ve) = 
agg_map.get(hk).cloned().expect("present"); + let t0 = terms0(hk); + let t1 = terms1(hk); + let exp_se = (U64F64::saturating_from_num(u64::from(t0.server_emission)) + + U64F64::saturating_from_num(u64::from(t1.server_emission))) + .saturating_to_num::(); + let exp_ve = (U64F64::saturating_from_num(u64::from(t0.validator_emission)) + + U64F64::saturating_from_num(u64::from(t1.validator_emission))) + .saturating_to_num::(); + assert_abs_diff_eq!(u64::from(got_se), exp_se, epsilon = 1); + assert_abs_diff_eq!(u64::from(got_ve), exp_ve, epsilon = 1); + } + + // Persisted per-subsubnet Incentive vectors match per-sub terms + let inc0 = Incentive::::get(idx0); + let inc1 = Incentive::::get(idx1); + let exp_inc0 = { + let mut v = vec![0u16; 3]; + v[terms0(&hk0).uid] = terms0(&hk0).incentive; + v[terms0(&hk1).uid] = terms0(&hk1).incentive; + v[terms0(&hk2).uid] = terms0(&hk2).incentive; + v + }; + let exp_inc1 = { + let mut v = vec![0u16; 3]; + v[terms1(&hk0).uid] = terms1(&hk0).incentive; + v[terms1(&hk1).uid] = terms1(&hk1).incentive; + v[terms1(&hk2).uid] = terms1(&hk2).incentive; + v + }; + for (a, e) in inc0.iter().zip(exp_inc0.iter()) { + assert_abs_diff_eq!(*a, *e, epsilon = 1); + } + for (a, e) in inc1.iter().zip(exp_inc1.iter()) { + assert_abs_diff_eq!(*a, *e, epsilon = 1); + } + + // Persisted Bonds for validator (uid0) exist and mirror per-sub terms + let b0 = Bonds::::get(idx0, 0u16); + let b1 = Bonds::::get(idx1, 0u16); + let exp_b0 = &terms0(&hk0).bond; + let exp_b1 = &terms1(&hk0).bond; + + assert!(!b0.is_empty(), "bonds sub0 empty"); + assert!(!b1.is_empty(), "bonds sub1 empty"); + assert_eq!(b0.len(), exp_b0.len()); + assert_eq!(b1.len(), exp_b1.len()); + for ((u_a, w_a), (u_e, w_e)) in b0.iter().zip(exp_b0.iter()) { + assert_eq!(u_a, u_e); + assert_abs_diff_eq!(*w_a, *w_e, epsilon = 1); + } + for ((u_a, w_a), (u_e, w_e)) in b1.iter().zip(exp_b1.iter()) { + assert_eq!(u_a, u_e); + assert_abs_diff_eq!(*w_a, *w_e, epsilon = 1); + } + + // Persisted subnet-level terms are weighted/OR aggregates of sub-subnets + // Fetch persisted vectors + let active = Active::::get(netuid); + let emission_v = Emission::::get(netuid); + let rank_v = Rank::::get(netuid); + let trust_v = Trust::::get(netuid); + let cons_v = Consensus::::get(netuid); + let div_v = Dividends::::get(netuid); + let prun_v = PruningScores::::get(netuid); + let vtrust_v = ValidatorTrust::::get(netuid); + let vperm_v = ValidatorPermit::::get(netuid); + + // Helpers for weighted u16 / u64 + let wu16 = |a: u16, b: u16| -> u16 { + (U64F64::saturating_from_num(a) * w0 + U64F64::saturating_from_num(b) * w1) + .saturating_to_num::() + }; + let wu64 = |a: u64, b: u64| -> u64 { + (U64F64::saturating_from_num(a) * w0 + U64F64::saturating_from_num(b) * w1) + .saturating_to_num::() + }; + + // For each UID, compute expected aggregate from out0/out1 terms + let check_uid = |uid: usize, hk: &U256| { + let t0 = terms0(hk); + let t1 = terms1(hk); + + // Active & ValidatorPermit are OR-aggregated + assert_eq!(active[uid], t0.active || t1.active); + assert_eq!( + vperm_v[uid], + t0.new_validator_permit || t1.new_validator_permit + ); + + // Emission (u64) + let exp_em = wu64(u64::from(t0.emission), u64::from(t1.emission)); + assert_abs_diff_eq!(u64::from(emission_v[uid]), exp_em, epsilon = 1); + + // u16 terms + assert_abs_diff_eq!(rank_v[uid], wu16(t0.rank, t1.rank), epsilon = 1); + assert_abs_diff_eq!(trust_v[uid], wu16(t0.trust, t1.trust), epsilon = 1); + assert_abs_diff_eq!(cons_v[uid], wu16(t0.consensus, t1.consensus), epsilon = 1); + 
assert_abs_diff_eq!(div_v[uid], wu16(t0.dividend, t1.dividend), epsilon = 1); + assert_abs_diff_eq!( + prun_v[uid], + wu16(t0.pruning_score, t1.pruning_score), + epsilon = 1 + ); + assert_abs_diff_eq!( + vtrust_v[uid], + wu16(t0.validator_trust, t1.validator_trust), + epsilon = 1 + ); + }; + + check_uid(uid0, &hk0); + check_uid(uid1, &hk1); + check_uid(uid2, &hk2); + }); +} + +#[test] +fn epoch_with_subsubnets_no_weight_no_incentive() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1u16); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(1)); + let ck0 = U256::from(1); + let hk0 = U256::from(2); + let ck1 = U256::from(3); + let hk1 = U256::from(4); + let hk2 = U256::from(5); // No weight miner + let emission = AlphaCurrency::from(1_000_000_000); + + mock_epoch_state(netuid, ck0, hk0, ck1, hk1); + mock_3_neurons(netuid, hk2); + + // Need 3 neurons for this: One validator that will be setting weights to 2 miners + ValidatorPermit::::insert(netuid, vec![true, false, false]); + + // Set no weight to uid2 on sub-subnet 0 and 1 + Weights::::insert(idx0, 0, vec![(1u16, 1), (2u16, 0)]); + Weights::::insert(idx1, 0, vec![(1u16, 1), (2u16, 0)]); + + SubtensorModule::epoch_with_subsubnets(netuid, emission); + + let actual_incentive_sub0 = Incentive::::get(idx0); + let actual_incentive_sub1 = Incentive::::get(idx1); + let expected_incentive = 0xFFFF; + assert_eq!(actual_incentive_sub0[0], 0); + assert_eq!(actual_incentive_sub0[1], expected_incentive); + assert_eq!(actual_incentive_sub0[2], 0); + assert_eq!(actual_incentive_sub1[0], 0); + assert_eq!(actual_incentive_sub1[1], expected_incentive); + assert_eq!(actual_incentive_sub1[2], 0); + assert_eq!(actual_incentive_sub0.len(), 3); + assert_eq!(actual_incentive_sub1.len(), 3); + }); +} + +#[test] +fn neuron_dereg_cleans_weights_across_subids() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(77u16); + let neuron_uid: u16 = 1; // we'll deregister UID=1 + // two sub-subnets + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + + // Setup initial map values + Emission::::insert( + netuid, + vec![ + AlphaCurrency::from(1u64), + AlphaCurrency::from(9u64), + AlphaCurrency::from(3u64), + ], + ); + Trust::::insert(netuid, vec![11u16, 99u16, 33u16]); + Consensus::::insert(netuid, vec![21u16, 88u16, 44u16]); + Dividends::::insert(netuid, vec![7u16, 77u16, 17u16]); + + // Clearing per-subid maps + for sub in [0u8, 1u8] { + let idx = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(sub)); + + // Incentive vector: position 1 should become 0 + Incentive::::insert(idx, vec![10u16, 20u16, 30u16]); + + // Row set BY neuron_uid (to be removed) + Weights::::insert(idx, neuron_uid, vec![(0u16, 5u16)]); + Bonds::::insert(idx, neuron_uid, vec![(0u16, 6u16)]); + + // Rows FOR neuron_uid inside other validators' vecs => value should be set to 0 (not removed) + Weights::::insert(idx, 0u16, vec![(neuron_uid, 7u16), (42u16, 3u16)]); + Bonds::::insert(idx, 0u16, vec![(neuron_uid, 8u16), (42u16, 4u16)]); + } + + // Act + SubtensorModule::clear_neuron(netuid, neuron_uid); + + // Top-level zeroed at index 1, others intact + let e = Emission::::get(netuid); + assert_eq!(e[0], 1u64.into()); + assert_eq!(e[1], 0u64.into()); + assert_eq!(e[2], 3u64.into()); + + let t = Trust::::get(netuid); + assert_eq!(t, vec![11, 0, 33]); + + let c = Consensus::::get(netuid); + assert_eq!(c, vec![21, 0, 44]); + + let d = 
Dividends::::get(netuid); + assert_eq!(d, vec![7, 0, 17]); + + // Per-subid cleanup + for sub in [0u8, 1u8] { + let idx = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(sub)); + + // Incentive element at index 1 set to 0 + let inc = Incentive::::get(idx); + assert_eq!(inc, vec![10, 0, 30]); + + // Rows BY neuron_uid removed + assert!(!Weights::::contains_key(idx, neuron_uid)); + assert!(!Bonds::::contains_key(idx, neuron_uid)); + + // In other rows, entries FOR neuron_uid are zeroed, others unchanged + let w0 = Weights::::get(idx, 0u16); + assert!(w0.iter().any(|&(u, w)| u == neuron_uid && w == 0)); + assert!(w0.iter().any(|&(u, w)| u == 42 && w == 3)); + } + }); +} + +#[test] +fn clear_neuron_handles_absent_rows_gracefully() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(55u16); + SubsubnetCountCurrent::::insert(netuid, SubId::from(1u8)); // single sub-subnet + + // Minimal vectors with non-zero at index 0 (we will clear UID=0) + Emission::::insert(netuid, vec![AlphaCurrency::from(5u64)]); + Trust::::insert(netuid, vec![5u16]); + Consensus::::insert(netuid, vec![6u16]); + Dividends::::insert(netuid, vec![7u16]); + + // No Weights/Bonds rows at all → function should not panic + let neuron_uid: u16 = 0; + SubtensorModule::clear_neuron(netuid, neuron_uid); + + // All zeroed at index 0 + assert_eq!( + Emission::::get(netuid), + vec![AlphaCurrency::from(0u64)] + ); + assert_eq!(Trust::::get(netuid), vec![0u16]); + assert_eq!(Consensus::::get(netuid), vec![0u16]); + assert_eq!(Dividends::::get(netuid), vec![0u16]); + }); +} + +#[test] +fn test_set_sub_weights_happy_path_sets_row_under_subid() { + new_test_ext(0).execute_with(|| { + let netuid = NetUid::from(1); + let tempo: u16 = 13; + add_network_disable_commit_reveal(netuid, tempo, 0); + + // Register validator (caller) and a destination neuron + let hk1 = U256::from(55); + let ck1 = U256::from(66); + let hk2 = U256::from(77); + let ck2 = U256::from(88); + let hk3 = U256::from(99); + let ck3 = U256::from(111); + register_ok_neuron(netuid, hk1, ck1, 0); + register_ok_neuron(netuid, hk2, ck2, 0); + register_ok_neuron(netuid, hk3, ck3, 0); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk1).expect("caller uid"); + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk2).expect("dest uid 1"); + let uid3 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk3).expect("dest uid 2"); + + // Make caller a permitted validator with stake + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + 1.into(), + ); + + // Have at least two sub-subnets; write under subid = 1 + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + let subid = SubId::from(1u8); + + // Call extrinsic + let dests = vec![uid2, uid3]; + let weights = vec![88u16, 0xFFFF]; + assert_ok!(SubtensorModule::set_sub_weights( + RawOrigin::Signed(hk1).into(), + netuid, + subid, + dests.clone(), + weights.clone(), + 0, // version_key + )); + + // Verify row exists under the chosen subid and not under a different subid + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + assert_eq!( + Weights::::get(idx1, uid1), + vec![(uid2, 88u16), (uid3, 0xFFFF)] + ); + + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0u8)); + assert!(Weights::::get(idx0, 
uid1).is_empty()); + }); +} + +#[test] +fn test_set_sub_weights_above_subsubnet_count_fails() { + new_test_ext(0).execute_with(|| { + let netuid = NetUid::from(1); + let tempo: u16 = 13; + add_network_disable_commit_reveal(netuid, tempo, 0); + + // Register validator (caller) and a destination neuron + let hk1 = U256::from(55); + let ck1 = U256::from(66); + let hk2 = U256::from(77); + let ck2 = U256::from(88); + register_ok_neuron(netuid, hk1, ck1, 0); + register_ok_neuron(netuid, hk2, ck2, 0); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk1).expect("caller uid"); + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk2).expect("dest uid 1"); + + // Make caller a permitted validator with stake + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + 1.into(), + ); + + // Have exactly two sub-subnets; write under subid = 1 + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + let subid_above = SubId::from(2u8); + + // Call extrinsic + let dests = vec![uid2]; + let weights = vec![88u16]; + assert_noop!( + SubtensorModule::set_sub_weights( + RawOrigin::Signed(hk1).into(), + netuid, + subid_above, + dests.clone(), + weights.clone(), + 0, // version_key + ), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn test_commit_reveal_sub_weights_ok() { + new_test_ext(1).execute_with(|| { + System::set_block_number(0); + + let netuid = NetUid::from(1); + let tempo: u16 = 13; + add_network(netuid, tempo, 0); + + // Three neurons: validator (caller) + two destinations + let hk1 = U256::from(55); + let ck1 = U256::from(66); + let hk2 = U256::from(77); + let ck2 = U256::from(88); + let hk3 = U256::from(99); + let ck3 = U256::from(111); + register_ok_neuron(netuid, hk1, ck1, 0); + register_ok_neuron(netuid, hk2, ck2, 0); + register_ok_neuron(netuid, hk3, ck3, 0); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk1).unwrap(); // caller + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk2).unwrap(); + let uid3 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk3).unwrap(); + + // Enable commit-reveal path and make caller a validator with stake + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_weights_set_rate_limit(netuid, 5); + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + 1.into(), + ); + + // Ensure sub-subnet exists; write under subid = 1 + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + let subid = SubId::from(1u8); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, SubId::from(0u8)); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + + // Prepare payload and commit hash (include subid!) 
+ let dests = vec![uid2, uid3]; + let weights = vec![88u16, 0xFFFFu16]; + let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; + let version_key: u64 = 0; + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hk1, + idx1, + dests.clone(), + weights.clone(), + salt.clone(), + version_key, + )); + + // Commit in epoch 0 + assert_ok!(SubtensorModule::commit_sub_weights( + RuntimeOrigin::signed(hk1), + netuid, + subid, + commit_hash + )); + + // Advance one epoch, then reveal + step_epochs(1, netuid); + assert_ok!(SubtensorModule::reveal_sub_weights( + RuntimeOrigin::signed(hk1), + netuid, + subid, + dests.clone(), + weights.clone(), + salt, + version_key + )); + + // Verify weights stored under the chosen subid (normalized keeps max=0xFFFF here) + assert_eq!( + Weights::::get(idx1, uid1), + vec![(uid2, 88u16), (uid3, 0xFFFFu16)] + ); + + // And not under a different subid + assert!(Weights::::get(idx0, uid1).is_empty()); + }); +} + +#[test] +fn test_commit_reveal_above_subsubnet_count_fails() { + new_test_ext(1).execute_with(|| { + System::set_block_number(0); + + let netuid = NetUid::from(1); + let tempo: u16 = 13; + add_network(netuid, tempo, 0); + + // Two neurons: validator (caller) + miner + let hk1 = U256::from(55); + let ck1 = U256::from(66); + let hk2 = U256::from(77); + let ck2 = U256::from(88); + register_ok_neuron(netuid, hk1, ck1, 0); + register_ok_neuron(netuid, hk2, ck2, 0); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk1).unwrap(); // caller + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk2).unwrap(); + + // Enable commit-reveal path and make caller a validator with stake + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_weights_set_rate_limit(netuid, 5); + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + SubtensorModule::add_balance_to_coldkey_account(&ck1, 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + 1.into(), + ); + + // Ensure there are two subsubnets: 0 and 1 + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + let subid_above = SubId::from(2u8); // non-existing sub-subnet + let idx2 = SubtensorModule::get_subsubnet_storage_index(netuid, subid_above); + + // Prepare payload and commit hash + let dests = vec![uid2]; + let weights = vec![88u16]; + let salt: Vec = vec![1, 2, 3, 4, 5, 6, 7, 8]; + let version_key: u64 = 0; + let commit_hash: H256 = BlakeTwo256::hash_of(&( + hk1, + idx2, + dests.clone(), + weights.clone(), + salt.clone(), + version_key, + )); + + // Commit in epoch 0 + assert_noop!( + SubtensorModule::commit_sub_weights( + RuntimeOrigin::signed(hk1), + netuid, + subid_above, + commit_hash + ), + Error::::SubNetworkDoesNotExist + ); + + // Advance one epoch, then attempt to reveal + step_epochs(1, netuid); + assert_noop!( + SubtensorModule::reveal_sub_weights( + RuntimeOrigin::signed(hk1), + netuid, + subid_above, + dests.clone(), + weights.clone(), + salt, + version_key + ), + Error::::NoWeightsCommitFound + ); + + // Verify that weights didn't update + assert!(Weights::::get(idx2, uid1).is_empty()); + assert!(Weights::::get(idx2, uid2).is_empty()); + }); +} + +#[test] +fn test_reveal_crv3_commits_sub_success() { + new_test_ext(100).execute_with(|| { + System::set_block_number(0); + + let netuid = NetUid::from(1); + let subid = SubId::from(1u8); // write under sub-subnet #1 + let hotkey1: AccountId = U256::from(1); + let hotkey2: AccountId = U256::from(2); + let 
reveal_round: u64 = 1000; + + add_network(netuid, 5, 0); + // ensure we actually have subid=1 available + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + + // Register neurons and set up configs + register_ok_neuron(netuid, hotkey1, U256::from(3), 100_000); + register_ok_neuron(netuid, hotkey2, U256::from(4), 100_000); + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 3)); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1).expect("uid1"); + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey2).expect("uid2"); + + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::set_validator_permit_for_uid(netuid, uid2, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(3), 1); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(4), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hotkey1, &U256::from(3), netuid, 1.into()); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hotkey2, &U256::from(4), netuid, 1.into()); + + let version_key = SubtensorModule::get_weights_version_key(netuid); + + // Payload (same as legacy; subid is provided to the extrinsic) + let payload = WeightsTlockPayload { + hotkey: hotkey1.encode(), + values: vec![10, 20], + uids: vec![uid1, uid2], + version_key, + }; + let serialized_payload = payload.encode(); + + // Public key + encrypt + let esk = [2; 32]; + let rng = ChaCha20Rng::seed_from_u64(0); + let pk_bytes = hex::decode("83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a").unwrap(); + let pub_key = ::PublicKeyGroup::deserialize_compressed(&*pk_bytes).unwrap(); + + let message = { + let mut hasher = sha2::Sha256::new(); + hasher.update(reveal_round.to_be_bytes()); + hasher.finalize().to_vec() + }; + let identity = Identity::new(b"", vec![message]); + + let ct = tle::(pub_key, esk, &serialized_payload, identity, rng).expect("encrypt"); + let mut commit_bytes = Vec::new(); + ct.serialize_compressed(&mut commit_bytes).expect("serialize"); + + // Commit (sub variant) + assert_ok!(SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey1), + netuid, + subid, + commit_bytes.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + )); + + // Inject drand pulse for the reveal round + let sig_bytes = hex::decode("b44679b9a59af2ec876b1a6b1ad52ea9b1615fc3982b19576350f93447cb1125e342b73a8dd2bacbe47e4b6b63ed5e39").unwrap(); + pallet_drand::Pulses::::insert( + reveal_round, + Pulse { + round: reveal_round, + randomness: vec![0; 32].try_into().unwrap(), + signature: sig_bytes.try_into().unwrap(), + }, + ); + + // Run epochs so the commit is processed + step_epochs(3, netuid); + + // Verify weights applied under the selected subid index + let idx = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + let weights_sparse = SubtensorModule::get_weights_sparse(idx); + let row = weights_sparse.get(uid1 as usize).cloned().unwrap_or_default(); + assert!(!row.is_empty(), "expected weights set for validator uid1 under subid"); + + // Compare rounded normalized weights to expected proportions (like legacy test) + let expected: Vec<(u16, I32F32)> = 
payload.uids.iter().zip(payload.values.iter()).map(|(&u,&v)|(u, I32F32::from_num(v))).collect(); + let total: I32F32 = row.iter().map(|(_, w)| *w).sum(); + let normalized: Vec<(u16, I32F32)> = row.iter().map(|&(u,w)| (u, w * I32F32::from_num(30) / total)).collect(); + + for ((ua, wa), (ub, wb)) in normalized.iter().zip(expected.iter()) { + assert_eq!(ua, ub); + let actual = wa.to_num::().round() as i64; + let expect = wb.to_num::(); + assert_ne!(actual, 0, "actual weight for uid {ua} is zero"); + assert_eq!(actual, expect, "weight mismatch for uid {ua}"); + } + }); +} + +#[test] +fn test_crv3_above_subsubnet_count_fails() { + new_test_ext(100).execute_with(|| { + System::set_block_number(0); + + let netuid = NetUid::from(1); + let subid_above = SubId::from(2u8); // non-existing sub-subnet + let hotkey1: AccountId = U256::from(1); + let hotkey2: AccountId = U256::from(2); + let reveal_round: u64 = 1000; + + add_network(netuid, 5, 0); + // ensure we actually have subid=1 available + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); + + // Register neurons and set up configs + register_ok_neuron(netuid, hotkey1, U256::from(3), 100_000); + register_ok_neuron(netuid, hotkey2, U256::from(4), 100_000); + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_weights_set_rate_limit(netuid, 0); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + assert_ok!(SubtensorModule::set_reveal_period(netuid, 3)); + + let uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1).expect("uid1"); + let uid2 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey2).expect("uid2"); + + SubtensorModule::set_validator_permit_for_uid(netuid, uid1, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(3), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet(&hotkey1, &U256::from(3), netuid, 1.into()); + + let version_key = SubtensorModule::get_weights_version_key(netuid); + + // Payload (same as legacy; subid is provided to the extrinsic) + let payload = WeightsTlockPayload { + hotkey: hotkey1.encode(), + values: vec![10, 20], + uids: vec![uid1, uid2], + version_key, + }; + let serialized_payload = payload.encode(); + + // Public key + encrypt + let esk = [2; 32]; + let rng = ChaCha20Rng::seed_from_u64(0); + let pk_bytes = hex::decode("83cf0f2896adee7eb8b5f01fcad3912212c437e0073e911fb90022d3e760183c8c4b450b6a0a6c3ac6a5776a2d1064510d1fec758c921cc22b0e17e63aaf4bcb5ed66304de9cf809bd274ca73bab4af5a6e9c76a4bc09e76eae8991ef5ece45a").unwrap(); + let pub_key = ::PublicKeyGroup::deserialize_compressed(&*pk_bytes).unwrap(); + + let message = { + let mut hasher = sha2::Sha256::new(); + hasher.update(reveal_round.to_be_bytes()); + hasher.finalize().to_vec() + }; + let identity = Identity::new(b"", vec![message]); + + let ct = tle::(pub_key, esk, &serialized_payload, identity, rng).expect("encrypt"); + let mut commit_bytes = Vec::new(); + ct.serialize_compressed(&mut commit_bytes).expect("serialize"); + + // Commit (sub variant) + assert_noop!( + SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey1), + netuid, + subid_above, + commit_bytes.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + ), + Error::::SubNetworkDoesNotExist + ); + }); +} + +#[test] +fn test_do_commit_crv3_sub_weights_committing_too_fast() { + new_test_ext(1).execute_with(|| { + let netuid = NetUid::from(1); + let subid = SubId::from(1u8); + let hotkey: AccountId = U256::from(1); + let 
commit_data_1: Vec = vec![1, 2, 3]; + let commit_data_2: Vec = vec![4, 5, 6]; + let reveal_round: u64 = 1000; + + add_network(netuid, 5, 0); + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); // allow subids {0,1} + + register_ok_neuron(netuid, hotkey, U256::from(2), 100_000); + SubtensorModule::set_weights_set_rate_limit(netuid, 5); + SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); + + let uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey).expect("uid"); + let idx1 = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + SubtensorModule::set_last_update_for_uid(idx1, uid, 0); + + // make validator with stake + SubtensorModule::set_stake_threshold(0); + SubtensorModule::set_validator_permit_for_uid(netuid, uid, true); + SubtensorModule::add_balance_to_coldkey_account(&U256::from(2), 1); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hotkey, + &U256::from(2), + netuid, + 1.into(), + ); + + // first commit OK on subid=1 + assert_ok!(SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey), + netuid, + subid, + commit_data_1.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + )); + + // immediate second commit on SAME subid blocked + assert_noop!( + SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey), + netuid, + subid, + commit_data_2.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + ), + Error::::CommittingWeightsTooFast + ); + + // BUT committing too soon on a DIFFERENT subid is allowed + let other_subid = SubId::from(0u8); + let idx0 = SubtensorModule::get_subsubnet_storage_index(netuid, other_subid); + SubtensorModule::set_last_update_for_uid(idx0, uid, 0); // baseline like above + assert_ok!(SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey), + netuid, + other_subid, + commit_data_2.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + )); + + // still too fast on original subid after 2 blocks + step_block(2); + assert_noop!( + SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey), + netuid, + subid, + commit_data_2.clone().try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + ), + Error::::CommittingWeightsTooFast + ); + + // after enough blocks, OK again on original subid + step_block(3); + assert_ok!(SubtensorModule::commit_timelocked_sub_weights( + RuntimeOrigin::signed(hotkey), + netuid, + subid, + commit_data_2.try_into().expect("bounded"), + reveal_round, + SubtensorModule::get_commit_reveal_weights_version() + )); + }); +} + +#[test] +fn epoch_subsubnet_emergency_mode_distributes_by_stake() { + new_test_ext(1).execute_with(|| { + // setup a single sub-subnet where consensus sum becomes 0 + let netuid = NetUid::from(1u16); + let subid = SubId::from(1u8); + let idx = SubtensorModule::get_subsubnet_storage_index(netuid, subid); + let tempo: u16 = 5; + add_network(netuid, tempo, 0); + SubsubnetCountCurrent::::insert(netuid, SubId::from(2u8)); // allow subids {0,1} + SubtensorModule::set_max_registrations_per_block(netuid, 4); + SubtensorModule::set_target_registrations_per_interval(netuid, 4); + + // three neurons: make ALL permitted validators so active_stake is non-zero + let hk0 = U256::from(10); + let ck0 = U256::from(11); + let hk1 = U256::from(20); + let ck1 = U256::from(21); 
+ let hk2 = U256::from(30); + let ck2 = U256::from(31); + let hk3 = U256::from(40); // miner + let ck3 = U256::from(41); + register_ok_neuron(netuid, hk0, ck0, 0); + register_ok_neuron(netuid, hk1, ck1, 0); + register_ok_neuron(netuid, hk2, ck2, 0); + register_ok_neuron(netuid, hk3, ck3, 0); + + // active + recent updates so they're all active + let now = SubtensorModule::get_current_block_as_u64(); + ActivityCutoff::::insert(netuid, 1_000u16); + LastUpdate::::insert(idx, vec![now, now, now, now]); + + // All staking validators permitted => active_stake = stake + ValidatorPermit::::insert(netuid, vec![true, true, true, false]); + SubtensorModule::set_stake_threshold(0); + + // force ZERO consensus/incentive path: no weights/bonds + // (leave Weights/Bonds empty for all rows on this sub-subnet) + + // stake proportions: uid0:uid1:uid2 = 10:30:60 + SubtensorModule::add_balance_to_coldkey_account(&ck0, 10); + SubtensorModule::add_balance_to_coldkey_account(&ck1, 30); + SubtensorModule::add_balance_to_coldkey_account(&ck2, 60); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk0, + &ck0, + netuid, + AlphaCurrency::from(10), + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk1, + &ck1, + netuid, + AlphaCurrency::from(30), + ); + SubtensorModule::increase_stake_for_hotkey_and_coldkey_on_subnet( + &hk2, + &ck2, + netuid, + AlphaCurrency::from(60), + ); + + let emission = AlphaCurrency::from(1_000_000u64); + + // --- act: run epoch on this sub-subnet only --- + let out = SubtensorModule::epoch_subsubnet(netuid, subid, emission); + + // collect validator emissions per hotkey + let t0 = out.0.get(&hk0).unwrap(); + let t1 = out.0.get(&hk1).unwrap(); + let t2 = out.0.get(&hk2).unwrap(); + let t3 = out.0.get(&hk3).unwrap(); + + // In emergency mode (consensus sum == 0): + // - validator_emission is distributed by (active) stake proportions + // - server_emission remains zero (incentive path is zero) + assert_eq!(u64::from(t0.server_emission), 0); + assert_eq!(u64::from(t1.server_emission), 0); + assert_eq!(u64::from(t2.server_emission), 0); + assert_eq!(u64::from(t3.server_emission), 0); + + // expected splits by stake: 10%, 30%, 60% of total emission + let e = u64::from(emission); + let exp0 = e / 10; // 10% + let exp1 = e * 3 / 10; // 30% + let exp2 = e * 6 / 10; // 60% + + // allow tiny rounding drift from fixed-point conversions + assert_abs_diff_eq!(u64::from(t0.validator_emission), exp0, epsilon = 2); + assert_abs_diff_eq!(u64::from(t1.validator_emission), exp1, epsilon = 2); + assert_abs_diff_eq!(u64::from(t2.validator_emission), exp2, epsilon = 2); + assert_eq!(u64::from(t3.validator_emission), 0); + + // all emission goes to validators + assert_abs_diff_eq!( + u64::from(t0.validator_emission) + + u64::from(t1.validator_emission) + + u64::from(t2.validator_emission), + e, + epsilon = 2 + ); + }); +} diff --git a/pallets/subtensor/src/tests/swap_hotkey.rs b/pallets/subtensor/src/tests/swap_hotkey.rs index dae5a3f176..5991baf07a 100644 --- a/pallets/subtensor/src/tests/swap_hotkey.rs +++ b/pallets/subtensor/src/tests/swap_hotkey.rs @@ -8,7 +8,7 @@ use frame_system::{Config, RawOrigin}; use sp_core::{Get, H160, H256, U256}; use sp_runtime::SaturatedConversion; use substrate_fixed::types::U64F64; -use subtensor_runtime_common::{AlphaCurrency, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::SwapHandler; use super::mock; @@ -326,7 +326,11 @@ fn test_swap_weight_commits() { 
add_network(netuid, 1, 1); IsNetworkMember::::insert(old_hotkey, netuid, true); - WeightCommits::::insert(netuid, old_hotkey, weight_commits.clone()); + WeightCommits::::insert( + NetUidStorageIndex::from(netuid), + old_hotkey, + weight_commits.clone(), + ); assert_ok!(SubtensorModule::perform_hotkey_swap_on_all_subnets( &old_hotkey, @@ -335,9 +339,12 @@ fn test_swap_weight_commits() { &mut weight )); - assert!(!WeightCommits::::contains_key(netuid, old_hotkey)); + assert!(!WeightCommits::::contains_key( + NetUidStorageIndex::from(netuid), + old_hotkey + )); assert_eq!( - WeightCommits::::get(netuid, new_hotkey), + WeightCommits::::get(NetUidStorageIndex::from(netuid), new_hotkey), Some(weight_commits) ); }); diff --git a/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs b/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs index 349c28903a..c7baa55387 100644 --- a/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs +++ b/pallets/subtensor/src/tests/swap_hotkey_with_subnet.rs @@ -5,7 +5,7 @@ use codec::Encode; use frame_support::weights::Weight; use frame_support::{assert_err, assert_noop, assert_ok}; use frame_system::{Config, RawOrigin}; -use subtensor_runtime_common::{AlphaCurrency, Currency, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, Currency, NetUidStorageIndex, TaoCurrency}; use super::mock::*; use crate::*; @@ -343,7 +343,11 @@ fn test_swap_weight_commits() { SubtensorModule::add_balance_to_coldkey_account(&coldkey, u64::MAX); IsNetworkMember::::insert(old_hotkey, netuid, true); - WeightCommits::::insert(netuid, old_hotkey, weight_commits.clone()); + WeightCommits::::insert( + NetUidStorageIndex::from(netuid), + old_hotkey, + weight_commits.clone(), + ); System::set_block_number(System::block_number() + HotkeySwapOnSubnetInterval::get()); assert_ok!(SubtensorModule::do_swap_hotkey( @@ -353,9 +357,12 @@ fn test_swap_weight_commits() { Some(netuid) )); - assert!(!WeightCommits::::contains_key(netuid, old_hotkey)); + assert!(!WeightCommits::::contains_key( + NetUidStorageIndex::from(netuid), + old_hotkey + )); assert_eq!( - WeightCommits::::get(netuid, new_hotkey), + WeightCommits::::get(NetUidStorageIndex::from(netuid), new_hotkey), Some(weight_commits) ); }); diff --git a/pallets/subtensor/src/tests/uids.rs b/pallets/subtensor/src/tests/uids.rs index bca6945b44..4317337ffd 100644 --- a/pallets/subtensor/src/tests/uids.rs +++ b/pallets/subtensor/src/tests/uids.rs @@ -5,7 +5,7 @@ use crate::*; use frame_support::{assert_err, assert_ok}; use frame_system::Config; use sp_core::{H160, U256}; -use subtensor_runtime_common::AlphaCurrency; +use subtensor_runtime_common::{AlphaCurrency, NetUidStorageIndex}; /******************************************** tests for uids.rs file @@ -63,13 +63,13 @@ fn test_replace_neuron() { Consensus::::mutate(netuid, |v| { SubtensorModule::set_element_at(v, neuron_uid as usize, 5u16) }); - Incentive::::mutate(netuid, |v| { + Incentive::::mutate(NetUidStorageIndex::from(netuid), |v| { SubtensorModule::set_element_at(v, neuron_uid as usize, 5u16) }); Dividends::::mutate(netuid, |v| { SubtensorModule::set_element_at(v, neuron_uid as usize, 5u16) }); - Bonds::::insert(netuid, neuron_uid, vec![(0, 1)]); + Bonds::::insert(NetUidStorageIndex::from(netuid), neuron_uid, vec![(0, 1)]); // serve axon mock address let ip: u128 = 1676056785; @@ -130,7 +130,7 @@ fn test_replace_neuron() { 0 ); assert_eq!( - SubtensorModule::get_incentive_for_uid(netuid, neuron_uid), + SubtensorModule::get_incentive_for_uid(netuid.into(), neuron_uid), 0 ); 
assert_eq!( @@ -145,7 +145,10 @@ fn test_replace_neuron() { assert_eq!(axon_info.ip_type, 0); // Check bonds are cleared. - assert_eq!(Bonds::::get(netuid, neuron_uid), vec![]); + assert_eq!( + Bonds::::get(NetUidStorageIndex::from(netuid), neuron_uid), + vec![] + ); assert_eq!(AssociatedEvmAddress::::get(netuid, neuron_uid), None); }); } @@ -189,7 +192,7 @@ fn test_bonds_cleared_on_replace() { let neuron_uid = neuron_uid.unwrap(); AssociatedEvmAddress::::insert(netuid, neuron_uid, (evm_address, 1)); // set non-default bonds - Bonds::::insert(netuid, neuron_uid, vec![(0, 1)]); + Bonds::::insert(NetUidStorageIndex::from(netuid), neuron_uid, vec![(0, 1)]); // Replace the neuron. SubtensorModule::replace_neuron(netuid, neuron_uid, &new_hotkey_account_id, block_number); @@ -214,7 +217,10 @@ fn test_bonds_cleared_on_replace() { assert_eq!(curr_hotkey.unwrap(), new_hotkey_account_id); // Check bonds are cleared. - assert_eq!(Bonds::::get(netuid, neuron_uid), vec![]); + assert_eq!( + Bonds::::get(NetUidStorageIndex::from(netuid), neuron_uid), + vec![] + ); assert_eq!(AssociatedEvmAddress::::get(netuid, neuron_uid), None); }); } diff --git a/pallets/subtensor/src/tests/weights.rs b/pallets/subtensor/src/tests/weights.rs index d25a3eb34f..bc9af5cf07 100644 --- a/pallets/subtensor/src/tests/weights.rs +++ b/pallets/subtensor/src/tests/weights.rs @@ -21,7 +21,7 @@ use sp_runtime::{ }; use sp_std::collections::vec_deque::VecDeque; use substrate_fixed::types::I32F32; -use subtensor_runtime_common::TaoCurrency; +use subtensor_runtime_common::{NetUidStorageIndex, TaoCurrency}; use subtensor_swap_interface::SwapHandler; use tle::{ curves::drand::TinyBLS381, @@ -340,8 +340,14 @@ fn test_reveal_weights_validate() { version_key, }); - let commit_hash: H256 = - SubtensorModule::get_commit_hash(&who, netuid, &dests, &weights, &salt, version_key); + let commit_hash: H256 = SubtensorModule::get_commit_hash( + &who, + NetUidStorageIndex::from(netuid), + &dests, + &weights, + &salt, + version_key, + ); let commit_block = SubtensorModule::get_current_block_as_u64(); let (first_reveal_block, last_reveal_block) = SubtensorModule::get_reveal_blocks(netuid, commit_block); @@ -412,7 +418,7 @@ fn test_reveal_weights_validate() { ); // Add the commit to the hotkey - WeightCommits::::mutate(netuid, hotkey, |maybe_commits| { + WeightCommits::::mutate(NetUidStorageIndex::from(netuid), hotkey, |maybe_commits| { let mut commits: VecDeque<(H256, u64, u64, u64)> = maybe_commits.take().unwrap_or_default(); commits.push_back(( @@ -1377,7 +1383,7 @@ fn test_set_weights_sum_larger_than_u16_max() { assert_ok!(result); // Get max-upscaled unnormalized weights. - let all_weights: Vec> = SubtensorModule::get_weights(netuid); + let all_weights: Vec> = SubtensorModule::get_weights(netuid.into()); let weights_set: &[I32F32] = &all_weights[neuron_uid as usize]; assert_eq!(weights_set[0], I32F32::from_num(u16::MAX)); assert_eq!(weights_set[1], I32F32::from_num(u16::MAX)); @@ -2535,8 +2541,9 @@ fn test_commit_reveal_multiple_commits() { )); // Check that commits before the revealed one are removed - let remaining_commits = crate::WeightCommits::::get(netuid, hotkey) - .expect("expected 8 remaining commits"); + let remaining_commits = + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) + .expect("expected 8 remaining commits"); assert_eq!(remaining_commits.len(), 8); // 10 commits - 2 removed (index 0 and 1) // 4. 
Reveal the last commit next @@ -2551,7 +2558,8 @@ fn test_commit_reveal_multiple_commits() { )); // Remaining commits should have removed up to index 9 - let remaining_commits = crate::WeightCommits::::get(netuid, hotkey); + let remaining_commits = + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(remaining_commits.is_none()); // All commits removed // After revealing all commits, attempt to commit again should now succeed @@ -2796,7 +2804,8 @@ fn test_commit_reveal_multiple_commits() { )); // Check that the first commit has been removed - let remaining_commits = crate::WeightCommits::::get(netuid, hotkey); + let remaining_commits = + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(remaining_commits.is_none()); // Attempting to reveal the first commit should fail as it was removed @@ -2956,7 +2965,8 @@ fn test_expired_commits_handling_in_commit_and_reveal() { // 6. Verify that the number of unrevealed, non-expired commits is now 6 let commits: VecDeque<(H256, u64, u64, u64)> = - crate::WeightCommits::::get(netuid, hotkey).expect("Expected a commit"); + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) + .expect("Expected a commit"); assert_eq!(commits.len(), 6); // 5 non-expired commits from epoch 1 + new commit // 7. Attempt to reveal an expired commit (from epoch 0) @@ -3002,7 +3012,7 @@ fn test_expired_commits_handling_in_commit_and_reveal() { )); // 10. Verify that all commits have been revealed and the queue is empty - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(commits.is_none()); // 11. Attempt to reveal again, should fail with NoWeightsCommitFound @@ -3193,7 +3203,7 @@ fn test_reveal_at_exact_epoch() { Error::::ExpiredWeightCommit ); - crate::WeightCommits::::remove(netuid, hotkey); + crate::WeightCommits::::remove(NetUidStorageIndex::from(netuid), hotkey); } }); } @@ -3471,7 +3481,8 @@ fn test_commit_reveal_order_enforcement() { // Check that commits A and B are removed let remaining_commits = - crate::WeightCommits::::get(netuid, hotkey).expect("expected 1 remaining commit"); + crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) + .expect("expected 1 remaining commit"); assert_eq!(remaining_commits.len(), 1); // Only commit C should remain // Attempt to reveal C (index 2), should succeed @@ -3652,7 +3663,7 @@ fn test_reveal_at_exact_block() { ); // Clean up for next iteration - crate::WeightCommits::::remove(netuid, hotkey); + crate::WeightCommits::::remove(NetUidStorageIndex::from(netuid), hotkey); } }); } @@ -3730,7 +3741,7 @@ fn test_successful_batch_reveal() { )); // 4. Ensure all commits are removed - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(commits.is_none()); }); } @@ -3831,8 +3842,8 @@ fn test_batch_reveal_with_expired_commits() { assert_err!(result, Error::::ExpiredWeightCommit); // 5. Expired commit is not removed until a successful call - let commits = - crate::WeightCommits::::get(netuid, hotkey).expect("Expected remaining commits"); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) + .expect("Expected remaining commits"); assert_eq!(commits.len(), 3); // 6. Try revealing the remaining commits @@ -3851,7 +3862,7 @@ fn test_batch_reveal_with_expired_commits() { )); // 7. 
Ensure all commits are removed - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(commits.is_none()); }); } @@ -4258,7 +4269,7 @@ fn test_batch_reveal_with_out_of_order_commits() { )); // 6. Ensure all commits are removed - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!(commits.is_none()); }); } @@ -4322,7 +4333,7 @@ fn test_highly_concurrent_commits_and_reveals_with_multiple_hotkeys() { for i in 0..commits_per_hotkey { for hotkey in &hotkeys { - let current_commits = crate::WeightCommits::::get(netuid, hotkey) + let current_commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey) .unwrap_or_default(); if current_commits.len() >= max_unrevealed_commits { continue; @@ -4671,7 +4682,7 @@ fn test_get_reveal_blocks() { assert_err!(result, Error::::NoWeightsCommitFound); // **15. Verify that All Commits Have Been Removed from Storage** - let commits = crate::WeightCommits::::get(netuid, hotkey); + let commits = crate::WeightCommits::::get(NetUidStorageIndex::from(netuid), hotkey); assert!( commits.is_none(), "Commits should be cleared after successful reveal" @@ -4727,7 +4738,7 @@ fn test_commit_weights_rate_limit() { let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey).expect("expected uid"); - SubtensorModule::set_last_update_for_uid(netuid, neuron_uid, 0); + SubtensorModule::set_last_update_for_uid(NetUidStorageIndex::from(netuid), neuron_uid, 0); assert_ok!(SubtensorModule::commit_weights( RuntimeOrigin::signed(hotkey), @@ -4984,7 +4995,7 @@ fn test_reveal_crv3_commits_success() { // Step epochs to run the epoch via the blockstep step_epochs(3, netuid); - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid1 as usize).cloned().unwrap_or_default(); assert!( @@ -5106,7 +5117,7 @@ fn test_reveal_crv3_commits_cannot_reveal_after_reveal_epoch() { step_epochs(3, netuid); // Verify that weights are not set - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse .get(neuron_uid1 as usize) .cloned() @@ -5141,7 +5152,7 @@ fn test_reveal_crv3_commits_cannot_reveal_after_reveal_epoch() { assert_ok!(SubtensorModule::reveal_crv3_commits(netuid)); // Verify that the weights for the neuron have not been set - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse .get(neuron_uid1 as usize) .cloned() @@ -5181,7 +5192,8 @@ fn test_do_commit_crv3_weights_success() { let cur_epoch = SubtensorModule::get_epoch_index(netuid, SubtensorModule::get_current_block_as_u64()); - let commits = TimelockedWeightCommits::::get(netuid, cur_epoch); + let commits = + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), cur_epoch); assert_eq!(commits.len(), 1); assert_eq!(commits[0].0, hotkey); assert_eq!(commits[0].2, commit_data); @@ -5264,7 +5276,7 @@ fn test_do_commit_crv3_weights_committing_too_fast() { SubtensorModule::set_commit_reveal_weights_enabled(netuid, true); let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey).expect("Expected uid"); - 
SubtensorModule::set_last_update_for_uid(netuid, neuron_uid, 0); + SubtensorModule::set_last_update_for_uid(NetUidStorageIndex::from(netuid), neuron_uid, 0); assert_ok!(SubtensorModule::do_commit_timelocked_weights( RuntimeOrigin::signed(hotkey), @@ -5478,7 +5490,7 @@ fn test_reveal_crv3_commits_decryption_failure() { let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey) .expect("Failed to get neuron UID for hotkey") as usize; - let weights_matrix = SubtensorModule::get_weights(netuid); + let weights_matrix = SubtensorModule::get_weights(netuid.into()); let weights = weights_matrix.get(neuron_uid).cloned().unwrap_or_default(); assert!(weights.iter().all(|&w| w == I32F32::from_num(0))); }); @@ -5591,7 +5603,7 @@ fn test_reveal_crv3_commits_multiple_commits_some_fail_some_succeed() { // Verify that weights are set for hotkey1 let neuron_uid1 = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey1) .expect("Failed to get neuron UID for hotkey1") as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights1 = weights_sparse.get(neuron_uid1).cloned().unwrap_or_default(); assert!( !weights1.is_empty(), @@ -5686,7 +5698,7 @@ fn test_reveal_crv3_commits_do_set_weights_failure() { // Verify that weights are not set due to `do_set_weights` failure let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey) .expect("Failed to get neuron UID for hotkey") as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid).cloned().unwrap_or_default(); assert!( weights.is_empty(), @@ -5764,7 +5776,7 @@ fn test_reveal_crv3_commits_payload_decoding_failure() { // Verify that weights are not set let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey) .expect("Failed to get neuron UID for hotkey") as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid).cloned().unwrap_or_default(); assert!( weights.is_empty(), @@ -5846,7 +5858,7 @@ fn test_reveal_crv3_commits_signature_deserialization_failure() { // Verify that weights are not set let neuron_uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hotkey) .expect("Failed to get neuron UID for hotkey") as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid).cloned().unwrap_or_default(); assert!( weights.is_empty(), @@ -5911,7 +5923,7 @@ fn test_reveal_crv3_commits_with_empty_commit_queue() { step_epochs(2, netuid); - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); assert!( weights_sparse.is_empty(), "Weights should be empty as there were no commits to reveal" @@ -5998,7 +6010,7 @@ fn test_reveal_crv3_commits_with_incorrect_identity_message() { // Verify that weights are not set due to decryption failure let neuron_uid = neuron_uid as usize; - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid).cloned().unwrap_or_default(); assert!( weights.is_empty(), @@ 
-6036,7 +6048,8 @@ fn test_multiple_commits_by_same_hotkey_within_limit() { let cur_epoch = SubtensorModule::get_epoch_index(netuid, SubtensorModule::get_current_block_as_u64()); - let commits = TimelockedWeightCommits::::get(netuid, cur_epoch); + let commits = + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), cur_epoch); assert_eq!( commits.len(), 10, @@ -6071,7 +6084,7 @@ fn test_reveal_crv3_commits_removes_past_epoch_commits() { let bounded_commit = vec![epoch as u8; 5].try_into().expect("bounded vec"); assert_ok!(TimelockedWeightCommits::::try_mutate( - netuid, + NetUidStorageIndex::from(netuid), epoch, |q| -> DispatchResult { q.push_back((hotkey, cur_block, bounded_commit, reveal_round)); @@ -6081,8 +6094,14 @@ fn test_reveal_crv3_commits_removes_past_epoch_commits() { } // Sanity – both epochs presently hold a commit. - assert!(!TimelockedWeightCommits::::get(netuid, past_epoch).is_empty()); - assert!(!TimelockedWeightCommits::::get(netuid, reveal_epoch).is_empty()); + assert!( + !TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), past_epoch) + .is_empty() + ); + assert!( + !TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), reveal_epoch) + .is_empty() + ); // --------------------------------------------------------------------- // Run the reveal pass WITHOUT a pulse – only expiry housekeeping runs. @@ -6091,13 +6110,15 @@ fn test_reveal_crv3_commits_removes_past_epoch_commits() { // past_epoch (< reveal_epoch) must be gone assert!( - TimelockedWeightCommits::::get(netuid, past_epoch).is_empty(), + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), past_epoch) + .is_empty(), "expired epoch {past_epoch} should be cleared" ); // reveal_epoch queue is *kept* because its commit could still be revealed later. 
assert!( - !TimelockedWeightCommits::::get(netuid, reveal_epoch).is_empty(), + !TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), reveal_epoch) + .is_empty(), "reveal-epoch {reveal_epoch} must be retained until commit can be revealed" ); }); @@ -6208,7 +6229,7 @@ fn test_reveal_crv3_commits_multiple_valid_commits_all_processed() { step_epochs(2, netuid); // ───── assertions ─────────────────────────────────────────────────── - let w_sparse = SubtensorModule::get_weights_sparse(netuid); + let w_sparse = SubtensorModule::get_weights_sparse(netuid.into()); for hk in hotkeys { let uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, &hk).unwrap() as usize; assert!( @@ -6323,7 +6344,7 @@ fn test_reveal_crv3_commits_max_neurons() { step_epochs(2, netuid); // ───── verify weights ─────────────────────────────────────────────── - let w_sparse = SubtensorModule::get_weights_sparse(netuid); + let w_sparse = SubtensorModule::get_weights_sparse(netuid.into()); for hk in &committing_hotkeys { let uid = SubtensorModule::get_uid_for_net_and_hotkey(netuid, hk).unwrap() as usize; assert!( @@ -6553,7 +6574,7 @@ fn test_reveal_crv3_commits_hotkey_check() { // Step epochs to run the epoch via the blockstep step_epochs(3, netuid); - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid1 as usize).cloned().unwrap_or_default(); assert!( @@ -6670,7 +6691,7 @@ fn test_reveal_crv3_commits_hotkey_check() { // Step epochs to run the epoch via the blockstep step_epochs(3, netuid); - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let weights = weights_sparse.get(neuron_uid1 as usize).cloned().unwrap_or_default(); assert!( @@ -6773,10 +6794,11 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { )); // epoch in which commit was stored - let stored_epoch = TimelockedWeightCommits::::iter_prefix(netuid) - .next() - .map(|(e, _)| e) - .expect("commit stored"); + let stored_epoch = + TimelockedWeightCommits::::iter_prefix(NetUidStorageIndex::from(netuid)) + .next() + .map(|(e, _)| e) + .expect("commit stored"); // first block of reveal epoch (commit_epoch + RP) let first_reveal_epoch = stored_epoch + SubtensorModule::get_reveal_period(netuid); @@ -6787,7 +6809,8 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { // run *one* block inside reveal epoch without pulse → commit should stay queued step_block(1); assert!( - !TimelockedWeightCommits::::get(netuid, stored_epoch).is_empty(), + !TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), stored_epoch) + .is_empty(), "commit must remain queued when pulse is missing" ); @@ -6808,14 +6831,15 @@ fn test_reveal_crv3_commits_retry_on_missing_pulse() { step_block(1); // automatic reveal runs here - let weights = SubtensorModule::get_weights_sparse(netuid) + let weights = SubtensorModule::get_weights_sparse(netuid.into()) .get(uid as usize) .cloned() .unwrap_or_default(); assert!(!weights.is_empty(), "weights must be set after pulse"); assert!( - TimelockedWeightCommits::::get(netuid, stored_epoch).is_empty(), + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), stored_epoch) + .is_empty(), "queue should be empty after successful reveal" ); }); @@ -6943,7 +6967,7 @@ fn test_reveal_crv3_commits_legacy_payload_success() { // ───────────────────────────────────── // 5 ▸ assertions // 
───────────────────────────────────── - let weights_sparse = SubtensorModule::get_weights_sparse(netuid); + let weights_sparse = SubtensorModule::get_weights_sparse(netuid.into()); let w1 = weights_sparse .get(uid1 as usize) .cloned() @@ -6958,7 +6982,8 @@ fn test_reveal_crv3_commits_legacy_payload_success() { // commit should be gone assert!( - TimelockedWeightCommits::::get(netuid, commit_epoch).is_empty(), + TimelockedWeightCommits::::get(NetUidStorageIndex::from(netuid), commit_epoch) + .is_empty(), "commit storage should be cleaned after reveal" ); }); diff --git a/pallets/subtensor/src/transaction_extension.rs b/pallets/subtensor/src/transaction_extension.rs index 064a242f91..62c6a3c8ca 100644 --- a/pallets/subtensor/src/transaction_extension.rs +++ b/pallets/subtensor/src/transaction_extension.rs @@ -17,7 +17,7 @@ use sp_runtime::transaction_validity::{ use sp_std::marker::PhantomData; use sp_std::vec::Vec; use subtensor_macros::freeze_struct; -use subtensor_runtime_common::NetUid; +use subtensor_runtime_common::{NetUid, NetUidStorageIndex}; #[freeze_struct("2e02eb32e5cb25d3")] #[derive(Default, Encode, Decode, DecodeWithMemTracking, Clone, Eq, PartialEq, TypeInfo)] @@ -149,7 +149,7 @@ where if Self::check_weights_min_stake(who, *netuid) { let provided_hash = Pallet::::get_commit_hash( who, - *netuid, + NetUidStorageIndex::from(*netuid), uids, values, salt, @@ -186,7 +186,7 @@ where .map(|i| { Pallet::::get_commit_hash( who, - *netuid, + NetUidStorageIndex::from(*netuid), uids_list.get(i).unwrap_or(&Vec::new()), values_list.get(i).unwrap_or(&Vec::new()), salts_list.get(i).unwrap_or(&Vec::new()), diff --git a/pallets/subtensor/src/utils/misc.rs b/pallets/subtensor/src/utils/misc.rs index cc4485bf55..8703b1774b 100644 --- a/pallets/subtensor/src/utils/misc.rs +++ b/pallets/subtensor/src/utils/misc.rs @@ -8,7 +8,7 @@ use sp_core::Get; use sp_core::U256; use sp_runtime::Saturating; use substrate_fixed::types::{I32F32, U96F32}; -use subtensor_runtime_common::{AlphaCurrency, NetUid, TaoCurrency}; +use subtensor_runtime_common::{AlphaCurrency, NetUid, NetUidStorageIndex, TaoCurrency}; impl Pallet { pub fn ensure_subnet_owner_or_root( @@ -200,13 +200,13 @@ impl Pallet { pub fn get_consensus(netuid: NetUid) -> Vec { Consensus::::get(netuid) } - pub fn get_incentive(netuid: NetUid) -> Vec { + pub fn get_incentive(netuid: NetUidStorageIndex) -> Vec { Incentive::::get(netuid) } pub fn get_dividends(netuid: NetUid) -> Vec { Dividends::::get(netuid) } - pub fn get_last_update(netuid: NetUid) -> Vec { + pub fn get_last_update(netuid: NetUidStorageIndex) -> Vec { LastUpdate::::get(netuid) } pub fn get_pruning_score(netuid: NetUid) -> Vec { @@ -222,7 +222,7 @@ impl Pallet { // ================================== // ==== YumaConsensus UID params ==== // ================================== - pub fn set_last_update_for_uid(netuid: NetUid, uid: u16, last_update: u64) { + pub fn set_last_update_for_uid(netuid: NetUidStorageIndex, uid: u16, last_update: u64) { let mut updated_last_update_vec = Self::get_last_update(netuid); let Some(updated_last_update) = updated_last_update_vec.get_mut(uid as usize) else { return; @@ -285,7 +285,7 @@ impl Pallet { let vec = Consensus::::get(netuid); vec.get(uid as usize).copied().unwrap_or(0) } - pub fn get_incentive_for_uid(netuid: NetUid, uid: u16) -> u16 { + pub fn get_incentive_for_uid(netuid: NetUidStorageIndex, uid: u16) -> u16 { let vec = Incentive::::get(netuid); vec.get(uid as usize).copied().unwrap_or(0) } @@ -293,7 +293,7 @@ impl Pallet { let vec = 
         vec.get(uid as usize).copied().unwrap_or(0)
     }
-    pub fn get_last_update_for_uid(netuid: NetUid, uid: u16) -> u64 {
+    pub fn get_last_update_for_uid(netuid: NetUidStorageIndex, uid: u16) -> u64 {
         let vec = LastUpdate::<T>::get(netuid);
         vec.get(uid as usize).copied().unwrap_or(0)
     }
diff --git a/pallets/subtensor/src/utils/rate_limiting.rs b/pallets/subtensor/src/utils/rate_limiting.rs
index 5d1005333c..e9a8bb7b12 100644
--- a/pallets/subtensor/src/utils/rate_limiting.rs
+++ b/pallets/subtensor/src/utils/rate_limiting.rs
@@ -12,6 +12,7 @@ pub enum TransactionType {
     SetWeightsVersionKey,
     SetSNOwnerHotkey,
     OwnerHyperparamUpdate,
+    SubsubnetParameterUpdate,
 }
 
 /// Implement conversion from TransactionType to u16
@@ -25,6 +26,7 @@ impl From<TransactionType> for u16 {
             TransactionType::SetWeightsVersionKey => 4,
             TransactionType::SetSNOwnerHotkey => 5,
             TransactionType::OwnerHyperparamUpdate => 6,
+            TransactionType::SubsubnetParameterUpdate => 7,
         }
     }
 }
@@ -39,6 +41,7 @@ impl From<u16> for TransactionType {
             4 => TransactionType::SetWeightsVersionKey,
             5 => TransactionType::SetSNOwnerHotkey,
             6 => TransactionType::OwnerHyperparamUpdate,
+            7 => TransactionType::SubsubnetParameterUpdate,
             _ => TransactionType::Unknown,
         }
     }
@@ -54,6 +57,7 @@ impl<T: Config> Pallet<T> {
             TransactionType::SetChildkeyTake => TxChildkeyTakeRateLimit::<T>::get(),
             TransactionType::RegisterNetwork => NetworkRateLimit::<T>::get(),
             TransactionType::OwnerHyperparamUpdate => OwnerHyperparamRateLimit::<T>::get(),
+            TransactionType::SubsubnetParameterUpdate => SubsubnetCountSetRateLimit::<T>::get(),
             TransactionType::Unknown => 0, // Default to no limit for unknown types
             // (no limit)
             _ => 0,
diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs
index aee1e04895..d49d5147e9 100644
--- a/runtime/src/lib.rs
+++ b/runtime/src/lib.rs
@@ -220,7 +220,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
     // `spec_version`, and `authoring_version` are the same between Wasm and native.
     // This value is set to 100 to notify Polkadot-JS App (https://polkadot.js.org/apps) to use
     // the compatible custom types.
-    spec_version: 315,
+    spec_version: 316,
     impl_version: 1,
     apis: RUNTIME_API_VERSIONS,
     transaction_version: 1,
@@ -1056,7 +1056,12 @@ pub struct ResetBondsOnCommit;
 impl OnMetadataCommitment for ResetBondsOnCommit {
     #[cfg(not(feature = "runtime-benchmarks"))]
     fn on_metadata_commitment(netuid: NetUid, address: &AccountId) {
-        let _ = SubtensorModule::do_reset_bonds(netuid, address);
+        // Reset bonds for each subsubnet of this subnet
+        let subsub_count = SubtensorModule::get_current_subsubnet_count(netuid);
+        for subid in 0..u8::from(subsub_count) {
+            let netuid_index = SubtensorModule::get_subsubnet_storage_index(netuid, subid.into());
+            let _ = SubtensorModule::do_reset_bonds(netuid_index, address);
+        }
     }
 
     #[cfg(feature = "runtime-benchmarks")]
@@ -2326,6 +2331,10 @@ impl_runtime_apis! {
             SubtensorModule::get_metagraph(netuid)
         }
 
+        fn get_submetagraph(netuid: NetUid, subid: SubId) -> Option<Metagraph<AccountId32>> {
+            SubtensorModule::get_submetagraph(netuid, subid)
+        }
+
         fn get_subnet_state(netuid: NetUid) -> Option<SubnetState<AccountId32>> {
             SubtensorModule::get_subnet_state(netuid)
         }
@@ -2334,6 +2343,10 @@ impl_runtime_apis! {
             SubtensorModule::get_all_metagraphs()
         }
 
+        fn get_all_submetagraphs() -> Vec<Option<Metagraph<AccountId32>>> {
+            SubtensorModule::get_all_submetagraphs()
+        }
+
         fn get_all_dynamic_info() -> Vec<Option<DynamicInfo<AccountId32>>> {
             SubtensorModule::get_all_dynamic_info()
        }
@@ -2342,6 +2355,9 @@ impl_runtime_apis! {
             SubtensorModule::get_selective_metagraph(netuid, metagraph_indexes)
         }
+        fn get_selective_submetagraph(netuid: NetUid, subid: SubId, metagraph_indexes: Vec<u16>) -> Option<SelectiveMetagraph<AccountId32>> {
+            SubtensorModule::get_selective_submetagraph(netuid, subid, metagraph_indexes)
+        }
     }
 
     impl subtensor_custom_rpc_runtime_api::StakeInfoRuntimeApi<Block> for Runtime {
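// ---------------------------------------------------------------------------
// Illustrative sketch only, not part of the diff above: how the new
// SubsubnetParameterUpdate rate-limiting variant is expected to round-trip
// through its u16 discriminant, based on the two From impls extended in
// pallets/subtensor/src/utils/rate_limiting.rs. The module path and the test
// scaffolding below are assumptions for illustration, not code from the patch.
// ---------------------------------------------------------------------------
#[cfg(test)]
mod subsubnet_tx_type_sketch {
    // Assumed import path; the type lives in the pallet's rate_limiting module.
    use pallet_subtensor::utils::rate_limiting::TransactionType;

    #[test]
    fn subsubnet_parameter_update_round_trips_through_u16() {
        // The diff maps SubsubnetParameterUpdate <-> 7 in both directions.
        assert_eq!(u16::from(TransactionType::SubsubnetParameterUpdate), 7);
        assert!(matches!(
            TransactionType::from(7u16),
            TransactionType::SubsubnetParameterUpdate
        ));

        // Any unmapped discriminant still falls back to Unknown.
        assert!(matches!(
            TransactionType::from(u16::MAX),
            TransactionType::Unknown
        ));
    }
}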
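// ---------------------------------------------------------------------------
// Illustrative sketch only, not part of the diff above: the calling convention
// these changes introduce. Per-subsubnet storage (Incentive, LastUpdate,
// TimelockedWeightCommits, ...) is now keyed by NetUidStorageIndex, so call
// sites either convert a NetUid directly (NetUidStorageIndex::from(netuid) /
// netuid.into(), as the updated tests do) or walk every subsubnet the way
// ResetBondsOnCommit does above. The mock scaffolding used here
// (new_test_ext, add_network, SubtensorModule, super::mock) is assumed to be
// the pallet's existing test utilities, not something defined by the patch.
// ---------------------------------------------------------------------------
#[cfg(test)]
mod netuid_storage_index_sketch {
    use super::mock::*;
    use subtensor_runtime_common::{NetUid, NetUidStorageIndex};

    #[test]
    fn per_subsubnet_getters_take_a_storage_index() {
        new_test_ext().execute_with(|| {
            let netuid = NetUid::from(1);
            add_network(netuid, 10);

            // A bare NetUid converts to the storage index used at the updated
            // call sites (e.g. get_weights_sparse(netuid.into()) in the tests).
            let index = NetUidStorageIndex::from(netuid);
            let _last_update: Vec<u64> = SubtensorModule::get_last_update(index);

            // Walking every subsubnet mirrors the ResetBondsOnCommit loop.
            let subsub_count = SubtensorModule::get_current_subsubnet_count(netuid);
            for subid in 0..u8::from(subsub_count) {
                let idx = SubtensorModule::get_subsubnet_storage_index(netuid, subid.into());
                let _incentive: Vec<u16> = SubtensorModule::get_incentive(idx);
            }
        });
    }
}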