From 5753b843b8e8149ece7297b9341adc7bba64fde6 Mon Sep 17 00:00:00 2001 From: redactedontop Date: Sun, 16 Mar 2025 16:32:43 +0000 Subject: [PATCH 1/7] Code refactoring and performance boosts --- Cargo.toml | 10 +- src/atomic_owned.rs | 57 +++----- src/atomic_shared.rs | 54 +++----- src/collectible.rs | 10 +- src/collector.rs | 315 ++++++++++++++++++++++--------------------- src/exit_guard.rs | 8 +- src/owned.rs | 10 +- src/ptr.rs | 16 +-- src/ref_counted.rs | 64 +++------ src/shared.rs | 8 +- src/tag.rs | 52 +------ 11 files changed, 249 insertions(+), 355 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 547283f..659915d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,11 +15,15 @@ keywords = ["concurrent", "epoch", "garbage", "lock-free", "memory"] [workspace] members = [".", "examples"] -[dependencies] -loom = { version = "0.7", optional = true } +[profile.release] +lto = true + +[dependencies.loom] +version = "0.7.2" +optional = true [dev-dependencies] -criterion = "0.5" +criterion = "0.5.1" static_assertions = "1.1" [[bench]] diff --git a/src/atomic_owned.rs b/src/atomic_owned.rs index c611740..f5ccc66 100644 --- a/src/atomic_owned.rs +++ b/src/atomic_owned.rs @@ -1,9 +1,13 @@ use super::maybe_std::AtomicPtr; use super::ref_counted::RefCounted; use super::{Guard, Owned, Ptr, Tag}; +#[cfg(feature = "loom")] +use loom::sync::atomic::AtomicPtr as AtomicPtrType; use std::mem::forget; use std::panic::UnwindSafe; use std::ptr::{null, null_mut, NonNull}; +#[cfg(not(feature = "loom"))] +use std::sync::atomic::AtomicPtr as AtomicPtrType; use std::sync::atomic::Ordering::{self, Relaxed}; /// [`AtomicOwned`] owns the underlying instance, and allows users to perform atomic operations @@ -51,26 +55,12 @@ impl AtomicOwned { /// let owned: Owned = Owned::new(10); /// let atomic_owned: AtomicOwned = AtomicOwned::from(owned); /// ``` - #[cfg(not(feature = "loom"))] #[inline] #[must_use] pub const fn from(owned: Owned) -> Self { let ptr = owned.underlying_ptr(); forget(owned); - let instance_ptr: std::sync::atomic::AtomicPtr> = - AtomicPtr::new(ptr.cast_mut()); - Self { instance_ptr } - } - - /// Creates a new [`AtomicOwned`] from an [`Owned`] of `T`. - #[cfg(feature = "loom")] - #[inline] - #[must_use] - pub fn from(owned: Owned) -> Self { - let ptr = owned.underlying_ptr(); - forget(owned); - let instance_ptr: loom::sync::atomic::AtomicPtr> = - AtomicPtr::new(ptr.cast_mut()); + let instance_ptr: AtomicPtrType> = AtomicPtr::new(ptr.cast_mut()); Self { instance_ptr } } @@ -83,20 +73,10 @@ impl AtomicOwned { /// /// let atomic_owned: AtomicOwned = AtomicOwned::null(); /// ``` - #[cfg(not(feature = "loom"))] #[inline] #[must_use] pub const fn null() -> Self { - let instance_ptr: std::sync::atomic::AtomicPtr> = AtomicPtr::new(null_mut()); - Self { instance_ptr } - } - - /// Creates a null [`AtomicOwned`]. 
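The hunk above (and the matching `null()` hunk just below) removes the duplicated `loom` variants by switching to a cfg-gated import alias, so a single function body serves both builds. A minimal sketch of that pattern; `Slot` is a hypothetical example type, not part of the patch:

```rust
// Only the import differs between the loom and std builds; everything that
// names `AtomicPtrType` afterwards is written exactly once.
#[cfg(feature = "loom")]
use loom::sync::atomic::AtomicPtr as AtomicPtrType;
#[cfg(not(feature = "loom"))]
use std::sync::atomic::AtomicPtr as AtomicPtrType;

// `Slot` is a hypothetical example type, not part of the patch.
struct Slot {
    ptr: AtomicPtrType<u8>,
}

fn main() {
    let _slot = Slot {
        ptr: AtomicPtrType::new(std::ptr::null_mut()),
    };
}
```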
- #[cfg(feature = "loom")] - #[inline] - #[must_use] - pub fn null() -> Self { - let instance_ptr: loom::sync::atomic::AtomicPtr> = AtomicPtr::new(null_mut()); + let instance_ptr: AtomicPtrType> = AtomicPtr::new(null_mut()); Self { instance_ptr } } @@ -164,10 +144,12 @@ impl AtomicOwned { new.1, ) .cast_mut(); + let prev = self.instance_ptr.swap(desired, order); let tag = Tag::into_tag(prev); let prev_ptr = Tag::unset_tag(prev).cast_mut(); forget(new); + (NonNull::new(prev_ptr).map(Owned::from), tag) } @@ -212,11 +194,7 @@ impl AtomicOwned { ) -> bool { self.instance_ptr .fetch_update(set_order, fetch_order, |ptr| { - if condition(Ptr::from(ptr)) { - Some(Tag::update_tag(ptr, tag).cast_mut()) - } else { - None - } + condition(Ptr::from(ptr)).then_some(Tag::update_tag(ptr, tag).cast_mut()) }) .is_ok() } @@ -269,6 +247,7 @@ impl AtomicOwned { new.1, ) .cast_mut(); + match self.instance_ptr.compare_exchange( current.as_underlying_ptr().cast_mut(), desired, @@ -332,6 +311,7 @@ impl AtomicOwned { new.1, ) .cast_mut(); + match self.instance_ptr.compare_exchange_weak( current.as_underlying_ptr().cast_mut(), desired, @@ -365,10 +345,7 @@ impl AtomicOwned { #[must_use] pub fn into_owned(self, order: Ordering) -> Option> { let ptr = self.instance_ptr.swap(null_mut(), order); - if let Some(underlying_ptr) = NonNull::new(Tag::unset_tag(ptr).cast_mut()) { - return Some(Owned::from(underlying_ptr)); - } - None + NonNull::new(Tag::unset_tag(ptr).cast_mut()).map(Owned::from) } } @@ -382,10 +359,12 @@ impl Default for AtomicOwned { impl Drop for AtomicOwned { #[inline] fn drop(&mut self) { - if let Some(ptr) = NonNull::new(Tag::unset_tag(self.instance_ptr.load(Relaxed)).cast_mut()) - { - drop(Owned::from(ptr)); - } + let Some(ptr) = NonNull::new(Tag::unset_tag(self.instance_ptr.load(Relaxed)).cast_mut()) + else { + return; + }; + + drop(Owned::from(ptr)); } } diff --git a/src/atomic_shared.rs b/src/atomic_shared.rs index bc34f17..f2e4554 100644 --- a/src/atomic_shared.rs +++ b/src/atomic_shared.rs @@ -1,9 +1,13 @@ use super::maybe_std::AtomicPtr; use super::ref_counted::RefCounted; use super::{Guard, Ptr, Shared, Tag}; +#[cfg(feature = "loom")] +use loom::sync::atomic::AtomicPtr as AtomicPtrType; use std::mem::forget; use std::panic::UnwindSafe; use std::ptr::{null, null_mut, NonNull}; +#[cfg(not(feature = "loom"))] +use std::sync::atomic::AtomicPtr as AtomicPtrType; use std::sync::atomic::Ordering::{self, Acquire, Relaxed}; /// [`AtomicShared`] owns the underlying instance, and allows users to perform atomic operations @@ -51,26 +55,12 @@ impl AtomicShared { /// let shared: Shared = Shared::new(10); /// let atomic_shared: AtomicShared = AtomicShared::from(shared); /// ``` - #[cfg(not(feature = "loom"))] #[inline] #[must_use] pub const fn from(shared: Shared) -> Self { let ptr = shared.underlying_ptr(); forget(shared); - let instance_ptr: std::sync::atomic::AtomicPtr> = - AtomicPtr::new(ptr.cast_mut()); - Self { instance_ptr } - } - - /// Creates a new [`AtomicShared`] from a [`Shared`] of `T`. 
- #[cfg(feature = "loom")] - #[inline] - #[must_use] - pub fn from(shared: Shared) -> Self { - let ptr = shared.underlying_ptr(); - forget(shared); - let instance_ptr: loom::sync::atomic::AtomicPtr> = - AtomicPtr::new(ptr.cast_mut()); + let instance_ptr: AtomicPtrType> = AtomicPtr::new(ptr.cast_mut()); Self { instance_ptr } } @@ -83,20 +73,10 @@ impl AtomicShared { /// /// let atomic_shared: AtomicShared = AtomicShared::null(); /// ``` - #[cfg(not(feature = "loom"))] #[inline] #[must_use] pub const fn null() -> Self { - let instance_ptr: std::sync::atomic::AtomicPtr> = AtomicPtr::new(null_mut()); - Self { instance_ptr } - } - - /// Creates a null [`AtomicShared`]. - #[cfg(feature = "loom")] - #[inline] - #[must_use] - pub fn null() -> Self { - let instance_ptr: loom::sync::atomic::AtomicPtr> = AtomicPtr::new(null_mut()); + let instance_ptr: AtomicPtrType> = AtomicPtr::new(null_mut()); Self { instance_ptr } } @@ -164,10 +144,12 @@ impl AtomicShared { new.1, ) .cast_mut(); + let prev = self.instance_ptr.swap(desired, order); let tag = Tag::into_tag(prev); let prev_ptr = Tag::unset_tag(prev).cast_mut(); forget(new); + (NonNull::new(prev_ptr).map(Shared::from), tag) } @@ -212,11 +194,7 @@ impl AtomicShared { ) -> bool { self.instance_ptr .fetch_update(set_order, fetch_order, |ptr| { - if condition(Ptr::from(ptr)) { - Some(Tag::update_tag(ptr, tag).cast_mut()) - } else { - None - } + condition(Ptr::from(ptr)).then_some(Tag::update_tag(ptr, tag).cast_mut()) }) .is_ok() } @@ -273,6 +251,7 @@ impl AtomicShared { new.1, ) .cast_mut(); + match self.instance_ptr.compare_exchange( current.as_underlying_ptr().cast_mut(), desired, @@ -336,6 +315,7 @@ impl AtomicShared { new.1, ) .cast_mut(); + match self.instance_ptr.compare_exchange_weak( current.as_underlying_ptr().cast_mut(), desired, @@ -398,6 +378,7 @@ impl AtomicShared { if unsafe { (*ptr).try_add_ref(Acquire) } { return NonNull::new(ptr.cast_mut()).map(Shared::from); } + ptr = Tag::unset_tag(self.instance_ptr.load(order)); } None @@ -424,6 +405,7 @@ impl AtomicShared { if let Some(underlying_ptr) = NonNull::new(Tag::unset_tag(ptr).cast_mut()) { return Some(Shared::from(underlying_ptr)); } + None } } @@ -445,10 +427,12 @@ impl Default for AtomicShared { impl Drop for AtomicShared { #[inline] fn drop(&mut self) { - if let Some(ptr) = NonNull::new(Tag::unset_tag(self.instance_ptr.load(Relaxed)).cast_mut()) - { - drop(Shared::from(ptr)); - } + let Some(ptr) = NonNull::new(Tag::unset_tag(self.instance_ptr.load(Relaxed)).cast_mut()) + else { + return; + }; + + drop(Shared::from(ptr)); } } diff --git a/src/collectible.rs b/src/collectible.rs index ab15ab6..38810c3 100644 --- a/src/collectible.rs +++ b/src/collectible.rs @@ -53,6 +53,7 @@ impl Collectible for Link { self.data.0.load(Relaxed) as *mut usize, self.data.1.load(Relaxed), ); + unsafe { std::mem::transmute(fat_ptr) } } @@ -63,6 +64,7 @@ impl Collectible for Link { || (ptr::null_mut(), ptr::null_mut()), |p| unsafe { std::mem::transmute(p) }, ); + self.data.0.store(data.0 as usize, Relaxed); self.data.1.store(data.1, Relaxed); } @@ -94,8 +96,10 @@ impl Collectible for DeferredClosure { impl Drop for DeferredClosure { #[inline] fn drop(&mut self) { - if let Some(f) = self.f.take() { - f(); - } + let Some(f) = self.f.take() else { + return; + }; + + f(); } } diff --git a/src/collector.rs b/src/collector.rs index 7e81c13..d897321 100644 --- a/src/collector.rs +++ b/src/collector.rs @@ -2,6 +2,7 @@ use super::collectible::{Collectible, Link}; use super::exit_guard::ExitGuard; use 
super::maybe_std::fence as maybe_std_fence; use super::{Epoch, Tag}; +use std::mem; use std::ptr::{self, addr_of_mut, NonNull}; use std::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; use std::sync::atomic::{AtomicPtr, AtomicU8}; @@ -64,41 +65,7 @@ impl Collector { /// The method may panic if the number of readers has reached `u32::MAX`. #[inline] pub(super) unsafe fn new_guard(collector_ptr: *mut Collector, collect_garbage: bool) { - if (*collector_ptr).num_readers == 0 { - debug_assert_eq!( - (*collector_ptr).state.load(Relaxed) & Self::INACTIVE, - Self::INACTIVE - ); - (*collector_ptr).num_readers = 1; - let new_epoch = Epoch::from_u8(GLOBAL_ROOT.epoch.load(Relaxed)); - if cfg!(feature = "loom") || cfg!(not(any(target_arch = "x86", target_arch = "x86_64"))) - { - // What will happen after the fence strictly happens after the fence. - (*collector_ptr).state.store(new_epoch.into(), Relaxed); - maybe_std_fence(SeqCst); - } else { - // This special optimization is excerpted from - // [`crossbeam_epoch`](https://docs.rs/crossbeam-epoch/). - // - // The rationale behind the code is, it compiles to `lock xchg` that - // practically acts as a full memory barrier on `X86`, and is much faster than - // `mfence`. - (*collector_ptr).state.swap(new_epoch.into(), SeqCst); - } - if (*collector_ptr).announcement != new_epoch { - (*collector_ptr).announcement = new_epoch; - if collect_garbage { - let mut exit_guard = - ExitGuard::new((collector_ptr, false), |(collector_ptr, result)| { - if !result { - Self::end_guard(collector_ptr); - } - }); - Collector::epoch_updated(exit_guard.0); - exit_guard.1 = true; - } - } - } else { + if (*collector_ptr).num_readers > 0 { debug_assert_eq!((*collector_ptr).state.load(Relaxed) & Self::INACTIVE, 0); assert_ne!( (*collector_ptr).num_readers, @@ -106,7 +73,46 @@ impl Collector { "Too many EBR guards" ); (*collector_ptr).num_readers += 1; + return; } + + debug_assert_eq!( + (*collector_ptr).state.load(Relaxed) & Self::INACTIVE, + Self::INACTIVE + ); + (*collector_ptr).num_readers = 1; + let new_epoch = Epoch::from_u8(GLOBAL_ROOT.epoch.load(Relaxed)); + if cfg!(feature = "loom") || cfg!(not(any(target_arch = "x86", target_arch = "x86_64"))) { + // What will happen after the fence strictly happens after the fence. + (*collector_ptr).state.store(new_epoch.into(), Relaxed); + maybe_std_fence(SeqCst); + } else { + // This special optimization is excerpted from + // [`crossbeam_epoch`](https://docs.rs/crossbeam-epoch/). + // + // The rationale behind the code is, it compiles to `lock xchg` that + // practically acts as a full memory barrier on `X86`, and is much faster than + // `mfence`. + (*collector_ptr).state.swap(new_epoch.into(), SeqCst); + } + + if (*collector_ptr).announcement == new_epoch { + return; + } + + (*collector_ptr).announcement = new_epoch; + + if !collect_garbage { + return; + } + + let mut exit_guard = ExitGuard::new((collector_ptr, false), |(collector_ptr, result)| { + if !result { + Self::end_guard(collector_ptr); + } + }); + Collector::epoch_updated(exit_guard.0); + exit_guard.1 = true; } /// Acknowledges an existing [`Guard`](super::Guard) being dropped. 
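The restructured `new_guard` above keeps the crossbeam-derived barrier trick intact: on `x86`/`x86_64`, a `SeqCst` swap compiles to `lock xchg` and already acts as a full barrier, while other targets use a plain store followed by a `SeqCst` fence. A self-contained sketch of the two publication paths, with `state` and `announce` as stand-ins for illustration rather than the collector's actual API:

```rust
use std::sync::atomic::{fence, AtomicU8, Ordering};

// Publish a new epoch announcement with full-barrier semantics.
fn announce(state: &AtomicU8, new_epoch: u8) {
    if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
        // `lock xchg` is itself a full barrier, and cheaper than `mfence`.
        state.swap(new_epoch, Ordering::SeqCst);
    } else {
        // Portable path: relaxed store, then an explicit sequentially
        // consistent fence.
        state.store(new_epoch, Ordering::Relaxed);
        fence(Ordering::SeqCst);
    }
}

fn main() {
    let state = AtomicU8::new(0);
    announce(&state, 1);
    assert_eq!(state.load(Ordering::Relaxed), 1);
}
```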
@@ -118,33 +124,35 @@ impl Collector { u8::from((*collector_ptr).announcement) ); - if (*collector_ptr).num_readers == 1 { - (*collector_ptr).num_readers = 0; - if (*collector_ptr).next_epoch_update == 0 { - if (*collector_ptr).has_garbage - || Tag::into_tag(GLOBAL_ROOT.chain_head.load(Relaxed)) == Tag::Second - { - Collector::scan(collector_ptr); - } - (*collector_ptr).next_epoch_update = if (*collector_ptr).has_garbage { - Self::CADENCE / 4 - } else { - Self::CADENCE - }; - } else { - (*collector_ptr).next_epoch_update = - (*collector_ptr).next_epoch_update.saturating_sub(1); + if (*collector_ptr).num_readers != 1 { + (*collector_ptr).num_readers -= 1; + return; + } + + (*collector_ptr).num_readers = 0; + if (*collector_ptr).next_epoch_update == 0 { + if (*collector_ptr).has_garbage + || Tag::into_tag(GLOBAL_ROOT.chain_head.load(Relaxed)) == Tag::Second + { + Collector::scan(collector_ptr); } - // What has happened cannot be observed after the thread setting itself inactive has - // been witnessed. - (*collector_ptr).state.store( - u8::from((*collector_ptr).announcement) | Self::INACTIVE, - Release, - ); + (*collector_ptr).next_epoch_update = if (*collector_ptr).has_garbage { + Self::CADENCE / 4 + } else { + Self::CADENCE + }; } else { - (*collector_ptr).num_readers -= 1; + (*collector_ptr).next_epoch_update = + (*collector_ptr).next_epoch_update.saturating_sub(1); } + + // What has happened cannot be observed after the thread setting itself inactive has + // been witnessed. + (*collector_ptr).state.store( + u8::from((*collector_ptr).announcement) | Self::INACTIVE, + Release, + ); } /// Returns the current epoch. @@ -186,27 +194,35 @@ impl Collector { /// Passes its garbage instances to other threads. #[inline] pub(super) fn pass_garbage() -> bool { - LOCAL_COLLECTOR.with(|local_collector| { - let collector_ptr = local_collector.load(Relaxed); + fn pass_garbage(collector: &AtomicPtr) -> bool { + let collector_ptr = collector.load(Relaxed); + if collector_ptr.is_null() { return true; } + + let old_collector = collector; let collector = unsafe { &*collector_ptr }; + if collector.num_readers != 0 { return false; } + if collector.has_garbage { collector.state.fetch_or(Collector::INVALID, Release); - local_collector.store(ptr::null_mut(), Relaxed); + old_collector.store(ptr::null_mut(), Relaxed); mark_scan_enforced(); } + true - }) + } + + LOCAL_COLLECTOR.with(pass_garbage) } /// Allocates a new [`Collector`]. fn alloc() -> *mut Collector { - let boxed = Box::new(Collector::default()); + let boxed: Box = Box::default(); boxed.state.store(Self::INACTIVE, Relaxed); let ptr = Box::into_raw(boxed); @@ -226,10 +242,12 @@ impl Collector { .compare_exchange_weak(current, new, Release, Relaxed) { current = actual; - } else { - break; + continue; } + + break; } + ptr } @@ -246,6 +264,7 @@ impl Collector { (*collector_ptr).previous_instance_link = (*collector_ptr).current_instance_link.take(); (*collector_ptr).has_garbage = (*collector_ptr).next_instance_link.is_some() || (*collector_ptr).previous_instance_link.is_some(); + while let Some(instance_ptr) = garbage_link.take() { garbage_link = (*instance_ptr.as_ptr()).next_ptr(); let mut guard = ExitGuard::new(garbage_link, |mut garbage_link| { @@ -268,17 +287,16 @@ impl Collector { /// Clears all the garbage instances for dropping the [`Collector`]. 
unsafe fn clear_for_drop(collector_ptr: *mut Collector) { - loop { - let garbage_containers = [ + let mut valid = true; + + while mem::take(&mut valid) { + for mut link in [ (*collector_ptr).previous_instance_link.take(), (*collector_ptr).current_instance_link.take(), (*collector_ptr).next_instance_link.take(), - ]; - if !garbage_containers.iter().any(Option::is_some) { - break; - } - for mut link in garbage_containers { + ] { while let Some(instance_ptr) = link { + valid = true; link = (*instance_ptr.as_ptr()).next_ptr(); drop(Box::from_raw(instance_ptr.as_ptr())); } @@ -292,95 +310,85 @@ impl Collector { // Only one thread that acquires the chain lock is allowed to scan the thread-local // collectors. - let lock_result = Self::lock_chain(); - if let Ok(mut current_collector_ptr) = lock_result { - let _guard = ExitGuard::new((), |()| Self::unlock_chain()); + let Ok(mut current_ptr) = Self::lock_chain() else { + return false; + }; - let known_epoch = (*collector_ptr).state.load(Relaxed); - let mut update_global_epoch = true; - let mut prev_collector_ptr: *mut Collector = ptr::null_mut(); - while !current_collector_ptr.is_null() { - if ptr::eq(collector_ptr, current_collector_ptr) { - prev_collector_ptr = current_collector_ptr; - current_collector_ptr = (*collector_ptr).next_link.load(Relaxed); - continue; - } + let _guard = ExitGuard::new((), |_| Self::unlock_chain()); + let known_epoch = (*collector_ptr).state.load(Relaxed); + let mut previous_ptr: *mut Collector = ptr::null_mut(); - let collector_state = (*current_collector_ptr).state.load(Acquire); - let next_collector_ptr = (*current_collector_ptr).next_link.load(Relaxed); - if (collector_state & Self::INVALID) != 0 { - // The collector is obsolete. - let result = if prev_collector_ptr.is_null() { - GLOBAL_ROOT - .chain_head - .fetch_update(Release, Relaxed, |p| { - let tag = Tag::into_tag(p); - debug_assert!(tag == Tag::First || tag == Tag::Both); - if ptr::eq(Tag::unset_tag(p), current_collector_ptr) { - Some(Tag::update_tag(next_collector_ptr, tag).cast_mut()) - } else { - None - } - }) - .is_ok() - } else { - (*prev_collector_ptr) - .next_link - .store(next_collector_ptr, Relaxed); - true - }; - if result { - Self::collect(collector_ptr, current_collector_ptr); - current_collector_ptr = next_collector_ptr; - continue; - } - } else if (collector_state & Self::INACTIVE) == 0 && collector_state != known_epoch - { - // Not ready for an epoch update. - update_global_epoch = false; - break; - } - prev_collector_ptr = current_collector_ptr; - current_collector_ptr = next_collector_ptr; + while !current_ptr.is_null() { + if collector_ptr == current_ptr { + previous_ptr = current_ptr; + current_ptr = (*collector_ptr).next_link.load(Relaxed); + continue; } - if update_global_epoch { - // It is a new era; a fence is required. - maybe_std_fence(SeqCst); - GLOBAL_ROOT - .epoch - .store(Epoch::from_u8(known_epoch).next().into(), Relaxed); - return true; + let state = (*current_ptr).state.load(Acquire); + let next_ptr = (*current_ptr).next_link.load(Relaxed); + if (state & Self::INVALID) != 0 { + // The collector is obsolete. 
+ let result = if previous_ptr.is_null() { + GLOBAL_ROOT + .chain_head + .fetch_update(Release, Relaxed, |p| { + let tag = Tag::into_tag(p); + debug_assert!(tag == Tag::First || tag == Tag::Both); + (Tag::unset_tag(p) == current_ptr) + .then_some(Tag::update_tag(next_ptr, tag).cast_mut()) + }) + .is_ok() + } else { + (*previous_ptr).next_link.store(next_ptr, Relaxed); + true + }; + + if result { + Self::collect(collector_ptr, current_ptr); + current_ptr = next_ptr; + continue; + } + } else if state != known_epoch { + // Not ready for an epoch update. + return false; } + previous_ptr = current_ptr; + current_ptr = next_ptr; } - false + // It is a new era; a fence is required. + maybe_std_fence(SeqCst); + GLOBAL_ROOT + .epoch + .store(Epoch::from_u8(known_epoch).next().into(), Relaxed); + + true } /// Clears the [`Collector`] chain to if all are invalid. unsafe fn clear_chain() -> bool { let lock_result = Self::lock_chain(); if let Ok(collector_head) = lock_result { - let _guard = ExitGuard::new((), |()| Self::unlock_chain()); + let _guard = ExitGuard::new((), |_| Self::unlock_chain()); let mut current_collector_ptr = collector_head; while !current_collector_ptr.is_null() { if ((*current_collector_ptr).state.load(Acquire) & Self::INVALID) == 0 { return false; } + current_collector_ptr = (*current_collector_ptr).next_link.load(Relaxed); } // Reaching here means that there is no `Ptr` that possibly sees any garbage instances // in those `Collector` instances in the chain. let result = GLOBAL_ROOT.chain_head.fetch_update(Release, Relaxed, |p| { - if Tag::unset_tag(p) == collector_head { + (Tag::unset_tag(p) == collector_head).then(|| { let tag = Tag::into_tag(p); debug_assert!(tag == Tag::First || tag == Tag::Both); - Some(Tag::update_tag(ptr::null::(), tag).cast_mut()) - } else { - None - } + Tag::update_tag(ptr::null::(), tag).cast_mut() + }) }); if result.is_ok() { @@ -390,9 +398,11 @@ impl Collector { drop(Box::from_raw(current_collector_ptr)); current_collector_ptr = next_collector_ptr; } + return true; } } + false } @@ -402,33 +412,31 @@ impl Collector { .chain_head .fetch_update(Acquire, Acquire, |p| { let tag = Tag::into_tag(p); - if tag == Tag::First || tag == Tag::Both { - None - } else { - Some(Tag::update_tag(p, Tag::First).cast_mut()) - } + + (tag == Tag::None || tag == Tag::Second) + .then_some(Tag::update_tag(p, Tag::First).cast_mut()) }) .map(|p| Tag::unset_tag(p).cast_mut()) } /// Unlocks the chain. fn unlock_chain() { - loop { + while { let result = GLOBAL_ROOT.chain_head.fetch_update(Release, Relaxed, |p| { let tag = Tag::into_tag(p); debug_assert!(tag == Tag::First || tag == Tag::Both); + let new_tag = if tag == Tag::First { Tag::None } else { - // Retain the mark. Tag::Second }; + Some(Tag::update_tag(p, new_tag).cast_mut()) }); - if result.is_ok() { - break; - } - } + + result.is_err() + } {} } } @@ -464,25 +472,27 @@ impl CollectorAnchor { impl Drop for CollectorAnchor { #[inline] fn drop(&mut self) { - unsafe { - // `LOCAL_COLLECTOR` is the last thread-local variable to be dropped. 
- LOCAL_COLLECTOR.with(|local_collector| { - let collector_ptr = local_collector.load(Relaxed); + fn drop(collector: &AtomicPtr) { + unsafe { + let collector_ptr = collector.load(Relaxed); if !collector_ptr.is_null() { (*collector_ptr).state.fetch_or(Collector::INVALID, Release); } let mut temp_collector = Collector::default(); temp_collector.state.store(Collector::INACTIVE, Relaxed); - local_collector.store(addr_of_mut!(temp_collector), Release); + collector.store(addr_of_mut!(temp_collector), Release); if !Collector::clear_chain() { mark_scan_enforced(); } Collector::clear_for_drop(addr_of_mut!(temp_collector)); - local_collector.store(ptr::null_mut(), Release); - }); + collector.store(ptr::null_mut(), Release); + } } + + // `LOCAL_COLLECTOR` is the last thread-local variable to be dropped. + LOCAL_COLLECTOR.with(drop); } } @@ -495,6 +505,7 @@ fn mark_scan_enforced() { Tag::First => Tag::Both, Tag::Second | Tag::Both => return None, }; + Some(Tag::update_tag(p, new_tag).cast_mut()) }); } diff --git a/src/exit_guard.rs b/src/exit_guard.rs index c753cbb..a1c283c 100644 --- a/src/exit_guard.rs +++ b/src/exit_guard.rs @@ -21,9 +21,11 @@ impl ExitGuard { impl Drop for ExitGuard { #[inline] fn drop(&mut self) { - if let Some((c, f)) = self.drop_callback.take() { - f(c); - } + let Some((c, f)) = self.drop_callback.take() else { + return; + }; + + f(c); } } diff --git a/src/owned.rs b/src/owned.rs index e2b80d2..ea02867 100644 --- a/src/owned.rs +++ b/src/owned.rs @@ -4,6 +4,7 @@ use std::mem::forget; use std::ops::Deref; use std::panic::UnwindSafe; use std::ptr::{addr_of, NonNull}; +use std::sync::atomic::Ordering::Relaxed; /// [`Owned`] uniquely owns an instance. /// @@ -180,14 +181,7 @@ impl Owned { /// Creates a new [`Owned`] from the given pointer. #[inline] pub(super) fn from(ptr: NonNull>) -> Self { - debug_assert_eq!( - unsafe { - (*ptr.as_ptr()) - .ref_cnt() - .load(std::sync::atomic::Ordering::Relaxed) - }, - 0 - ); + debug_assert_eq!(unsafe { (*ptr.as_ptr()).ref_cnt().load(Relaxed) }, 0); Self { instance_ptr: ptr.as_ptr(), } diff --git a/src/ptr.rs b/src/ptr.rs index 4c3808b..1fa0718 100644 --- a/src/ptr.rs +++ b/src/ptr.rs @@ -44,7 +44,7 @@ impl<'g, T> Ptr<'g, T> { #[inline] #[must_use] pub fn is_null(&self) -> bool { - Tag::unset_tag(self.instance_ptr).is_null() + self.instance_ptr as usize >> 3 == 0 } /// Tries to create a reference to the underlying instance. @@ -64,10 +64,7 @@ impl<'g, T> Ptr<'g, T> { #[must_use] pub fn as_ref(&self) -> Option<&'g T> { let ptr = Tag::unset_tag(self.instance_ptr); - if ptr.is_null() { - return None; - } - unsafe { Some(&*ptr) } + (!ptr.is_null()).then_some(unsafe { &*ptr }) } /// Provides a raw pointer to the instance. @@ -207,13 +204,10 @@ impl<'g, T> Ptr<'g, T> { #[must_use] pub fn get_shared(self) -> Option> { unsafe { - if let Some(ptr) = NonNull::new(Tag::unset_tag(self.instance_ptr).cast_mut()) { - if (*ptr.as_ptr()).try_add_ref(Relaxed) { - return Some(Shared::from(ptr)); - } - } + let ptr = NonNull::new(Tag::unset_tag(self.instance_ptr).cast_mut())?; + let condition = (*ptr.as_ptr()).try_add_ref(Relaxed); + condition.then(|| Shared::from(ptr)) } - None } /// Creates a new [`Ptr`] from a raw pointer. 
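The `Ptr::get_shared` rewrite above behaves as before: the upgrade to a `Shared` succeeds only while `try_add_ref` can take another strong reference. A short usage sketch against the public API shown in this patch's doc examples:

```rust
use sdd::{AtomicShared, Guard, Shared};
use std::sync::atomic::Ordering::Relaxed;

fn main() {
    let atomic: AtomicShared<usize> = AtomicShared::new(7);
    let guard = Guard::new();
    let ptr = atomic.load(Relaxed, &guard);
    // Upgrading the borrowed `Ptr` takes a new strong reference, so the
    // instance outlives the guard that produced the pointer.
    let shared: Option<Shared<usize>> = ptr.get_shared();
    assert_eq!(shared.map(|s| *s), Some(7));
}
```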
diff --git a/src/ref_counted.rs b/src/ref_counted.rs index ac66e2c..f5dc51a 100644 --- a/src/ref_counted.rs +++ b/src/ref_counted.rs @@ -1,7 +1,8 @@ use super::collectible::{Collectible, Link}; use super::collector::Collector; +use std::mem::offset_of; use std::ops::Deref; -use std::ptr::{self, addr_of, NonNull}; +use std::ptr::NonNull; use std::sync::atomic::AtomicUsize; use std::sync::atomic::Ordering::{self, Relaxed}; @@ -42,28 +43,15 @@ impl RefCounted { #[inline] pub(super) fn try_add_ref(&self, order: Ordering) -> bool { self.ref_cnt() - .fetch_update( - order, - order, - |r| { - if r % 2 == 1 { - Some(r + 2) - } else { - None - } - }, - ) + .fetch_update(order, order, |r| (r & 1 == 1).then_some(r + 2)) .is_ok() } /// Returns a mutable reference to the instance if the number of owners is `1`. #[inline] pub(super) fn get_mut_shared(&mut self) -> Option<&mut T> { - if self.ref_cnt().load(Relaxed) == 1 { - Some(&mut self.instance) - } else { - None - } + let value = self.ref_cnt().load(Relaxed); + (value == 1).then_some(&mut self.instance) } /// Returns a mutable reference to the instance if it is uniquely owned. @@ -76,20 +64,7 @@ impl RefCounted { /// Adds a strong reference to the underlying instance. #[inline] pub(super) fn add_ref(&self) { - let mut current = self.ref_cnt().load(Relaxed); - loop { - debug_assert_eq!(current % 2, 1); - debug_assert!(current <= usize::MAX - 2, "reference count overflow"); - match self - .ref_cnt() - .compare_exchange_weak(current, current + 2, Relaxed, Relaxed) - { - Ok(_) => break, - Err(actual) => { - current = actual; - } - } - } + self.ref_cnt().fetch_add(2, Relaxed); } /// Drops a strong reference to the underlying instance. @@ -100,18 +75,13 @@ impl RefCounted { // It does not have to be a load-acquire as everything's synchronized via the global // epoch. let mut current = self.ref_cnt().load(Relaxed); - loop { - debug_assert_ne!(current, 0); - let new = if current <= 1 { 0 } else { current - 2 }; - match self - .ref_cnt() - .compare_exchange_weak(current, new, Relaxed, Relaxed) - { - Ok(_) => break, - Err(actual) => { - current = actual; - } - } + while let Err(updated) = self.ref_cnt().compare_exchange_weak( + current, + current.saturating_sub(2), + Relaxed, + Relaxed, + ) { + current = updated; } current == 1 } @@ -119,11 +89,9 @@ impl RefCounted { /// Returns a pointer to the instance. #[inline] pub(super) fn inst_ptr(self_ptr: *const Self) -> *const T { - if self_ptr.is_null() { - ptr::null() - } else { - unsafe { addr_of!((*self_ptr).instance) } - } + let offset = offset_of!(Self, instance); + let is_valid = !self_ptr.is_null() as usize; + unsafe { self_ptr.byte_add(offset * is_valid) as *const T } } /// Returns a reference to its reference count. diff --git a/src/shared.rs b/src/shared.rs index 7e840ff..31466dd 100644 --- a/src/shared.rs +++ b/src/shared.rs @@ -128,7 +128,7 @@ impl Shared { self.instance_ptr .cast_mut() .as_mut() - .and_then(|r| r.get_mut_shared()) + .and_then(RefCounted::get_mut_shared) } /// Provides a raw pointer to the instance. 
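The simplified `try_add_ref` in `ref_counted.rs` above leans on the counter's parity encoding: a live, shared instance keeps an odd count (`2n - 1` for `n` owners), so a new reference may be taken only while the low bit is set. A minimal sketch of that scheme outside the crate:

```rust
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

// Odd count: still shared, and each additional owner adds two. Even count:
// the instance is uniquely owned or already retired, so upgrading must fail.
fn try_add_ref(cnt: &AtomicUsize) -> bool {
    cnt.fetch_update(Relaxed, Relaxed, |r| (r & 1 == 1).then_some(r + 2))
        .is_ok()
}

fn main() {
    let cnt = AtomicUsize::new(1); // one owner
    assert!(try_add_ref(&cnt)); // two owners: the count is now 3
    cnt.store(0, Relaxed); // retired
    assert!(!try_add_ref(&cnt)); // the upgrade is refused
}
```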
@@ -280,11 +280,7 @@ impl<'g, T> TryFrom<Ptr<'g, T>> for Shared<T> {
 
     #[inline]
     fn try_from(ptr: Ptr<'g, T>) -> Result<Self, Self::Error> {
-        if let Some(shared) = ptr.get_shared() {
-            Ok(shared)
-        } else {
-            Err(ptr)
-        }
+        ptr.get_shared().ok_or(ptr)
     }
 }
 
diff --git a/src/tag.rs b/src/tag.rs
index df10e50..bb60335 100644
--- a/src/tag.rs
+++ b/src/tag.rs
@@ -1,7 +1,8 @@
-use std::cmp::PartialEq;
+use std::{cmp::PartialEq, mem};
 
 /// [`Tag`] is a four-state `Enum` that can be embedded in a pointer as the two least
 /// significant bits of the pointer value.
+#[repr(usize)]
 #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
 pub enum Tag {
     /// None tagged.
@@ -15,64 +16,21 @@ pub enum Tag {
 }
 
 impl Tag {
-    /// Interprets the [`Tag`] as an integer.
-    #[inline]
-    pub(super) const fn value(self) -> usize {
-        match self {
-            Self::None => 0,
-            Self::First => 1,
-            Self::Second => 2,
-            Self::Both => 3,
-        }
-    }
-
     /// Returns the tag embedded in the pointer.
     #[inline]
     pub(super) fn into_tag<P>(ptr: *const P) -> Self {
-        match ((ptr as usize & 1) == 1, (ptr as usize & 2) == 2) {
-            (false, false) => Tag::None,
-            (true, false) => Tag::First,
-            (false, true) => Tag::Second,
-            _ => Tag::Both,
-        }
+        unsafe { mem::transmute(ptr as usize & Tag::Both as usize) }
     }
 
     /// Sets a tag, overwriting any existing tag in the pointer.
     #[inline]
     pub(super) fn update_tag<P>(ptr: *const P, tag: Tag) -> *const P {
-        (((ptr as usize) & (!3)) | tag.value()) as *const P
+        (ptr as usize & !(Tag::Both as usize) | tag as usize) as *const P
     }
 
     /// Returns the pointer with the tag bits erased.
     #[inline]
     pub(super) fn unset_tag<P>
(ptr: *const P) -> *const P { - ((ptr as usize) & (!3)) as *const P - } -} - -impl TryFrom for Tag { - type Error = u8; - - #[inline] - fn try_from(val: u8) -> Result { - match val { - 0 => Ok(Tag::None), - 1 => Ok(Tag::First), - 2 => Ok(Tag::Second), - 3 => Ok(Tag::Both), - _ => Err(val), - } - } -} - -impl From for u8 { - #[inline] - fn from(t: Tag) -> Self { - match t { - Tag::None => 0, - Tag::First => 1, - Tag::Second => 2, - Tag::Both => 3, - } + (ptr as usize & !(Tag::Both as usize)) as *const P } } From 5b4e587f0fdcc0e527c5718256d2e98bc7c5b4ab Mon Sep 17 00:00:00 2001 From: Saltigrade Date: Fri, 4 Apr 2025 14:48:31 +0100 Subject: [PATCH 2/7] Remove LTO by default as it regresses on one of the benchmarks Will look back after and check if I can avoid the regression. --- Cargo.toml | 3 --- 1 file changed, 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 659915d..15b060a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,9 +15,6 @@ keywords = ["concurrent", "epoch", "garbage", "lock-free", "memory"] [workspace] members = [".", "examples"] -[profile.release] -lto = true - [dependencies.loom] version = "0.7.2" optional = true From ae44e556fb08614f0a8bfd0583423542365e4125 Mon Sep 17 00:00:00 2001 From: redactedontop Date: Fri, 4 Apr 2025 20:32:55 +0100 Subject: [PATCH 3/7] Round 2 of code changes + used type-state pattern for both Atomic types. --- Cargo.toml | 5 +- src/atomic.rs | 704 +++++++++++++++++++++++++++++++++++++++++++ src/atomic_owned.rs | 375 ----------------------- src/atomic_shared.rs | 443 --------------------------- src/collectible.rs | 27 +- src/collector.rs | 235 ++++++++------- src/epoch.rs | 31 +- src/guard.rs | 9 +- src/lib.rs | 14 +- src/model.rs | 97 ++++++ src/owned.rs | 14 +- src/ptr.rs | 7 +- src/ref_counted.rs | 15 +- src/shared.rs | 36 +-- src/tag.rs | 32 +- 15 files changed, 1034 insertions(+), 1010 deletions(-) create mode 100644 src/atomic.rs delete mode 100644 src/atomic_owned.rs delete mode 100644 src/atomic_shared.rs create mode 100644 src/model.rs diff --git a/Cargo.toml b/Cargo.toml index 659915d..c634c58 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,7 @@ name = "sdd" description = "Scalable lock-free delayed memory reclaimer" documentation = "https://docs.rs/sdd" -version = "3.0.8" +version = "4.0.0" authors = ["wvwwvwwv "] edition = "2021" rust-version = "1.65.0" @@ -15,9 +15,6 @@ keywords = ["concurrent", "epoch", "garbage", "lock-free", "memory"] [workspace] members = [".", "examples"] -[profile.release] -lto = true - [dependencies.loom] version = "0.7.2" optional = true diff --git a/src/atomic.rs b/src/atomic.rs new file mode 100644 index 0000000..20400ad --- /dev/null +++ b/src/atomic.rs @@ -0,0 +1,704 @@ +use crate::{maybe_std::AtomicPtr, ref_counted::RefCounted, Guard, Owned, Ptr, Shared, Tag}; +use std::{marker::PhantomData, mem, panic::UnwindSafe, ptr, sync::atomic::Ordering}; + +pub(super) mod ownership { + use crate::ref_counted::RefCounted; + + pub(super) trait Type { + const IS_OWNED: bool; + + fn generate_refcounted(instance: T) -> *const RefCounted; + } + + pub struct Owned; + + impl Type for Owned { + const IS_OWNED: bool = true; + + fn generate_refcounted(instance: T) -> *const RefCounted { + RefCounted::new_unique(instance) + } + } + + pub struct Shared; + + impl Type for Shared { + const IS_OWNED: bool = false; + + fn generate_refcounted(instance: T) -> *const RefCounted { + RefCounted::new_shared(instance) + } + } +} + +/// [`AtomicOwned`] owns the underlying instance, and allows users to perform atomic operations +/// 
on the pointer to it. +pub type AtomicOwned = Atomic; + +/// [`AtomicShared`] owns the underlying instance, and allows users to perform atomic operations +/// on the pointer to it. +pub type AtomicShared = Atomic; + +#[allow(private_bounds)] +/// [`Atomic`] owns the underlying instance, and allows users to perform atomic operations +/// on the pointer to it. +pub struct Atomic(AtomicPtr>, PhantomData); + +#[allow(private_bounds)] +impl Atomic { + /// Creates a new [`Atomic`] from an instance of `T`. + /// + /// The type of the instance must be determined at compile-time, must not contain non-static + /// references, and must not be a non-static reference since the instance can, theoretically, + /// live as long as the process. For instance, `struct Disallowed<'l, T>(&'l T)` is not + /// allowed, because an instance of the type cannot outlive `'l` whereas the garbage collector + /// does not guarantee that the instance is dropped within `'l`. + /// + /// # Examples + /// + /// ``` + /// use sdd::{Atomic, AtomicOwned, AtomicShared}; + /// + /// let atomic_owned: AtomicOwned = Atomic::new(10); // OR: AtomicOwned::new(10) + /// let atomic_shared: AtomicOwned = Atomic::new(10); // OR: AtomicOwned::new(10) + /// ``` + #[inline] + pub fn new(instance: T) -> Self { + Atomic( + AtomicPtr::new(O::generate_refcounted(instance).cast_mut()), + PhantomData, + ) + } +} + +#[allow(private_bounds)] +impl Atomic { + /// Creates a null [`Atomic`]. + /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicOwned, AtomicShared}; + /// + /// let atomic_owned: AtomicOwned = AtomicOwned::null(); + /// let atomic_shared: AtomicShared = AtomicShared::null(); + /// ``` + #[inline] + #[must_use] + pub const fn null() -> Self { + Self(AtomicPtr::new(ptr::null_mut()), PhantomData) + } + + /// Returns `true` if the [`Atomic`] is null. + /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicOwned, AtomicShared, Tag}; + /// use std::sync::atomic::Ordering::Relaxed; + /// + /// let atomic_owned: AtomicOwned = AtomicOwned::null(); + /// atomic_owned.update_tag_if(Tag::Both, |p| p.tag() == Tag::None, Relaxed, Relaxed); + /// assert!(atomic_owned.is_null(Relaxed)); + /// + /// let atomic_shared: AtomicShared = AtomicShared::null(); + /// atomic_shared.update_tag_if(Tag::Both, |p| p.tag() == Tag::None, Relaxed, Relaxed); + /// assert!(atomic_shared.is_null(Relaxed)); + /// ``` + #[inline] + #[must_use] + pub fn is_null(&self, order: Ordering) -> bool { + Tag::unset_tag(self.0.load(order)).is_null() + } + + /// Loads a pointer value from the [`Atomic`]. + /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicOwned, AtomicShared, Guard}; + /// use std::sync::atomic::Ordering::Relaxed; + /// + /// let atomic_owned: AtomicOwned = AtomicOwned::new(11); + /// let guard = Guard::new(); + /// let ptr = atomic_owned.load(Relaxed, &guard); + /// assert_eq!(*ptr.as_ref().unwrap(), 11); + /// + /// let atomic_shared: AtomicShared = AtomicShared::new(11); + /// let guard = Guard::new(); + /// let ptr = atomic_shared.load(Relaxed, &guard); + /// assert_eq!(*ptr.as_ref().unwrap(), 11); + /// ``` + #[inline] + #[must_use] + pub fn load<'g>(&self, order: Ordering, _: &'g Guard) -> Ptr<'g, T> { + Ptr::from(self.0.load(order)) + } + + /// Returns its [`Tag`]. 
+ /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicOwned, AtomicShared, Tag}; + /// use std::sync::atomic::Ordering::Relaxed; + /// + /// let atomic_owned: AtomicOwned = AtomicOwned::null(); + /// assert_eq!(atomic_owned.tag(Relaxed), Tag::None); + /// + /// let atomic_shared: AtomicShared = AtomicShared::null(); + /// assert_eq!(atomic_shared.tag(Relaxed), Tag::None); + /// ``` + #[inline] + #[must_use] + pub fn tag(&self, order: Ordering) -> Tag { + Tag::into_tag(self.0.load(order)) + } + + /// Sets a new [`Tag`] if the given condition is met. + /// + /// Returns `true` if the new [`Tag`] has been successfully set. + /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicOwned, AtomicShared, Tag}; + /// use std::sync::atomic::Ordering::Relaxed; + /// + /// let atomic_owned: AtomicOwned = AtomicOwned::null(); + /// assert!(atomic_owned.update_tag_if(Tag::Both, |p| p.tag() == Tag::None, Relaxed, Relaxed)); + /// assert_eq!(atomic_owned.tag(Relaxed), Tag::Both); + /// + /// let atomic_shared: AtomicShared = AtomicShared::null(); + /// assert!(atomic_shared.update_tag_if(Tag::Both, |p| p.tag() == Tag::None, Relaxed, Relaxed)); + /// assert_eq!(atomic_shared.tag(Relaxed), Tag::Both); + /// ``` + #[inline] + pub fn update_tag_if( + &self, + tag: Tag, + mut condition: impl FnMut(Ptr) -> bool, + set_order: Ordering, + fetch_order: Ordering, + ) -> bool { + self.0 + .fetch_update(set_order, fetch_order, |ptr| { + condition(Ptr::from(ptr)).then(|| Tag::update_tag(ptr, tag).cast_mut()) + }) + .is_ok() + } +} + +unsafe impl Send for Atomic {} + +unsafe impl Sync for Atomic {} + +impl UnwindSafe for Atomic {} + +impl AtomicOwned { + /// Creates a new [`AtomicOwned`] from an [`Owned`] of `T`. + /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicOwned, Owned}; + /// + /// let owned: Owned = Owned::new(10); + /// let atomic_owned: AtomicOwned = AtomicOwned::from(owned); + /// ``` + #[inline] + #[must_use] + pub const fn from(r#type: Owned) -> Self { + let ptr = r#type.underlying_ptr(); + mem::forget(r#type); + + Self(AtomicPtr::new(ptr.cast_mut()), PhantomData) + } + + /// Stores the given value into the [`AtomicOwned`] and returns the original value. + /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicOwned, Guard, Owned, Tag}; + /// use std::sync::atomic::Ordering::Relaxed; + /// + /// let atomic_owned: AtomicOwned = AtomicOwned::new(14); + /// let guard = Guard::new(); + /// let (old, tag) = atomic_owned.swap((Some(Owned::new(15)), Tag::Second), Relaxed); + /// assert_eq!(tag, Tag::None); + /// assert_eq!(*old.unwrap(), 14); + /// let (old, tag) = atomic_owned.swap((None, Tag::First), Relaxed); + /// assert_eq!(tag, Tag::Second); + /// assert_eq!(*old.unwrap(), 15); + /// let (old, tag) = atomic_owned.swap((None, Tag::None), Relaxed); + /// assert_eq!(tag, Tag::First); + /// assert!(old.is_none()); + /// ``` + #[inline] + pub fn swap( + &self, + (ptr, tag): (Option>, Tag), + order: Ordering, + ) -> (Option>, Tag) { + let desired = Tag::update_tag( + ptr.as_ref().map_or_else(ptr::null, Owned::underlying_ptr), + tag, + ) + .cast_mut(); + + let previous = self.0.swap(desired, order); + let tag = Tag::into_tag(previous); + let previous_ptr = Tag::unset_tag(previous).cast_mut(); + mem::forget(ptr); + + (ptr::NonNull::new(previous_ptr).map(Owned::from), tag) + } + + /// Stores `new` into the [`AtomicOwned`] if the current value is the same as `current`. + /// + /// Returns the previously held value and the updated [`Ptr`]. 
+ /// + /// # Errors + /// + /// Returns `Err` with the supplied [`Owned`] and the current [`Ptr`]. + /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicOwned, Guard, Owned, Tag}; + /// use std::sync::atomic::Ordering::Relaxed; + /// + /// let atomic_owned: AtomicOwned = AtomicOwned::new(17); + /// let guard = Guard::new(); + /// + /// let mut ptr = atomic_owned.load(Relaxed, &guard); + /// assert_eq!(*ptr.as_ref().unwrap(), 17); + /// + /// atomic_owned.update_tag_if(Tag::Both, |_| true, Relaxed, Relaxed); + /// assert!(atomic_owned.compare_exchange( + /// ptr, (Some(Owned::new(18)), Tag::First), Relaxed, Relaxed, &guard).is_err()); + /// + /// ptr.set_tag(Tag::Both); + /// let old: Owned = atomic_owned.compare_exchange( + /// ptr, + /// (Some(Owned::new(18)), Tag::First), + /// Relaxed, + /// Relaxed, + /// &guard).unwrap().0.unwrap(); + /// assert_eq!(*old, 17); + /// drop(old); + /// + /// assert!(atomic_owned.compare_exchange( + /// ptr, (Some(Owned::new(19)), Tag::None), Relaxed, Relaxed, &guard).is_err()); + /// assert_eq!(*ptr.as_ref().unwrap(), 17); + /// ``` + #[inline] + #[allow(clippy::type_complexity)] + pub fn compare_exchange<'g>( + &self, + current: Ptr<'g, T>, + (ptr, tag): (Option>, Tag), + success: Ordering, + failure: Ordering, + _: &'g Guard, + ) -> Result<(Option>, Ptr<'g, T>), (Option>, Ptr<'g, T>)> { + let desired = Tag::update_tag( + ptr.as_ref().map_or_else(ptr::null, Owned::underlying_ptr), + tag, + ) + .cast_mut(); + + match self.0.compare_exchange( + current.as_underlying_ptr().cast_mut(), + desired, + success, + failure, + ) { + Ok(previous) => { + let previous = ptr::NonNull::new(Tag::unset_tag(previous).cast_mut()); + mem::forget(ptr); + Ok((previous.map(Owned::from), Ptr::from(desired))) + } + Err(actual) => Err((ptr, Ptr::from(actual))), + } + } + + /// Stores `new` into the [`AtomicOwned`] if the current value is the same as `current`. + /// + /// This method is allowed to spuriously fail even when the comparison succeeds. + /// + /// Returns the previously held value and the updated [`Ptr`]. + /// + /// # Errors + /// + /// Returns `Err` with the supplied [`Owned`] and the current [`Ptr`]. 
+ /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicOwned, Guard, Owned, Tag}; + /// use std::sync::atomic::Ordering::Relaxed; + /// + /// let atomic_owned: AtomicOwned = AtomicOwned::new(17); + /// let guard = Guard::new(); + /// + /// let mut ptr = atomic_owned.load(Relaxed, &guard); + /// assert_eq!(*ptr.as_ref().unwrap(), 17); + /// + /// while let Err((_, actual)) = atomic_owned.compare_exchange_weak( + /// ptr, + /// (Some(Owned::new(18)), Tag::First), + /// Relaxed, + /// Relaxed, + /// &guard) { + /// ptr = actual; + /// } + /// + /// let mut ptr = atomic_owned.load(Relaxed, &guard); + /// assert_eq!(*ptr.as_ref().unwrap(), 18); + /// ``` + #[inline] + #[allow(clippy::type_complexity)] + pub fn compare_exchange_weak<'g>( + &self, + current: Ptr<'g, T>, + (ptr, tag): (Option>, Tag), + success: Ordering, + failure: Ordering, + _: &'g Guard, + ) -> Result<(Option>, Ptr<'g, T>), (Option>, Ptr<'g, T>)> { + let desired = Tag::update_tag( + ptr.as_ref().map_or_else(ptr::null, Owned::underlying_ptr), + tag, + ) + .cast_mut(); + + match self.0.compare_exchange_weak( + current.as_underlying_ptr().cast_mut(), + desired, + success, + failure, + ) { + Ok(previous) => { + let previous = ptr::NonNull::new(Tag::unset_tag(previous).cast_mut()); + mem::forget(ptr); + Ok((previous.map(Owned::from), Ptr::from(desired))) + } + Err(actual) => Err((ptr, Ptr::from(actual))), + } + } + + /// Converts `self` into an [`Owned`]. + /// + /// Returns `None` if `self` did not own an instance. + /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicOwned, Owned}; + /// use std::sync::atomic::Ordering::Relaxed; + /// + /// let atomic_owned: AtomicOwned = AtomicOwned::new(55); + /// let owned: Owned = atomic_owned.into_owned(Relaxed).unwrap(); + /// assert_eq!(*owned, 55); + /// ``` + #[inline] + #[must_use] + pub fn into_owned(self, order: Ordering) -> Option> { + let ptr = self.0.swap(ptr::null_mut(), order); + ptr::NonNull::new(Tag::unset_tag(ptr).cast_mut()).map(Owned::from) + } +} + +impl AtomicShared { + /// Creates a new [`AtomicShared`] from a [`Shared`] of `T`. + /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicShared, Shared}; + /// + /// let shared: Shared = Shared::new(10); + /// let atomic_shared: AtomicShared = AtomicShared::from(shared); + /// ``` + #[inline] + #[must_use] + pub const fn from(r#type: Shared) -> Self { + let ptr = r#type.underlying_ptr(); + mem::forget(r#type); + + Self(AtomicPtr::new(ptr.cast_mut()), PhantomData) + } + + /// Stores the given value into the [`AtomicShared`] and returns the original value. 
+ /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicShared, Guard, Shared, Tag}; + /// use std::sync::atomic::Ordering::Relaxed; + /// + /// let atomic_shared: AtomicShared = AtomicShared::new(14); + /// let guard = Guard::new(); + /// let (old, tag) = atomic_shared.swap((Some(Shared::new(15)), Tag::Second), Relaxed); + /// assert_eq!(tag, Tag::None); + /// assert_eq!(*old.unwrap(), 14); + /// let (old, tag) = atomic_shared.swap((None, Tag::First), Relaxed); + /// assert_eq!(tag, Tag::Second); + /// assert_eq!(*old.unwrap(), 15); + /// let (old, tag) = atomic_shared.swap((None, Tag::None), Relaxed); + /// assert_eq!(tag, Tag::First); + /// assert!(old.is_none()); + /// ``` + #[inline] + pub fn swap( + &self, + (ptr, tag): (Option>, Tag), + order: Ordering, + ) -> (Option>, Tag) { + let desired = Tag::update_tag( + ptr.as_ref().map_or_else(ptr::null, Shared::underlying_ptr), + tag, + ) + .cast_mut(); + + let previous = self.0.swap(desired, order); + let tag = Tag::into_tag(previous); + let previous_ptr = Tag::unset_tag(previous).cast_mut(); + mem::forget(ptr); + + (ptr::NonNull::new(previous_ptr).map(Shared::from), tag) + } + + /// Stores `new` into the [`AtomicShared`] if the current value is the same as `current`. + /// + /// Returns the previously held value and the updated [`Ptr`]. + /// + /// # Errors + /// + /// Returns `Err` with the supplied [`Shared`] and the current [`Ptr`]. + /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicShared, Guard, Shared, Tag}; + /// use std::sync::atomic::Ordering::Relaxed; + /// + /// let atomic_shared: AtomicShared = AtomicShared::new(17); + /// let guard = Guard::new(); + /// + /// let mut ptr = atomic_shared.load(Relaxed, &guard); + /// assert_eq!(*ptr.as_ref().unwrap(), 17); + /// + /// atomic_shared.update_tag_if(Tag::Both, |_| true, Relaxed, Relaxed); + /// assert!(atomic_shared.compare_exchange( + /// ptr, (Some(Shared::new(18)), Tag::First), Relaxed, Relaxed, &guard).is_err()); + /// + /// ptr.set_tag(Tag::Both); + /// let old: Shared = atomic_shared.compare_exchange( + /// ptr, + /// (Some(Shared::new(18)), Tag::First), + /// Relaxed, + /// Relaxed, + /// &guard).unwrap().0.unwrap(); + /// assert_eq!(*old, 17); + /// drop(old); + /// + /// assert!(atomic_shared.compare_exchange( + /// ptr, (Some(Shared::new(19)), Tag::None), Relaxed, Relaxed, &guard).is_err()); + /// assert_eq!(*ptr.as_ref().unwrap(), 17); + /// ``` + #[inline] + #[allow(clippy::type_complexity)] + pub fn compare_exchange<'g>( + &self, + current: Ptr<'g, T>, + (ptr, tag): (Option>, Tag), + success: Ordering, + failure: Ordering, + _: &'g Guard, + ) -> Result<(Option>, Ptr<'g, T>), (Option>, Ptr<'g, T>)> { + let desired = Tag::update_tag( + ptr.as_ref().map_or_else(ptr::null, Shared::underlying_ptr), + tag, + ) + .cast_mut(); + + match self.0.compare_exchange( + current.as_underlying_ptr().cast_mut(), + desired, + success, + failure, + ) { + Ok(previous) => { + let previous = ptr::NonNull::new(Tag::unset_tag(previous).cast_mut()); + mem::forget(ptr); + Ok((previous.map(Shared::from), Ptr::from(desired))) + } + Err(actual) => Err((ptr, Ptr::from(actual))), + } + } + + /// Stores `new` into the [`AtomicShared`] if the current value is the same as `current`. + /// + /// This method is allowed to spuriously fail even when the comparison succeeds. + /// + /// Returns the previously held value and the updated [`Ptr`]. + /// + /// # Errors + /// + /// Returns `Err` with the supplied [`Shared`] and the current [`Ptr`]. 
+ /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicShared, Guard, Shared, Tag}; + /// use std::sync::atomic::Ordering::Relaxed; + /// + /// let atomic_shared: AtomicShared = AtomicShared::new(17); + /// let guard = Guard::new(); + /// + /// let mut ptr = atomic_shared.load(Relaxed, &guard); + /// assert_eq!(*ptr.as_ref().unwrap(), 17); + /// + /// while let Err((_, actual)) = atomic_shared.compare_exchange_weak( + /// ptr, + /// (Some(Shared::new(18)), Tag::First), + /// Relaxed, + /// Relaxed, + /// &guard) { + /// ptr = actual; + /// } + /// + /// let mut ptr = atomic_shared.load(Relaxed, &guard); + /// assert_eq!(*ptr.as_ref().unwrap(), 18); + /// ``` + #[inline] + #[allow(clippy::type_complexity)] + pub fn compare_exchange_weak<'g>( + &self, + current: Ptr<'g, T>, + (ptr, tag): (Option>, Tag), + success: Ordering, + failure: Ordering, + _: &'g Guard, + ) -> Result<(Option>, Ptr<'g, T>), (Option>, Ptr<'g, T>)> { + let desired = Tag::update_tag( + ptr.as_ref().map_or_else(ptr::null, Shared::underlying_ptr), + tag, + ) + .cast_mut(); + + match self.0.compare_exchange_weak( + current.as_underlying_ptr().cast_mut(), + desired, + success, + failure, + ) { + Ok(previous) => { + let previous = ptr::NonNull::new(Tag::unset_tag(previous).cast_mut()); + mem::forget(ptr); + Ok((previous.map(Shared::from), Ptr::from(desired))) + } + Err(actual) => Err((ptr, Ptr::from(actual))), + } + } + + /// Clones `self` including tags. + /// + /// If `self` is not supposed to be an `AtomicShared::null`, this will never return an + /// `AtomicShared::null`. + /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicShared, Guard}; + /// use std::sync::atomic::Ordering::Relaxed; + /// + /// let atomic_shared: AtomicShared = AtomicShared::new(59); + /// let guard = Guard::new(); + /// let atomic_shared_clone = atomic_shared.clone(Relaxed, &guard); + /// let ptr = atomic_shared_clone.load(Relaxed, &guard); + /// assert_eq!(*ptr.as_ref().unwrap(), 59); + /// ``` + #[inline] + #[must_use] + pub fn clone(&self, order: Ordering, guard: &Guard) -> AtomicShared { + self.get_shared(order, guard) + .map_or_else(Self::null, Self::from) + } + + /// Tries to create a [`Shared`] out of `self`. + /// + /// If `self` is not supposed to be an `AtomicShared::null`, this will never return `None`. + /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicShared, Guard, Shared}; + /// use std::sync::atomic::Ordering::Relaxed; + /// + /// let atomic_shared: AtomicShared = AtomicShared::new(47); + /// let guard = Guard::new(); + /// let shared: Shared = atomic_shared.get_shared(Relaxed, &guard).unwrap(); + /// assert_eq!(*shared, 47); + /// ``` + #[inline] + #[must_use] + pub fn get_shared(&self, order: Ordering, _: &Guard) -> Option> { + let mut ptr = Tag::unset_tag(self.0.load(order)); + + while !ptr.is_null() { + if unsafe { (*ptr).try_add_ref(Ordering::Acquire) } { + return ptr::NonNull::new(ptr.cast_mut()).map(Shared::from); + } + + ptr = Tag::unset_tag(self.0.load(order)); + } + + None + } + + /// Converts `self` into a [`Shared`]. + /// + /// Returns `None` if `self` did not own an instance. 
+ /// + /// # Examples + /// + /// ``` + /// use sdd::{AtomicShared, Shared}; + /// use std::sync::atomic::Ordering::Relaxed; + /// + /// let atomic_shared: AtomicShared = AtomicShared::new(55); + /// let owned: Shared = atomic_shared.into_shared(Relaxed).unwrap(); + /// assert_eq!(*owned, 55); + /// ``` + #[inline] + #[must_use] + pub fn into_shared(self, order: Ordering) -> Option> { + let ptr = self.0.swap(ptr::null_mut(), order); + ptr::NonNull::new(Tag::unset_tag(ptr).cast_mut()).map(Shared::from) + } +} + +impl Clone for AtomicShared { + #[inline] + fn clone(&self) -> AtomicShared { + self.clone(Ordering::Acquire, &Guard::new()) + } +} + +impl Drop for Atomic { + #[inline] + fn drop(&mut self) { + let ptr = Tag::unset_tag(self.0.load(Ordering::Relaxed)); + if ptr.is_null() { + return; + } + + if O::IS_OWNED || unsafe { (*ptr).drop_ref() } { + RefCounted::pass_to_collector(ptr.cast_mut()); + } + } +} diff --git a/src/atomic_owned.rs b/src/atomic_owned.rs deleted file mode 100644 index f5ccc66..0000000 --- a/src/atomic_owned.rs +++ /dev/null @@ -1,375 +0,0 @@ -use super::maybe_std::AtomicPtr; -use super::ref_counted::RefCounted; -use super::{Guard, Owned, Ptr, Tag}; -#[cfg(feature = "loom")] -use loom::sync::atomic::AtomicPtr as AtomicPtrType; -use std::mem::forget; -use std::panic::UnwindSafe; -use std::ptr::{null, null_mut, NonNull}; -#[cfg(not(feature = "loom"))] -use std::sync::atomic::AtomicPtr as AtomicPtrType; -use std::sync::atomic::Ordering::{self, Relaxed}; - -/// [`AtomicOwned`] owns the underlying instance, and allows users to perform atomic operations -/// on the pointer to it. -#[derive(Debug)] -pub struct AtomicOwned { - instance_ptr: AtomicPtr>, -} - -/// A pair of [`Owned`] and [`Ptr`] of the same type. -pub type OwnedPtrPair<'g, T> = (Option>, Ptr<'g, T>); - -impl AtomicOwned { - /// Creates a new [`AtomicOwned`] from an instance of `T`. - /// - /// The type of the instance must be determined at compile-time, must not contain non-static - /// references, and must not be a non-static reference since the instance can, theoretically, - /// live as long as the process. For instance, `struct Disallowed<'l, T>(&'l T)` is not - /// allowed, because an instance of the type cannot outlive `'l` whereas the garbage collector - /// does not guarantee that the instance is dropped within `'l`. - /// - /// # Examples - /// - /// ``` - /// use sdd::AtomicOwned; - /// - /// let atomic_owned: AtomicOwned = AtomicOwned::new(10); - /// ``` - #[inline] - pub fn new(t: T) -> Self { - Self { - instance_ptr: AtomicPtr::new(RefCounted::new_unique(t).cast_mut()), - } - } -} - -impl AtomicOwned { - /// Creates a new [`AtomicOwned`] from an [`Owned`] of `T`. - /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicOwned, Owned}; - /// - /// let owned: Owned = Owned::new(10); - /// let atomic_owned: AtomicOwned = AtomicOwned::from(owned); - /// ``` - #[inline] - #[must_use] - pub const fn from(owned: Owned) -> Self { - let ptr = owned.underlying_ptr(); - forget(owned); - let instance_ptr: AtomicPtrType> = AtomicPtr::new(ptr.cast_mut()); - Self { instance_ptr } - } - - /// Creates a null [`AtomicOwned`]. - /// - /// # Examples - /// - /// ``` - /// use sdd::AtomicOwned; - /// - /// let atomic_owned: AtomicOwned = AtomicOwned::null(); - /// ``` - #[inline] - #[must_use] - pub const fn null() -> Self { - let instance_ptr: AtomicPtrType> = AtomicPtr::new(null_mut()); - Self { instance_ptr } - } - - /// Returns `true` if the [`AtomicOwned`] is null. 
- /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicOwned, Tag}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_owned: AtomicOwned = AtomicOwned::null(); - /// atomic_owned.update_tag_if(Tag::Both, |p| p.tag() == Tag::None, Relaxed, Relaxed); - /// assert!(atomic_owned.is_null(Relaxed)); - /// ``` - #[inline] - #[must_use] - pub fn is_null(&self, order: Ordering) -> bool { - Tag::unset_tag(self.instance_ptr.load(order)).is_null() - } - - /// Loads a pointer value from the [`AtomicOwned`]. - /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicOwned, Guard}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_owned: AtomicOwned = AtomicOwned::new(11); - /// let guard = Guard::new(); - /// let ptr = atomic_owned.load(Relaxed, &guard); - /// assert_eq!(*ptr.as_ref().unwrap(), 11); - /// ``` - #[inline] - #[must_use] - pub fn load<'g>(&self, order: Ordering, _guard: &'g Guard) -> Ptr<'g, T> { - Ptr::from(self.instance_ptr.load(order)) - } - - /// Stores the given value into the [`AtomicOwned`] and returns the original value. - /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicOwned, Guard, Owned, Tag}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_owned: AtomicOwned = AtomicOwned::new(14); - /// let guard = Guard::new(); - /// let (old, tag) = atomic_owned.swap((Some(Owned::new(15)), Tag::Second), Relaxed); - /// assert_eq!(tag, Tag::None); - /// assert_eq!(*old.unwrap(), 14); - /// let (old, tag) = atomic_owned.swap((None, Tag::First), Relaxed); - /// assert_eq!(tag, Tag::Second); - /// assert_eq!(*old.unwrap(), 15); - /// let (old, tag) = atomic_owned.swap((None, Tag::None), Relaxed); - /// assert_eq!(tag, Tag::First); - /// assert!(old.is_none()); - /// ``` - #[inline] - pub fn swap(&self, new: (Option>, Tag), order: Ordering) -> (Option>, Tag) { - let desired = Tag::update_tag( - new.0.as_ref().map_or_else(null, Owned::underlying_ptr), - new.1, - ) - .cast_mut(); - - let prev = self.instance_ptr.swap(desired, order); - let tag = Tag::into_tag(prev); - let prev_ptr = Tag::unset_tag(prev).cast_mut(); - forget(new); - - (NonNull::new(prev_ptr).map(Owned::from), tag) - } - - /// Returns its [`Tag`]. - /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicOwned, Tag}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_owned: AtomicOwned = AtomicOwned::null(); - /// assert_eq!(atomic_owned.tag(Relaxed), Tag::None); - /// ``` - #[inline] - #[must_use] - pub fn tag(&self, order: Ordering) -> Tag { - Tag::into_tag(self.instance_ptr.load(order)) - } - - /// Sets a new [`Tag`] if the given condition is met. - /// - /// Returns `true` if the new [`Tag`] has been successfully set. - /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicOwned, Tag}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_owned: AtomicOwned = AtomicOwned::null(); - /// assert!(atomic_owned.update_tag_if(Tag::Both, |p| p.tag() == Tag::None, Relaxed, Relaxed)); - /// assert_eq!(atomic_owned.tag(Relaxed), Tag::Both); - /// ``` - #[inline] - pub fn update_tag_if) -> bool>( - &self, - tag: Tag, - mut condition: F, - set_order: Ordering, - fetch_order: Ordering, - ) -> bool { - self.instance_ptr - .fetch_update(set_order, fetch_order, |ptr| { - condition(Ptr::from(ptr)).then_some(Tag::update_tag(ptr, tag).cast_mut()) - }) - .is_ok() - } - - /// Stores `new` into the [`AtomicOwned`] if the current value is the same as `current`. 
- /// - /// Returns the previously held value and the updated [`Ptr`]. - /// - /// # Errors - /// - /// Returns `Err` with the supplied [`Owned`] and the current [`Ptr`]. - /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicOwned, Guard, Owned, Tag}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_owned: AtomicOwned = AtomicOwned::new(17); - /// let guard = Guard::new(); - /// - /// let mut ptr = atomic_owned.load(Relaxed, &guard); - /// assert_eq!(*ptr.as_ref().unwrap(), 17); - /// - /// atomic_owned.update_tag_if(Tag::Both, |_| true, Relaxed, Relaxed); - /// assert!(atomic_owned.compare_exchange( - /// ptr, (Some(Owned::new(18)), Tag::First), Relaxed, Relaxed, &guard).is_err()); - /// - /// ptr.set_tag(Tag::Both); - /// let old: Owned = atomic_owned.compare_exchange( - /// ptr, (Some(Owned::new(18)), Tag::First), Relaxed, Relaxed, &guard).unwrap().0.unwrap(); - /// assert_eq!(*old, 17); - /// drop(old); - /// - /// assert!(atomic_owned.compare_exchange( - /// ptr, (Some(Owned::new(19)), Tag::None), Relaxed, Relaxed, &guard).is_err()); - /// assert_eq!(*ptr.as_ref().unwrap(), 17); - /// ``` - #[inline] - pub fn compare_exchange<'g>( - &self, - current: Ptr<'g, T>, - new: (Option>, Tag), - success: Ordering, - failure: Ordering, - _guard: &'g Guard, - ) -> Result, OwnedPtrPair<'g, T>> { - let desired = Tag::update_tag( - new.0.as_ref().map_or_else(null, Owned::underlying_ptr), - new.1, - ) - .cast_mut(); - - match self.instance_ptr.compare_exchange( - current.as_underlying_ptr().cast_mut(), - desired, - success, - failure, - ) { - Ok(prev) => { - let prev_owned = NonNull::new(Tag::unset_tag(prev).cast_mut()).map(Owned::from); - forget(new); - Ok((prev_owned, Ptr::from(desired))) - } - Err(actual) => Err((new.0, Ptr::from(actual))), - } - } - - /// Stores `new` into the [`AtomicOwned`] if the current value is the same as `current`. - /// - /// This method is allowed to spuriously fail even when the comparison succeeds. - /// - /// Returns the previously held value and the updated [`Ptr`]. - /// - /// # Errors - /// - /// Returns `Err` with the supplied [`Owned`] and the current [`Ptr`]. - /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicOwned, Owned, Guard, Tag}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_owned: AtomicOwned = AtomicOwned::new(17); - /// let guard = Guard::new(); - /// - /// let mut ptr = atomic_owned.load(Relaxed, &guard); - /// assert_eq!(*ptr.as_ref().unwrap(), 17); - /// - /// while let Err((_, actual)) = atomic_owned.compare_exchange_weak( - /// ptr, - /// (Some(Owned::new(18)), Tag::First), - /// Relaxed, - /// Relaxed, - /// &guard) { - /// ptr = actual; - /// } - /// - /// let mut ptr = atomic_owned.load(Relaxed, &guard); - /// assert_eq!(*ptr.as_ref().unwrap(), 18); - /// ``` - #[inline] - pub fn compare_exchange_weak<'g>( - &self, - current: Ptr<'g, T>, - new: (Option>, Tag), - success: Ordering, - failure: Ordering, - _guard: &'g Guard, - ) -> Result, OwnedPtrPair<'g, T>> { - let desired = Tag::update_tag( - new.0.as_ref().map_or_else(null, Owned::underlying_ptr), - new.1, - ) - .cast_mut(); - - match self.instance_ptr.compare_exchange_weak( - current.as_underlying_ptr().cast_mut(), - desired, - success, - failure, - ) { - Ok(prev) => { - let prev_owned = NonNull::new(Tag::unset_tag(prev).cast_mut()).map(Owned::from); - forget(new); - Ok((prev_owned, Ptr::from(desired))) - } - Err(actual) => Err((new.0, Ptr::from(actual))), - } - } - - /// Converts `self` into an [`Owned`]. 
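The `compare_exchange_weak` doc-test above is the canonical weak-CAS retry loop: a weak exchange may fail spuriously even when the comparison would succeed, so the loop feeds the witnessed value back in and retries. The same idiom on a plain `AtomicUsize`, as a sketch:

```
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

fn increment(counter: &AtomicUsize) {
    let mut current = counter.load(Relaxed);
    // A weak CAS may fail spuriously, so retry with the witnessed value.
    while let Err(actual) =
        counter.compare_exchange_weak(current, current + 1, Relaxed, Relaxed)
    {
        current = actual;
    }
}

fn main() {
    let c = AtomicUsize::new(0);
    increment(&c);
    assert_eq!(c.load(Relaxed), 1);
}
```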
- /// - /// Returns `None` if `self` did not own an instance. - /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicOwned, Owned}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_owned: AtomicOwned = AtomicOwned::new(55); - /// let owned: Owned = atomic_owned.into_owned(Relaxed).unwrap(); - /// assert_eq!(*owned, 55); - /// ``` - #[inline] - #[must_use] - pub fn into_owned(self, order: Ordering) -> Option> { - let ptr = self.instance_ptr.swap(null_mut(), order); - NonNull::new(Tag::unset_tag(ptr).cast_mut()).map(Owned::from) - } -} - -impl Default for AtomicOwned { - #[inline] - fn default() -> Self { - Self::null() - } -} - -impl Drop for AtomicOwned { - #[inline] - fn drop(&mut self) { - let Some(ptr) = NonNull::new(Tag::unset_tag(self.instance_ptr.load(Relaxed)).cast_mut()) - else { - return; - }; - - drop(Owned::from(ptr)); - } -} - -unsafe impl Send for AtomicOwned {} - -unsafe impl Sync for AtomicOwned {} - -impl UnwindSafe for AtomicOwned {} diff --git a/src/atomic_shared.rs b/src/atomic_shared.rs deleted file mode 100644 index f2e4554..0000000 --- a/src/atomic_shared.rs +++ /dev/null @@ -1,443 +0,0 @@ -use super::maybe_std::AtomicPtr; -use super::ref_counted::RefCounted; -use super::{Guard, Ptr, Shared, Tag}; -#[cfg(feature = "loom")] -use loom::sync::atomic::AtomicPtr as AtomicPtrType; -use std::mem::forget; -use std::panic::UnwindSafe; -use std::ptr::{null, null_mut, NonNull}; -#[cfg(not(feature = "loom"))] -use std::sync::atomic::AtomicPtr as AtomicPtrType; -use std::sync::atomic::Ordering::{self, Acquire, Relaxed}; - -/// [`AtomicShared`] owns the underlying instance, and allows users to perform atomic operations -/// on the pointer to it. -#[derive(Debug)] -pub struct AtomicShared { - instance_ptr: AtomicPtr>, -} - -/// A pair of [`Shared`] and [`Ptr`] of the same type. -pub type SharedPtrPair<'g, T> = (Option>, Ptr<'g, T>); - -impl AtomicShared { - /// Creates a new [`AtomicShared`] from an instance of `T`. - /// - /// The type of the instance must be determined at compile-time, must not contain non-static - /// references, and must not be a non-static reference since the instance can, theoretically, - /// live as long as the process. For instance, `struct Disallowed<'l, T>(&'l T)` is not - /// allowed, because an instance of the type cannot outlive `'l` whereas the garbage collector - /// does not guarantee that the instance is dropped within `'l`. - /// - /// # Examples - /// - /// ``` - /// use sdd::AtomicShared; - /// - /// let atomic_shared: AtomicShared = AtomicShared::new(10); - /// ``` - #[inline] - pub fn new(t: T) -> Self { - Self { - instance_ptr: AtomicPtr::new(RefCounted::new_shared(t).cast_mut()), - } - } -} - -impl AtomicShared { - /// Creates a new [`AtomicShared`] from a [`Shared`] of `T`. - /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicShared, Shared}; - /// - /// let shared: Shared = Shared::new(10); - /// let atomic_shared: AtomicShared = AtomicShared::from(shared); - /// ``` - #[inline] - #[must_use] - pub const fn from(shared: Shared) -> Self { - let ptr = shared.underlying_ptr(); - forget(shared); - let instance_ptr: AtomicPtrType> = AtomicPtr::new(ptr.cast_mut()); - Self { instance_ptr } - } - - /// Creates a null [`AtomicShared`]. 
- /// - /// # Examples - /// - /// ``` - /// use sdd::AtomicShared; - /// - /// let atomic_shared: AtomicShared = AtomicShared::null(); - /// ``` - #[inline] - #[must_use] - pub const fn null() -> Self { - let instance_ptr: AtomicPtrType> = AtomicPtr::new(null_mut()); - Self { instance_ptr } - } - - /// Returns `true` if the [`AtomicShared`] is null. - /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicShared, Tag}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_shared: AtomicShared = AtomicShared::null(); - /// atomic_shared.update_tag_if(Tag::Both, |p| p.tag() == Tag::None, Relaxed, Relaxed); - /// assert!(atomic_shared.is_null(Relaxed)); - /// ``` - #[inline] - #[must_use] - pub fn is_null(&self, order: Ordering) -> bool { - Tag::unset_tag(self.instance_ptr.load(order)).is_null() - } - - /// Loads a pointer value from the [`AtomicShared`]. - /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicShared, Guard}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_shared: AtomicShared = AtomicShared::new(11); - /// let guard = Guard::new(); - /// let ptr = atomic_shared.load(Relaxed, &guard); - /// assert_eq!(*ptr.as_ref().unwrap(), 11); - /// ``` - #[inline] - #[must_use] - pub fn load<'g>(&self, order: Ordering, _guard: &'g Guard) -> Ptr<'g, T> { - Ptr::from(self.instance_ptr.load(order)) - } - - /// Stores the given value into the [`AtomicShared`] and returns the original value. - /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicShared, Guard, Shared, Tag}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_shared: AtomicShared = AtomicShared::new(14); - /// let guard = Guard::new(); - /// let (old, tag) = atomic_shared.swap((Some(Shared::new(15)), Tag::Second), Relaxed); - /// assert_eq!(tag, Tag::None); - /// assert_eq!(*old.unwrap(), 14); - /// let (old, tag) = atomic_shared.swap((None, Tag::First), Relaxed); - /// assert_eq!(tag, Tag::Second); - /// assert_eq!(*old.unwrap(), 15); - /// let (old, tag) = atomic_shared.swap((None, Tag::None), Relaxed); - /// assert_eq!(tag, Tag::First); - /// assert!(old.is_none()); - /// ``` - #[inline] - pub fn swap(&self, new: (Option>, Tag), order: Ordering) -> (Option>, Tag) { - let desired = Tag::update_tag( - new.0.as_ref().map_or_else(null, Shared::underlying_ptr), - new.1, - ) - .cast_mut(); - - let prev = self.instance_ptr.swap(desired, order); - let tag = Tag::into_tag(prev); - let prev_ptr = Tag::unset_tag(prev).cast_mut(); - forget(new); - - (NonNull::new(prev_ptr).map(Shared::from), tag) - } - - /// Returns its [`Tag`]. - /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicShared, Tag}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_shared: AtomicShared = AtomicShared::null(); - /// assert_eq!(atomic_shared.tag(Relaxed), Tag::None); - /// ``` - #[inline] - #[must_use] - pub fn tag(&self, order: Ordering) -> Tag { - Tag::into_tag(self.instance_ptr.load(order)) - } - - /// Sets a new [`Tag`] if the given condition is met. - /// - /// Returns `true` if the new [`Tag`] has been successfully set. 
- /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicShared, Tag}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_shared: AtomicShared = AtomicShared::null(); - /// assert!(atomic_shared.update_tag_if(Tag::Both, |p| p.tag() == Tag::None, Relaxed, Relaxed)); - /// assert_eq!(atomic_shared.tag(Relaxed), Tag::Both); - /// ``` - #[inline] - pub fn update_tag_if) -> bool>( - &self, - tag: Tag, - mut condition: F, - set_order: Ordering, - fetch_order: Ordering, - ) -> bool { - self.instance_ptr - .fetch_update(set_order, fetch_order, |ptr| { - condition(Ptr::from(ptr)).then_some(Tag::update_tag(ptr, tag).cast_mut()) - }) - .is_ok() - } - - /// Stores `new` into the [`AtomicShared`] if the current value is the same as `current`. - /// - /// Returns the previously held value and the updated [`Ptr`]. - /// - /// # Errors - /// - /// Returns `Err` with the supplied [`Shared`] and the current [`Ptr`]. - /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicShared, Guard, Shared, Tag}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_shared: AtomicShared = AtomicShared::new(17); - /// let guard = Guard::new(); - /// - /// let mut ptr = atomic_shared.load(Relaxed, &guard); - /// assert_eq!(*ptr.as_ref().unwrap(), 17); - /// - /// atomic_shared.update_tag_if(Tag::Both, |_| true, Relaxed, Relaxed); - /// assert!(atomic_shared.compare_exchange( - /// ptr, (Some(Shared::new(18)), Tag::First), Relaxed, Relaxed, &guard).is_err()); - /// - /// ptr.set_tag(Tag::Both); - /// let old: Shared = atomic_shared.compare_exchange( - /// ptr, - /// (Some(Shared::new(18)), Tag::First), - /// Relaxed, - /// Relaxed, - /// &guard).unwrap().0.unwrap(); - /// assert_eq!(*old, 17); - /// drop(old); - /// - /// assert!(atomic_shared.compare_exchange( - /// ptr, (Some(Shared::new(19)), Tag::None), Relaxed, Relaxed, &guard).is_err()); - /// assert_eq!(*ptr.as_ref().unwrap(), 17); - /// ``` - #[inline] - pub fn compare_exchange<'g>( - &self, - current: Ptr<'g, T>, - new: (Option>, Tag), - success: Ordering, - failure: Ordering, - _guard: &'g Guard, - ) -> Result, SharedPtrPair<'g, T>> { - let desired = Tag::update_tag( - new.0.as_ref().map_or_else(null, Shared::underlying_ptr), - new.1, - ) - .cast_mut(); - - match self.instance_ptr.compare_exchange( - current.as_underlying_ptr().cast_mut(), - desired, - success, - failure, - ) { - Ok(prev) => { - let prev_shared = NonNull::new(Tag::unset_tag(prev).cast_mut()).map(Shared::from); - forget(new); - Ok((prev_shared, Ptr::from(desired))) - } - Err(actual) => Err((new.0, Ptr::from(actual))), - } - } - - /// Stores `new` into the [`AtomicShared`] if the current value is the same as `current`. - /// - /// This method is allowed to spuriously fail even when the comparison succeeds. - /// - /// Returns the previously held value and the updated [`Ptr`]. - /// - /// # Errors - /// - /// Returns `Err` with the supplied [`Shared`] and the current [`Ptr`]. 
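`update_tag_if` is built on `fetch_update`, which re-runs the caller's closure until its CAS succeeds or the closure vetoes the update by returning `None`; the refactor above merely folds the `if`/`else` into `then_some`. A standalone sketch of that conditional-update shape:

```
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

// Set the low bit only while the value is still even; `fetch_update`
// retries the closure on contention and gives up when it returns `None`.
fn set_flag_if_even(v: &AtomicUsize) -> bool {
    v.fetch_update(Relaxed, Relaxed, |cur| (cur % 2 == 0).then_some(cur | 1))
        .is_ok()
}

fn main() {
    let v = AtomicUsize::new(4);
    assert!(set_flag_if_even(&v));
    assert_eq!(v.load(Relaxed), 5);
    assert!(!set_flag_if_even(&v)); // now odd, so the condition fails
}
```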
- /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicShared, Guard, Shared, Tag}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_shared: AtomicShared = AtomicShared::new(17); - /// let guard = Guard::new(); - /// - /// let mut ptr = atomic_shared.load(Relaxed, &guard); - /// assert_eq!(*ptr.as_ref().unwrap(), 17); - /// - /// while let Err((_, actual)) = atomic_shared.compare_exchange_weak( - /// ptr, - /// (Some(Shared::new(18)), Tag::First), - /// Relaxed, - /// Relaxed, - /// &guard) { - /// ptr = actual; - /// } - /// - /// let mut ptr = atomic_shared.load(Relaxed, &guard); - /// assert_eq!(*ptr.as_ref().unwrap(), 18); - /// ``` - #[inline] - pub fn compare_exchange_weak<'g>( - &self, - current: Ptr<'g, T>, - new: (Option>, Tag), - success: Ordering, - failure: Ordering, - _guard: &'g Guard, - ) -> Result, SharedPtrPair<'g, T>> { - let desired = Tag::update_tag( - new.0.as_ref().map_or_else(null, Shared::underlying_ptr), - new.1, - ) - .cast_mut(); - - match self.instance_ptr.compare_exchange_weak( - current.as_underlying_ptr().cast_mut(), - desired, - success, - failure, - ) { - Ok(prev) => { - let prev_shared = NonNull::new(Tag::unset_tag(prev).cast_mut()).map(Shared::from); - forget(new); - Ok((prev_shared, Ptr::from(desired))) - } - Err(actual) => Err((new.0, Ptr::from(actual))), - } - } - - /// Clones `self` including tags. - /// - /// If `self` is not supposed to be an `AtomicShared::null`, this will never return an - /// `AtomicShared::null`. - /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicShared, Guard}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_shared: AtomicShared = AtomicShared::new(59); - /// let guard = Guard::new(); - /// let atomic_shared_clone = atomic_shared.clone(Relaxed, &guard); - /// let ptr = atomic_shared_clone.load(Relaxed, &guard); - /// assert_eq!(*ptr.as_ref().unwrap(), 59); - /// ``` - #[inline] - #[must_use] - pub fn clone(&self, order: Ordering, guard: &Guard) -> AtomicShared { - self.get_shared(order, guard) - .map_or_else(Self::null, |s| Self::from(s)) - } - - /// Tries to create a [`Shared`] out of `self`. - /// - /// If `self` is not supposed to be an `AtomicShared::null`, this will never return `None`. - /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicShared, Guard, Shared}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_shared: AtomicShared = AtomicShared::new(47); - /// let guard = Guard::new(); - /// let shared: Shared = atomic_shared.get_shared(Relaxed, &guard).unwrap(); - /// assert_eq!(*shared, 47); - /// ``` - #[inline] - #[must_use] - pub fn get_shared(&self, order: Ordering, _guard: &Guard) -> Option> { - let mut ptr = Tag::unset_tag(self.instance_ptr.load(order)); - while !ptr.is_null() { - if unsafe { (*ptr).try_add_ref(Acquire) } { - return NonNull::new(ptr.cast_mut()).map(Shared::from); - } - - ptr = Tag::unset_tag(self.instance_ptr.load(order)); - } - None - } - - /// Converts `self` into a [`Shared`]. - /// - /// Returns `None` if `self` did not hold a strong reference. 
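`get_shared` may only mint a new `Shared` while the strong count is still positive, which is why it loops on `try_add_ref` and re-reads the slot on failure. A sketch of that guard with the count kept in a bare `AtomicUsize` (a simplified stand-in for `RefCounted::try_add_ref`):

```
use std::sync::atomic::{AtomicUsize, Ordering::{Acquire, Relaxed}};

// Increment a strong count only if it is still positive; zero means the
// instance is already on its way to the collector and must not revive.
fn try_add_ref(cnt: &AtomicUsize) -> bool {
    let mut current = cnt.load(Relaxed);
    loop {
        if current == 0 {
            return false;
        }
        match cnt.compare_exchange_weak(current, current + 1, Acquire, Relaxed) {
            Ok(_) => return true,
            Err(actual) => current = actual,
        }
    }
}

fn main() {
    let live = AtomicUsize::new(1);
    assert!(try_add_ref(&live));
    let dead = AtomicUsize::new(0);
    assert!(!try_add_ref(&dead));
}
```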
- /// - /// # Examples - /// - /// ``` - /// use sdd::{AtomicShared, Shared}; - /// use std::sync::atomic::Ordering::Relaxed; - /// - /// let atomic_shared: AtomicShared = AtomicShared::new(55); - /// let shared: Shared = atomic_shared.into_shared(Relaxed).unwrap(); - /// assert_eq!(*shared, 55); - /// ``` - #[inline] - #[must_use] - pub fn into_shared(self, order: Ordering) -> Option> { - let ptr = self.instance_ptr.swap(null_mut(), order); - if let Some(underlying_ptr) = NonNull::new(Tag::unset_tag(ptr).cast_mut()) { - return Some(Shared::from(underlying_ptr)); - } - - None - } -} - -impl Clone for AtomicShared { - #[inline] - fn clone(&self) -> AtomicShared { - self.clone(Acquire, &Guard::new()) - } -} - -impl Default for AtomicShared { - #[inline] - fn default() -> Self { - Self::null() - } -} - -impl Drop for AtomicShared { - #[inline] - fn drop(&mut self) { - let Some(ptr) = NonNull::new(Tag::unset_tag(self.instance_ptr.load(Relaxed)).cast_mut()) - else { - return; - }; - - drop(Shared::from(ptr)); - } -} - -unsafe impl Send for AtomicShared {} - -unsafe impl Sync for AtomicShared {} - -impl UnwindSafe for AtomicShared {} diff --git a/src/collectible.rs b/src/collectible.rs index 38810c3..25430bf 100644 --- a/src/collectible.rs +++ b/src/collectible.rs @@ -15,7 +15,8 @@ pub(super) trait Collectible { /// [`Link`] implements [`Collectible`]. #[derive(Debug, Default)] pub struct Link { - data: (AtomicUsize, AtomicPtr), + ref_cnt: AtomicUsize, + ptr: AtomicPtr, } /// [`DeferredClosure`] implements [`Collectible`] for a closure to execute it after all the @@ -29,20 +30,22 @@ impl Link { #[inline] pub(super) const fn new_shared() -> Self { Link { - data: (AtomicUsize::new(1), AtomicPtr::new(ptr::null_mut())), + ref_cnt: AtomicUsize::new(1), + ptr: AtomicPtr::new(ptr::null_mut()), } } #[inline] pub(super) const fn new_unique() -> Self { Link { - data: (AtomicUsize::new(0), AtomicPtr::new(ptr::null_mut())), + ref_cnt: AtomicUsize::new(0), + ptr: AtomicPtr::new(ptr::null_mut()), } } #[inline] pub(super) const fn ref_cnt(&self) -> &AtomicUsize { - &self.data.0 + &self.ref_cnt } } @@ -50,23 +53,27 @@ impl Collectible for Link { #[inline] fn next_ptr(&self) -> Option> { let fat_ptr: (*mut usize, *mut usize) = ( - self.data.0.load(Relaxed) as *mut usize, - self.data.1.load(Relaxed), + self.ref_cnt.load(Relaxed) as *mut usize, + self.ptr.load(Relaxed), ); - unsafe { std::mem::transmute(fat_ptr) } + #[allow(clippy::missing_transmute_annotations)] + unsafe { + std::mem::transmute(fat_ptr) + } } #[allow(clippy::not_unsafe_ptr_arg_deref)] #[inline] fn set_next_ptr(&self, next_ptr: Option>) { - let data: (*mut usize, *mut usize) = next_ptr.map_or_else( + let (ref_cnt, ptr): (*mut usize, *mut usize) = next_ptr.map_or_else( || (ptr::null_mut(), ptr::null_mut()), + #[allow(clippy::missing_transmute_annotations)] |p| unsafe { std::mem::transmute(p) }, ); - self.data.0.store(data.0 as usize, Relaxed); - self.data.1.store(data.1, Relaxed); + self.ref_cnt.store(ref_cnt as usize, Relaxed); + self.ptr.store(ptr, Relaxed); } } diff --git a/src/collector.rs b/src/collector.rs index d897321..de225e1 100644 --- a/src/collector.rs +++ b/src/collector.rs @@ -1,11 +1,11 @@ -use super::collectible::{Collectible, Link}; -use super::exit_guard::ExitGuard; -use super::maybe_std::fence as maybe_std_fence; -use super::{Epoch, Tag}; +use crate::collectible::{Collectible, Link}; +use crate::exit_guard::ExitGuard; +use crate::maybe_std::fence as maybe_std_fence; +use crate::{Epoch, Tag}; use std::mem; use std::ptr::{self, 
addr_of_mut, NonNull}; use std::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst}; -use std::sync::atomic::{AtomicPtr, AtomicU8}; +use std::sync::atomic::{compiler_fence, AtomicPtr, AtomicU8}; /// [`Collector`] is a garbage collector that reclaims thread-locally unreachable instances /// when they are globally unreachable. @@ -39,7 +39,7 @@ impl Collector { const CADENCE: u8 = u8::MAX; /// A bit field representing a thread state where the thread does not have a - /// [`Guard`](super::Guard). + /// [`Guard`](crate::Guard). const INACTIVE: u8 = 1_u8 << 2; /// A bit field representing a thread state where the thread has been terminated. @@ -50,15 +50,17 @@ impl Collector { pub(super) fn current() -> *mut Collector { LOCAL_COLLECTOR.with(|local_collector| { let mut collector_ptr = local_collector.load(Relaxed); + if collector_ptr.is_null() { collector_ptr = COLLECTOR_ANCHOR.with(CollectorAnchor::alloc); local_collector.store(collector_ptr, Relaxed); } + collector_ptr }) } - /// Acknowledges a new [`Guard`](super::Guard) being instantiated. + /// Acknowledges a new [`Guard`](crate::Guard) being instantiated. /// /// # Panics /// @@ -72,6 +74,7 @@ impl Collector { u32::MAX, "Too many EBR guards" ); + (*collector_ptr).num_readers += 1; return; } @@ -81,7 +84,8 @@ impl Collector { Self::INACTIVE ); (*collector_ptr).num_readers = 1; - let new_epoch = Epoch::from_u8(GLOBAL_ROOT.epoch.load(Relaxed)); + let new_epoch = Epoch::from(GLOBAL_ROOT.epoch.load(Relaxed)); + if cfg!(feature = "loom") || cfg!(not(any(target_arch = "x86", target_arch = "x86_64"))) { // What will happen after the fence strictly happens after the fence. (*collector_ptr).state.store(new_epoch.into(), Relaxed); @@ -99,7 +103,6 @@ impl Collector { if (*collector_ptr).announcement == new_epoch { return; } - (*collector_ptr).announcement = new_epoch; if !collect_garbage { @@ -111,17 +114,18 @@ impl Collector { Self::end_guard(collector_ptr); } }); + Collector::epoch_updated(exit_guard.0); exit_guard.1 = true; } - /// Acknowledges an existing [`Guard`](super::Guard) being dropped. + /// Acknowledges an existing [`Guard`](crate::Guard) being dropped. #[inline] pub(super) unsafe fn end_guard(collector_ptr: *mut Collector) { debug_assert_eq!((*collector_ptr).state.load(Relaxed) & Self::INACTIVE, 0); debug_assert_eq!( (*collector_ptr).state.load(Relaxed), - u8::from((*collector_ptr).announcement) + (*collector_ptr).announcement.into() ); if (*collector_ptr).num_readers != 1 { @@ -130,22 +134,19 @@ impl Collector { } (*collector_ptr).num_readers = 0; - if (*collector_ptr).next_epoch_update == 0 { + (*collector_ptr).next_epoch_update = if (*collector_ptr).next_epoch_update == 0 { if (*collector_ptr).has_garbage || Tag::into_tag(GLOBAL_ROOT.chain_head.load(Relaxed)) == Tag::Second { Collector::scan(collector_ptr); } - (*collector_ptr).next_epoch_update = if (*collector_ptr).has_garbage { - Self::CADENCE / 4 - } else { - Self::CADENCE - }; + #[allow(clippy::cast_lossless)] + let has_garbage = (*collector_ptr).has_garbage as usize; + Self::CADENCE >> (has_garbage << 1) } else { - (*collector_ptr).next_epoch_update = - (*collector_ptr).next_epoch_update.saturating_sub(1); - } + (*collector_ptr).next_epoch_update.saturating_sub(1) + }; // What has happened cannot be observed after the thread setting itself inactive has // been witnessed. @@ -163,7 +164,7 @@ impl Collector { // are globally ordered. 
If the `SeqCst` event during the `Guard` creation happened before // the other `SeqCst` event, this will either load the last previous epoch value, or the // current value. If not, it is guaranteed that it reads the latest global epoch value. - Epoch::from_u8(GLOBAL_ROOT.epoch.load(Relaxed)) + Epoch::from(GLOBAL_ROOT.epoch.load(Relaxed)) } #[inline] @@ -201,8 +202,7 @@ impl Collector { return true; } - let old_collector = collector; - let collector = unsafe { &*collector_ptr }; + let (old_collector, collector) = (collector, unsafe { &*collector_ptr }); if collector.num_readers != 0 { return false; @@ -227,25 +227,25 @@ impl Collector { let ptr = Box::into_raw(boxed); let mut current = GLOBAL_ROOT.chain_head.load(Relaxed); - loop { + + unsafe { + (*ptr) + .next_link + .store(Tag::unset_tag(current).cast_mut(), Relaxed); + } + + while let Err(actual) = GLOBAL_ROOT.chain_head.compare_exchange_weak( + current, + Tag::update_tag(ptr, Tag::into_tag(current)).cast_mut(), + Release, + Relaxed, + ) { + current = actual; unsafe { (*ptr) .next_link .store(Tag::unset_tag(current).cast_mut(), Relaxed); } - - // It keeps the tag intact. - let tag = Tag::into_tag(current); - let new = Tag::update_tag(ptr, tag).cast_mut(); - if let Err(actual) = GLOBAL_ROOT - .chain_head - .compare_exchange_weak(current, new, Release, Relaxed) - { - current = actual; - continue; - } - - break; } ptr @@ -267,19 +267,20 @@ impl Collector { while let Some(instance_ptr) = garbage_link.take() { garbage_link = (*instance_ptr.as_ptr()).next_ptr(); + let mut guard = ExitGuard::new(garbage_link, |mut garbage_link| { while let Some(instance_ptr) = garbage_link.take() { // Something went wrong during dropping and deallocating an instance. garbage_link = (*instance_ptr.as_ptr()).next_ptr(); // Previous `drop_and_dealloc` may have accessed `self.current_instance_link`. - std::sync::atomic::compiler_fence(Acquire); + compiler_fence(Acquire); Collector::collect(collector_ptr, instance_ptr.as_ptr()); } }); // The `drop` below may access `self.current_instance_link`. - std::sync::atomic::compiler_fence(Acquire); + compiler_fence(Acquire); drop(Box::from_raw(instance_ptr.as_ptr())); garbage_link = guard.take(); } @@ -314,11 +315,12 @@ impl Collector { return false; }; - let _guard = ExitGuard::new((), |_| Self::unlock_chain()); + let _guard = ExitGuard::new((), |()| Self::unlock_chain()); let known_epoch = (*collector_ptr).state.load(Relaxed); let mut previous_ptr: *mut Collector = ptr::null_mut(); while !current_ptr.is_null() { + #[allow(clippy::ptr_eq)] if collector_ptr == current_ptr { previous_ptr = current_ptr; current_ptr = (*collector_ptr).next_link.load(Relaxed); @@ -327,33 +329,40 @@ impl Collector { let state = (*current_ptr).state.load(Acquire); let next_ptr = (*current_ptr).next_link.load(Relaxed); - if (state & Self::INVALID) != 0 { - // The collector is obsolete. - let result = if previous_ptr.is_null() { - GLOBAL_ROOT - .chain_head - .fetch_update(Release, Relaxed, |p| { - let tag = Tag::into_tag(p); - debug_assert!(tag == Tag::First || tag == Tag::Both); - (Tag::unset_tag(p) == current_ptr) - .then_some(Tag::update_tag(next_ptr, tag).cast_mut()) - }) - .is_ok() - } else { - (*previous_ptr).next_link.store(next_ptr, Relaxed); - true - }; - - if result { - Self::collect(collector_ptr, current_ptr); - current_ptr = next_ptr; - continue; - } - } else if state != known_epoch { + + if (state & Self::INVALID) == 0 && state != known_epoch { // Not ready for an epoch update. 
return false; + } else if state == known_epoch { + previous_ptr = current_ptr; + current_ptr = next_ptr; + continue; + } + + // The collector is obsolete. + let result = if previous_ptr.is_null() { + GLOBAL_ROOT + .chain_head + .fetch_update(Release, Relaxed, |p| { + let tag = Tag::into_tag(p); + debug_assert!(tag == Tag::First || tag == Tag::Both); + #[allow(clippy::ptr_eq)] + (Tag::unset_tag(p) == current_ptr) + .then(|| Tag::update_tag(next_ptr, tag).cast_mut()) + }) + .is_ok() + } else { + (*previous_ptr).next_link.store(next_ptr, Relaxed); + true + }; + + if !result { + previous_ptr = current_ptr; + current_ptr = next_ptr; + continue; } - previous_ptr = current_ptr; + + Self::collect(collector_ptr, current_ptr); current_ptr = next_ptr; } @@ -361,7 +370,7 @@ impl Collector { maybe_std_fence(SeqCst); GLOBAL_ROOT .epoch - .store(Epoch::from_u8(known_epoch).next().into(), Relaxed); + .store(Epoch::from(known_epoch).next().into(), Relaxed); true } @@ -369,41 +378,44 @@ impl Collector { /// Clears the [`Collector`] chain to if all are invalid. unsafe fn clear_chain() -> bool { let lock_result = Self::lock_chain(); - if let Ok(collector_head) = lock_result { - let _guard = ExitGuard::new((), |_| Self::unlock_chain()); + let Ok(collector_head) = lock_result else { + return false; + }; - let mut current_collector_ptr = collector_head; - while !current_collector_ptr.is_null() { - if ((*current_collector_ptr).state.load(Acquire) & Self::INVALID) == 0 { - return false; - } + let _guard = ExitGuard::new((), |()| Self::unlock_chain()); - current_collector_ptr = (*current_collector_ptr).next_link.load(Relaxed); + let mut current_collector_ptr = collector_head; + while !current_collector_ptr.is_null() { + if ((*current_collector_ptr).state.load(Acquire) & Self::INVALID) == 0 { + return false; } - // Reaching here means that there is no `Ptr` that possibly sees any garbage instances - // in those `Collector` instances in the chain. - let result = GLOBAL_ROOT.chain_head.fetch_update(Release, Relaxed, |p| { - (Tag::unset_tag(p) == collector_head).then(|| { - let tag = Tag::into_tag(p); - debug_assert!(tag == Tag::First || tag == Tag::Both); - Tag::update_tag(ptr::null::(), tag).cast_mut() - }) - }); + current_collector_ptr = (*current_collector_ptr).next_link.load(Relaxed); + } - if result.is_ok() { - let mut current_collector_ptr = collector_head; - while !current_collector_ptr.is_null() { - let next_collector_ptr = (*current_collector_ptr).next_link.load(Relaxed); - drop(Box::from_raw(current_collector_ptr)); - current_collector_ptr = next_collector_ptr; - } + // Reaching here means that there is no `Ptr` that possibly sees any garbage instances + // in those `Collector` instances in the chain. + let result = GLOBAL_ROOT.chain_head.fetch_update(Release, Relaxed, |p| { + #[allow(clippy::ptr_eq)] + (Tag::unset_tag(p) == collector_head).then(|| { + let tag = Tag::into_tag(p); + debug_assert!(tag == Tag::First || tag == Tag::Both); + Tag::update_tag(ptr::null::(), tag).cast_mut() + }) + }); - return true; - } + if result.is_err() { + return false; } - false + let mut current_collector_ptr = collector_head; + while !current_collector_ptr.is_null() { + let next_collector_ptr = (*current_collector_ptr).next_link.load(Relaxed); + drop(Box::from_raw(current_collector_ptr)); + current_collector_ptr = next_collector_ptr; + } + + true } /// Locks the chain. 
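`lock_chain` and `unlock_chain` below serialize chain maintenance by setting `Tag::First` on the chain-head pointer itself, so the lock costs no separate word, and `Tag::Second` survives the round-trip because it carries the independent scan-enforced flag. A sketch of a one-bit try-lock in the same style, with an `AtomicUsize` standing in for the head pointer:

```
use std::sync::atomic::{AtomicUsize, Ordering::{Acquire, Relaxed, Release}};

const LOCK_BIT: usize = 0b01; // plays the role of `Tag::First`

// Try-lock: set the lock bit only if it is clear, returning the
// untagged head value that was observed on success.
fn try_lock_chain(head: &AtomicUsize) -> Option<usize> {
    head.fetch_update(Acquire, Relaxed, |p| {
        (p & LOCK_BIT == 0).then_some(p | LOCK_BIT)
    })
    .ok()
    .map(|p| p & !LOCK_BIT)
}

fn unlock_chain(head: &AtomicUsize) {
    let _ = head.fetch_update(Release, Relaxed, |p| Some(p & !LOCK_BIT));
}

fn main() {
    let head = AtomicUsize::new(0);
    assert_eq!(try_lock_chain(&head), Some(0));
    assert_eq!(try_lock_chain(&head), None); // already locked
    unlock_chain(&head);
    assert_eq!(head.load(Relaxed), 0);
}
```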
@@ -414,29 +426,27 @@ impl Collector { let tag = Tag::into_tag(p); (tag == Tag::None || tag == Tag::Second) - .then_some(Tag::update_tag(p, Tag::First).cast_mut()) + .then(|| Tag::update_tag(p, Tag::First).cast_mut()) }) .map(|p| Tag::unset_tag(p).cast_mut()) } /// Unlocks the chain. fn unlock_chain() { - while { - let result = GLOBAL_ROOT.chain_head.fetch_update(Release, Relaxed, |p| { + let mut result = Err(ptr::null_mut()); + + while result.is_err() { + result = GLOBAL_ROOT.chain_head.fetch_update(Release, Relaxed, |p| { let tag = Tag::into_tag(p); debug_assert!(tag == Tag::First || tag == Tag::Both); - let new_tag = if tag == Tag::First { - Tag::None - } else { - Tag::Second - }; - - Some(Tag::update_tag(p, new_tag).cast_mut()) + Some( + #[allow(clippy::missing_transmute_annotations)] + Tag::update_tag(p, unsafe { mem::transmute(tag as usize & !Tag::First) }) + .cast_mut(), + ) }); - - result.is_err() - } {} + } } } @@ -444,6 +454,7 @@ impl Drop for Collector { #[inline] fn drop(&mut self) { let collector_ptr = addr_of_mut!(*self); + unsafe { Self::clear_for_drop(collector_ptr); } @@ -499,14 +510,14 @@ impl Drop for CollectorAnchor { /// Marks the head of a chain that there is a potentially unreachable `Collector` in the chain. fn mark_scan_enforced() { // `Tag::Second` indicates that there is a garbage `Collector`. - let _result = GLOBAL_ROOT.chain_head.fetch_update(Release, Relaxed, |p| { - let new_tag = match Tag::into_tag(p) { - Tag::None => Tag::Second, - Tag::First => Tag::Both, - Tag::Second | Tag::Both => return None, - }; + let _ = GLOBAL_ROOT.chain_head.fetch_update(Release, Relaxed, |p| { + let tag = Tag::into_tag(p); + + if tag == Tag::Second || tag == Tag::Both { + return None; + } - Some(Tag::update_tag(p, new_tag).cast_mut()) + Some(Tag::update_tag(p, tag + Tag::Second).cast_mut()) }); } diff --git a/src/epoch.rs b/src/epoch.rs index b6316a7..4298891 100644 --- a/src/epoch.rs +++ b/src/epoch.rs @@ -6,13 +6,11 @@ /// from [`Guard::epoch`](crate::Guard::epoch), e.g., if an [`Owned`](crate::Owned) was retired in /// epoch `1`, then the [`Owned`](crate::Owned) will become completely unreachable in epoch `0`. #[derive(Clone, Copy, Debug, Default, Eq, Ord, PartialEq, PartialOrd)] -pub struct Epoch { - value: u8, -} +pub struct Epoch(u8); impl Epoch { - /// This crates uses `4` epoch values. - const NUM_EPOCHS: u8 = 4; + /// This crate uses `4` epoch values. + const EPOCHS: u8 = 3; // For use with AND instead of modulo. /// Returns a future [`Epoch`] when the current readers will not be present. /// @@ -46,12 +44,11 @@ impl Epoch { /// let next_next = next.next(); /// assert!(next < next_next); /// ``` + #[allow(clippy::precedence)] #[inline] #[must_use] pub const fn next(self) -> Epoch { - Epoch { - value: (self.value + 1) % Self::NUM_EPOCHS, - } + Epoch(self.0 + 1 & Self::EPOCHS) } /// Returns the previous [`Epoch`] value. @@ -67,24 +64,24 @@ impl Epoch { /// let prev_prev = prev.prev(); /// assert!(prev_prev < prev); /// ``` + #[allow(clippy::precedence)] #[inline] #[must_use] pub const fn prev(self) -> Epoch { - Epoch { - value: (self.value + Self::NUM_EPOCHS - 1) % Self::NUM_EPOCHS, - } + Epoch(self.0 + Self::EPOCHS & Self::EPOCHS) } +} - /// Construct an [`Epoch`] from a [`u8`] value. 
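Replacing `% NUM_EPOCHS` with `& EPOCHS` is sound because the four epoch values occupy exactly the two-bit range; note that `EPOCHS = 3` is the mask, not the count, and that `+` binds tighter than `&`, which the `#[allow(clippy::precedence)]` acknowledges. The equivalence, checked exhaustively:

```
// next(v) = (v + 1) & 3 and prev(v) = (v + 3) & 3 agree with the
// mod-4 definitions for every epoch value.
fn main() {
    const MASK: u8 = 3;
    for v in 0..4u8 {
        assert_eq!((v + 1) & MASK, (v + 1) % 4);        // next
        assert_eq!((v + MASK) & MASK, (v + 4 - 1) % 4); // prev
    }
}
```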
+impl From for u8 { #[inline] - pub(super) const fn from_u8(value: u8) -> Epoch { - Epoch { value } + fn from(epoch: Epoch) -> Self { + epoch.0 } } -impl From for u8 { +impl From for Epoch { #[inline] - fn from(epoch: Epoch) -> Self { - epoch.value + fn from(value: u8) -> Self { + Epoch(value) } } diff --git a/src/guard.rs b/src/guard.rs index b70db97..e5f4554 100644 --- a/src/guard.rs +++ b/src/guard.rs @@ -1,9 +1,9 @@ -use super::collectible::DeferredClosure; -use super::collector::Collector; -use super::Epoch; +use crate::collectible::DeferredClosure; +use crate::collector::Collector; +use crate::Epoch; use std::panic::UnwindSafe; -/// [`Guard`] allows the user to read [`AtomicShared`](super::AtomicShared) and keeps the +/// [`Guard`] allows the user to read [`AtomicShared`](crate::AtomicShared) and keeps the /// underlying instance pinned to the thread. /// /// [`Guard`] internally prevents the global epoch value from passing through the value @@ -35,6 +35,7 @@ impl Guard { unsafe { Collector::new_guard(collector_ptr, true); } + Self { collector_ptr } } diff --git a/src/lib.rs b/src/lib.rs index 6fdcffb..e7305b4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,11 +1,8 @@ #![deny(missing_docs, warnings, clippy::all, clippy::pedantic)] #![doc = include_str!("../README.md")] -mod atomic_owned; -pub use atomic_owned::AtomicOwned; - -mod atomic_shared; -pub use atomic_shared::AtomicShared; +mod atomic; +pub use atomic::{Atomic, AtomicOwned, AtomicShared}; mod guard; pub use guard::Guard; @@ -55,13 +52,10 @@ pub fn suspend() -> bool { collector::Collector::pass_garbage() } -#[cfg(feature = "loom")] mod maybe_std { + #[cfg(feature = "loom")] pub(crate) use loom::sync::atomic::{fence, AtomicPtr}; -} - -#[cfg(not(feature = "loom"))] -mod maybe_std { + #[cfg(not(feature = "loom"))] pub(crate) use std::sync::atomic::{fence, AtomicPtr}; } diff --git a/src/model.rs b/src/model.rs new file mode 100644 index 0000000..0ce718e --- /dev/null +++ b/src/model.rs @@ -0,0 +1,97 @@ +#[cfg(feature = "loom")] +#[cfg(test)] +mod test_model { + use crate::{suspend, AtomicOwned, AtomicShared, Guard}; + use loom::sync::atomic::AtomicUsize; + use loom::thread::{spawn, yield_now}; + use std::sync::atomic::Ordering::Relaxed; + use std::sync::{Arc, Mutex}; + + struct A(String, Arc); + impl Drop for A { + fn drop(&mut self) { + self.1.fetch_add(1, Relaxed); + } + } + + static SERIALIZER: Mutex<()> = Mutex::new(()); + + #[test] + fn ebr_owned() { + let _guard = SERIALIZER.lock().unwrap(); + loom::model(|| { + let str = "HOW ARE YOU HOW ARE YOU"; + let drop_count = Arc::new(AtomicUsize::new(0)); + let data_owned = AtomicOwned::new(A(str.to_string(), drop_count.clone())); + + let guard = Guard::new(); + let ptr = data_owned.load(Relaxed, &guard); + + let thread = spawn(move || { + let guard = Guard::new(); + let ptr = data_owned.load(Relaxed, &guard); + drop(data_owned); + + assert_eq!(ptr.as_ref().unwrap().0, str); + guard.accelerate(); + drop(guard); + + assert!(suspend()); + }); + + assert_eq!(ptr.as_ref().unwrap().0, str); + guard.accelerate(); + drop(guard); + + while drop_count.load(Relaxed) != 1 { + Guard::new().accelerate(); + yield_now(); + } + + assert!(thread.join().is_ok()); + assert_eq!(drop_count.load(Relaxed), 1); + }); + } + + #[test] + fn ebr_shared() { + let _guard = SERIALIZER.lock().unwrap(); + loom::model(|| { + let str = "HOW ARE YOU HOW ARE YOU"; + let drop_count = Arc::new(AtomicUsize::new(0)); + let data_shared = AtomicShared::new(A(str.to_string(), drop_count.clone())); + + let guard = 
Guard::new(); + let ptr = data_shared.load(Relaxed, &guard); + + let thread = spawn(move || { + let data_shared_clone = data_shared.get_shared(Relaxed, &Guard::new()).unwrap(); + drop(data_shared); + + assert_eq!(data_shared_clone.0, str); + + let guard = Guard::new(); + let ptr = data_shared_clone.get_guarded_ptr(&guard); + drop(data_shared_clone); + guard.accelerate(); + + assert_eq!(ptr.as_ref().unwrap().0, str); + drop(guard); + + assert!(suspend()); + }); + + assert_eq!(ptr.as_ref().unwrap().0, str); + guard.accelerate(); + drop(guard); + + while drop_count.load(Relaxed) != 1 { + Guard::new().accelerate(); + yield_now(); + } + + assert!(thread.join().is_ok()); + assert_eq!(drop_count.load(Relaxed), 1); + }); + } +} diff --git a/src/owned.rs b/src/owned.rs index ea02867..6938d9d 100644 --- a/src/owned.rs +++ b/src/owned.rs @@ -1,5 +1,5 @@ -use super::ref_counted::RefCounted; -use super::{Guard, Ptr}; +use crate::ref_counted::RefCounted; +use crate::{Guard, Ptr}; use std::mem::forget; use std::ops::Deref; use std::panic::UnwindSafe; @@ -77,7 +77,7 @@ impl Owned { /// ``` #[inline] #[must_use] - pub fn get_guarded_ptr<'g>(&self, _guard: &'g Guard) -> Ptr<'g, T> { + pub fn get_guarded_ptr<'g>(&self, _: &'g Guard) -> Ptr<'g, T> { Ptr::from(self.instance_ptr) } @@ -97,8 +97,11 @@ impl Owned { /// ``` #[inline] #[must_use] - pub fn get_guarded_ref<'g>(&self, _guard: &'g Guard) -> &'g T { - unsafe { std::mem::transmute::<&T, _>(&**self) } + pub fn get_guarded_ref<'g>(&self, _: &'g Guard) -> &'g T { + #[allow(clippy::missing_transmute_annotations)] + unsafe { + std::mem::transmute(&**self) + } } /// Returns a mutable reference to the instance. @@ -182,6 +185,7 @@ impl Owned { #[inline] pub(super) fn from(ptr: NonNull>) -> Self { debug_assert_eq!(unsafe { (*ptr.as_ptr()).ref_cnt().load(Relaxed) }, 0); + Self { instance_ptr: ptr.as_ptr(), } diff --git a/src/ptr.rs b/src/ptr.rs index 1fa0718..c9c4d1b 100644 --- a/src/ptr.rs +++ b/src/ptr.rs @@ -1,5 +1,5 @@ -use super::ref_counted::RefCounted; -use super::{Shared, Tag}; +use crate::ref_counted::RefCounted; +use crate::{Shared, Tag}; use std::marker::PhantomData; use std::panic::UnwindSafe; use std::sync::atomic::Ordering::Relaxed; @@ -44,7 +44,7 @@ impl<'g, T> Ptr<'g, T> { #[inline] #[must_use] pub fn is_null(&self) -> bool { - self.instance_ptr as usize >> 3 == 0 + Tag::unset_tag(self.instance_ptr).is_null() } /// Tries to create a reference to the underlying instance. 
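Each model test wraps its scenario in `loom::model`, which replays the closure under every relevant thread interleaving, while the `SERIALIZER` mutex keeps the two models from running concurrently under `cargo test`. A minimal sketch of the same harness shape on an unrelated counter (assumes the `loom` dev-dependency and feature):

```
#[cfg(all(test, feature = "loom"))]
mod model_sketch {
    use loom::sync::atomic::AtomicUsize;
    use loom::thread::spawn;
    use std::sync::atomic::Ordering::SeqCst;
    use std::sync::Arc;

    #[test]
    fn counter() {
        // `loom::model` exhaustively explores thread interleavings.
        loom::model(|| {
            let n = Arc::new(AtomicUsize::new(0));
            let n2 = Arc::clone(&n);
            let t = spawn(move || {
                n2.fetch_add(1, SeqCst);
            });
            n.fetch_add(1, SeqCst);
            t.join().unwrap();
            // Every interleaving must end with both increments applied.
            assert_eq!(n.load(SeqCst), 2);
        });
    }
}
```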
@@ -246,6 +246,7 @@ impl Eq for Ptr<'_, T> {} impl PartialEq for Ptr<'_, T> { #[inline] + #[allow(clippy::ptr_eq)] fn eq(&self, other: &Self) -> bool { self.instance_ptr == other.instance_ptr } diff --git a/src/ref_counted.rs b/src/ref_counted.rs index f5dc51a..2e3f6b7 100644 --- a/src/ref_counted.rs +++ b/src/ref_counted.rs @@ -1,5 +1,5 @@ -use super::collectible::{Collectible, Link}; -use super::collector::Collector; +use crate::collectible::{Collectible, Link}; +use crate::collector::Collector; use std::mem::offset_of; use std::ops::Deref; use std::ptr::NonNull; @@ -21,6 +21,7 @@ impl RefCounted { instance, next_or_refcnt: Link::new_shared(), }); + Box::into_raw(boxed) } @@ -33,6 +34,7 @@ impl RefCounted { instance, next_or_refcnt: Link::new_unique(), }); + Box::into_raw(boxed) } @@ -83,6 +85,7 @@ impl RefCounted { ) { current = updated; } + current == 1 } @@ -90,8 +93,9 @@ impl RefCounted { #[inline] pub(super) fn inst_ptr(self_ptr: *const Self) -> *const T { let offset = offset_of!(Self, instance); + #[allow(clippy::cast_lossless)] let is_valid = !self_ptr.is_null() as usize; - unsafe { self_ptr.byte_add(offset * is_valid) as *const T } + unsafe { self_ptr.cast::().add(offset * is_valid).cast() } } /// Returns a reference to its reference count. @@ -103,11 +107,8 @@ impl RefCounted { /// Passes a pointer to [`RefCounted`] to the garbage collector. #[inline] pub(super) fn pass_to_collector(ptr: *mut Self) { - let dyn_mut_ptr: *mut dyn Collectible = ptr; - #[allow(clippy::transmute_ptr_to_ptr)] - let dyn_mut_ptr: *mut dyn Collectible = unsafe { std::mem::transmute(dyn_mut_ptr) }; unsafe { - Collector::collect(Collector::current(), dyn_mut_ptr); + Collector::collect(Collector::current(), ptr as *mut dyn Collectible); } } } diff --git a/src/shared.rs b/src/shared.rs index 31466dd..67fe211 100644 --- a/src/shared.rs +++ b/src/shared.rs @@ -1,5 +1,5 @@ -use super::ref_counted::RefCounted; -use super::{Guard, Ptr}; +use crate::ref_counted::RefCounted; +use crate::{Guard, Ptr}; use std::mem::forget; use std::ops::Deref; use std::panic::UnwindSafe; @@ -80,7 +80,7 @@ impl Shared { /// ``` #[inline] #[must_use] - pub fn get_guarded_ptr<'g>(&self, _guard: &'g Guard) -> Ptr<'g, T> { + pub fn get_guarded_ptr<'g>(&self, _: &'g Guard) -> Ptr<'g, T> { Ptr::from(self.instance_ptr) } @@ -100,8 +100,11 @@ impl Shared { /// ``` #[inline] #[must_use] - pub fn get_guarded_ref<'g>(&self, _guard: &'g Guard) -> &'g T { - unsafe { std::mem::transmute::<&T, _>(&**self) } + pub fn get_guarded_ref<'g>(&self, _: &'g Guard) -> &'g T { + #[allow(clippy::missing_transmute_annotations)] + unsafe { + std::mem::transmute(&**self) + } } /// Returns a mutable reference to the instance if the [`Shared`] is holding the only strong @@ -169,12 +172,10 @@ impl Shared { #[inline] #[must_use] pub fn release(self) -> bool { - let released = if unsafe { (*self.instance_ptr).drop_ref() } { - RefCounted::pass_to_collector(self.instance_ptr.cast_mut()); - true - } else { - false - }; + let released = unsafe { (*self.instance_ptr).drop_ref() } + .then(|| RefCounted::pass_to_collector(self.instance_ptr.cast_mut())) + .is_some(); + forget(self); released } @@ -215,12 +216,10 @@ impl Shared { #[inline] #[must_use] pub unsafe fn drop_in_place(self) -> bool { - let dropped = if (*self.instance_ptr).drop_ref() { - drop(Box::from_raw(self.instance_ptr.cast_mut())); - true - } else { - false - }; + let dropped = unsafe { (*self.instance_ptr).drop_ref() } + .then(|| drop(Box::from_raw(self.instance_ptr.cast_mut()))) + .is_some(); + 
forget(self); dropped } @@ -250,7 +249,8 @@ impl AsRef for Shared { impl Clone for Shared { #[inline] fn clone(&self) -> Self { - unsafe { (*self.instance_ptr).add_ref() } + unsafe { (*self.instance_ptr).add_ref() }; + Self { instance_ptr: self.instance_ptr, } diff --git a/src/tag.rs b/src/tag.rs index bb60335..f0b6f5a 100644 --- a/src/tag.rs +++ b/src/tag.rs @@ -1,4 +1,8 @@ -use std::{cmp::PartialEq, mem}; +use std::{ + cmp::PartialEq, + mem, + ops::{Add, Not}, +}; /// [`Tag`] is a four-state `Enum` that can be embedded in a pointer as the two least /// significant bits of the pointer value. @@ -19,7 +23,10 @@ impl Tag { /// Returns the tag embedded in the pointer. #[inline] pub(super) fn into_tag
<P>
(ptr: *const P) -> Self { - unsafe { mem::transmute(ptr as usize & Tag::Both as usize) } + #[allow(clippy::missing_transmute_annotations)] + unsafe { + mem::transmute(ptr as usize & Tag::Both as usize) + } } /// Sets a tag, overwriting any existing tag in the pointer. @@ -34,3 +41,24 @@ impl Tag { (ptr as usize & !(Tag::Both as usize)) as *const P } } + +impl Add for Tag { + type Output = Tag; + + #[inline] + fn add(self, rhs: Self) -> Tag { + #[allow(clippy::missing_transmute_annotations)] + unsafe { + mem::transmute(self as usize + rhs as usize) + } + } +} + +impl Not for Tag { + type Output = usize; + + #[inline] + fn not(self) -> usize { + !(self as usize) + } +} From c064f2250f9d51d2e2ef2303b7764e00dbcc5c45 Mon Sep 17 00:00:00 2001 From: redactedontop Date: Sat, 5 Apr 2025 08:16:48 +0100 Subject: [PATCH 4/7] Cleaned up Drop block in Atomic --- src/atomic.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/atomic.rs b/src/atomic.rs index 20400ad..a89d060 100644 --- a/src/atomic.rs +++ b/src/atomic.rs @@ -5,29 +5,32 @@ pub(super) mod ownership { use crate::ref_counted::RefCounted; pub(super) trait Type { - const IS_OWNED: bool; - fn generate_refcounted(instance: T) -> *const RefCounted; + fn can_drop(ptr: *const RefCounted) -> bool; } pub struct Owned; impl Type for Owned { - const IS_OWNED: bool = true; - fn generate_refcounted(instance: T) -> *const RefCounted { RefCounted::new_unique(instance) } + + fn can_drop(_: *const RefCounted) -> bool { + true + } } pub struct Shared; impl Type for Shared { - const IS_OWNED: bool = false; - fn generate_refcounted(instance: T) -> *const RefCounted { RefCounted::new_shared(instance) } + + fn can_drop(ptr: *const RefCounted) -> bool { + unsafe { &*ptr }.drop_ref() + } } } @@ -697,7 +700,7 @@ impl Drop for Atomic { return; } - if O::IS_OWNED || unsafe { (*ptr).drop_ref() } { + if O::can_drop(ptr) { RefCounted::pass_to_collector(ptr.cast_mut()); } } From 341a98586ee182361eb3d84327d10c8f2722b711 Mon Sep 17 00:00:00 2001 From: redactedontop Date: Sat, 5 Apr 2025 08:21:59 +0100 Subject: [PATCH 5/7] Minor cleanup --- src/atomic.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/atomic.rs b/src/atomic.rs index a89d060..792dd9d 100644 --- a/src/atomic.rs +++ b/src/atomic.rs @@ -696,11 +696,8 @@ impl Drop for Atomic { #[inline] fn drop(&mut self) { let ptr = Tag::unset_tag(self.0.load(Ordering::Relaxed)); - if ptr.is_null() { - return; - } - if O::can_drop(ptr) { + if !ptr.is_null() && O::can_drop(ptr) { RefCounted::pass_to_collector(ptr.cast_mut()); } } From a7ce537d7a53aad31782b157562454be79a731d0 Mon Sep 17 00:00:00 2001 From: redactedontop Date: Sat, 5 Apr 2025 08:29:32 +0100 Subject: [PATCH 6/7] Inlined trait functions --- src/atomic.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/atomic.rs b/src/atomic.rs index 792dd9d..fac10a0 100644 --- a/src/atomic.rs +++ b/src/atomic.rs @@ -12,10 +12,12 @@ pub(super) mod ownership { pub struct Owned; impl Type for Owned { + #[inline] fn generate_refcounted(instance: T) -> *const RefCounted { RefCounted::new_unique(instance) } + #[inline] fn can_drop(_: *const RefCounted) -> bool { true } @@ -24,10 +26,12 @@ pub(super) mod ownership { pub struct Shared; impl Type for Shared { + #[inline] fn generate_refcounted(instance: T) -> *const RefCounted { RefCounted::new_shared(instance) } + #[inline] fn can_drop(ptr: *const RefCounted) -> bool { unsafe { &*ptr }.drop_ref() } From fbda955bc27b86595b795068193667fc70008ed0 Mon Sep 17 00:00:00 2001 
From: Saltigrade Date: Fri, 11 Apr 2025 13:05:15 +0100 Subject: [PATCH 7/7] Removed const from functions. --- src/atomic.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/atomic.rs b/src/atomic.rs index fac10a0..8769000 100644 --- a/src/atomic.rs +++ b/src/atomic.rs @@ -92,7 +92,7 @@ impl Atomic { /// ``` #[inline] #[must_use] - pub const fn null() -> Self { + pub fn null() -> Self { Self(AtomicPtr::new(ptr::null_mut()), PhantomData) } @@ -215,7 +215,7 @@ impl AtomicOwned { /// ``` #[inline] #[must_use] - pub const fn from(r#type: Owned) -> Self { + pub fn from(r#type: Owned) -> Self { let ptr = r#type.underlying_ptr(); mem::forget(r#type); @@ -431,7 +431,7 @@ impl AtomicShared { /// ``` #[inline] #[must_use] - pub const fn from(r#type: Shared) -> Self { + pub fn from(r#type: Shared) -> Self { let ptr = r#type.underlying_ptr(); mem::forget(r#type);