From fb12863da29378485846c5e92956786c89600d36 Mon Sep 17 00:00:00 2001 From: Tianle Qiu Date: Fri, 25 Jul 2025 03:07:44 +0000 Subject: [PATCH 01/59] WIP --- src/global_state.rs | 4 + src/lib.rs | 8 + src/plan/barriers.rs | 89 ++++ src/plan/concurrent/barrier.rs | 146 ++++++ .../concurrent/concurrent_marking_work.rs | 244 ++++++++++ src/plan/concurrent/immix/gc_work.rs | 25 + src/plan/concurrent/immix/global.rs | 440 ++++++++++++++++++ src/plan/concurrent/immix/mod.rs | 7 + src/plan/concurrent/immix/mutator.rs | 84 ++++ src/plan/concurrent/mod.rs | 23 + src/plan/global.rs | 23 + src/plan/mod.rs | 5 +- src/plan/tracing.rs | 77 ++- src/policy/immix/immixspace.rs | 33 ++ src/policy/immix/line.rs | 14 +- src/policy/largeobjectspace.rs | 53 +++ src/policy/space.rs | 4 + src/scheduler/gc_work.rs | 38 +- src/scheduler/mod.rs | 2 +- src/scheduler/scheduler.rs | 121 ++++- src/scheduler/work_bucket.rs | 63 ++- src/util/address.rs | 5 + src/util/alloc/immix_allocator.rs | 29 ++ src/util/options.rs | 2 + src/vm/collection.rs | 3 + 25 files changed, 1524 insertions(+), 18 deletions(-) create mode 100644 src/plan/concurrent/barrier.rs create mode 100644 src/plan/concurrent/concurrent_marking_work.rs create mode 100644 src/plan/concurrent/immix/gc_work.rs create mode 100644 src/plan/concurrent/immix/global.rs create mode 100644 src/plan/concurrent/immix/mod.rs create mode 100644 src/plan/concurrent/immix/mutator.rs create mode 100644 src/plan/concurrent/mod.rs diff --git a/src/global_state.rs b/src/global_state.rs index b5a78d9bbe..8abe617b49 100644 --- a/src/global_state.rs +++ b/src/global_state.rs @@ -49,6 +49,8 @@ pub struct GlobalState { pub(crate) malloc_bytes: AtomicUsize, /// This stores the live bytes and the used bytes (by pages) for each space in last GC. This counter is only updated in the GC release phase. pub(crate) live_bytes_in_last_gc: AtomicRefCell>, + pub(crate) concurrent_marking_active: AtomicBool, + pub(crate) concurrent_marking_threshold: AtomicUsize, } impl GlobalState { @@ -206,6 +208,8 @@ impl Default for GlobalState { #[cfg(feature = "malloc_counted_size")] malloc_bytes: AtomicUsize::new(0), live_bytes_in_last_gc: AtomicRefCell::new(HashMap::new()), + concurrent_marking_threshold: AtomicUsize::new(0), + concurrent_marking_active: AtomicBool::new(false), } } } diff --git a/src/lib.rs b/src/lib.rs index afe094885f..e1a7b627e5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -32,6 +32,8 @@ extern crate static_assertions; extern crate probe; mod mmtk; +use std::sync::atomic::AtomicUsize; + pub use mmtk::MMTKBuilder; pub(crate) use mmtk::MMAPPER; pub use mmtk::MMTK; @@ -51,3 +53,9 @@ pub mod vm; pub use crate::plan::{ AllocationSemantics, BarrierSelector, Mutator, MutatorContext, ObjectQueue, Plan, }; + +static NUM_CONCURRENT_TRACING_PACKETS: AtomicUsize = AtomicUsize::new(0); + +fn concurrent_marking_packets_drained() -> bool { + crate::NUM_CONCURRENT_TRACING_PACKETS.load(std::sync::atomic::Ordering::SeqCst) == 0 +} diff --git a/src/plan/barriers.rs b/src/plan/barriers.rs index 56c069c982..07152bbde7 100644 --- a/src/plan/barriers.rs +++ b/src/plan/barriers.rs @@ -21,6 +21,7 @@ pub enum BarrierSelector { NoBarrier, /// Object remembering barrier is used. 
ObjectBarrier, + SATBBarrier, } impl BarrierSelector { @@ -45,6 +46,9 @@ impl BarrierSelector { pub trait Barrier: 'static + Send + Downcast { fn flush(&mut self) {} + /// load referent from java.lang.Reference + fn load_reference(&mut self, _referent: ObjectReference) {} + /// Subsuming barrier for object reference write fn object_reference_write( &mut self, @@ -92,6 +96,8 @@ pub trait Barrier: 'static + Send + Downcast { self.memory_region_copy_post(src, dst); } + fn object_reference_clone_pre(&mut self, _obj: ObjectReference) {} + /// Full pre-barrier for array copy fn memory_region_copy_pre(&mut self, _src: VM::VMMemorySlice, _dst: VM::VMMemorySlice) {} @@ -159,6 +165,10 @@ pub trait BarrierSemantics: 'static + Send { /// Object will probably be modified fn object_probable_write_slow(&mut self, _obj: ObjectReference) {} + + fn load_reference(&mut self, _o: ObjectReference) {} + + fn object_reference_clone_pre(&mut self, _obj: ObjectReference) {} } /// Generic object barrier with a type argument defining it's slow-path behaviour. @@ -250,3 +260,82 @@ impl Barrier for ObjectBarrier { } } } + +pub struct SATBBarrier { + semantics: S, +} + +impl SATBBarrier { + pub fn new(semantics: S) -> Self { + Self { semantics } + } + fn object_is_unlogged(&self, object: ObjectReference) -> bool { + // unsafe { S::UNLOG_BIT_SPEC.load::(object, None) != 0 } + S::UNLOG_BIT_SPEC.load_atomic::(object, None, Ordering::SeqCst) != 0 + } +} + +impl Barrier for SATBBarrier { + fn flush(&mut self) { + self.semantics.flush(); + } + + fn load_reference(&mut self, o: ObjectReference) { + self.semantics.load_reference(o) + } + + fn object_reference_clone_pre(&mut self, obj: ObjectReference) { + self.semantics.object_reference_clone_pre(obj); + } + + fn object_probable_write(&mut self, obj: ObjectReference) { + self.semantics.object_probable_write_slow(obj); + } + + fn object_reference_write_pre( + &mut self, + src: ObjectReference, + slot: ::VMSlot, + target: Option, + ) { + if self.object_is_unlogged(src) { + self.semantics + .object_reference_write_slow(src, slot, target); + } + } + + fn object_reference_write_post( + &mut self, + _src: ObjectReference, + _slot: ::VMSlot, + _target: Option, + ) { + unimplemented!() + } + + fn object_reference_write_slow( + &mut self, + src: ObjectReference, + slot: ::VMSlot, + target: Option, + ) { + self.semantics + .object_reference_write_slow(src, slot, target); + } + + fn memory_region_copy_pre( + &mut self, + src: ::VMMemorySlice, + dst: ::VMMemorySlice, + ) { + self.semantics.memory_region_copy_slow(src, dst); + } + + fn memory_region_copy_post( + &mut self, + _src: ::VMMemorySlice, + _dst: ::VMMemorySlice, + ) { + unimplemented!() + } +} diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs new file mode 100644 index 0000000000..cb76910147 --- /dev/null +++ b/src/plan/concurrent/barrier.rs @@ -0,0 +1,146 @@ +use std::sync::atomic::Ordering; + +use crate::{ + plan::{barriers::BarrierSemantics, concurrent::immix::global::ConcurrentImmix, VectorQueue}, + scheduler::WorkBucketStage, + util::ObjectReference, + vm::{ + slot::{MemorySlice, Slot}, + VMBinding, + }, + MMTK, +}; + +use super::{concurrent_marking_work::ProcessModBufSATB, Pause}; + +pub struct SATBBarrierSemantics { + mmtk: &'static MMTK, + satb: VectorQueue, + refs: VectorQueue, + immix: &'static ConcurrentImmix, +} + +impl SATBBarrierSemantics { + pub fn new(mmtk: &'static MMTK) -> Self { + Self { + mmtk, + satb: VectorQueue::default(), + refs: VectorQueue::default(), + immix: mmtk + 
.get_plan() + .downcast_ref::>() + .unwrap(), + } + } + + fn slow(&mut self, _src: Option, _slot: VM::VMSlot, old: ObjectReference) { + self.satb.push(old); + if self.satb.is_full() { + self.flush_satb(); + } + } + + fn enqueue_node( + &mut self, + src: Option, + slot: VM::VMSlot, + _new: Option, + ) -> bool { + if let Some(old) = slot.load() { + self.slow(src, slot, old); + } + true + } + + /// Attempt to atomically log an object. + /// Returns true if the object is not logged previously. + fn log_object(&self, object: ObjectReference) -> bool { + Self::UNLOG_BIT_SPEC.store_atomic::(object, 0, None, Ordering::SeqCst); + true + } + + fn flush_satb(&mut self) { + if !self.satb.is_empty() { + if self.should_create_satb_packets() { + let satb = self.satb.take(); + if let Some(pause) = self.immix.current_pause() { + debug_assert_ne!(pause, Pause::InitialMark); + self.mmtk.scheduler.work_buckets[WorkBucketStage::Closure] + .add(ProcessModBufSATB::new(satb)); + } else { + self.mmtk.scheduler.work_buckets[WorkBucketStage::Unconstrained] + .add(ProcessModBufSATB::new(satb)); + } + } else { + let _ = self.satb.take(); + }; + } + } + + #[cold] + fn flush_weak_refs(&mut self) { + if !self.refs.is_empty() { + // debug_assert!(self.should_create_satb_packets()); + let nodes = self.refs.take(); + if let Some(pause) = self.immix.current_pause() { + debug_assert_ne!(pause, Pause::InitialMark); + self.mmtk.scheduler.work_buckets[WorkBucketStage::Closure] + .add(ProcessModBufSATB::new(nodes)); + } else { + self.mmtk.scheduler.work_buckets[WorkBucketStage::Unconstrained] + .add(ProcessModBufSATB::new(nodes)); + } + } + } + + fn should_create_satb_packets(&self) -> bool { + self.immix.concurrent_marking_in_progress() + || self.immix.current_pause() == Some(Pause::FinalMark) + } +} + +impl BarrierSemantics for SATBBarrierSemantics { + type VM = VM; + + #[cold] + fn flush(&mut self) { + self.flush_satb(); + self.flush_weak_refs(); + } + + fn object_reference_write_slow( + &mut self, + src: ObjectReference, + _slot: ::VMSlot, + _target: Option, + ) { + self.object_probable_write_slow(src); + self.log_object(src); + } + + fn memory_region_copy_slow( + &mut self, + _src: ::VMMemorySlice, + dst: ::VMMemorySlice, + ) { + for s in dst.iter_slots() { + self.enqueue_node(None, s, None); + } + } + + fn load_reference(&mut self, o: ObjectReference) { + if !self.immix.concurrent_marking_in_progress() { + return; + } + self.refs.push(o); + if self.refs.is_full() { + self.flush_weak_refs(); + } + } + + fn object_probable_write_slow(&mut self, obj: ObjectReference) { + obj.iterate_fields::(|s| { + self.enqueue_node(Some(obj), s, None); + }); + } +} diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs new file mode 100644 index 0000000000..6252b81eab --- /dev/null +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -0,0 +1,244 @@ +use crate::plan::concurrent::immix::global::ConcurrentImmix; +use crate::plan::concurrent::Pause; +use crate::plan::VectorQueue; +use crate::policy::gc_work::PolicyTraceObject; +use crate::policy::immix::TRACE_KIND_FAST; +use crate::policy::space::Space; +use crate::scheduler::gc_work::{ScanObjects, SlotOf}; +use crate::util::ObjectReference; +use crate::vm::slot::Slot; + +use crate::Plan; +use crate::{ + plan::ObjectQueue, + scheduler::{gc_work::ProcessEdgesBase, GCWork, GCWorker, ProcessEdgesWork, WorkBucketStage}, + vm::*, + MMTK, +}; +use atomic::Ordering; +use std::ops::{Deref, DerefMut}; + +pub struct ConcurrentTraceObjects { + plan: 
&'static ConcurrentImmix, + // objects to mark and scan + objects: Option>, + // recursively generated objects + next_objects: VectorQueue, + worker: *mut GCWorker, +} + +impl ConcurrentTraceObjects { + const SATB_BUFFER_SIZE: usize = 8192; + + pub fn new(objects: Vec, mmtk: &'static MMTK) -> Self { + let plan = mmtk + .get_plan() + .downcast_ref::>() + .unwrap(); + crate::NUM_CONCURRENT_TRACING_PACKETS.fetch_add(1, Ordering::SeqCst); + Self { + plan, + objects: Some(objects), + next_objects: VectorQueue::default(), + worker: std::ptr::null_mut(), + } + } + + pub fn worker(&self) -> &'static mut GCWorker { + debug_assert_ne!(self.worker, std::ptr::null_mut()); + unsafe { &mut *self.worker } + } + + #[cold] + fn flush(&mut self) { + if !self.next_objects.is_empty() { + let objects = self.next_objects.take(); + let worker = self.worker(); + let w = Self::new(objects, worker.mmtk); + worker.add_work(WorkBucketStage::Unconstrained, w); + } + } + + fn trace_object(&mut self, object: ObjectReference) -> ObjectReference { + if self.plan.immix_space.in_space(object) { + self.plan + .immix_space + .trace_object::(self, object, None, self.worker()); + } else { + self.plan.common().get_los().trace_object(self, object); + } + object + } + + fn trace_objects(&mut self, objects: &[ObjectReference]) { + for o in objects.iter() { + self.trace_object(*o); + } + } + + fn scan_and_enqueue(&mut self, object: ObjectReference) { + object.iterate_fields::(|s| { + let Some(t) = s.load() else { + return; + }; + + self.next_objects.push(t); + if self.next_objects.len() > Self::SATB_BUFFER_SIZE { + self.flush(); + } + }); + } +} + +impl ObjectQueue for ConcurrentTraceObjects { + fn enqueue(&mut self, object: ObjectReference) { + debug_assert!( + object.to_raw_address().is_mapped(), + "Invalid obj {:?}: address is not mapped", + object + ); + self.scan_and_enqueue(object); + } +} + +unsafe impl Send for ConcurrentTraceObjects {} + +impl GCWork for ConcurrentTraceObjects { + fn do_work(&mut self, worker: &mut GCWorker, _mmtk: &'static MMTK) { + self.worker = worker; + // mark objects + if let Some(objects) = self.objects.take() { + self.trace_objects(&objects) + } + let pause_opt = self.plan.current_pause(); + if pause_opt == Some(Pause::FinalMark) || pause_opt.is_none() { + let mut next_objects = vec![]; + while !self.next_objects.is_empty() { + let pause_opt = self.plan.current_pause(); + if !(pause_opt == Some(Pause::FinalMark) || pause_opt.is_none()) { + break; + } + next_objects.clear(); + self.next_objects.swap(&mut next_objects); + self.trace_objects(&next_objects); + } + } + self.flush(); + crate::NUM_CONCURRENT_TRACING_PACKETS.fetch_sub(1, Ordering::SeqCst); + } +} + +pub struct ProcessModBufSATB { + nodes: Option>, +} + +impl ProcessModBufSATB { + pub fn new(nodes: Vec) -> Self { + Self { nodes: Some(nodes) } + } +} + +impl GCWork for ProcessModBufSATB { + fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { + let mut w = if let Some(nodes) = self.nodes.take() { + if nodes.is_empty() { + return; + } + + ConcurrentTraceObjects::new(nodes, mmtk) + } else { + return; + }; + GCWork::do_work(&mut w, worker, mmtk); + } +} + +pub struct ProcessRootSlots { + base: ProcessEdgesBase, +} + +impl ProcessEdgesWork for ProcessRootSlots { + type VM = VM; + type ScanObjectsWorkType = ScanObjects; + const OVERWRITE_REFERENCE: bool = false; + const SCAN_OBJECTS_IMMEDIATELY: bool = true; + + fn new( + slots: Vec>, + roots: bool, + mmtk: &'static MMTK, + bucket: WorkBucketStage, + ) -> Self { + 
debug_assert!(roots); + let base = ProcessEdgesBase::new(slots, roots, mmtk, bucket); + Self { base } + } + + fn flush(&mut self) {} + + fn trace_object(&mut self, _object: ObjectReference) -> ObjectReference { + unreachable!() + } + + fn process_slots(&mut self) { + let pause = self + .base + .plan() + .downcast_ref::>() + .unwrap() + .current_pause() + .unwrap(); + // No need to scan roots in the final mark + if pause == Pause::FinalMark { + return; + } + let mut root_objects = Vec::with_capacity(Self::CAPACITY); + if !self.slots.is_empty() { + let slots = std::mem::take(&mut self.slots); + for slot in slots { + if let Some(object) = slot.load() { + root_objects.push(object); + if root_objects.len() == Self::CAPACITY { + // create the packet + let worker = self.worker(); + let mmtk = self.mmtk(); + let w = ConcurrentTraceObjects::new(root_objects.clone(), mmtk); + + match pause { + Pause::InitialMark => worker.scheduler().postpone(w), + _ => unreachable!(), + } + + root_objects.clear(); + } + } + } + if !root_objects.is_empty() { + let worker = self.worker(); + let w = ConcurrentTraceObjects::new(root_objects.clone(), self.mmtk()); + + match pause { + Pause::InitialMark => worker.scheduler().postpone(w), + _ => unreachable!(), + } + } + } + } + + fn create_scan_work(&self, _nodes: Vec) -> Self::ScanObjectsWorkType { + unimplemented!() + } +} + +impl Deref for ProcessRootSlots { + type Target = ProcessEdgesBase; + fn deref(&self) -> &Self::Target { + &self.base + } +} + +impl DerefMut for ProcessRootSlots { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.base + } +} diff --git a/src/plan/concurrent/immix/gc_work.rs b/src/plan/concurrent/immix/gc_work.rs new file mode 100644 index 0000000000..6372105313 --- /dev/null +++ b/src/plan/concurrent/immix/gc_work.rs @@ -0,0 +1,25 @@ +use crate::plan::concurrent::immix::global::ConcurrentImmix; +use crate::policy::gc_work::{TraceKind, TRACE_KIND_TRANSITIVE_PIN}; +use crate::scheduler::gc_work::{PlanProcessEdges, UnsupportedProcessEdges}; +use crate::scheduler::ProcessEdgesWork; +use crate::vm::VMBinding; + +pub(super) struct ConcurrentImmixSTWGCWorkContext( + std::marker::PhantomData, +); +impl crate::scheduler::GCWorkContext + for ConcurrentImmixSTWGCWorkContext +{ + type VM = VM; + type PlanType = ConcurrentImmix; + type DefaultProcessEdges = PlanProcessEdges, KIND>; + type PinningProcessEdges = PlanProcessEdges, TRACE_KIND_TRANSITIVE_PIN>; +} +pub(super) struct ConcurrentImmixGCWorkContext(std::marker::PhantomData); + +impl crate::scheduler::GCWorkContext for ConcurrentImmixGCWorkContext { + type VM = E::VM; + type PlanType = ConcurrentImmix; + type DefaultProcessEdges = E; + type PinningProcessEdges = UnsupportedProcessEdges; +} diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs new file mode 100644 index 0000000000..bb8d7f6879 --- /dev/null +++ b/src/plan/concurrent/immix/global.rs @@ -0,0 +1,440 @@ +use crate::plan::concurrent::concurrent_marking_work::ProcessRootSlots; +use crate::plan::concurrent::immix::gc_work::ConcurrentImmixGCWorkContext; +use crate::plan::concurrent::immix::gc_work::ConcurrentImmixSTWGCWorkContext; +use crate::plan::concurrent::Pause; +use crate::plan::global::BasePlan; +use crate::plan::global::CommonPlan; +use crate::plan::global::CreateGeneralPlanArgs; +use crate::plan::global::CreateSpecificPlanArgs; +use crate::plan::immix::mutator::ALLOCATOR_MAPPING; +use crate::plan::AllocationSemantics; +use crate::plan::Plan; +use crate::plan::PlanConstraints; +use 
crate::policy::immix::ImmixSpaceArgs; +use crate::policy::immix::TRACE_KIND_DEFRAG; +use crate::policy::immix::TRACE_KIND_FAST; +use crate::policy::space::Space; +use crate::scheduler::gc_work::Release; +use crate::scheduler::gc_work::StopMutators; +use crate::scheduler::gc_work::UnsupportedProcessEdges; +use crate::scheduler::*; +use crate::util::alloc::allocators::AllocatorSelector; +use crate::util::copy::*; +use crate::util::heap::gc_trigger::SpaceStats; +use crate::util::heap::VMRequest; +use crate::util::metadata::side_metadata::SideMetadataContext; +use crate::vm::VMBinding; +use crate::{policy::immix::ImmixSpace, util::opaque_pointer::VMWorkerThread}; +use std::sync::atomic::AtomicBool; + +use atomic::Atomic; +use atomic::Ordering; +use enum_map::EnumMap; + +use mmtk_macros::{HasSpaces, PlanTraceObject}; + +#[derive(Debug, Clone, Copy, bytemuck::NoUninit, PartialEq, Eq)] +#[repr(u8)] +enum GCCause { + Unknown, + FullHeap, + InitialMark, + FinalMark, +} + +#[derive(HasSpaces, PlanTraceObject)] +pub struct ConcurrentImmix { + #[post_scan] + #[space] + #[copy_semantics(CopySemantics::DefaultCopy)] + pub immix_space: ImmixSpace, + #[parent] + pub common: CommonPlan, + last_gc_was_defrag: AtomicBool, + current_pause: Atomic>, + previous_pause: Atomic>, + gc_cause: Atomic, +} + +/// The plan constraints for the immix plan. +pub const CONCURRENT_IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { + // If we disable moving in Immix, this is a non-moving plan. + moves_objects: false, + // Max immix object size is half of a block. + max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE, + needs_prepare_mutator: true, + barrier: crate::BarrierSelector::SATBBarrier, + ..PlanConstraints::default() +}; + +impl Plan for ConcurrentImmix { + fn collection_required(&self, space_full: bool, _space: Option>) -> bool { + if self.base().collection_required(self, space_full) { + self.gc_cause.store(GCCause::FullHeap, Ordering::Release); + return true; + } + + let concurrent_marking_in_progress = self.concurrent_marking_in_progress(); + + if concurrent_marking_in_progress && crate::concurrent_marking_packets_drained() { + self.gc_cause.store(GCCause::FinalMark, Ordering::Release); + return true; + } + let threshold = self.get_total_pages() >> 1; + let concurrent_marking_threshold = self + .common + .base + .global_state + .concurrent_marking_threshold + .load(Ordering::Acquire); + if !concurrent_marking_in_progress && concurrent_marking_threshold > threshold { + debug_assert!(crate::concurrent_marking_packets_drained()); + debug_assert!(!self.concurrent_marking_in_progress()); + let prev_pause = self.previous_pause(); + debug_assert!(prev_pause.is_none() || prev_pause.unwrap() != Pause::InitialMark); + self.gc_cause.store(GCCause::InitialMark, Ordering::Release); + return true; + } + false + } + + fn last_collection_was_exhaustive(&self) -> bool { + self.immix_space + .is_last_gc_exhaustive(self.last_gc_was_defrag.load(Ordering::Relaxed)) + } + + fn constraints(&self) -> &'static PlanConstraints { + &CONCURRENT_IMMIX_CONSTRAINTS + } + + fn create_copy_config(&'static self) -> CopyConfig { + use enum_map::enum_map; + CopyConfig { + copy_mapping: enum_map! 
{ + CopySemantics::DefaultCopy => CopySelector::Immix(0), + _ => CopySelector::Unused, + }, + space_mapping: vec![(CopySelector::Immix(0), &self.immix_space)], + constraints: &CONCURRENT_IMMIX_CONSTRAINTS, + } + } + + fn schedule_collection(&'static self, scheduler: &GCWorkScheduler) { + self.current_pause + .store(Some(Pause::Full), Ordering::SeqCst); + + Self::schedule_immix_full_heap_collection::< + ConcurrentImmix, + ConcurrentImmixSTWGCWorkContext, + ConcurrentImmixSTWGCWorkContext, + >(self, &self.immix_space, scheduler); + } + + fn schedule_concurrent_collection(&'static self, scheduler: &GCWorkScheduler) { + let pause = self.select_collection_kind(); + if pause == Pause::Full { + self.current_pause + .store(Some(Pause::Full), Ordering::SeqCst); + + Self::schedule_immix_full_heap_collection::< + ConcurrentImmix, + ConcurrentImmixSTWGCWorkContext, + ConcurrentImmixSTWGCWorkContext, + >(self, &self.immix_space, scheduler); + } else { + // Set current pause kind + self.current_pause.store(Some(pause), Ordering::SeqCst); + // Schedule work + match pause { + Pause::InitialMark => self.schedule_concurrent_marking_initial_pause(scheduler), + Pause::FinalMark => self.schedule_concurrent_marking_final_pause(scheduler), + _ => unreachable!(), + } + } + } + + fn get_allocator_mapping(&self) -> &'static EnumMap { + &ALLOCATOR_MAPPING + } + + fn prepare(&mut self, tls: VMWorkerThread) { + let pause = self.current_pause().unwrap(); + match pause { + Pause::Full => { + self.common.prepare(tls, true); + self.immix_space.prepare( + true, + Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), + ); + } + Pause::InitialMark => { + // init prepare has to be executed first, otherwise, los objects will not be + // dealt with properly + self.common.initial_pause_prepare(); + self.immix_space.initial_pause_prepare(); + self.common.prepare(tls, true); + self.immix_space.prepare( + true, + Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), + ); + } + Pause::FinalMark => (), + } + } + + fn release(&mut self, tls: VMWorkerThread) { + let pause = self.current_pause().unwrap(); + match pause { + Pause::InitialMark => (), + Pause::Full | Pause::FinalMark => { + self.immix_space.final_pause_release(); + self.common.final_pause_release(); + self.common.release(tls, true); + // release the collected region + self.immix_space.release(true); + } + } + // reset the concurrent marking page counting + self.common() + .base + .global_state + .concurrent_marking_threshold + .store(0, Ordering::Release); + } + + fn end_of_gc(&mut self, _tls: VMWorkerThread) { + self.last_gc_was_defrag + .store(self.immix_space.end_of_gc(), Ordering::Relaxed); + } + + fn current_gc_may_move_object(&self) -> bool { + self.immix_space.in_defrag() + } + + fn get_collection_reserved_pages(&self) -> usize { + self.immix_space.defrag_headroom_pages() + } + + fn get_used_pages(&self) -> usize { + self.immix_space.reserved_pages() + self.common.get_used_pages() + } + + fn base(&self) -> &BasePlan { + &self.common.base + } + + fn base_mut(&mut self) -> &mut BasePlan { + &mut self.common.base + } + + fn common(&self) -> &CommonPlan { + &self.common + } + + fn gc_pause_start(&self, _scheduler: &GCWorkScheduler) { + use crate::vm::ActivePlan; + let pause = self.current_pause().unwrap(); + match pause { + Pause::Full => { + self.set_concurrent_marking_state(false); + } + Pause::InitialMark => { + debug_assert!( + !self.concurrent_marking_in_progress(), + "prev pause: {:?}", + self.previous_pause().unwrap() + ); + } + Pause::FinalMark 
=> { + debug_assert!(self.concurrent_marking_in_progress()); + // Flush barrier buffers + for mutator in ::VMActivePlan::mutators() { + mutator.barrier.flush(); + } + self.set_concurrent_marking_state(false); + } + } + println!("{:?} start", pause); + } + + fn gc_pause_end(&self) { + let pause = self.current_pause().unwrap(); + if pause == Pause::InitialMark { + self.set_concurrent_marking_state(true); + } + self.previous_pause.store(Some(pause), Ordering::SeqCst); + self.current_pause.store(None, Ordering::SeqCst); + println!("{:?} end", pause); + } +} + +impl ConcurrentImmix { + pub fn new(args: CreateGeneralPlanArgs) -> Self { + use crate::vm::ObjectModel; + + let spec = crate::util::metadata::extract_side_metadata(&[ + *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC, + ]); + + let plan_args = CreateSpecificPlanArgs { + global_args: args, + constraints: &CONCURRENT_IMMIX_CONSTRAINTS, + global_side_metadata_specs: SideMetadataContext::new_global_specs(&spec), + }; + Self::new_with_args( + plan_args, + ImmixSpaceArgs { + unlog_object_when_traced: false, + #[cfg(feature = "vo_bit")] + mixed_age: false, + never_move_objects: true, + }, + ) + } + + pub fn new_with_args( + mut plan_args: CreateSpecificPlanArgs, + space_args: ImmixSpaceArgs, + ) -> Self { + let immix = ConcurrentImmix { + immix_space: ImmixSpace::new( + plan_args.get_space_args("immix", true, false, VMRequest::discontiguous()), + space_args, + ), + common: CommonPlan::new(plan_args), + last_gc_was_defrag: AtomicBool::new(false), + current_pause: Atomic::new(None), + previous_pause: Atomic::new(None), + gc_cause: Atomic::new(GCCause::Unknown), + }; + + immix.verify_side_metadata_sanity(); + + immix + } + + /// Schedule a full heap immix collection. This method is used by immix/genimmix/stickyimmix + /// to schedule a full heap collection. A plan must call set_collection_kind and set_gc_status before this method. 
+ pub(crate) fn schedule_immix_full_heap_collection< + PlanType: Plan, + FastContext: GCWorkContext, + DefragContext: GCWorkContext, + >( + plan: &'static DefragContext::PlanType, + immix_space: &ImmixSpace, + scheduler: &GCWorkScheduler, + ) -> bool { + let in_defrag = immix_space.decide_whether_to_defrag( + plan.base().global_state.is_emergency_collection(), + true, + plan.base() + .global_state + .cur_collection_attempts + .load(Ordering::SeqCst), + plan.base().global_state.is_user_triggered_collection(), + *plan.base().options.full_heap_system_gc, + ); + + if in_defrag { + scheduler.schedule_common_work::(plan); + } else { + scheduler.schedule_common_work::(plan); + } + in_defrag + } + + fn select_collection_kind(&self) -> Pause { + let emergency = self.base().global_state.is_emergency_collection(); + let user_triggered = self.base().global_state.is_user_triggered_collection(); + let concurrent_marking_in_progress = self.concurrent_marking_in_progress(); + let concurrent_marking_packets_drained = crate::concurrent_marking_packets_drained(); + + if emergency || user_triggered { + return Pause::Full; + } else if !concurrent_marking_in_progress && concurrent_marking_packets_drained { + return Pause::InitialMark; + } else if concurrent_marking_in_progress && concurrent_marking_packets_drained { + return Pause::FinalMark; + } + + Pause::Full + } + + fn disable_unnecessary_buckets(&'static self, scheduler: &GCWorkScheduler, pause: Pause) { + if pause == Pause::InitialMark { + scheduler.work_buckets[WorkBucketStage::Closure].set_as_disabled(); + scheduler.work_buckets[WorkBucketStage::WeakRefClosure].set_as_disabled(); + scheduler.work_buckets[WorkBucketStage::FinalRefClosure].set_as_disabled(); + scheduler.work_buckets[WorkBucketStage::PhantomRefClosure].set_as_disabled(); + } + scheduler.work_buckets[WorkBucketStage::TPinningClosure].set_as_disabled(); + scheduler.work_buckets[WorkBucketStage::PinningRootsTrace].set_as_disabled(); + scheduler.work_buckets[WorkBucketStage::VMRefClosure].set_as_disabled(); + scheduler.work_buckets[WorkBucketStage::VMRefForwarding].set_as_disabled(); + scheduler.work_buckets[WorkBucketStage::SoftRefClosure].set_as_disabled(); + scheduler.work_buckets[WorkBucketStage::CalculateForwarding].set_as_disabled(); + scheduler.work_buckets[WorkBucketStage::SecondRoots].set_as_disabled(); + scheduler.work_buckets[WorkBucketStage::RefForwarding].set_as_disabled(); + scheduler.work_buckets[WorkBucketStage::FinalizableForwarding].set_as_disabled(); + scheduler.work_buckets[WorkBucketStage::Compact].set_as_disabled(); + } + + pub(crate) fn schedule_concurrent_marking_initial_pause( + &'static self, + scheduler: &GCWorkScheduler, + ) { + use crate::scheduler::gc_work::{Prepare, StopMutators, UnsupportedProcessEdges}; + + self.disable_unnecessary_buckets(scheduler, Pause::InitialMark); + + scheduler.work_buckets[WorkBucketStage::Unconstrained].add_prioritized(Box::new( + StopMutators::>>::new_args( + Pause::InitialMark, + ), + )); + scheduler.work_buckets[WorkBucketStage::Prepare].add(Prepare::< + ConcurrentImmixGCWorkContext>, + >::new(self)); + } + + fn schedule_concurrent_marking_final_pause(&'static self, scheduler: &GCWorkScheduler) { + self.disable_unnecessary_buckets(scheduler, Pause::FinalMark); + + scheduler.work_buckets[WorkBucketStage::Unconstrained].add_prioritized(Box::new( + StopMutators::>>::new_args( + Pause::FinalMark, + ), + )); + + scheduler.work_buckets[WorkBucketStage::Release].add(Release::< + ConcurrentImmixGCWorkContext>, + >::new(self)); + } + + pub 
fn concurrent_marking_in_progress(&self) -> bool { + self.common() + .base + .global_state + .concurrent_marking_active + .load(Ordering::Acquire) + } + + fn set_concurrent_marking_state(&self, active: bool) { + use crate::vm::Collection; + + ::VMCollection::set_concurrent_marking_state(active); + self.common() + .base + .global_state + .concurrent_marking_active + .store(active, Ordering::SeqCst); + } + + pub fn current_pause(&self) -> Option { + self.current_pause.load(Ordering::SeqCst) + } + + pub fn previous_pause(&self) -> Option { + self.previous_pause.load(Ordering::SeqCst) + } +} diff --git a/src/plan/concurrent/immix/mod.rs b/src/plan/concurrent/immix/mod.rs new file mode 100644 index 0000000000..0f55961897 --- /dev/null +++ b/src/plan/concurrent/immix/mod.rs @@ -0,0 +1,7 @@ +//! Plan: concurrent immix + +pub(in crate::plan) mod gc_work; +pub(in crate::plan) mod global; +pub(in crate::plan) mod mutator; + +pub use global::ConcurrentImmix; diff --git a/src/plan/concurrent/immix/mutator.rs b/src/plan/concurrent/immix/mutator.rs new file mode 100644 index 0000000000..1304e34ea0 --- /dev/null +++ b/src/plan/concurrent/immix/mutator.rs @@ -0,0 +1,84 @@ +use crate::plan::barriers::SATBBarrier; +use crate::plan::concurrent::barrier::SATBBarrierSemantics; +use crate::plan::concurrent::immix::ConcurrentImmix; +use crate::plan::mutator_context::create_allocator_mapping; +use crate::plan::mutator_context::create_space_mapping; + +use crate::plan::mutator_context::Mutator; +use crate::plan::mutator_context::MutatorBuilder; +use crate::plan::mutator_context::MutatorConfig; +use crate::plan::mutator_context::ReservedAllocators; +use crate::plan::AllocationSemantics; +use crate::util::alloc::allocators::AllocatorSelector; +use crate::util::alloc::ImmixAllocator; +use crate::util::opaque_pointer::{VMMutatorThread, VMWorkerThread}; +use crate::vm::VMBinding; +use crate::MMTK; +use enum_map::EnumMap; + +pub fn concurrent_immix_mutator_release( + mutator: &mut Mutator, + _tls: VMWorkerThread, +) { + let immix_allocator = unsafe { + mutator + .allocators + .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::Default]) + } + .downcast_mut::>() + .unwrap(); + immix_allocator.reset(); +} + +pub fn concurent_immix_mutator_prepare( + mutator: &mut Mutator, + _tls: VMWorkerThread, +) { + let immix_allocator = unsafe { + mutator + .allocators + .get_allocator_mut(mutator.config.allocator_mapping[AllocationSemantics::Default]) + } + .downcast_mut::>() + .unwrap(); + immix_allocator.reset(); +} + +pub(in crate::plan) const RESERVED_ALLOCATORS: ReservedAllocators = ReservedAllocators { + n_immix: 1, + ..ReservedAllocators::DEFAULT +}; + +lazy_static! 
{ + pub static ref ALLOCATOR_MAPPING: EnumMap = { + let mut map = create_allocator_mapping(RESERVED_ALLOCATORS, true); + map[AllocationSemantics::Default] = AllocatorSelector::Immix(0); + map + }; +} + +pub fn create_concurrent_immix_mutator( + mutator_tls: VMMutatorThread, + mmtk: &'static MMTK, +) -> Mutator { + let immix = mmtk + .get_plan() + .downcast_ref::>() + .unwrap(); + let config = MutatorConfig { + allocator_mapping: &ALLOCATOR_MAPPING, + space_mapping: Box::new({ + let mut vec = create_space_mapping(RESERVED_ALLOCATORS, true, immix); + vec.push((AllocatorSelector::Immix(0), &immix.immix_space)); + vec + }), + + prepare_func: &concurent_immix_mutator_prepare, + release_func: &concurrent_immix_mutator_release, + }; + + let builder = MutatorBuilder::new(mutator_tls, mmtk, config); + builder + .barrier(Box::new(SATBBarrier::new(SATBBarrierSemantics::new(mmtk)))) + .build() +} diff --git a/src/plan/concurrent/mod.rs b/src/plan/concurrent/mod.rs new file mode 100644 index 0000000000..94a582a4aa --- /dev/null +++ b/src/plan/concurrent/mod.rs @@ -0,0 +1,23 @@ +pub mod barrier; +pub mod concurrent_marking_work; +pub mod immix; + +use bytemuck::NoUninit; + +#[repr(u8)] +#[derive(Debug, PartialEq, Eq, Copy, Clone, NoUninit)] +pub enum Pause { + Full = 1, + InitialMark, + FinalMark, +} + +unsafe impl bytemuck::ZeroableInOption for Pause {} + +unsafe impl bytemuck::PodInOption for Pause {} + +impl Default for Pause { + fn default() -> Self { + Self::Full + } +} diff --git a/src/plan/global.rs b/src/plan/global.rs index 5a5bb38ab5..ddbed6c6f1 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -58,6 +58,9 @@ pub fn create_mutator( PlanSelector::StickyImmix => { crate::plan::sticky::immix::mutator::create_stickyimmix_mutator(tls, mmtk) } + PlanSelector::ConcurrentImmix => { + crate::plan::concurrent::immix::mutator::create_concurrent_immix_mutator(tls, mmtk) + } }) } @@ -91,6 +94,10 @@ pub fn create_plan( PlanSelector::StickyImmix => { Box::new(crate::plan::sticky::immix::StickyImmix::new(args)) as Box> } + PlanSelector::ConcurrentImmix => { + Box::new(crate::plan::concurrent::immix::ConcurrentImmix::new(args)) + as Box> + } }; // We have created Plan in the heap, and we won't explicitly move it. @@ -160,6 +167,11 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { /// Schedule work for the upcoming GC. fn schedule_collection(&'static self, _scheduler: &GCWorkScheduler); + /// Schedule work for the upcoming concurrent GC. + fn schedule_concurrent_collection(&'static self, _scheduler: &GCWorkScheduler) { + self.schedule_collection(_scheduler); + } + /// Get the common plan. CommonPlan is included by most of MMTk GC plans. 
fn common(&self) -> &CommonPlan { panic!("Common Plan not handled!") @@ -331,6 +343,9 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { space.verify_side_metadata_sanity(&mut side_metadata_sanity_checker); }) } + + fn gc_pause_start(&self, _scheduler: &GCWorkScheduler) {} + fn gc_pause_end(&self) {} } impl_downcast!(Plan assoc VM); @@ -601,6 +616,14 @@ impl CommonPlan { + self.base.get_used_pages() } + pub fn initial_pause_prepare(&mut self) { + self.los.initial_pause_prepare(); + } + + pub fn final_pause_release(&mut self) { + self.los.final_pause_release(); + } + pub fn prepare(&mut self, tls: VMWorkerThread, full_heap: bool) { self.immortal.prepare(); self.los.prepare(full_heap); diff --git a/src/plan/mod.rs b/src/plan/mod.rs index 74fcac2811..91e8a6a240 100644 --- a/src/plan/mod.rs +++ b/src/plan/mod.rs @@ -19,6 +19,7 @@ pub use barriers::BarrierSelector; pub(crate) mod gc_requester; mod global; +pub(crate) use concurrent::Pause; pub(crate) use global::create_gc_worker_context; pub(crate) use global::create_mutator; pub(crate) use global::create_plan; @@ -37,13 +38,15 @@ pub use plan_constraints::PlanConstraints; pub(crate) use plan_constraints::DEFAULT_PLAN_CONSTRAINTS; mod tracing; -pub use tracing::{ObjectQueue, ObjectsClosure, VectorObjectQueue, VectorQueue}; +pub use tracing::{ObjectQueue, ObjectsClosure, SlotIterator, VectorObjectQueue, VectorQueue}; /// Generational plans (with a copying nursery) mod generational; /// Sticky plans (using sticky marks for generational behaviors without a copying nursery) mod sticky; +mod concurrent; + mod immix; mod markcompact; mod marksweep; diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index eecd40cbaf..e9dad03de8 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -1,10 +1,12 @@ //! This module contains code useful for tracing, //! i.e. visiting the reachable objects by traversing all or part of an object graph. +use std::marker::PhantomData; + use crate::scheduler::gc_work::{ProcessEdgesWork, SlotOf}; use crate::scheduler::{GCWorker, WorkBucketStage, EDGES_WORK_BUFFER_SIZE}; -use crate::util::ObjectReference; -use crate::vm::SlotVisitor; +use crate::util::{ObjectReference, VMThread, VMWorkerThread}; +use crate::vm::{Scanning, SlotVisitor, VMBinding}; /// This trait represents an object queue to enqueue objects during tracing. pub trait ObjectQueue { @@ -63,6 +65,21 @@ impl VectorQueue { } self.buffer.push(v); } + + /// Return the len of the queue + pub fn len(&self) -> usize { + self.buffer.len() + } + + /// Replace what was in the queue with data in new_buffer + pub fn swap(&mut self, new_buffer: &mut Vec) { + std::mem::swap(&mut self.buffer, new_buffer) + } + + /// Empty the queue + pub fn clear(&mut self) { + self.buffer.clear() + } } impl Default for VectorQueue { @@ -134,3 +151,59 @@ impl Drop for ObjectsClosure<'_, E> { self.flush(); } } + +struct SlotIteratorImpl { + f: F, + // should_discover_references: bool, + // should_claim_clds: bool, + // should_follow_clds: bool, + _p: PhantomData, +} + +impl SlotVisitor for SlotIteratorImpl { + fn visit_slot(&mut self, slot: VM::VMSlot) { + (self.f)(slot); + } +} + +pub struct SlotIterator { + _p: PhantomData, +} + +impl SlotIterator { + pub fn iterate( + o: ObjectReference, + // should_discover_references: bool, + // should_claim_clds: bool, + // should_follow_clds: bool, + f: impl FnMut(VM::VMSlot), + // klass: Option
,
+    ) {
+        let mut x = SlotIteratorImpl::<VM, _> {
+            f,
+            // should_discover_references,
+            // should_claim_clds,
+            // should_follow_clds,
+            _p: PhantomData,
+        };
+        // if let Some(klass) = klass {
+        //     <VM::VMScanning as Scanning<VM>>::scan_object_with_klass(
+        //         VMWorkerThread(VMThread::UNINITIALIZED),
+        //         o,
+        //         &mut x,
+        //         klass,
+        //     );
+        // } else {
+        //     <VM::VMScanning as Scanning<VM>>::scan_object(
+        //         VMWorkerThread(VMThread::UNINITIALIZED),
+        //         o,
+        //         &mut x,
+        //     );
+        // }
+        <VM::VMScanning as Scanning<VM>>::scan_object(
+            VMWorkerThread(VMThread::UNINITIALIZED),
+            o,
+            &mut x,
+        );
+    }
+}
diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs
index a8bcd54fda..e205f3bc3d 100644
--- a/src/policy/immix/immixspace.rs
+++ b/src/policy/immix/immixspace.rs
@@ -203,6 +203,13 @@ impl<VM: VMBinding> Space<VM> for ImmixSpace<VM> {
     fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
         object_enum::enumerate_blocks_from_chunk_map::<VM>(enumerator, &self.chunk_map);
     }
+
+    fn concurrent_marking_active(&self) -> bool {
+        self.common()
+            .global_state
+            .concurrent_marking_active
+            .load(Ordering::Acquire)
+    }
 }
 
 impl<VM: VMBinding> crate::policy::gc_work::PolicyTraceObject<VM> for ImmixSpace<VM> {
@@ -411,6 +418,24 @@ impl<VM: VMBinding> ImmixSpace<VM> {
         &self.scheduler
     }
 
+    pub fn initial_pause_prepare(&mut self) {
+        // Make sure all allocated blocks have the unlog bit set during the initial mark.
+        if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC {
+            for chunk in self.chunk_map.all_chunks() {
+                side.bset_metadata(chunk.start(), Chunk::BYTES);
+            }
+        }
+    }
+
+    pub fn final_pause_release(&mut self) {
+        // Clear the unlog bit so that the SATB barrier is effectively disabled during the normal mutator phase (all objects are considered logged, so no slow path is taken).
+        if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC {
+            for chunk in self.chunk_map.all_chunks() {
+                side.bzero_metadata(chunk.start(), Chunk::BYTES);
+            }
+        }
+    }
+
     pub fn prepare(&mut self, major_gc: bool, plan_stats: Option<StatsForDefrag>) {
         if major_gc {
             // Update mark_state
@@ -572,6 +597,10 @@ impl<VM: VMBinding> ImmixSpace<VM> {
         self.chunk_map.set_allocated(block.chunk(), true);
         self.lines_consumed
             .fetch_add(Block::LINES, Ordering::SeqCst);
+        self.common()
+            .global_state
+            .concurrent_marking_threshold
+            .fetch_add(Block::PAGES, Ordering::Relaxed);
         Some(block)
     }
 
@@ -598,6 +627,10 @@ impl<VM: VMBinding> ImmixSpace<VM> {
             self.lines_consumed.fetch_add(lines_delta, Ordering::SeqCst);
 
             block.init(copy);
+            self.common()
+                .global_state
+                .concurrent_marking_threshold
+                .fetch_add(Block::PAGES, Ordering::Relaxed);
             return Some(block);
         } else {
             return None;
diff --git a/src/policy/immix/line.rs b/src/policy/immix/line.rs
index 94036ecc65..f48ea7d271 100644
--- a/src/policy/immix/line.rs
+++ b/src/policy/immix/line.rs
@@ -1,6 +1,8 @@
+use std::ops::Range;
+
 use super::block::Block;
 use crate::util::linear_scan::{Region, RegionIterator};
-use crate::util::metadata::side_metadata::SideMetadataSpec;
+use crate::util::metadata::side_metadata::{address_to_meta_address, SideMetadataSpec};
 use crate::{
     util::{Address, ObjectReference},
     vm::*,
@@ -81,4 +83,14 @@ impl Line {
         }
         marked_lines
     }
+
+    pub fn initialize_mark_table_as_marked<VM: VMBinding>(lines: Range<Line>) {
+        let meta = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.extract_side_spec();
+        let start: *mut u8 = address_to_meta_address(meta, lines.start.start()).to_mut_ptr();
+        let limit: *mut u8 = address_to_meta_address(meta, lines.end.start()).to_mut_ptr();
+        unsafe {
+            let bytes = limit.offset_from(start) as usize;
+            std::ptr::write_bytes(start, 0xffu8, bytes);
+        }
+    }
 }
diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs
index abe7976082..369b913444 100644
--- a/src/policy/largeobjectspace.rs
+++ b/src/policy/largeobjectspace.rs
@@ -9,6 +9,7 @@ use crate::util::alloc::allocator::AllocationOptions;
 use crate::util::constants::BYTES_IN_PAGE;
 use crate::util::heap::{FreeListPageResource, PageResource};
 use crate::util::metadata;
+use crate::util::object_enum::ClosureObjectEnumerator;
 use crate::util::object_enum::ObjectEnumerator;
 use crate::util::opaque_pointer::*;
 use crate::util::treadmill::TreadMill;
@@ -59,6 +60,24 @@ impl<VM: VMBinding> SFT for LargeObjectSpace<VM> {
         true
     }
     fn initialize_object_metadata(&self, object: ObjectReference, alloc: bool) {
+        if self.concurrent_marking_active() {
+            VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.store_atomic::<VM, u8>(
+                object,
+                self.mark_state,
+                None,
+                Ordering::SeqCst,
+            );
+            debug_assert!(
+                VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.load_atomic::<VM, u8>(
+                    object,
+                    None,
+                    Ordering::Acquire
+                ) == 0
+            );
+
+            self.treadmill.add_to_treadmill(object, false);
+            return;
+        }
         let old_value = VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.load_atomic::<VM, u8>(
             object,
             None,
@@ -192,6 +211,13 @@ impl<VM: VMBinding> Space<VM> for LargeObjectSpace<VM> {
     fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) {
         self.treadmill.enumerate_objects(enumerator);
     }
+
+    fn concurrent_marking_active(&self) -> bool {
+        self.common()
+            .global_state
+            .concurrent_marking_active
+            .load(Ordering::Acquire)
+    }
 }
 
 use crate::scheduler::GCWorker;
@@ -243,6 +269,24 @@ impl<VM: VMBinding> LargeObjectSpace<VM> {
         }
     }
 
+    pub fn initial_pause_prepare(&self) {
+        use crate::util::object_enum::ClosureObjectEnumerator;
+
+        debug_assert!(self.treadmill.is_from_space_empty());
+        debug_assert!(self.treadmill.is_nursery_empty());
+        let mut enumerator = ClosureObjectEnumerator::<_, VM>::new(|object| {
+            VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::<VM>(object, Ordering::SeqCst);
+        });
+        self.treadmill.enumerate_objects(&mut enumerator);
+    }
+
+    pub fn final_pause_release(&self) {
+        let mut enumerator = ClosureObjectEnumerator::<_, VM>::new(|object| {
+            VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.clear::<VM>(object, Ordering::SeqCst);
+        });
+        self.treadmill.enumerate_objects(&mut enumerator);
+    }
+
     pub fn prepare(&mut self, full_heap: bool) {
         if full_heap {
             debug_assert!(self.treadmill.is_from_space_empty());
@@ -259,6 +303,7 @@ impl<VM: VMBinding> LargeObjectSpace<VM> {
             self.sweep_large_pages(false);
         }
     }
+
     // Allow nested-if for this function to make it clear that test_and_mark() is only executed
     // for the outer condition is met.
     #[allow(clippy::collapsible_if)]
@@ -332,6 +377,10 @@ impl<VM: VMBinding> LargeObjectSpace<VM> {
         pages: usize,
         alloc_options: AllocationOptions,
     ) -> Address {
+        self.common()
+            .global_state
+            .concurrent_marking_threshold
+            .fetch_add(pages, Ordering::Relaxed);
         self.acquire(tls, pages, alloc_options)
     }
 
@@ -391,6 +440,10 @@ impl<VM: VMBinding> LargeObjectSpace<VM> {
         ) & NURSERY_BIT
             == NURSERY_BIT
     }
+
+    pub fn is_marked(&self, object: ObjectReference) -> bool {
+        self.test_mark_bit(object, self.mark_state)
+    }
 }
 
 fn get_super_page(cell: Address) -> Address {
diff --git a/src/policy/space.rs b/src/policy/space.rs
index e44874fe5b..a0676d9da0 100644
--- a/src/policy/space.rs
+++ b/src/policy/space.rs
@@ -433,6 +433,10 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
     /// the execution time. For LOS, it will be cheaper to enumerate individual objects than
     /// scanning VO bits because it is sparse.
     fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator);
+
+    fn concurrent_marking_active(&self) -> bool {
+        false
+    }
 }
 
 /// Print the VM map for a space.
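Note on the invariant behind the SATB pieces in this patch (SATBBarrier, SATBBarrierSemantics, ProcessModBufSATB): a snapshot-at-the-beginning collector must eventually trace every reference that existed when marking started, so the pre-write barrier records the *old* value of a slot before the mutator overwrites it. A standalone sketch of just that invariant, with toy types (ObjRef, SatbBuffer) and a plain bool in place of MMTk's unlog-bit fast path; none of these names are MMTk API:

use std::cell::RefCell;

#[derive(Clone, Copy, Debug)]
struct ObjRef(usize); // toy stand-in for ObjectReference

struct SatbBuffer {
    marking_active: bool,
    buf: RefCell<Vec<ObjRef>>,
}

impl SatbBuffer {
    const CAPACITY: usize = 4;

    // Pre-write barrier: called *before* `slot` is overwritten.
    fn object_reference_write_pre(&self, slot: &RefCell<Option<ObjRef>>) {
        if !self.marking_active {
            return; // fast path: no-op outside concurrent marking
        }
        if let Some(old) = *slot.borrow() {
            // Record the about-to-be-deleted reference for the tracer.
            self.buf.borrow_mut().push(old);
            if self.buf.borrow().len() >= Self::CAPACITY {
                self.flush();
            }
        }
    }

    // Hand the recorded references to the collector (here: just print them).
    fn flush(&self) {
        for o in self.buf.borrow_mut().drain(..) {
            println!("enqueue {o:?} for concurrent tracing");
        }
    }
}

fn main() {
    let barrier = SatbBuffer { marking_active: true, buf: RefCell::new(Vec::new()) };
    let slot = RefCell::new(Some(ObjRef(42)));
    barrier.object_reference_write_pre(&slot); // records ObjRef(42)
    *slot.borrow_mut() = Some(ObjRef(7));      // the actual field write
    barrier.flush();
}

In the patch itself the fast path is the per-object unlog bit checked by object_is_unlogged, and flushed buffers become ProcessModBufSATB work packets rather than prints.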
diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 7e50c86aa3..c80d12a5e5 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -2,6 +2,7 @@ use super::work_bucket::WorkBucketStage; use super::*; use crate::global_state::GcStatus; use crate::plan::ObjectsClosure; +use crate::plan::Pause; use crate::plan::VectorObjectQueue; use crate::util::*; use crate::vm::slot::Slot; @@ -29,7 +30,14 @@ impl GCWork for ScheduleCollection { mmtk.set_gc_status(GcStatus::GcPrepare); // Let the plan to schedule collection work - mmtk.get_plan().schedule_collection(worker.scheduler()); + if mmtk.is_user_triggered_collection() || is_emergency { + // user triggered collection is always stop-the-world + mmtk.get_plan().schedule_collection(worker.scheduler()); + } else { + // Let the plan to schedule collection work + mmtk.get_plan() + .schedule_concurrent_collection(worker.scheduler()); + } } } @@ -191,11 +199,24 @@ impl GCWork for ReleaseCollector { /// /// TODO: Smaller work granularity #[derive(Default)] -pub struct StopMutators(PhantomData); +pub struct StopMutators { + pause: Pause, + phantom: PhantomData, +} impl StopMutators { pub fn new() -> Self { - Self(PhantomData) + Self { + pause: Pause::Full, + phantom: PhantomData, + } + } + + pub fn new_args(pause: Pause) -> Self { + Self { + pause, + phantom: PhantomData, + } } } @@ -206,9 +227,16 @@ impl GCWork for StopMutators { ::VMCollection::stop_all_mutators(worker.tls, |mutator| { // TODO: The stack scanning work won't start immediately, as the `Prepare` bucket is not opened yet (the bucket is opened in notify_mutators_paused). // Should we push to Unconstrained instead? - mmtk.scheduler.work_buckets[WorkBucketStage::Prepare] - .add(ScanMutatorRoots::(mutator)); + + if self.pause != Pause::FinalMark { + mmtk.scheduler.work_buckets[WorkBucketStage::Prepare] + .add(ScanMutatorRoots::(mutator)); + } else { + mutator.flush(); + } }); + mmtk.scheduler.set_in_gc_pause(true); + mmtk.get_plan().gc_pause_start(&mmtk.scheduler); trace!("stop_all_mutators end"); mmtk.scheduler.notify_mutators_paused(mmtk); mmtk.scheduler.work_buckets[WorkBucketStage::Prepare].add(ScanVMSpecificRoots::::new()); diff --git a/src/scheduler/mod.rs b/src/scheduler/mod.rs index 33e89be0fe..5006edbe1d 100644 --- a/src/scheduler/mod.rs +++ b/src/scheduler/mod.rs @@ -17,7 +17,7 @@ pub(crate) use scheduler::GCWorkScheduler; mod stat; mod work_counter; -mod work; +pub(crate) mod work; pub use work::GCWork; pub(crate) use work::GCWorkContext; diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index e849f9df07..6a331aed1f 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -14,7 +14,7 @@ use crate::util::options::AffinityKind; use crate::vm::Collection; use crate::vm::VMBinding; use crate::Plan; -use crossbeam::deque::Steal; +use crossbeam::deque::{Injector, Steal}; use enum_map::{Enum, EnumMap}; use std::collections::HashMap; use std::sync::Arc; @@ -29,6 +29,12 @@ pub struct GCWorkScheduler { pub(crate) worker_monitor: Arc, /// How to assign the affinity of each GC thread. Specified by the user. affinity: AffinityKind, + + pub(super) postponed_concurrent_work: + spin::RwLock>>>, + pub(super) postponed_concurrent_work_prioritized: + spin::RwLock>>>, + in_gc_pause: std::sync::atomic::AtomicBool, } // FIXME: GCWorkScheduler should be naturally Sync, but we cannot remove this `impl` yet. 
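ScheduleCollection above now dispatches between a stop-the-world and a concurrent schedule, and which pause actually runs is decided by ConcurrentImmix::select_collection_kind from earlier in this patch. Its decision table, restated here as a pure function for reference (a sketch only; the real method reads these flags from the plan's global state):

#[derive(Debug, PartialEq, Clone, Copy)]
enum Pause { Full, InitialMark, FinalMark }

fn select_pause(
    emergency: bool,
    user_triggered: bool,
    marking_in_progress: bool,
    marking_packets_drained: bool,
) -> Pause {
    if emergency || user_triggered {
        // Emergency and user-triggered collections are always full STW GCs.
        Pause::Full
    } else if !marking_in_progress && marking_packets_drained {
        // No concurrent mark is running: start one with an initial-mark pause.
        Pause::InitialMark
    } else if marking_in_progress && marking_packets_drained {
        // The concurrent mark has finished its packets: close it with a final mark.
        Pause::FinalMark
    } else {
        // Marking is still running with outstanding packets: fall back to a full GC.
        Pause::Full
    }
}

fn main() {
    assert_eq!(select_pause(false, false, false, true), Pause::InitialMark);
    assert_eq!(select_pause(false, false, true, true), Pause::FinalMark);
    assert_eq!(select_pause(false, false, true, false), Pause::Full);
    assert_eq!(select_pause(true, false, true, true), Pause::Full);
}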
@@ -47,12 +53,25 @@ impl GCWorkScheduler { WorkBucket::new(active, worker_monitor.clone()) }); + work_buckets[WorkBucketStage::Unconstrained].enable_prioritized_queue(); + // Set the open condition of each bucket. { let first_stw_stage = WorkBucketStage::first_stw_stage(); let mut open_stages: Vec = vec![first_stw_stage]; let stages = (0..WorkBucketStage::LENGTH).map(WorkBucketStage::from_usize); for stage in stages { + { + if stage == WorkBucketStage::ConcurrentSentinel { + work_buckets[stage].set_open_condition( + move |scheduler: &GCWorkScheduler| { + scheduler.work_buckets[WorkBucketStage::Unconstrained].is_drained() + }, + ); + open_stages.push(stage); + continue; + } + } // Unconstrained is always open. // The first STW stage (Prepare) will be opened when the world stopped // (i.e. when all mutators are suspended). @@ -75,9 +94,44 @@ impl GCWorkScheduler { worker_group, worker_monitor, affinity, + postponed_concurrent_work: spin::RwLock::new(crossbeam::deque::Injector::new()), + postponed_concurrent_work_prioritized: spin::RwLock::new( + crossbeam::deque::Injector::new(), + ), + in_gc_pause: std::sync::atomic::AtomicBool::new(false), }) } + pub fn postpone(&self, w: impl GCWork) { + self.postponed_concurrent_work.read().push(Box::new(w)) + } + + pub fn postpone_prioritized(&self, w: impl GCWork) { + self.postponed_concurrent_work_prioritized + .read() + .push(Box::new(w)) + } + + pub fn postpone_dyn(&self, w: Box>) { + self.postponed_concurrent_work.read().push(w) + } + + pub fn postpone_dyn_prioritized(&self, w: Box>) { + self.postponed_concurrent_work_prioritized.read().push(w) + } + + pub fn postpone_all(&self, ws: Vec>>) { + let postponed_concurrent_work = self.postponed_concurrent_work.read(); + ws.into_iter() + .for_each(|w| postponed_concurrent_work.push(w)); + } + + pub fn postpone_all_prioritized(&self, ws: Vec>>) { + let postponed_concurrent_work = self.postponed_concurrent_work_prioritized.read(); + ws.into_iter() + .for_each(|w| postponed_concurrent_work.push(w)); + } + pub fn num_workers(&self) -> usize { self.worker_group.as_ref().worker_count() } @@ -289,6 +343,7 @@ impl GCWorkScheduler { self.work_buckets.iter().for_each(|(id, bkt)| { if id != WorkBucketStage::Unconstrained { bkt.deactivate(); + bkt.set_as_enabled(); } }); } @@ -298,6 +353,7 @@ impl GCWorkScheduler { self.work_buckets.iter().for_each(|(id, bkt)| { if id != WorkBucketStage::Unconstrained && id != first_stw_stage { bkt.deactivate(); + bkt.set_as_enabled(); } }); } @@ -330,6 +386,18 @@ impl GCWorkScheduler { } } + pub(super) fn set_in_gc_pause(&self, in_gc_pause: bool) { + self.in_gc_pause + .store(in_gc_pause, std::sync::atomic::Ordering::SeqCst); + for wb in self.work_buckets.values() { + wb.set_in_concurrent(!in_gc_pause); + } + } + + pub fn in_concurrent(&self) -> bool { + !self.in_gc_pause.load(std::sync::atomic::Ordering::SeqCst) + } + /// Get a schedulable work packet without retry. fn poll_schedulable_work_once(&self, worker: &GCWorker) -> Steal>> { let mut should_retry = false; @@ -524,6 +592,8 @@ impl GCWorkScheduler { let mmtk = worker.mmtk; + let (queue, pqueue) = self.schedule_postponed_concurrent_packets(); + // Tell GC trigger that GC ended - this happens before we resume mutators. mmtk.gc_trigger.policy.on_gc_end(mmtk); @@ -574,13 +644,17 @@ impl GCWorkScheduler { // reset the logging info at the end of each GC mmtk.slot_logger.reset(); } - + mmtk.get_plan().gc_pause_end(); // Reset the triggering information. 
mmtk.state.reset_collection_trigger(); // Set to NotInGC after everything, and right before resuming mutators. mmtk.set_gc_status(GcStatus::NotInGC); ::VMCollection::resume_mutators(worker.tls); + + self.set_in_gc_pause(false); + self.schedule_concurrent_packets(queue, pqueue); + self.debug_assert_all_buckets_deactivated(); } pub fn enable_stat(&self) { @@ -613,4 +687,47 @@ impl GCWorkScheduler { first_stw_bucket.activate(); self.worker_monitor.notify_work_available(true); } + + fn schedule_postponed_concurrent_packets( + &self, + ) -> (Injector>>, Injector>>) { + let mut queue = Injector::new(); + type Q = Injector>>; + std::mem::swap::>(&mut queue, &mut self.postponed_concurrent_work.write()); + + let mut pqueue = Injector::new(); + std::mem::swap::>( + &mut pqueue, + &mut self.postponed_concurrent_work_prioritized.write(), + ); + (queue, pqueue) + } + + pub(super) fn schedule_concurrent_packets( + &self, + queue: Injector>>, + pqueue: Injector>>, + ) { + // crate::MOVE_CONCURRENT_MARKING_TO_STW.store(false, Ordering::SeqCst); + // crate::PAUSE_CONCURRENT_MARKING.store(false, Ordering::SeqCst); + let mut notify = false; + if !queue.is_empty() { + let old_queue = self.work_buckets[WorkBucketStage::Unconstrained].swap_queue(queue); + debug_assert!(old_queue.is_empty()); + notify = true; + } + if !pqueue.is_empty() { + let old_queue = + self.work_buckets[WorkBucketStage::Unconstrained].swap_queue_prioritized(pqueue); + debug_assert!(old_queue.is_empty()); + notify = true; + } + if notify { + self.wakeup_all_concurrent_workers(); + } + } + + pub fn wakeup_all_concurrent_workers(&self) { + self.worker_monitor.notify_work_available(true); + } } diff --git a/src/scheduler/work_bucket.rs b/src/scheduler/work_bucket.rs index ab55093bad..563c43a9b5 100644 --- a/src/scheduler/work_bucket.rs +++ b/src/scheduler/work_bucket.rs @@ -7,34 +7,35 @@ use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; struct BucketQueue { - queue: Injector>>, + // queue: Injector>>, + queue: std::sync::RwLock>>>, } impl BucketQueue { fn new() -> Self { Self { - queue: Injector::new(), + queue: std::sync::RwLock::new(Injector::new()), } } fn is_empty(&self) -> bool { - self.queue.is_empty() + self.queue.read().unwrap().is_empty() } fn steal_batch_and_pop( &self, dest: &Worker>>, ) -> Steal>> { - self.queue.steal_batch_and_pop(dest) + self.queue.read().unwrap().steal_batch_and_pop(dest) } fn push(&self, w: Box>) { - self.queue.push(w); + self.queue.read().unwrap().push(w); } fn push_all(&self, ws: Vec>>) { for w in ws { - self.queue.push(w); + self.queue.read().unwrap().push(w); } } } @@ -59,6 +60,8 @@ pub struct WorkBucket { /// recursively, such as ephemerons and Java-style SoftReference and finalizers. Sentinels /// can be used repeatedly to discover and process more such objects. 
sentinel: Mutex>>>, + in_concurrent: AtomicBool, + disable: AtomicBool, } impl WorkBucket { @@ -70,9 +73,55 @@ impl WorkBucket { monitor, can_open: None, sentinel: Mutex::new(None), + in_concurrent: AtomicBool::new(true), + disable: AtomicBool::new(false), } } + pub fn set_in_concurrent(&self, in_concurrent: bool) { + self.in_concurrent.store(in_concurrent, Ordering::SeqCst); + } + + pub fn set_as_enabled(&self) { + self.disable.store(false, Ordering::SeqCst) + } + + pub fn set_as_disabled(&self) { + self.disable.store(true, Ordering::SeqCst) + } + + pub fn disabled(&self) -> bool { + self.disable.load(Ordering::Relaxed) + } + + pub fn enable_prioritized_queue(&mut self) { + self.prioritized_queue = Some(BucketQueue::new()); + } + + pub fn swap_queue( + &self, + mut new_queue: Injector>>, + ) -> Injector>> { + let mut queue = self.queue.queue.write().unwrap(); + std::mem::swap::>>>(&mut queue, &mut new_queue); + new_queue + } + + pub fn swap_queue_prioritized( + &self, + mut new_queue: Injector>>, + ) -> Injector>> { + let mut queue = self + .prioritized_queue + .as_ref() + .unwrap() + .queue + .write() + .unwrap(); + std::mem::swap::>>>(&mut queue, &mut new_queue); + new_queue + } + fn notify_one_worker(&self) { // If the bucket is not activated, don't notify anyone. if !self.is_activated() { @@ -240,6 +289,8 @@ impl WorkBucket { pub enum WorkBucketStage { /// This bucket is always open. Unconstrained, + Initial, + ConcurrentSentinel, /// Preparation work. Plans, spaces, GC workers, mutators, etc. should be prepared for GC at /// this stage. Prepare, diff --git a/src/util/address.rs b/src/util/address.rs index c87a5d3abb..45192157d0 100644 --- a/src/util/address.rs +++ b/src/util/address.rs @@ -465,6 +465,7 @@ mod tests { } } +use crate::plan::SlotIterator; use crate::vm::VMBinding; /// `ObjectReference` represents address for an object. Compared with `Address`, operations allowed @@ -699,6 +700,10 @@ impl ObjectReference { pub fn is_sane(self) -> bool { unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_sane() } + + pub fn iterate_fields(self, f: F) { + SlotIterator::::iterate(self, f) + } } /// allows print Address as upper-case hex value diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs index 807ddded90..bf910c1e33 100644 --- a/src/util/alloc/immix_allocator.rs +++ b/src/util/alloc/immix_allocator.rs @@ -265,6 +265,21 @@ impl ImmixAllocator { // Update the hole-searching cursor to None. Some(end_line) }; + // mark objects if concurrent marking is active + if self.immix_space().concurrent_marking_active() { + let state = self + .space + .line_mark_state + .load(std::sync::atomic::Ordering::Acquire); + + for line in + crate::util::linear_scan::RegionIterator::::new(start_line, end_line) + { + line.mark(state); + } + + Line::initialize_mark_table_as_marked::(start_line..end_line); + } return true; } else { // No more recyclable lines. Set the hole-searching cursor to None. 
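Both allocator hunks in this file implement "allocate black": lines handed to a mutator while concurrent marking is active are marked immediately and their side mark table is bulk-set, so the concurrent tracer never treats fresh allocation as garbage. A toy model of the bulk write, assuming a simplified one-mark-byte-per-256-byte-line layout rather than MMTk's real bit-granular side metadata:

const LINE_BYTES: usize = 256;

// Set every per-line mark byte covering [start_addr, end_addr) to 0xff,
// analogous in spirit to Line::initialize_mark_table_as_marked above.
fn mark_lines_bulk(mark_table: &mut [u8], start_addr: usize, end_addr: usize) {
    let first = start_addr / LINE_BYTES;
    let last = end_addr / LINE_BYTES; // end is exclusive
    for byte in &mut mark_table[first..last] {
        *byte = 0xff;
    }
}

fn main() {
    let mut table = vec![0u8; 16];
    mark_lines_bulk(&mut table, 512, 1024); // covers lines 2 and 3
    assert_eq!(&table[2..4], &[0xff, 0xff]);
    assert_eq!(table[1], 0);
}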
@@ -305,6 +320,20 @@ impl ImmixAllocator { // Bulk clear stale line mark state Line::MARK_TABLE .bzero_metadata(block.start(), crate::policy::immix::block::Block::BYTES); + // mark objects if concurrent marking is active + if self.immix_space().concurrent_marking_active() { + let state = self + .space + .line_mark_state + .load(std::sync::atomic::Ordering::Acquire); + for line in block.lines() { + line.mark(state); + } + + Line::initialize_mark_table_as_marked::( + block.start_line()..block.end_line(), + ); + } if self.request_for_large { self.large_bump_pointer.cursor = block.start(); self.large_bump_pointer.limit = block.end(); diff --git a/src/util/options.rs b/src/util/options.rs index abfada69c8..6adf7d3b84 100644 --- a/src/util/options.rs +++ b/src/util/options.rs @@ -48,6 +48,8 @@ pub enum PlanSelector { MarkCompact, /// An Immix collector that uses a sticky mark bit to allow generational behaviors without a copying nursery. StickyImmix, + /// Concurrent non-moving immix using SATB + ConcurrentImmix, } /// MMTk option for perf events diff --git a/src/vm/collection.rs b/src/vm/collection.rs index 16e87eebe0..98121317b9 100644 --- a/src/vm/collection.rs +++ b/src/vm/collection.rs @@ -162,4 +162,7 @@ pub trait Collection { fn create_gc_trigger() -> Box> { unimplemented!() } + + /// Inform the VM of concurrent marking status + fn set_concurrent_marking_state(_active: bool) {} } From 9ac22bd52c2563fc93ae6f9b85304ecab02188e9 Mon Sep 17 00:00:00 2001 From: Tianle Qiu Date: Fri, 25 Jul 2025 05:11:20 +0000 Subject: [PATCH 02/59] minor --- src/plan/concurrent/immix/global.rs | 1 + src/plan/plan_constraints.rs | 2 ++ src/policy/immix/mod.rs | 2 +- src/policy/largeobjectspace.rs | 5 +++-- src/policy/space.rs | 2 ++ 5 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index bb8d7f6879..4da77ec505 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -64,6 +64,7 @@ pub const CONCURRENT_IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE, needs_prepare_mutator: true, barrier: crate::BarrierSelector::SATBBarrier, + needs_satb: true, ..PlanConstraints::default() }; diff --git a/src/plan/plan_constraints.rs b/src/plan/plan_constraints.rs index 3110eb7538..6025020c10 100644 --- a/src/plan/plan_constraints.rs +++ b/src/plan/plan_constraints.rs @@ -45,6 +45,7 @@ pub struct PlanConstraints { /// `MutatorConfig::prepare_func`). Those plans can set this to `false` so that the /// `PrepareMutator` work packets will not be created at all. pub needs_prepare_mutator: bool, + pub needs_satb: bool, } impl PlanConstraints { @@ -67,6 +68,7 @@ impl PlanConstraints { barrier: BarrierSelector::NoBarrier, // If we use mark sweep as non moving space, we need to prepare mutator. See [`common_prepare_func`]. needs_prepare_mutator: cfg!(feature = "marksweep_as_nonmoving"), + needs_satb: false, } } } diff --git a/src/policy/immix/mod.rs b/src/policy/immix/mod.rs index d5895e9470..7243832b3a 100644 --- a/src/policy/immix/mod.rs +++ b/src/policy/immix/mod.rs @@ -16,4 +16,4 @@ pub const BLOCK_ONLY: bool = false; /// Mark lines when scanning objects. /// Otherwise, do it at mark time. 
-pub const MARK_LINE_AT_SCAN_TIME: bool = true; +pub const MARK_LINE_AT_SCAN_TIME: bool = false; diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index 369b913444..2563235e3a 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -274,6 +274,7 @@ impl LargeObjectSpace { debug_assert!(self.treadmill.is_from_space_empty()); debug_assert!(self.treadmill.is_nursery_empty()); + debug_assert!(self.common.needs_satb); let mut enumator = ClosureObjectEnumerator::<_, VM>::new(|object| { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); }); @@ -289,7 +290,7 @@ impl LargeObjectSpace { pub fn prepare(&mut self, full_heap: bool) { if full_heap { - debug_assert!(self.treadmill.is_from_space_empty()); + // debug_assert!(self.treadmill.is_from_space_empty()); self.mark_state = MARK_BIT - self.mark_state; } self.treadmill.flip(full_heap); @@ -353,7 +354,7 @@ impl LargeObjectSpace { #[cfg(feature = "vo_bit")] crate::util::metadata::vo_bit::unset_vo_bit(object); // Clear log bits for dead objects to prevent a new nursery object having the unlog bit set - if self.common.needs_log_bit { + if self.common.needs_log_bit || self.common.needs_satb { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.clear::(object, Ordering::SeqCst); } self.pr diff --git a/src/policy/space.rs b/src/policy/space.rs index a0676d9da0..80bbc2b424 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -528,6 +528,7 @@ pub struct CommonSpace { /// This field equals to needs_log_bit in the plan constraints. // TODO: This should be a constant for performance. pub needs_log_bit: bool, + pub needs_satb: bool, /// A lock used during acquire() to make sure only one thread can allocate. pub acquire_lock: Mutex<()>, @@ -598,6 +599,7 @@ impl CommonSpace { vm_map: args.plan_args.vm_map, mmapper: args.plan_args.mmapper, needs_log_bit: args.plan_args.constraints.needs_log_bit, + needs_satb: args.plan_args.constraints.needs_satb, gc_trigger: args.plan_args.gc_trigger, metadata: SideMetadataContext { global: args.plan_args.global_side_metadata_specs, From 100c04969bb6241339eaa45eba0e9b9ba219ebb3 Mon Sep 17 00:00:00 2001 From: Tianle Qiu Date: Mon, 28 Jul 2025 04:39:20 +0000 Subject: [PATCH 03/59] minor --- src/scheduler/scheduler.rs | 19 +++++++++++-------- .../mock_tests/mock_test_allocator_info.rs | 1 + 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index 6a331aed1f..eb40544028 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -20,6 +20,8 @@ use std::collections::HashMap; use std::sync::Arc; use std::time::Instant; +type PostponeQueue = Injector>>; + pub struct GCWorkScheduler { /// Work buckets pub work_buckets: EnumMap>, @@ -688,15 +690,16 @@ impl GCWorkScheduler { self.worker_monitor.notify_work_available(true); } - fn schedule_postponed_concurrent_packets( - &self, - ) -> (Injector>>, Injector>>) { + fn schedule_postponed_concurrent_packets(&self) -> (PostponeQueue, PostponeQueue) { let mut queue = Injector::new(); - type Q = Injector>>; - std::mem::swap::>(&mut queue, &mut self.postponed_concurrent_work.write()); + + std::mem::swap::>( + &mut queue, + &mut self.postponed_concurrent_work.write(), + ); let mut pqueue = Injector::new(); - std::mem::swap::>( + std::mem::swap::>( &mut pqueue, &mut self.postponed_concurrent_work_prioritized.write(), ); @@ -705,8 +708,8 @@ impl GCWorkScheduler { pub(super) fn schedule_concurrent_packets( &self, - queue: Injector>>, - 
pqueue: Injector>>, + queue: PostponeQueue, + pqueue: PostponeQueue, ) { // crate::MOVE_CONCURRENT_MARKING_TO_STW.store(false, Ordering::SeqCst); // crate::PAUSE_CONCURRENT_MARKING.store(false, Ordering::SeqCst); diff --git a/src/vm/tests/mock_tests/mock_test_allocator_info.rs b/src/vm/tests/mock_tests/mock_test_allocator_info.rs index fc288e8041..a856278e94 100644 --- a/src/vm/tests/mock_tests/mock_test_allocator_info.rs +++ b/src/vm/tests/mock_tests/mock_test_allocator_info.rs @@ -29,6 +29,7 @@ pub fn test_allocator_info() { | PlanSelector::GenImmix | PlanSelector::MarkCompact | PlanSelector::Compressor + | PlanSelector::ConcurrentImmix | PlanSelector::StickyImmix => { // These plans all use bump pointer allocator. let AllocatorInfo::BumpPointer { From 2bbb200c195ac372acaa862e95c67fd3094f58a7 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Thu, 31 Jul 2025 02:17:19 +0000 Subject: [PATCH 04/59] Add ref/finalizer packets for final pause. Use log instead of println. --- src/plan/concurrent/immix/global.rs | 32 +++++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 4da77ec505..ed52096212 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -255,7 +255,7 @@ impl Plan for ConcurrentImmix { self.set_concurrent_marking_state(false); } } - println!("{:?} start", pause); + info!("{:?} start", pause); } fn gc_pause_end(&self) { @@ -265,7 +265,7 @@ impl Plan for ConcurrentImmix { } self.previous_pause.store(Some(pause), Ordering::SeqCst); self.current_pause.store(None, Ordering::SeqCst); - println!("{:?} end", pause); + info!("{:?} end", pause); } } @@ -410,6 +410,34 @@ impl ConcurrentImmix { scheduler.work_buckets[WorkBucketStage::Release].add(Release::< ConcurrentImmixGCWorkContext>, >::new(self)); + + // Deal with weak ref and finalizers + // TODO: Check against schedule_common_work and see if we are still missing any work packet + type RefProcessingEdges = + crate::scheduler::gc_work::PlanProcessEdges, TRACE_KIND_FAST>; + // Reference processing + if !*self.base().options.no_reference_types { + use crate::util::reference_processor::{ + PhantomRefProcessing, SoftRefProcessing, WeakRefProcessing, + }; + scheduler.work_buckets[WorkBucketStage::SoftRefClosure] + .add(SoftRefProcessing::>::new()); + scheduler.work_buckets[WorkBucketStage::WeakRefClosure] + .add(WeakRefProcessing::::new()); + scheduler.work_buckets[WorkBucketStage::PhantomRefClosure] + .add(PhantomRefProcessing::::new()); + + use crate::util::reference_processor::RefEnqueue; + scheduler.work_buckets[WorkBucketStage::Release].add(RefEnqueue::::new()); + } + + // Finalization + if !*self.base().options.no_finalizer { + use crate::util::finalizable_processor::{Finalization, ForwardFinalization}; + // finalization + scheduler.work_buckets[WorkBucketStage::FinalRefClosure] + .add(Finalization::>::new()); + } } pub fn concurrent_marking_in_progress(&self) -> bool { From 90f451886da0cceb0c87d770ac9294192cda7355 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Thu, 31 Jul 2025 05:35:52 +0000 Subject: [PATCH 05/59] schedule_concurrent_packets before resuming mutators --- src/scheduler/scheduler.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index eb40544028..5b5448c1d8 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -650,13 +650,13 @@ impl GCWorkScheduler { // Reset the triggering 
information. mmtk.state.reset_collection_trigger(); - // Set to NotInGC after everything, and right before resuming mutators. - mmtk.set_gc_status(GcStatus::NotInGC); - ::VMCollection::resume_mutators(worker.tls); - self.set_in_gc_pause(false); self.schedule_concurrent_packets(queue, pqueue); self.debug_assert_all_buckets_deactivated(); + + // Set to NotInGC after everything, and right before resuming mutators. + mmtk.set_gc_status(GcStatus::NotInGC); + ::VMCollection::resume_mutators(worker.tls); } pub fn enable_stat(&self) { From 521adea97dd394a2c46c6aa07a5abb05f83791f3 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 31 Jul 2025 16:55:02 +0800 Subject: [PATCH 06/59] eBPF tracing tools for concurrent Immix --- .../concurrent/concurrent_marking_work.rs | 19 ++++++++++++--- src/plan/gc_requester.rs | 1 + src/scheduler/scheduler.rs | 1 + tools/tracing/timeline/capture.bt | 15 ++++++++++++ tools/tracing/timeline/visualize.py | 24 +++++++++++++++++++ 5 files changed, 57 insertions(+), 3 deletions(-) diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index 6252b81eab..113776d9f2 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -35,7 +35,10 @@ impl ConcurrentTraceObjects { .get_plan() .downcast_ref::>() .unwrap(); - crate::NUM_CONCURRENT_TRACING_PACKETS.fetch_add(1, Ordering::SeqCst); + let old_value = crate::NUM_CONCURRENT_TRACING_PACKETS.fetch_add(1, Ordering::SeqCst); + let new_value = old_value + 1; + probe!(mmtk, num_concurrent_tracing_packets_change, new_value); + Self { plan, objects: Some(objects), @@ -106,9 +109,13 @@ unsafe impl Send for ConcurrentTraceObjects {} impl GCWork for ConcurrentTraceObjects { fn do_work(&mut self, worker: &mut GCWorker, _mmtk: &'static MMTK) { self.worker = worker; + let mut num_objects = 0; + let mut num_next_objects = 0; + let mut iterations = 0; // mark objects if let Some(objects) = self.objects.take() { - self.trace_objects(&objects) + self.trace_objects(&objects); + num_objects = objects.len(); } let pause_opt = self.plan.current_pause(); if pause_opt == Some(Pause::FinalMark) || pause_opt.is_none() { @@ -121,10 +128,16 @@ impl GCWork for ConcurrentTraceObjects { next_objects.clear(); self.next_objects.swap(&mut next_objects); self.trace_objects(&next_objects); + num_next_objects += next_objects.len(); + iterations += 1; } } + probe!(mmtk, concurrent_trace_objects, num_objects, num_next_objects, iterations); self.flush(); - crate::NUM_CONCURRENT_TRACING_PACKETS.fetch_sub(1, Ordering::SeqCst); + + let old_value = crate::NUM_CONCURRENT_TRACING_PACKETS.fetch_sub(1, Ordering::SeqCst); + let new_value = old_value - 1; + probe!(mmtk, num_concurrent_tracing_packets_change, new_value); } } diff --git a/src/plan/gc_requester.rs b/src/plan/gc_requester.rs index e3a8462f96..944558184d 100644 --- a/src/plan/gc_requester.rs +++ b/src/plan/gc_requester.rs @@ -30,6 +30,7 @@ impl GCRequester { // `GCWorkScheduler::request_schedule_collection` needs to hold a mutex to communicate // with GC workers, which is expensive for functions like `poll`. We use the atomic // flag `request_flag` to elide the need to acquire the mutex in subsequent calls. 
+ probe!(mmtk, gcrequester_request); self.scheduler.request_schedule_collection(); } } diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index 5b5448c1d8..2f37702104 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -188,6 +188,7 @@ impl GCWorkScheduler { /// Add the `ScheduleCollection` packet. Called by the last parked worker. fn add_schedule_collection_packet(&self) { // We are still holding the mutex `WorkerMonitor::sync`. Do not notify now. + probe!(mmtk, add_schedule_collection_packet); self.work_buckets[WorkBucketStage::Unconstrained].add_no_notify(ScheduleCollection); } diff --git a/tools/tracing/timeline/capture.bt b/tools/tracing/timeline/capture.bt index b076f1cd18..5fd3a072aa 100644 --- a/tools/tracing/timeline/capture.bt +++ b/tools/tracing/timeline/capture.bt @@ -124,4 +124,19 @@ usdt:$MMTK:mmtk:sweep_chunk { } } +usdt:$MMTK:mmtk:concurrent_trace_objects { + printf("concurrent_trace_objects,meta,%d,%lu,%lu,%lu,%lu\n", tid, nsecs, arg0, arg1, arg2); +} + +usdt:$MMTK:mmtk:gcrequester_request { + printf("gcrequester_request,i,%d,%lu\n", tid, nsecs); +} + +usdt:$MMTK:mmtk:add_schedule_collection_packet { + printf("add_schedule_collection_packet,i,%d,%lu\n", tid, nsecs); +} + +usdt:$MMTK:mmtk:num_concurrent_tracing_packets_change { + printf("num_concurrent_tracing_packets_change,C,%d,%lu,%lu\n", tid, nsecs, arg0); +} // vim: ft=bpftrace ts=4 sw=4 sts=4 et diff --git a/tools/tracing/timeline/visualize.py b/tools/tracing/timeline/visualize.py index ca45845a44..7ad34df1cf 100755 --- a/tools/tracing/timeline/visualize.py +++ b/tools/tracing/timeline/visualize.py @@ -146,6 +146,15 @@ def enrich_event(self, name, ph, tid, ts, result, args): "stage": int(args[0]), } + case "gcrequester_request": + result["tid"] = 1 + + case "num_concurrent_tracing_packets_change": + result["name"] = "Concurrent tracing packets" + result["args"] |= { + "number": int(args[0]), + } + case _: if self.enrich_event_extra is not None: # Call ``enrich_event_extra`` in the extension script if defined. @@ -241,6 +250,21 @@ def enrich_meta(self, name, tid, ts, gc, wp, args): } } + case "concurrent_trace_objects": + objects = int(args[0]) + next_objects = int(args[1]) + iterations = int(args[2]) + total_objects = objects + next_objects + wp["args"] |= { + # Put args in a group. See comments in "process_slots". + "scan_objects": { + "objects": objects, + "next_objects": next_objects, + "total_objects": total_objects, + "iterations": iterations, + } + } + case "sweep_chunk": wp["args"] |= { "allocated_blocks": int(args[0]), From 4f14b2fc0c49d143866a6d9a2e0c21af5184c9e1 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 31 Jul 2025 17:46:00 +0800 Subject: [PATCH 07/59] Wake up workers immediately for concurrent work If concurrent work packets are scheduled during a GC, the last parked worker now wakes up all GC workers to process the concurrent packets instead of going to sleep. 
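To illustrate the new behaviour, here is a minimal sketch of the decision the last parked worker now makes. This is a simplified stand-in, not scheduler code from this patch: the real `LastParkedResult` has more variants, and the input comes from `on_gc_finished` in the diff below.

    // Sketch only: what the last parked GC worker decides after a pause.
    enum LastParkedResult {
        WakeAll,  // wake every worker, e.g. to run concurrent marking packets
        ParkSelf, // nothing left to run; wait for the next GC request
    }

    fn after_gc_finished(concurrent_work_scheduled: bool) -> LastParkedResult {
        if concurrent_work_scheduled {
            // The initial-mark pause moved postponed concurrent packets into
            // the always-open bucket, so the workers must keep running.
            LastParkedResult::WakeAll
        } else {
            // A stop-the-world GC or the final-mark pause: the cycle is done.
            LastParkedResult::ParkSelf
        }
    }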
--- src/scheduler/scheduler.rs | 44 ++++++++++++++++++------------------ src/scheduler/work_bucket.rs | 2 -- 2 files changed, 22 insertions(+), 24 deletions(-) diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index 2f37702104..2bec746f68 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -63,17 +63,6 @@ impl GCWorkScheduler { let mut open_stages: Vec = vec![first_stw_stage]; let stages = (0..WorkBucketStage::LENGTH).map(WorkBucketStage::from_usize); for stage in stages { - { - if stage == WorkBucketStage::ConcurrentSentinel { - work_buckets[stage].set_open_condition( - move |scheduler: &GCWorkScheduler| { - scheduler.work_buckets[WorkBucketStage::Unconstrained].is_drained() - }, - ); - open_stages.push(stage); - continue; - } - } // Unconstrained is always open. // The first STW stage (Prepare) will be opened when the world stopped // (i.e. when all mutators are suspended). @@ -507,11 +496,20 @@ impl GCWorkScheduler { LastParkedResult::WakeAll } else { // GC finished. - self.on_gc_finished(worker); + let concurrent_work_scheduled = self.on_gc_finished(worker); // Clear the current goal goals.on_current_goal_completed(); - self.respond_to_requests(worker, goals) + + if concurrent_work_scheduled { + // It was the initial mark pause and scheduled concurrent work. + // Wake up all GC workers to do concurrent work. + LastParkedResult::WakeAll + } else { + // It was an STW GC or the final mark pause of a concurrent GC. + // Respond to another goal. + self.respond_to_requests(worker, goals) + } } } WorkerGoal::StopForFork => { @@ -584,7 +582,9 @@ impl GCWorkScheduler { } /// Called when GC has finished, i.e. when all work packets have been executed. - fn on_gc_finished(&self, worker: &GCWorker) { + /// + /// Return `true` if any concurrent work packets have been scheduled. + fn on_gc_finished(&self, worker: &GCWorker) -> bool { // All GC workers must have parked by now. debug_assert!(!self.worker_group.has_designated_work()); debug_assert!(self.all_buckets_empty()); @@ -652,12 +652,14 @@ impl GCWorkScheduler { mmtk.state.reset_collection_trigger(); self.set_in_gc_pause(false); - self.schedule_concurrent_packets(queue, pqueue); + let concurrent_work_scheduled = self.schedule_concurrent_packets(queue, pqueue); self.debug_assert_all_buckets_deactivated(); // Set to NotInGC after everything, and right before resuming mutators. 
mmtk.set_gc_status(GcStatus::NotInGC); ::VMCollection::resume_mutators(worker.tls); + + concurrent_work_scheduled } pub fn enable_stat(&self) { @@ -711,24 +713,22 @@ impl GCWorkScheduler { &self, queue: PostponeQueue, pqueue: PostponeQueue, - ) { + ) -> bool { // crate::MOVE_CONCURRENT_MARKING_TO_STW.store(false, Ordering::SeqCst); // crate::PAUSE_CONCURRENT_MARKING.store(false, Ordering::SeqCst); - let mut notify = false; + let mut concurrent_work_scheduled = false; if !queue.is_empty() { let old_queue = self.work_buckets[WorkBucketStage::Unconstrained].swap_queue(queue); debug_assert!(old_queue.is_empty()); - notify = true; + concurrent_work_scheduled = true; } if !pqueue.is_empty() { let old_queue = self.work_buckets[WorkBucketStage::Unconstrained].swap_queue_prioritized(pqueue); debug_assert!(old_queue.is_empty()); - notify = true; - } - if notify { - self.wakeup_all_concurrent_workers(); + concurrent_work_scheduled = true; } + concurrent_work_scheduled } pub fn wakeup_all_concurrent_workers(&self) { diff --git a/src/scheduler/work_bucket.rs b/src/scheduler/work_bucket.rs index 563c43a9b5..cf9fce7ca4 100644 --- a/src/scheduler/work_bucket.rs +++ b/src/scheduler/work_bucket.rs @@ -289,8 +289,6 @@ impl WorkBucket { pub enum WorkBucketStage { /// This bucket is always open. Unconstrained, - Initial, - ConcurrentSentinel, /// Preparation work. Plans, spaces, GC workers, mutators, etc. should be prepared for GC at /// this stage. Prepare, From 865e24cd1cfe825d39b5a8be8a4d39bf66301966 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 31 Jul 2025 17:48:11 +0800 Subject: [PATCH 08/59] Fix clippy warnings and formatting --- src/plan/concurrent/concurrent_marking_work.rs | 8 +++++++- src/plan/concurrent/immix/global.rs | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index 113776d9f2..ff8cc77187 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -132,7 +132,13 @@ impl GCWork for ConcurrentTraceObjects { iterations += 1; } } - probe!(mmtk, concurrent_trace_objects, num_objects, num_next_objects, iterations); + probe!( + mmtk, + concurrent_trace_objects, + num_objects, + num_next_objects, + iterations + ); self.flush(); let old_value = crate::NUM_CONCURRENT_TRACING_PACKETS.fetch_sub(1, Ordering::SeqCst); diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index ed52096212..93228c64d0 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -433,7 +433,7 @@ impl ConcurrentImmix { // Finalization if !*self.base().options.no_finalizer { - use crate::util::finalizable_processor::{Finalization, ForwardFinalization}; + use crate::util::finalizable_processor::Finalization; // finalization scheduler.work_buckets[WorkBucketStage::FinalRefClosure] .add(Finalization::>::new()); From 7b132722a0ac94ed4325fb9f89e5d17cd6848da8 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Fri, 1 Aug 2025 03:46:25 +0000 Subject: [PATCH 09/59] Move concurrent_marking_active to the plan. Add a flag allocate_as_live for spaces. 
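For context, a minimal sketch of how the new flag is meant to be used, assuming the usual SATB rule that objects allocated while concurrent marking runs are treated as live for the current cycle. The struct below is illustrative only; the accessors mirror the ones added to Space/CommonSpace in this patch:

    use std::sync::atomic::{AtomicBool, Ordering};

    struct SpaceFlags {
        allocate_as_live: AtomicBool,
    }

    impl SpaceFlags {
        // The plan flips this flag when concurrent marking starts and ends.
        fn set_allocate_as_live(&self, live: bool) {
            self.allocate_as_live.store(live, Ordering::SeqCst);
        }

        // Allocators consult the flag to decide whether to pre-mark newly
        // allocated memory, e.g. the lines of a freshly acquired Immix block.
        fn should_allocate_as_live(&self) -> bool {
            self.allocate_as_live.load(Ordering::Acquire)
        }
    }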
--- src/global_state.rs | 2 -- src/plan/concurrent/immix/global.rs | 23 ++++++++++++++--------- src/policy/immix/immixspace.rs | 7 ------- src/policy/largeobjectspace.rs | 9 +-------- src/policy/space.rs | 16 ++++++++++++++-- src/util/alloc/immix_allocator.rs | 4 ++-- 6 files changed, 31 insertions(+), 30 deletions(-) diff --git a/src/global_state.rs b/src/global_state.rs index 8abe617b49..317caf0c81 100644 --- a/src/global_state.rs +++ b/src/global_state.rs @@ -49,7 +49,6 @@ pub struct GlobalState { pub(crate) malloc_bytes: AtomicUsize, /// This stores the live bytes and the used bytes (by pages) for each space in last GC. This counter is only updated in the GC release phase. pub(crate) live_bytes_in_last_gc: AtomicRefCell>, - pub(crate) concurrent_marking_active: AtomicBool, pub(crate) concurrent_marking_threshold: AtomicUsize, } @@ -209,7 +208,6 @@ impl Default for GlobalState { malloc_bytes: AtomicUsize::new(0), live_bytes_in_last_gc: AtomicRefCell::new(HashMap::new()), concurrent_marking_threshold: AtomicUsize::new(0), - concurrent_marking_active: AtomicBool::new(false), } } } diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 93228c64d0..4424a2f900 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -54,6 +54,7 @@ pub struct ConcurrentImmix { current_pause: Atomic>, previous_pause: Atomic>, gc_cause: Atomic, + concurrent_marking_active: AtomicBool, } /// The plan constraints for the immix plan. @@ -307,6 +308,7 @@ impl ConcurrentImmix { current_pause: Atomic::new(None), previous_pause: Atomic::new(None), gc_cause: Atomic::new(GCCause::Unknown), + concurrent_marking_active: AtomicBool::new(false), }; immix.verify_side_metadata_sanity(); @@ -441,21 +443,24 @@ impl ConcurrentImmix { } pub fn concurrent_marking_in_progress(&self) -> bool { - self.common() - .base - .global_state - .concurrent_marking_active - .load(Ordering::Acquire) + self.concurrent_marking_active.load(Ordering::Acquire) } fn set_concurrent_marking_state(&self, active: bool) { + use crate::plan::global::HasSpaces; use crate::vm::Collection; + // Update the binding about concurrent marking ::VMCollection::set_concurrent_marking_state(active); - self.common() - .base - .global_state - .concurrent_marking_active + + // Tell the spaces to allocate new objects as live + let allocate_object_as_live = active; + self.for_each_space(&mut |space: &dyn Space| { + space.set_allocate_as_live(allocate_object_as_live); + }); + + // Store the state. 
+ self.concurrent_marking_active .store(active, Ordering::SeqCst); } diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index e205f3bc3d..7a051483d6 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -203,13 +203,6 @@ impl Space for ImmixSpace { fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) { object_enum::enumerate_blocks_from_chunk_map::(enumerator, &self.chunk_map); } - - fn concurrent_marking_active(&self) -> bool { - self.common() - .global_state - .concurrent_marking_active - .load(Ordering::Acquire) - } } impl crate::policy::gc_work::PolicyTraceObject for ImmixSpace { diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index 2563235e3a..3a60b19dad 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -60,7 +60,7 @@ impl SFT for LargeObjectSpace { true } fn initialize_object_metadata(&self, object: ObjectReference, alloc: bool) { - if self.concurrent_marking_active() { + if self.should_allocate_as_live() { VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC.store_atomic::( object, self.mark_state, @@ -211,13 +211,6 @@ impl Space for LargeObjectSpace { fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator) { self.treadmill.enumerate_objects(enumerator); } - - fn concurrent_marking_active(&self) -> bool { - self.common() - .global_state - .concurrent_marking_active - .load(Ordering::Acquire) - } } use crate::scheduler::GCWorker; diff --git a/src/policy/space.rs b/src/policy/space.rs index 80bbc2b424..f6f46e8197 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -34,6 +34,7 @@ use crate::util::memory::{self, HugePageSupport, MmapProtection, MmapStrategy}; use crate::vm::VMBinding; use std::marker::PhantomData; +use std::sync::atomic::AtomicBool; use std::sync::Arc; use std::sync::Mutex; @@ -434,8 +435,16 @@ pub trait Space: 'static + SFT + Sync + Downcast { /// scanning VO bits because it is sparse. 
fn enumerate_objects(&self, enumerator: &mut dyn ObjectEnumerator);

-    fn concurrent_marking_active(&self) -> bool {
-        false
+    fn set_allocate_as_live(&self, live: bool) {
+        self.common()
+            .allocate_as_live
+            .store(live, std::sync::atomic::Ordering::SeqCst);
+    }
+
+    fn should_allocate_as_live(&self) -> bool {
+        self.common()
+            .allocate_as_live
+            .load(std::sync::atomic::Ordering::Acquire)
     }
 }

@@ -537,6 +546,8 @@ pub struct CommonSpace {
     pub global_state: Arc,
     pub options: Arc,
+    pub allocate_as_live: AtomicBool,
+
     p: PhantomData,
 }

@@ -608,6 +619,7 @@ impl CommonSpace {
             acquire_lock: Mutex::new(()),
             global_state: args.plan_args.global_state,
             options: args.plan_args.options.clone(),
+            allocate_as_live: AtomicBool::new(false),
             p: PhantomData,
         };

diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs
index bf910c1e33..597b1db4ff 100644
--- a/src/util/alloc/immix_allocator.rs
+++ b/src/util/alloc/immix_allocator.rs
@@ -266,7 +266,7 @@ impl ImmixAllocator {
             Some(end_line)
         };
         // mark objects if concurrent marking is active
-        if self.immix_space().concurrent_marking_active() {
+        if self.immix_space().should_allocate_as_live() {
             let state = self
                 .space
                 .line_mark_state
                 .load(std::sync::atomic::Ordering::Acquire);
@@ -321,7 +321,7 @@ impl ImmixAllocator {
         Line::MARK_TABLE
             .bzero_metadata(block.start(), crate::policy::immix::block::Block::BYTES);
         // mark objects if concurrent marking is active
-        if self.immix_space().concurrent_marking_active() {
+        if self.immix_space().should_allocate_as_live() {
             let state = self
                 .space
                 .line_mark_state

From 1db4743e6fe3ec837c2a1b0c3c1f3944e2b825a8 Mon Sep 17 00:00:00 2001
From: Kunshan Wang
Date: Fri, 1 Aug 2025 14:42:25 +0800
Subject: [PATCH 10/59] Rename load_reference to load_weak_reference

We avoid using the term "reference" to refer to weak reference objects
as in JVM. We explicitly mention "weak reference" instead.

Also added comments to clarify the purpose of that barrier function.
---
 src/plan/barriers.rs           | 20 +++++++++++++++-----
 src/plan/concurrent/barrier.rs | 11 ++++++++++-
 2 files changed, 25 insertions(+), 6 deletions(-)

diff --git a/src/plan/barriers.rs b/src/plan/barriers.rs
index 07152bbde7..e81b3166eb 100644
--- a/src/plan/barriers.rs
+++ b/src/plan/barriers.rs
@@ -46,8 +46,18 @@ impl BarrierSelector {
 pub trait Barrier: 'static + Send + Downcast {
     fn flush(&mut self) {}

-    /// load referent from java.lang.Reference
-    fn load_reference(&mut self, _referent: ObjectReference) {}
+    /// Weak reference loading barrier. A mutator should call this when loading from a weak
+    /// reference field, for example, when executing `java.lang.ref.Reference.get()` in JVM, or
+    /// loading from a global weak table in CRuby.
+    ///
+    /// Note: Merely loading from a field holding a weak reference into a local variable will create a
+    /// strong reference from the stack to the referent, changing its reachability from weakly
+    /// reachable to strongly reachable. Concurrent garbage collectors may need to handle such
+    /// events specially. See [SATBBarrier::load_weak_reference] for a concrete example.
+    ///
+    /// Arguments:
+    /// * `referent`: The referent object which the weak reference is pointing to.
+    fn load_weak_reference(&mut self, _referent: ObjectReference) {}

     /// Subsuming barrier for object reference write
     fn object_reference_write(
@@ -166,7 +176,7 @@ pub trait BarrierSemantics: 'static + Send {
     /// Object will probably be modified
     fn object_probable_write_slow(&mut self, _obj: ObjectReference) {}

-    fn load_reference(&mut self, _o: ObjectReference) {}
+    fn load_weak_reference(&mut self, _o: ObjectReference) {}

     fn object_reference_clone_pre(&mut self, _obj: ObjectReference) {}
 }
@@ -280,8 +290,8 @@ impl Barrier for SATBBarrier {
         self.semantics.flush();
     }

-    fn load_reference(&mut self, o: ObjectReference) {
-        self.semantics.load_reference(o)
+    fn load_weak_reference(&mut self, o: ObjectReference) {
+        self.semantics.load_weak_reference(o)
     }

     fn object_reference_clone_pre(&mut self, obj: ObjectReference) {

diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs
index cb76910147..716ea1144e 100644
--- a/src/plan/concurrent/barrier.rs
+++ b/src/plan/concurrent/barrier.rs
@@ -128,7 +128,16 @@ impl BarrierSemantics for SATBBarrierSemantics {
         }
     }

-    fn load_reference(&mut self, o: ObjectReference) {
+    /// Enqueue the referent during concurrent marking.
+    ///
+    /// Note: During concurrent marking, a collector based on snapshot-at-the-beginning (SATB) will
+    /// not reach objects that were weakly reachable at the time of `InitialMark`. But if a mutator
+    /// loads from a weak reference field during concurrent marking, it will make the referent
+    /// strongly reachable, yet the referent is still not part of the SATB. We must conservatively
+    /// enqueue the referent even though its reachability has not yet been established, otherwise it
+    /// (and its children) may be treated as garbage if it happened to be weakly reachable at the
+    /// time of `InitialMark`.
+    fn load_weak_reference(&mut self, o: ObjectReference) {
         if !self.immix.concurrent_marking_in_progress() {
             return;
         }
         self.refs.push(o);

From 5c3870467022c50461c7b2ade45a351d0aa427a6 Mon Sep 17 00:00:00 2001
From: Kunshan Wang
Date: Fri, 1 Aug 2025 17:15:02 +0800
Subject: [PATCH 11/59] Replace `swap` with `replace` and `take`

Clippy on the stable version of Rust seems to erroneously consider
`std::mem::swap(&mut a, &mut lg)` as swapping with a temporary, where
`lg` is a lock guard. We may fix the warning by writing `&mut *lg`, but
the current use cases can all be expressed more concisely using
`std::mem::replace` and `std::mem::take`.
See: https://rust-lang.github.io/rust-clippy/master/index.html#swap_with_temporary --- src/plan/concurrent/concurrent_marking_work.rs | 4 +--- src/plan/tracing.rs | 5 ----- src/scheduler/scheduler.rs | 18 ++++-------------- src/scheduler/work_bucket.rs | 14 ++++++-------- 4 files changed, 11 insertions(+), 30 deletions(-) diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index ff8cc77187..660f0e69af 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -119,14 +119,12 @@ impl GCWork for ConcurrentTraceObjects { } let pause_opt = self.plan.current_pause(); if pause_opt == Some(Pause::FinalMark) || pause_opt.is_none() { - let mut next_objects = vec![]; while !self.next_objects.is_empty() { let pause_opt = self.plan.current_pause(); if !(pause_opt == Some(Pause::FinalMark) || pause_opt.is_none()) { break; } - next_objects.clear(); - self.next_objects.swap(&mut next_objects); + let next_objects = self.next_objects.take(); self.trace_objects(&next_objects); num_next_objects += next_objects.len(); iterations += 1; diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index e9dad03de8..fac6ff0da8 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -71,11 +71,6 @@ impl VectorQueue { self.buffer.len() } - /// Replace what was in the queue with data in new_buffer - pub fn swap(&mut self, new_buffer: &mut Vec) { - std::mem::swap(&mut self.buffer, new_buffer) - } - /// Empty the queue pub fn clear(&mut self) { self.buffer.clear() diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index 2bec746f68..fa22f216aa 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -694,18 +694,8 @@ impl GCWorkScheduler { } fn schedule_postponed_concurrent_packets(&self) -> (PostponeQueue, PostponeQueue) { - let mut queue = Injector::new(); - - std::mem::swap::>( - &mut queue, - &mut self.postponed_concurrent_work.write(), - ); - - let mut pqueue = Injector::new(); - std::mem::swap::>( - &mut pqueue, - &mut self.postponed_concurrent_work_prioritized.write(), - ); + let queue = std::mem::take(&mut *self.postponed_concurrent_work.write()); + let pqueue = std::mem::take(&mut *self.postponed_concurrent_work_prioritized.write()); (queue, pqueue) } @@ -718,13 +708,13 @@ impl GCWorkScheduler { // crate::PAUSE_CONCURRENT_MARKING.store(false, Ordering::SeqCst); let mut concurrent_work_scheduled = false; if !queue.is_empty() { - let old_queue = self.work_buckets[WorkBucketStage::Unconstrained].swap_queue(queue); + let old_queue = self.work_buckets[WorkBucketStage::Unconstrained].replace_queue(queue); debug_assert!(old_queue.is_empty()); concurrent_work_scheduled = true; } if !pqueue.is_empty() { let old_queue = - self.work_buckets[WorkBucketStage::Unconstrained].swap_queue_prioritized(pqueue); + self.work_buckets[WorkBucketStage::Unconstrained].replace_queue_prioritized(pqueue); debug_assert!(old_queue.is_empty()); concurrent_work_scheduled = true; } diff --git a/src/scheduler/work_bucket.rs b/src/scheduler/work_bucket.rs index cf9fce7ca4..1a207f0d9e 100644 --- a/src/scheduler/work_bucket.rs +++ b/src/scheduler/work_bucket.rs @@ -98,18 +98,17 @@ impl WorkBucket { self.prioritized_queue = Some(BucketQueue::new()); } - pub fn swap_queue( + pub fn replace_queue( &self, - mut new_queue: Injector>>, + new_queue: Injector>>, ) -> Injector>> { let mut queue = self.queue.queue.write().unwrap(); - std::mem::swap::>>>(&mut queue, &mut new_queue); - new_queue + 
std::mem::replace::>>>(&mut queue, new_queue) } - pub fn swap_queue_prioritized( + pub fn replace_queue_prioritized( &self, - mut new_queue: Injector>>, + new_queue: Injector>>, ) -> Injector>> { let mut queue = self .prioritized_queue @@ -118,8 +117,7 @@ impl WorkBucket { .queue .write() .unwrap(); - std::mem::swap::>>>(&mut queue, &mut new_queue); - new_queue + std::mem::replace::>>>(&mut queue, new_queue) } fn notify_one_worker(&self) { From a95e94ad928c50fc5bbc9169ad0c268e7a1c431d Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 5 Aug 2025 00:24:05 +0000 Subject: [PATCH 12/59] Remove schedule_concurrent_collection --- src/plan/concurrent/immix/global.rs | 11 ----------- src/plan/global.rs | 5 ----- src/scheduler/gc_work.rs | 9 +-------- 3 files changed, 1 insertion(+), 24 deletions(-) diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 4424a2f900..79d7fbd127 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -122,17 +122,6 @@ impl Plan for ConcurrentImmix { } fn schedule_collection(&'static self, scheduler: &GCWorkScheduler) { - self.current_pause - .store(Some(Pause::Full), Ordering::SeqCst); - - Self::schedule_immix_full_heap_collection::< - ConcurrentImmix, - ConcurrentImmixSTWGCWorkContext, - ConcurrentImmixSTWGCWorkContext, - >(self, &self.immix_space, scheduler); - } - - fn schedule_concurrent_collection(&'static self, scheduler: &GCWorkScheduler) { let pause = self.select_collection_kind(); if pause == Pause::Full { self.current_pause diff --git a/src/plan/global.rs b/src/plan/global.rs index 0b12bb8566..8e3b6db884 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -173,11 +173,6 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { /// Schedule work for the upcoming GC. fn schedule_collection(&'static self, _scheduler: &GCWorkScheduler); - /// Schedule work for the upcoming concurrent GC. - fn schedule_concurrent_collection(&'static self, _scheduler: &GCWorkScheduler) { - self.schedule_collection(_scheduler); - } - /// Get the common plan. CommonPlan is included by most of MMTk GC plans. fn common(&self) -> &CommonPlan { panic!("Common Plan not handled!") diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index c80d12a5e5..ad895ec53f 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -30,14 +30,7 @@ impl GCWork for ScheduleCollection { mmtk.set_gc_status(GcStatus::GcPrepare); // Let the plan to schedule collection work - if mmtk.is_user_triggered_collection() || is_emergency { - // user triggered collection is always stop-the-world - mmtk.get_plan().schedule_collection(worker.scheduler()); - } else { - // Let the plan to schedule collection work - mmtk.get_plan() - .schedule_concurrent_collection(worker.scheduler()); - } + mmtk.get_plan().schedule_collection(worker.scheduler()); } } From eacc9599d3cad5968fd06131d4639ebf9622ae64 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 5 Aug 2025 01:43:29 +0000 Subject: [PATCH 13/59] Rename gc_pause_start. 
Merge gc_pause_end with end_of_gc --- src/plan/concurrent/immix/global.rs | 20 +++++++++----------- src/plan/global.rs | 7 ++++--- src/scheduler/gc_work.rs | 4 ++-- src/scheduler/scheduler.rs | 1 - 4 files changed, 15 insertions(+), 17 deletions(-) diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 79d7fbd127..c377a9fafb 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -196,6 +196,14 @@ impl Plan for ConcurrentImmix { fn end_of_gc(&mut self, _tls: VMWorkerThread) { self.last_gc_was_defrag .store(self.immix_space.end_of_gc(), Ordering::Relaxed); + + let pause = self.current_pause().unwrap(); + if pause == Pause::InitialMark { + self.set_concurrent_marking_state(true); + } + self.previous_pause.store(Some(pause), Ordering::SeqCst); + self.current_pause.store(None, Ordering::SeqCst); + info!("{:?} end", pause); } fn current_gc_may_move_object(&self) -> bool { @@ -222,7 +230,7 @@ impl Plan for ConcurrentImmix { &self.common } - fn gc_pause_start(&self, _scheduler: &GCWorkScheduler) { + fn notify_mutators_paused(&self, _scheduler: &GCWorkScheduler) { use crate::vm::ActivePlan; let pause = self.current_pause().unwrap(); match pause { @@ -247,16 +255,6 @@ impl Plan for ConcurrentImmix { } info!("{:?} start", pause); } - - fn gc_pause_end(&self) { - let pause = self.current_pause().unwrap(); - if pause == Pause::InitialMark { - self.set_concurrent_marking_state(true); - } - self.previous_pause.store(Some(pause), Ordering::SeqCst); - self.current_pause.store(None, Ordering::SeqCst); - info!("{:?} end", pause); - } } impl ConcurrentImmix { diff --git a/src/plan/global.rs b/src/plan/global.rs index 8e3b6db884..2d3f2a3638 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -195,6 +195,9 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { /// This defines what space this plan will allocate objects into for different semantics. fn get_allocator_mapping(&self) -> &'static EnumMap; + /// Called when all mutators are paused. This is called before prepare. + fn notify_mutators_paused(&self, _scheduler: &GCWorkScheduler) {} + /// Prepare the plan before a GC. This is invoked in an initial step in the GC. /// This is invoked once per GC by one worker thread. `tls` is the worker thread that executes this method. fn prepare(&mut self, tls: VMWorkerThread); @@ -210,6 +213,7 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { /// Inform the plan about the end of a GC. It is guaranteed that there is no further work for this GC. /// This is invoked once per GC by one worker thread. `tls` is the worker thread that executes this method. + // TODO: This is actually called at the end of a pause/STW, rather than the end of a GC. It should be renamed. fn end_of_gc(&mut self, _tls: VMWorkerThread); /// Notify the plan that an emergency collection will happen. The plan should try to free as much memory as possible. 
@@ -344,9 +348,6 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { space.verify_side_metadata_sanity(&mut side_metadata_sanity_checker); }) } - - fn gc_pause_start(&self, _scheduler: &GCWorkScheduler) {} - fn gc_pause_end(&self) {} } impl_downcast!(Plan assoc VM); diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index ad895ec53f..d7070badd8 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -228,9 +228,9 @@ impl GCWork for StopMutators { mutator.flush(); } }); - mmtk.scheduler.set_in_gc_pause(true); - mmtk.get_plan().gc_pause_start(&mmtk.scheduler); trace!("stop_all_mutators end"); + mmtk.scheduler.set_in_gc_pause(true); + mmtk.get_plan().notify_mutators_paused(&mmtk.scheduler); mmtk.scheduler.notify_mutators_paused(mmtk); mmtk.scheduler.work_buckets[WorkBucketStage::Prepare].add(ScanVMSpecificRoots::::new()); } diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index fa22f216aa..7f9a771569 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -647,7 +647,6 @@ impl GCWorkScheduler { // reset the logging info at the end of each GC mmtk.slot_logger.reset(); } - mmtk.get_plan().gc_pause_end(); // Reset the triggering information. mmtk.state.reset_collection_trigger(); From fe29529297163f406d43c06af5a271fc946399e2 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 6 Aug 2025 05:21:49 +0000 Subject: [PATCH 14/59] Disallow new weak reference before ref enqueue --- src/util/reference_processor.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs index f997c16742..1b5acb4bfa 100644 --- a/src/util/reference_processor.rs +++ b/src/util/reference_processor.rs @@ -252,6 +252,11 @@ impl ReferenceProcessor { /// Inform the binding to enqueue the weak references whose referents were cleared in this GC. pub fn enqueue(&self, tls: VMWorkerThread) { + // We will acquire a lock below. If anyone tries to insert new weak refs which will acquire the same lock, a deadlock will occur. + // This does happen for OpenJDK with ConcurrentImmix where a write barrier is triggered during the enqueueing of weak references, + // and the write barrier scans the objects and attempts to add new weak references. + // Disallow new candidates to prevent the deadlock. + self.disallow_new_candidate(); let mut sync = self.sync.lock().unwrap(); // This is the end of a GC. We do some assertions here to make sure our reference tables are correct. From 26c2a54621db7cfc2bc38349f38fc9d8f2d7020b Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 13 Aug 2025 05:37:30 +0000 Subject: [PATCH 15/59] Introduce ConcurrentPlan. Make ConcurrentTraceObjects trace objects in all spaces. 
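A sketch of the abstraction this patch introduces; signatures are simplified (the real trait extends Plan), and the free function mirrors should_create_satb_packets in the barrier code below:

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum Pause {
        Full,
        InitialMark,
        FinalMark,
    }

    // Barriers and concurrent marking packets become generic over any plan
    // implementing this trait, instead of naming ConcurrentImmix directly.
    trait ConcurrentPlan {
        fn concurrent_work_in_progress(&self) -> bool;
        fn current_pause(&self) -> Option<Pause>;
    }

    // Example consumer: decide whether flushed SATB buffers need work packets.
    fn should_create_satb_packets<P: ConcurrentPlan>(plan: &P) -> bool {
        plan.concurrent_work_in_progress() || plan.current_pause() == Some(Pause::FinalMark)
    }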
--- src/plan/concurrent/barrier.rs | 51 +++++----- .../concurrent/concurrent_marking_work.rs | 95 ++++++++++++------- src/plan/concurrent/global.rs | 7 ++ src/plan/concurrent/immix/global.rs | 17 +++- src/plan/concurrent/immix/mutator.rs | 5 +- src/plan/concurrent/mod.rs | 4 +- src/plan/global.rs | 6 ++ 7 files changed, 114 insertions(+), 71 deletions(-) create mode 100644 src/plan/concurrent/global.rs diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs index 716ea1144e..991a626dc9 100644 --- a/src/plan/concurrent/barrier.rs +++ b/src/plan/concurrent/barrier.rs @@ -1,7 +1,9 @@ use std::sync::atomic::Ordering; +use super::{concurrent_marking_work::ProcessModBufSATB, Pause}; +use crate::plan::global::PlanTraceObject; use crate::{ - plan::{barriers::BarrierSemantics, concurrent::immix::global::ConcurrentImmix, VectorQueue}, + plan::{barriers::BarrierSemantics, concurrent::global::ConcurrentPlan, VectorQueue}, scheduler::WorkBucketStage, util::ObjectReference, vm::{ @@ -11,25 +13,20 @@ use crate::{ MMTK, }; -use super::{concurrent_marking_work::ProcessModBufSATB, Pause}; - -pub struct SATBBarrierSemantics { +pub struct SATBBarrierSemantics + PlanTraceObject> { mmtk: &'static MMTK, satb: VectorQueue, refs: VectorQueue, - immix: &'static ConcurrentImmix, + plan: &'static P, } -impl SATBBarrierSemantics { +impl + PlanTraceObject> SATBBarrierSemantics { pub fn new(mmtk: &'static MMTK) -> Self { Self { mmtk, satb: VectorQueue::default(), refs: VectorQueue::default(), - immix: mmtk - .get_plan() - .downcast_ref::>() - .unwrap(), + plan: mmtk.get_plan().downcast_ref::
<P>
().unwrap(), } } @@ -63,14 +60,12 @@ impl SATBBarrierSemantics { if !self.satb.is_empty() { if self.should_create_satb_packets() { let satb = self.satb.take(); - if let Some(pause) = self.immix.current_pause() { - debug_assert_ne!(pause, Pause::InitialMark); - self.mmtk.scheduler.work_buckets[WorkBucketStage::Closure] - .add(ProcessModBufSATB::new(satb)); + let bucket = if self.plan.concurrent_work_in_progress() { + WorkBucketStage::Unconstrained } else { - self.mmtk.scheduler.work_buckets[WorkBucketStage::Unconstrained] - .add(ProcessModBufSATB::new(satb)); - } + WorkBucketStage::Closure + }; + self.mmtk.scheduler.work_buckets[bucket].add(ProcessModBufSATB::::new(satb)); } else { let _ = self.satb.take(); }; @@ -82,24 +77,24 @@ impl SATBBarrierSemantics { if !self.refs.is_empty() { // debug_assert!(self.should_create_satb_packets()); let nodes = self.refs.take(); - if let Some(pause) = self.immix.current_pause() { - debug_assert_ne!(pause, Pause::InitialMark); - self.mmtk.scheduler.work_buckets[WorkBucketStage::Closure] - .add(ProcessModBufSATB::new(nodes)); + let bucket = if self.plan.concurrent_work_in_progress() { + WorkBucketStage::Unconstrained } else { - self.mmtk.scheduler.work_buckets[WorkBucketStage::Unconstrained] - .add(ProcessModBufSATB::new(nodes)); - } + WorkBucketStage::Closure + }; + self.mmtk.scheduler.work_buckets[bucket].add(ProcessModBufSATB::::new(nodes)); } } fn should_create_satb_packets(&self) -> bool { - self.immix.concurrent_marking_in_progress() - || self.immix.current_pause() == Some(Pause::FinalMark) + self.plan.concurrent_work_in_progress() + || self.plan.current_pause() == Some(Pause::FinalMark) } } -impl BarrierSemantics for SATBBarrierSemantics { +impl + PlanTraceObject> BarrierSemantics + for SATBBarrierSemantics +{ type VM = VM; #[cold] @@ -138,7 +133,7 @@ impl BarrierSemantics for SATBBarrierSemantics { /// (and its children) may be treated as garbage if it happened to be weakly reachable at the /// time of `InitialMark`. 
fn load_weak_reference(&mut self, o: ObjectReference) { - if !self.immix.concurrent_marking_in_progress() { + if !self.plan.concurrent_work_in_progress() { return; } self.refs.push(o); diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index 660f0e69af..cacdeb62d6 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -1,14 +1,11 @@ -use crate::plan::concurrent::immix::global::ConcurrentImmix; +use crate::plan::concurrent::global::ConcurrentPlan; use crate::plan::concurrent::Pause; +use crate::plan::PlanTraceObject; use crate::plan::VectorQueue; -use crate::policy::gc_work::PolicyTraceObject; use crate::policy::immix::TRACE_KIND_FAST; -use crate::policy::space::Space; use crate::scheduler::gc_work::{ScanObjects, SlotOf}; use crate::util::ObjectReference; use crate::vm::slot::Slot; - -use crate::Plan; use crate::{ plan::ObjectQueue, scheduler::{gc_work::ProcessEdgesBase, GCWork, GCWorker, ProcessEdgesWork, WorkBucketStage}, @@ -18,8 +15,8 @@ use crate::{ use atomic::Ordering; use std::ops::{Deref, DerefMut}; -pub struct ConcurrentTraceObjects { - plan: &'static ConcurrentImmix, +pub struct ConcurrentTraceObjects + PlanTraceObject> { + plan: &'static P, // objects to mark and scan objects: Option>, // recursively generated objects @@ -27,14 +24,13 @@ pub struct ConcurrentTraceObjects { worker: *mut GCWorker, } -impl ConcurrentTraceObjects { +impl + PlanTraceObject> + ConcurrentTraceObjects +{ const SATB_BUFFER_SIZE: usize = 8192; pub fn new(objects: Vec, mmtk: &'static MMTK) -> Self { - let plan = mmtk - .get_plan() - .downcast_ref::>() - .unwrap(); + let plan = mmtk.get_plan().downcast_ref::
<P>
().unwrap(); let old_value = crate::NUM_CONCURRENT_TRACING_PACKETS.fetch_add(1, Ordering::SeqCst); let new_value = old_value + 1; probe!(mmtk, num_concurrent_tracing_packets_change, new_value); @@ -63,14 +59,8 @@ impl ConcurrentTraceObjects { } fn trace_object(&mut self, object: ObjectReference) -> ObjectReference { - if self.plan.immix_space.in_space(object) { - self.plan - .immix_space - .trace_object::(self, object, None, self.worker()); - } else { - self.plan.common().get_los().trace_object(self, object); - } - object + self.plan + .trace_object::(self, object, self.worker()) } fn trace_objects(&mut self, objects: &[ObjectReference]) { @@ -93,7 +83,9 @@ impl ConcurrentTraceObjects { } } -impl ObjectQueue for ConcurrentTraceObjects { +impl + PlanTraceObject> ObjectQueue + for ConcurrentTraceObjects +{ fn enqueue(&mut self, object: ObjectReference) { debug_assert!( object.to_raw_address().is_mapped(), @@ -104,9 +96,14 @@ impl ObjectQueue for ConcurrentTraceObjects { } } -unsafe impl Send for ConcurrentTraceObjects {} +unsafe impl + PlanTraceObject> Send + for ConcurrentTraceObjects +{ +} -impl GCWork for ConcurrentTraceObjects { +impl + PlanTraceObject> GCWork + for ConcurrentTraceObjects +{ fn do_work(&mut self, worker: &mut GCWorker, _mmtk: &'static MMTK) { self.worker = worker; let mut num_objects = 0; @@ -145,24 +142,35 @@ impl GCWork for ConcurrentTraceObjects { } } -pub struct ProcessModBufSATB { +pub struct ProcessModBufSATB + PlanTraceObject> { nodes: Option>, + _p: std::marker::PhantomData<(VM, P)>, +} + +unsafe impl + PlanTraceObject> Send + for ProcessModBufSATB +{ } -impl ProcessModBufSATB { +impl + PlanTraceObject> ProcessModBufSATB { pub fn new(nodes: Vec) -> Self { - Self { nodes: Some(nodes) } + Self { + nodes: Some(nodes), + _p: std::marker::PhantomData, + } } } -impl GCWork for ProcessModBufSATB { +impl + PlanTraceObject> GCWork + for ProcessModBufSATB +{ fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { let mut w = if let Some(nodes) = self.nodes.take() { if nodes.is_empty() { return; } - ConcurrentTraceObjects::new(nodes, mmtk) + ConcurrentTraceObjects::::new(nodes, mmtk) } else { return; }; @@ -170,11 +178,19 @@ impl GCWork for ProcessModBufSATB { } } -pub struct ProcessRootSlots { +pub struct ProcessRootSlots + PlanTraceObject> { base: ProcessEdgesBase, + _p: std::marker::PhantomData
<P>
, +} + +unsafe impl + PlanTraceObject> Send + for ProcessRootSlots +{ } -impl ProcessEdgesWork for ProcessRootSlots { +impl + PlanTraceObject> ProcessEdgesWork + for ProcessRootSlots +{ type VM = VM; type ScanObjectsWorkType = ScanObjects; const OVERWRITE_REFERENCE: bool = false; @@ -188,7 +204,10 @@ impl ProcessEdgesWork for ProcessRootSlots { ) -> Self { debug_assert!(roots); let base = ProcessEdgesBase::new(slots, roots, mmtk, bucket); - Self { base } + Self { + base, + _p: std::marker::PhantomData, + } } fn flush(&mut self) {} @@ -201,7 +220,7 @@ impl ProcessEdgesWork for ProcessRootSlots { let pause = self .base .plan() - .downcast_ref::>() + .concurrent() .unwrap() .current_pause() .unwrap(); @@ -219,7 +238,7 @@ impl ProcessEdgesWork for ProcessRootSlots { // create the packet let worker = self.worker(); let mmtk = self.mmtk(); - let w = ConcurrentTraceObjects::new(root_objects.clone(), mmtk); + let w = ConcurrentTraceObjects::::new(root_objects.clone(), mmtk); match pause { Pause::InitialMark => worker.scheduler().postpone(w), @@ -232,7 +251,7 @@ impl ProcessEdgesWork for ProcessRootSlots { } if !root_objects.is_empty() { let worker = self.worker(); - let w = ConcurrentTraceObjects::new(root_objects.clone(), self.mmtk()); + let w = ConcurrentTraceObjects::::new(root_objects.clone(), self.mmtk()); match pause { Pause::InitialMark => worker.scheduler().postpone(w), @@ -247,14 +266,18 @@ impl ProcessEdgesWork for ProcessRootSlots { } } -impl Deref for ProcessRootSlots { +impl + PlanTraceObject> Deref + for ProcessRootSlots +{ type Target = ProcessEdgesBase; fn deref(&self) -> &Self::Target { &self.base } } -impl DerefMut for ProcessRootSlots { +impl + PlanTraceObject> DerefMut + for ProcessRootSlots +{ fn deref_mut(&mut self) -> &mut Self::Target { &mut self.base } diff --git a/src/plan/concurrent/global.rs b/src/plan/concurrent/global.rs new file mode 100644 index 0000000000..a2b06f67c0 --- /dev/null +++ b/src/plan/concurrent/global.rs @@ -0,0 +1,7 @@ +use crate::plan::concurrent::Pause; +use crate::plan::Plan; + +pub trait ConcurrentPlan: Plan { + fn concurrent_work_in_progress(&self) -> bool; + fn current_pause(&self) -> Option; +} diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index c377a9fafb..ff6a21a092 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -1,4 +1,5 @@ use crate::plan::concurrent::concurrent_marking_work::ProcessRootSlots; +use crate::plan::concurrent::global::ConcurrentPlan; use crate::plan::concurrent::immix::gc_work::ConcurrentImmixGCWorkContext; use crate::plan::concurrent::immix::gc_work::ConcurrentImmixSTWGCWorkContext; use crate::plan::concurrent::Pause; @@ -378,7 +379,7 @@ impl ConcurrentImmix { self.disable_unnecessary_buckets(scheduler, Pause::InitialMark); scheduler.work_buckets[WorkBucketStage::Unconstrained].add_prioritized(Box::new( - StopMutators::>>::new_args( + StopMutators::>>::new_args( Pause::InitialMark, ), )); @@ -391,7 +392,7 @@ impl ConcurrentImmix { self.disable_unnecessary_buckets(scheduler, Pause::FinalMark); scheduler.work_buckets[WorkBucketStage::Unconstrained].add_prioritized(Box::new( - StopMutators::>>::new_args( + StopMutators::>>::new_args( Pause::FinalMark, ), )); @@ -451,11 +452,17 @@ impl ConcurrentImmix { .store(active, Ordering::SeqCst); } - pub fn current_pause(&self) -> Option { + fn previous_pause(&self) -> Option { + self.previous_pause.load(Ordering::SeqCst) + } +} + +impl ConcurrentPlan for ConcurrentImmix { + fn current_pause(&self) -> 
Option { self.current_pause.load(Ordering::SeqCst) } - pub fn previous_pause(&self) -> Option { - self.previous_pause.load(Ordering::SeqCst) + fn concurrent_work_in_progress(&self) -> bool { + self.concurrent_marking_in_progress() } } diff --git a/src/plan/concurrent/immix/mutator.rs b/src/plan/concurrent/immix/mutator.rs index 1304e34ea0..b8074544fe 100644 --- a/src/plan/concurrent/immix/mutator.rs +++ b/src/plan/concurrent/immix/mutator.rs @@ -79,6 +79,9 @@ pub fn create_concurrent_immix_mutator( let builder = MutatorBuilder::new(mutator_tls, mmtk, config); builder - .barrier(Box::new(SATBBarrier::new(SATBBarrierSemantics::new(mmtk)))) + .barrier(Box::new(SATBBarrier::new(SATBBarrierSemantics::< + VM, + ConcurrentImmix, + >::new(mmtk)))) .build() } diff --git a/src/plan/concurrent/mod.rs b/src/plan/concurrent/mod.rs index 94a582a4aa..692ed99d03 100644 --- a/src/plan/concurrent/mod.rs +++ b/src/plan/concurrent/mod.rs @@ -1,5 +1,7 @@ pub mod barrier; -pub mod concurrent_marking_work; +pub(super) mod concurrent_marking_work; +pub(super) mod global; + pub mod immix; use bytemuck::NoUninit; diff --git a/src/plan/global.rs b/src/plan/global.rs index 2d3f2a3638..b09ff0e23b 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -186,6 +186,12 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { None } + fn concurrent( + &self, + ) -> Option<&dyn crate::plan::concurrent::global::ConcurrentPlan> { + None + } + /// Get the current run time options. fn options(&self) -> &Options { &self.base().options From 7487c1be0572c045d552da64d99e47c858a42f8b Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 13 Aug 2025 01:55:11 +0000 Subject: [PATCH 16/59] Fix rayon-core version for MSRV --- Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Cargo.toml b/Cargo.toml index f96df1ec29..5b5c0685bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -47,6 +47,7 @@ portable-atomic = "1.4.3" probe = "0.5" regex = "1.7.0" rustversion = "1.0" +rayon-core = "=1.12.1" # We can remove this dependency when we use MSRV 1.80+ spin = "0.9.5" static_assertions = "1.1.0" strum = "0.27.1" From 6bb03c08bd5f98a36e0be8f879bf1f8e973cf1e2 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Thu, 14 Aug 2025 04:13:06 +0000 Subject: [PATCH 17/59] More assertions and minor fix. 
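One of the assertions added below guards the non-moving invariant of this plan: a concurrent trace must never relocate an object. A hypothetical stand-in for the check, not the mmtk-core API:

    // Sketch: a non-moving trace must return the input object unchanged.
    fn trace_non_moving<F: Fn(usize) -> usize>(object: usize, trace: F) -> usize {
        let new_object = trace(object);
        debug_assert_eq!(object, new_object, "non-moving trace must not move objects");
        object
    }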
--- src/plan/concurrent/barrier.rs | 2 ++ src/plan/concurrent/concurrent_marking_work.rs | 8 ++++++-- src/plan/concurrent/immix/global.rs | 4 ++++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs index 991a626dc9..09e65276e6 100644 --- a/src/plan/concurrent/barrier.rs +++ b/src/plan/concurrent/barrier.rs @@ -63,6 +63,7 @@ impl + PlanTraceObject> SATBBarrie let bucket = if self.plan.concurrent_work_in_progress() { WorkBucketStage::Unconstrained } else { + debug_assert_eq!(self.plan.current_pause(), Some(Pause::FinalMark)); WorkBucketStage::Closure }; self.mmtk.scheduler.work_buckets[bucket].add(ProcessModBufSATB::::new(satb)); @@ -80,6 +81,7 @@ impl + PlanTraceObject> SATBBarrie let bucket = if self.plan.concurrent_work_in_progress() { WorkBucketStage::Unconstrained } else { + debug_assert_eq!(self.plan.current_pause(), Some(Pause::FinalMark)); WorkBucketStage::Closure }; self.mmtk.scheduler.work_buckets[bucket].add(ProcessModBufSATB::::new(nodes)); diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index cacdeb62d6..6620b766d2 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -59,8 +59,12 @@ impl + PlanTraceObject> } fn trace_object(&mut self, object: ObjectReference) -> ObjectReference { - self.plan - .trace_object::(self, object, self.worker()) + let new_object = + self.plan + .trace_object::(self, object, self.worker()); + // No copying should happen. + debug_assert_eq!(object, new_object); + object } fn trace_objects(&mut self, objects: &[ObjectReference]) { diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index ff6a21a092..9d7dc4fcdd 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -256,6 +256,10 @@ impl Plan for ConcurrentImmix { } info!("{:?} start", pause); } + + fn concurrent(&self) -> Option<&dyn ConcurrentPlan> { + Some(self) + } } impl ConcurrentImmix { From ad41d7e67629d72a6918cc02e498b053bd91f8b1 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Thu, 14 Aug 2025 05:14:30 +0000 Subject: [PATCH 18/59] Wrong assertions --- src/plan/concurrent/barrier.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs index 09e65276e6..a26ee50444 100644 --- a/src/plan/concurrent/barrier.rs +++ b/src/plan/concurrent/barrier.rs @@ -63,7 +63,7 @@ impl + PlanTraceObject> SATBBarrie let bucket = if self.plan.concurrent_work_in_progress() { WorkBucketStage::Unconstrained } else { - debug_assert_eq!(self.plan.current_pause(), Some(Pause::FinalMark)); + debug_assert_ne!(self.plan.current_pause(), Some(Pause::InitialMark)); WorkBucketStage::Closure }; self.mmtk.scheduler.work_buckets[bucket].add(ProcessModBufSATB::::new(satb)); @@ -81,7 +81,7 @@ impl + PlanTraceObject> SATBBarrie let bucket = if self.plan.concurrent_work_in_progress() { WorkBucketStage::Unconstrained } else { - debug_assert_eq!(self.plan.current_pause(), Some(Pause::FinalMark)); + debug_assert_ne!(self.plan.current_pause(), Some(Pause::InitialMark)); WorkBucketStage::Closure }; self.mmtk.scheduler.work_buckets[bucket].add(ProcessModBufSATB::::new(nodes)); From a93811062505b5d82b22d18e384d236b2df06695 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Mon, 25 Aug 2025 00:48:42 +0000 Subject: [PATCH 19/59] Fix style check --- src/plan/concurrent/immix/global.rs | 2 +- 
src/policy/largeobjectspace.rs | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 9d7dc4fcdd..01ea679518 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -378,7 +378,7 @@ impl ConcurrentImmix { &'static self, scheduler: &GCWorkScheduler, ) { - use crate::scheduler::gc_work::{Prepare, StopMutators, UnsupportedProcessEdges}; + use crate::scheduler::gc_work::Prepare; self.disable_unnecessary_buckets(scheduler, Pause::InitialMark); diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index 3a60b19dad..6a8e86b8ee 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -263,8 +263,6 @@ impl LargeObjectSpace { } pub fn initial_pause_prepare(&self) { - use crate::util::object_enum::ClosureObjectEnumerator; - debug_assert!(self.treadmill.is_from_space_empty()); debug_assert!(self.treadmill.is_nursery_empty()); debug_assert!(self.common.needs_satb); From 321c6e299631790ad0ffa543caca198d76d706fb Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Fri, 8 Aug 2025 01:35:37 +0000 Subject: [PATCH 20/59] Refactor log bits --- src/plan/concurrent/immix/global.rs | 21 +++-- src/plan/generational/copying/global.rs | 3 + src/plan/generational/global.rs | 3 + src/plan/generational/immix/global.rs | 5 +- src/plan/generational/mod.rs | 4 +- src/plan/global.rs | 52 +++++++++++-- src/plan/immix/global.rs | 2 +- src/plan/pageprotect/global.rs | 1 + src/plan/plan_constraints.rs | 14 +++- src/plan/sticky/immix/global.rs | 10 ++- src/policy/copyspace.rs | 24 ++++-- src/policy/immix/immixspace.rs | 78 +++++++++++-------- src/policy/immortalspace.rs | 18 ++++- src/policy/largeobjectspace.rs | 44 ++++++++--- src/policy/marksweepspace/native_ms/global.rs | 28 +++++-- src/policy/space.rs | 33 +++++--- src/policy/vmspace.rs | 6 +- src/util/copy/mod.rs | 2 +- 18 files changed, 254 insertions(+), 94 deletions(-) diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 01ea679518..27cd8f9d4c 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -25,6 +25,7 @@ use crate::util::heap::gc_trigger::SpaceStats; use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::vm::VMBinding; +use crate::vm::ObjectModel; use crate::{policy::immix::ImmixSpace, util::opaque_pointer::VMWorkerThread}; use std::sync::atomic::AtomicBool; @@ -66,7 +67,7 @@ pub const CONCURRENT_IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE, needs_prepare_mutator: true, barrier: crate::BarrierSelector::SATBBarrier, - needs_satb: true, + uses_log_bit: true, ..PlanConstraints::default() }; @@ -162,8 +163,12 @@ impl Plan for ConcurrentImmix { Pause::InitialMark => { // init prepare has to be executed first, otherwise, los objects will not be // dealt with properly - self.common.initial_pause_prepare(); - self.immix_space.initial_pause_prepare(); + // self.common.initial_pause_prepare(); + // self.immix_space.initial_pause_prepare(); + if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() { + self.common.set_side_log_bits(); + self.immix_space.set_side_log_bits(); + } self.common.prepare(tls, true); self.immix_space.prepare( true, @@ -179,8 +184,12 @@ impl Plan for ConcurrentImmix { match pause { Pause::InitialMark => (), Pause::Full | Pause::FinalMark => { - 
self.immix_space.final_pause_release(); - self.common.final_pause_release(); + if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() { + self.immix_space.clear_side_log_bits(); + self.common.clear_side_log_bits(); + } + // self.immix_space.final_pause_release(); + // self.common.final_pause_release(); self.common.release(tls, true); // release the collected region self.immix_space.release(true); @@ -278,7 +287,7 @@ impl ConcurrentImmix { Self::new_with_args( plan_args, ImmixSpaceArgs { - unlog_object_when_traced: false, + // unlog_object_when_traced: false, #[cfg(feature = "vo_bit")] mixed_age: false, never_move_objects: true, diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs index b23a81c998..b701f8e07d 100644 --- a/src/plan/generational/copying/global.rs +++ b/src/plan/generational/copying/global.rs @@ -110,6 +110,9 @@ impl Plan for GenCopy { let full_heap = !self.gen.is_current_gc_nursery(); self.gen.release(tls); if full_heap { + if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() { + self.fromspace().clear_side_log_bits(); + } self.fromspace().release(); } } diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs index 0404fc93bb..9617bcd1f4 100644 --- a/src/plan/generational/global.rs +++ b/src/plan/generational/global.rs @@ -74,6 +74,9 @@ impl CommonGenPlan { /// Release Gen. This should be called by a single thread in GC release work. pub fn release(&mut self, tls: VMWorkerThread) { let full_heap = !self.is_current_gc_nursery(); + if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() { + self.nursery.clear_side_log_bits(); + } self.common.release(tls, full_heap); self.nursery.release(); } diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index ff76d5ac55..a7dc8fa78d 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -129,6 +129,9 @@ impl Plan for GenImmix { let full_heap = !self.gen.is_current_gc_nursery(); self.gen.prepare(tls); if full_heap { + if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() { + self.immix_space.clear_side_log_bits(); + } self.immix_space.prepare( full_heap, Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), @@ -250,7 +253,7 @@ impl GenImmix { plan_args.get_space_args("immix_mature", true, false, VMRequest::discontiguous()), ImmixSpaceArgs { // We need to unlog objects at tracing time since we currently clear all log bits during a major GC - unlog_object_when_traced: true, + // unlog_object_when_traced: true, // In GenImmix, young objects are not allocated in ImmixSpace directly. #[cfg(feature = "vo_bit")] mixed_age: false, diff --git a/src/plan/generational/mod.rs b/src/plan/generational/mod.rs index 15b63abb33..18f1cacec1 100644 --- a/src/plan/generational/mod.rs +++ b/src/plan/generational/mod.rs @@ -44,7 +44,9 @@ pub const FULL_NURSERY_GC: bool = false; /// Constraints for generational plans. Each generational plan should overwrite based on this constant. pub const GEN_CONSTRAINTS: PlanConstraints = PlanConstraints { moves_objects: true, - needs_log_bit: ACTIVE_BARRIER.equals(BarrierSelector::ObjectBarrier), + uses_log_bit: ACTIVE_BARRIER.equals(BarrierSelector::ObjectBarrier), + unlog_allocated_object: true, + unlog_traced_object: true, barrier: ACTIVE_BARRIER, // We may trace duplicate edges in sticky immix (or any plan that uses object remembering barrier). See https://github.com/mmtk/mmtk-core/issues/743. 
may_trace_duplicate_edges: ACTIVE_BARRIER.equals(BarrierSelector::ObjectBarrier), diff --git a/src/plan/global.rs b/src/plan/global.rs index 0573ee4d82..930ffc3b05 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -433,6 +433,8 @@ impl CreateSpecificPlanArgs<'_, VM> { zeroed, permission_exec, vmrequest, + unlog_allocated_object: self.constraints.unlog_allocated_object, + unlog_traced_object: self.constraints.unlog_traced_object, global_side_metadata_specs: self.global_side_metadata_specs.clone(), vm_map: self.global_args.vm_map, mmapper: self.global_args.mmapper, @@ -534,6 +536,28 @@ impl BasePlan { self.vm_space.release(); } + pub fn clear_side_log_bits(&self) { + #[cfg(feature = "code_space")] + self.code_space.clear_side_log_bits(); + #[cfg(feature = "code_space")] + self.code_lo_space.clear_side_log_bits(); + #[cfg(feature = "ro_space")] + self.ro_space.clear_side_log_bits(); + #[cfg(feature = "vm_space")] + self.vm_space.clear_side_log_bits(); + } + + pub fn set_side_log_bits(&self) { + #[cfg(feature = "code_space")] + self.code_space.set_side_log_bits(); + #[cfg(feature = "code_space")] + self.code_lo_space.set_side_log_bits(); + #[cfg(feature = "ro_space")] + self.ro_space.set_side_log_bits(); + #[cfg(feature = "vm_space")] + self.vm_space.set_side_log_bits(); + } + pub fn end_of_gc(&mut self, _tls: VMWorkerThread) { // Do nothing here. None of the spaces needs end_of_gc. } @@ -601,6 +625,7 @@ pub struct CommonPlan { impl CommonPlan { pub fn new(mut args: CreateSpecificPlanArgs) -> CommonPlan { + let uses_log_bit = args.constraints.uses_log_bit; CommonPlan { immortal: ImmortalSpace::new(args.get_space_args( "immortal", @@ -611,6 +636,7 @@ impl CommonPlan { los: LargeObjectSpace::new( args.get_space_args("los", true, false, VMRequest::discontiguous()), false, + uses_log_bit, ), nonmoving: Self::new_nonmoving_space(&mut args), base: BasePlan::new(args), @@ -624,13 +650,13 @@ impl CommonPlan { + self.base.get_used_pages() } - pub fn initial_pause_prepare(&mut self) { - self.los.initial_pause_prepare(); - } + // pub fn initial_pause_prepare(&mut self) { + // self.los.initial_pause_prepare(); + // } - pub fn final_pause_release(&mut self) { - self.los.final_pause_release(); - } + // pub fn final_pause_release(&mut self) { + // self.los.final_pause_release(); + // } pub fn prepare(&mut self, tls: VMWorkerThread, full_heap: bool) { self.immortal.prepare(); @@ -646,6 +672,18 @@ impl CommonPlan { self.base.release(tls, full_heap) } + pub fn clear_side_log_bits(&self) { + self.immortal.clear_side_log_bits(); + self.los.clear_side_log_bits(); + self.base.clear_side_log_bits(); + } + + pub fn set_side_log_bits(&self) { + self.immortal.set_side_log_bits(); + self.los.set_side_log_bits(); + self.base.set_side_log_bits(); + } + pub fn end_of_gc(&mut self, tls: VMWorkerThread) { self.end_of_gc_nonmoving_space(); self.base.end_of_gc(tls); @@ -673,7 +711,7 @@ impl CommonPlan { NonMovingSpace::new( space_args, crate::policy::immix::ImmixSpaceArgs { - unlog_object_when_traced: false, + // unlog_object_when_traced: false, #[cfg(feature = "vo_bit")] mixed_age: false, never_move_objects: true, diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index d8b5f40935..a6eb25b12c 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -138,7 +138,7 @@ impl Immix { Self::new_with_args( plan_args, ImmixSpaceArgs { - unlog_object_when_traced: false, + // unlog_object_when_traced: false, #[cfg(feature = "vo_bit")] mixed_age: false, never_move_objects: false, diff --git 
a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index 5d0d868f95..bd4c5288d4 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -110,6 +110,7 @@ impl PageProtect { space: LargeObjectSpace::new( plan_args.get_space_args("pageprotect", true, false, VMRequest::discontiguous()), true, + false, // PageProtect does not use log bit ), common: CommonPlan::new(plan_args), }; diff --git a/src/plan/plan_constraints.rs b/src/plan/plan_constraints.rs index 6025020c10..53167db527 100644 --- a/src/plan/plan_constraints.rs +++ b/src/plan/plan_constraints.rs @@ -20,7 +20,7 @@ pub struct PlanConstraints { /// This depends on the copy allocator. pub max_non_los_copy_bytes: usize, /// Does this plan use the log bit? See vm::ObjectModel::GLOBAL_LOG_BIT_SPEC. - pub needs_log_bit: bool, + // pub needs_log_bit: bool, /// Some plans may allow benign race for testing mark bit, and this will lead to trace the same /// edge multiple times. If a plan allows tracing duplicated edges, we will not run duplicate /// edge check in extreme_assertions. @@ -45,7 +45,10 @@ pub struct PlanConstraints { /// `MutatorConfig::prepare_func`). Those plans can set this to `false` so that the /// `PrepareMutator` work packets will not be created at all. pub needs_prepare_mutator: bool, - pub needs_satb: bool, + // pub needs_satb: bool, + pub uses_log_bit: bool, + pub unlog_allocated_object: bool, + pub unlog_traced_object: bool, } impl PlanConstraints { @@ -64,11 +67,14 @@ impl PlanConstraints { // We may trace duplicate edges in mark sweep. If we use mark sweep as the non moving policy, it will be included in every may_trace_duplicate_edges: cfg!(feature = "marksweep_as_nonmoving"), needs_forward_after_liveness: false, - needs_log_bit: false, + // needs_log_bit: false, barrier: BarrierSelector::NoBarrier, // If we use mark sweep as non moving space, we need to prepare mutator. See [`common_prepare_func`]. needs_prepare_mutator: cfg!(feature = "marksweep_as_nonmoving"), - needs_satb: false, + // needs_satb: false, + uses_log_bit: false, + unlog_allocated_object: false, + unlog_traced_object: false, } } } diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs index a06dee9816..01bebddb92 100644 --- a/src/plan/sticky/immix/global.rs +++ b/src/plan/sticky/immix/global.rs @@ -42,10 +42,12 @@ pub struct StickyImmix { pub const STICKY_IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { // If we disable moving in Immix, this is a non-moving plan. moves_objects: !cfg!(feature = "immix_non_moving"), - needs_log_bit: true, + uses_log_bit: true, barrier: crate::plan::BarrierSelector::ObjectBarrier, // We may trace duplicate edges in sticky immix (or any plan that uses object remembering barrier). See https://github.com/mmtk/mmtk-core/issues/743. may_trace_duplicate_edges: true, + unlog_allocated_object: true, + unlog_traced_object: true, ..immix::IMMIX_CONSTRAINTS }; @@ -122,6 +124,10 @@ impl Plan for StickyImmix { self.immix.common.los.prepare(false); } else { self.full_heap_gc_count.lock().unwrap().inc(); + if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() { + self.immix.common.clear_side_log_bits(); + self.immix.immix_space.clear_side_log_bits(); + } self.immix.prepare(tls); } } @@ -328,7 +334,7 @@ impl StickyImmix { // Every object we trace in nursery GC becomes a mature object. // Every object we trace in full heap GC is a mature object. Thus in both cases, // they should be unlogged. 
- unlog_object_when_traced: true, + // unlog_object_when_traced: true, // In StickyImmix, both young and old objects are allocated in the ImmixSpace. #[cfg(feature = "vo_bit")] mixed_age: true, diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs index 7067f65d43..1d3bc71540 100644 --- a/src/policy/copyspace.rs +++ b/src/policy/copyspace.rs @@ -201,11 +201,11 @@ impl CopySpace { side_forwarding_status_table.bzero_metadata(start, size); } - if self.common.needs_log_bit { - if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC { - side.bzero_metadata(start, size); - } - } + // if self.common.needs_log_bit { + // if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC { + // side.bzero_metadata(start, size); + // } + // } // Clear VO bits because all objects in the space are dead. #[cfg(feature = "vo_bit")] @@ -218,6 +218,20 @@ impl CopySpace { self.from_space.store(false, Ordering::SeqCst); } + pub fn clear_side_log_bits(&self) { + let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); + for (start, size) in self.pr.iterate_allocated_regions() { + log_bit.bzero_metadata(start, size); + } + } + + pub fn set_side_log_bits(&self) { + let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); + for (start, size) in self.pr.iterate_allocated_regions() { + log_bit.bset_metadata(start, size); + } + } + fn is_from_space(&self) -> bool { self.from_space.load(Ordering::SeqCst) } diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 7a051483d6..65a6ab86c1 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -65,7 +65,7 @@ pub struct ImmixSpaceArgs { /// In sticky immix, we 'promote' an object to mature when we trace the object /// (no matter we copy an object or not). So we have to use `PromoteToMature`, and instead /// just set the log bit in the space when an object is traced. - pub unlog_object_when_traced: bool, + // pub unlog_object_when_traced: bool, /// Whether this ImmixSpace instance contains both young and old objects. /// This affects the updating of valid-object bits. If some lines or blocks of this ImmixSpace /// instance contain young objects, their VO bits need to be updated during this GC. 
Currently
@@ -301,12 +301,12 @@ impl ImmixSpace {
         args: crate::policy::space::PlanCreateSpaceArgs,
         mut space_args: ImmixSpaceArgs,
     ) -> Self {
-        if space_args.unlog_object_when_traced {
-            assert!(
-                args.constraints.needs_log_bit,
-                "Invalid args when the plan does not use log bit"
-            );
-        }
+        // if space_args.unlog_object_when_traced {
+        //     assert!(
+        //         args.constraints.needs_log_bit,
+        //         "Invalid args when the plan does not use log bit"
+        //     );
+        // }

         // Make sure we override the space args if we force non moving Immix
         if cfg!(feature = "immix_non_moving") && !space_args.never_move_objects {
@@ -411,23 +411,23 @@ impl ImmixSpace {
         &self.scheduler
     }

-    pub fn initial_pause_prepare(&mut self) {
-        // make sure all allocated blocks have unlog bit set during initial mark
-        if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC {
-            for chunk in self.chunk_map.all_chunks() {
-                side.bset_metadata(chunk.start(), Chunk::BYTES);
-            }
-        }
-    }
-
-    pub fn final_pause_release(&mut self) {
-        // clear the unlog bit so that during normal mutator phase, stab barrier is effectively disabled (all objects are considered as logged and thus no slow path will be taken)
-        if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC {
-            for chunk in self.chunk_map.all_chunks() {
-                side.bzero_metadata(chunk.start(), Chunk::BYTES);
-            }
-        }
-    }
+    // pub fn initial_pause_prepare(&mut self) {
+    //     // make sure all allocated blocks have unlog bit set during initial mark
+    //     if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC {
+    //         for chunk in self.chunk_map.all_chunks() {
+    //             side.bset_metadata(chunk.start(), Chunk::BYTES);
+    //         }
+    //     }
+    // }
+
+    // pub fn final_pause_release(&mut self) {
+    //     // clear the unlog bit so that during normal mutator phase, the SATB barrier is effectively disabled (all objects are considered as logged and thus no slow path will be taken)
+    //     if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC {
+    //         for chunk in self.chunk_map.all_chunks() {
+    //             side.bzero_metadata(chunk.start(), Chunk::BYTES);
+    //         }
+    //     }
+    // }

     pub fn prepare(&mut self, major_gc: bool, plan_stats: Option) {
         if major_gc {
@@ -439,13 +439,13 @@ impl ImmixSpace {
             unimplemented!("cyclic mark bits is not supported at the moment");
         }

-        if self.common.needs_log_bit {
-            if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC {
-                for chunk in self.chunk_map.all_chunks() {
-                    side.bzero_metadata(chunk.start(), Chunk::BYTES);
-                }
-            }
-        }
+        // if self.common.needs_log_bit {
+        //     if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC {
+        //         for chunk in self.chunk_map.all_chunks() {
+        //             side.bzero_metadata(chunk.start(), Chunk::BYTES);
+        //         }
+        //     }
+        // }

         // Prepare defrag info
         if self.is_defrag_enabled() {
@@ -547,6 +547,20 @@ impl ImmixSpace {
         did_defrag
     }

+    pub fn clear_side_log_bits(&self) {
+        let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec();
+        for chunk in self.chunk_map.all_chunks() {
+            log_bit.bzero_metadata(chunk.start(), Chunk::BYTES);
+        }
+    }
+
+    pub fn set_side_log_bits(&self) {
+        let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec();
+        for chunk in self.chunk_map.all_chunks() {
+            log_bit.bset_metadata(chunk.start(), Chunk::BYTES);
+        }
+    }
+
     /// Generate chunk sweep tasks
     fn generate_sweep_tasks(&self) -> Vec>> {
         self.defrag.mark_histograms.lock().clear();
@@ -754,7 +768,7 @@ impl ImmixSpace {
     }

     fn unlog_object_if_needed(&self, object: ObjectReference) {
- if self.space_args.unlog_object_when_traced { + if self.common.unlog_traced_object { // Make sure the side metadata for the line can fit into one byte. For smaller line size, we should // use `mark_as_unlogged` instead to mark the bit. const_assert!( diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs index c9eedd365f..4c57c4034b 100644 --- a/src/policy/immortalspace.rs +++ b/src/policy/immortalspace.rs @@ -56,7 +56,7 @@ impl SFT for ImmortalSpace { fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { self.mark_state .on_object_metadata_initialization::(object); - if self.common.needs_log_bit { + if self.common.unlog_allocated_object { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } #[cfg(feature = "vo_bit")] @@ -173,6 +173,20 @@ impl ImmortalSpace { self.mark_state.on_global_release::(); } + pub fn clear_side_log_bits(&self) { + let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); + for (start, size) in self.pr.iterate_allocated_regions() { + log_bit.bzero_metadata(start, size); + } + } + + pub fn set_side_log_bits(&self) { + let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); + for (start, size) in self.pr.iterate_allocated_regions() { + log_bit.bset_metadata(start, size); + } + } + pub fn trace_object( &self, queue: &mut Q, @@ -186,7 +200,7 @@ impl ImmortalSpace { ); if self.mark_state.test_and_mark::(object) { // Set the unlog bit if required - if self.common.needs_log_bit { + if self.common.unlog_traced_object { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.store_atomic::( object, 1, diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index 6a8e86b8ee..48058ab4e9 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -31,6 +31,7 @@ pub struct LargeObjectSpace { mark_state: u8, in_nursery_gc: bool, treadmill: TreadMill, + clear_log_bit_on_sweep: bool, } impl SFT for LargeObjectSpace { @@ -95,7 +96,7 @@ impl SFT for LargeObjectSpace { ); // If this object is freshly allocated, we do not set it as unlogged - if !alloc && self.common.needs_log_bit { + if !alloc && self.common.unlog_allocated_object { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } @@ -235,6 +236,7 @@ impl LargeObjectSpace { pub fn new( args: crate::policy::space::PlanCreateSpaceArgs, protect_memory_on_release: bool, + clear_log_bit_on_sweep: bool, ) -> Self { let is_discontiguous = args.vmrequest.is_discontiguous(); let vm_map = args.vm_map; @@ -259,26 +261,48 @@ impl LargeObjectSpace { mark_state: 0, in_nursery_gc: false, treadmill: TreadMill::new(), + clear_log_bit_on_sweep, } } - pub fn initial_pause_prepare(&self) { - debug_assert!(self.treadmill.is_from_space_empty()); - debug_assert!(self.treadmill.is_nursery_empty()); - debug_assert!(self.common.needs_satb); + pub fn clear_side_log_bits(&self) { let mut enumator = ClosureObjectEnumerator::<_, VM>::new(|object| { - VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); + VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.clear::(object, Ordering::SeqCst); }); self.treadmill.enumerate_objects(&mut enumator); } - pub fn final_pause_release(&self) { + pub fn set_side_log_bits(&self) { + use crate::util::object_enum::ClosureObjectEnumerator; + + debug_assert!(self.treadmill.is_from_space_empty()); + debug_assert!(self.treadmill.is_nursery_empty()); + // debug_assert!(self.common.needs_satb); let mut enumator = ClosureObjectEnumerator::<_, 
VM>::new(|object| { - VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.clear::(object, Ordering::SeqCst); + VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); }); self.treadmill.enumerate_objects(&mut enumator); } + // pub fn initial_pause_prepare(&self) { + // // use crate::util::object_enum::ClosureObjectEnumerator; + + // // debug_assert!(self.treadmill.is_from_space_empty()); + // // debug_assert!(self.treadmill.is_nursery_empty()); + // // debug_assert!(self.common.needs_satb); + // // let mut enumator = ClosureObjectEnumerator::<_, VM>::new(|object| { + // // VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); + // // }); + // // self.treadmill.enumerate_objects(&mut enumator); + // } + + // pub fn final_pause_release(&self) { + // // let mut enumator = ClosureObjectEnumerator::<_, VM>::new(|object| { + // // VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.clear::(object, Ordering::SeqCst); + // // }); + // // self.treadmill.enumerate_objects(&mut enumator); + // } + pub fn prepare(&mut self, full_heap: bool) { if full_heap { // debug_assert!(self.treadmill.is_from_space_empty()); @@ -325,7 +349,7 @@ impl LargeObjectSpace { // We just moved the object out of the logical nursery, mark it as unlogged. // We also unlog mature objects as their unlog bit may have been unset before the // full-heap GC - if self.common.needs_log_bit { + if self.common.unlog_traced_object { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC .mark_as_unlogged::(object, Ordering::SeqCst); } @@ -345,7 +369,7 @@ impl LargeObjectSpace { #[cfg(feature = "vo_bit")] crate::util::metadata::vo_bit::unset_vo_bit(object); // Clear log bits for dead objects to prevent a new nursery object having the unlog bit set - if self.common.needs_log_bit || self.common.needs_satb { + if self.clear_log_bit_on_sweep { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.clear::(object, Ordering::SeqCst); } self.pr diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index 1dc87cd04c..6009c7503c 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -407,13 +407,13 @@ impl MarkSweepSpace { } pub fn prepare(&mut self, full_heap: bool) { - if self.common.needs_log_bit && full_heap { - if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC { - for chunk in self.chunk_map.all_chunks() { - side.bzero_metadata(chunk.start(), Chunk::BYTES); - } - } - } + // if self.common.needs_log_bit && full_heap { + // if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC { + // for chunk in self.chunk_map.all_chunks() { + // side.bzero_metadata(chunk.start(), Chunk::BYTES); + // } + // } + // } #[cfg(debug_assertions)] self.abandoned_in_gc.lock().unwrap().assert_empty(); @@ -427,6 +427,20 @@ impl MarkSweepSpace { .bulk_add(work_packets); } + pub fn clear_side_log_bits(&self) { + let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); + for chunk in self.chunk_map.all_chunks() { + log_bit.bzero_metadata(chunk.start(), Chunk::BYTES); + } + } + + pub fn set_side_log_bits(&self) { + let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); + for chunk in self.chunk_map.all_chunks() { + log_bit.bset_metadata(chunk.start(), Chunk::BYTES); + } + } + pub fn release(&mut self) { let num_mutators = VM::VMActivePlan::number_of_mutators(); // all ReleaseMutator work packets plus the ReleaseMarkSweepSpace packet diff --git a/src/policy/space.rs 
b/src/policy/space.rs index f6f46e8197..833258e03a 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -536,8 +536,12 @@ pub struct CommonSpace { /// This field equals to needs_log_bit in the plan constraints. // TODO: This should be a constant for performance. - pub needs_log_bit: bool, - pub needs_satb: bool, + // pub needs_log_bit: bool, + // pub needs_satb: bool, + + pub uses_log_bit: bool, + pub unlog_allocated_object: bool, + pub unlog_traced_object: bool, /// A lock used during acquire() to make sure only one thread can allocate. pub acquire_lock: Mutex<()>, @@ -564,6 +568,8 @@ pub struct PlanCreateSpaceArgs<'a, VM: VMBinding> { pub name: &'static str, pub zeroed: bool, pub permission_exec: bool, + pub unlog_allocated_object: bool, + pub unlog_traced_object: bool, pub vmrequest: VMRequest, pub global_side_metadata_specs: Vec, pub vm_map: &'static dyn VMMap, @@ -609,8 +615,11 @@ impl CommonSpace { extent: 0, vm_map: args.plan_args.vm_map, mmapper: args.plan_args.mmapper, - needs_log_bit: args.plan_args.constraints.needs_log_bit, - needs_satb: args.plan_args.constraints.needs_satb, + // needs_log_bit: args.plan_args.constraints.needs_log_bit, + // needs_satb: args.plan_args.constraints.needs_satb, + uses_log_bit: args.plan_args.constraints.uses_log_bit, + unlog_allocated_object: args.plan_args.unlog_allocated_object, + unlog_traced_object: args.plan_args.unlog_traced_object, gc_trigger: args.plan_args.gc_trigger, metadata: SideMetadataContext { global: args.plan_args.global_side_metadata_specs, @@ -748,14 +757,14 @@ impl CommonSpace { "vo bit = {}", crate::util::metadata::vo_bit::is_vo_bit_set(object) ); - if self.needs_log_bit { - use crate::vm::object_model::ObjectModel; - use std::sync::atomic::Ordering; - println!( - "log bit = {}", - VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_unlogged::(object, Ordering::Relaxed), - ); - } + // if self.needs_log_bit { + // use crate::vm::object_model::ObjectModel; + // use std::sync::atomic::Ordering; + // println!( + // "log bit = {}", + // VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_unlogged::(object, Ordering::Relaxed), + // ); + // } } } diff --git a/src/policy/vmspace.rs b/src/policy/vmspace.rs index ab1101aaf5..767131f688 100644 --- a/src/policy/vmspace.rs +++ b/src/policy/vmspace.rs @@ -61,7 +61,7 @@ impl SFT for VMSpace { fn initialize_object_metadata(&self, object: ObjectReference, _alloc: bool) { self.mark_state .on_object_metadata_initialization::(object); - if self.common.needs_log_bit { + if self.common.unlog_allocated_object { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); } #[cfg(feature = "vo_bit")] @@ -248,7 +248,7 @@ impl VMSpace { }); #[cfg(feature = "set_unlog_bits_vm_space")] - if self.common.needs_log_bit { + if self.common.uses_log_bit { // Bulk set unlog bits for all addresses in the VM space. 
This ensures that any // modification to the bootimage is logged if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC { @@ -287,7 +287,7 @@ impl VMSpace { // Flip the per-object unlogged bits to "unlogged" state for objects inside the // bootimage #[cfg(feature = "set_unlog_bits_vm_space")] - if self.common.needs_log_bit { + if self.common.unlog_traced_object { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.store_atomic::( object, 1, diff --git a/src/util/copy/mod.rs b/src/util/copy/mod.rs index aaea3b425a..b656cfd03e 100644 --- a/src/util/copy/mod.rs +++ b/src/util/copy/mod.rs @@ -120,7 +120,7 @@ impl GCWorkerCopyContext { )); } // If we are copying objects in mature space, we would need to mark the object as mature. - if semantics.is_mature() && self.config.constraints.needs_log_bit { + if semantics.is_mature() && self.config.constraints.unlog_traced_object { // If the plan uses unlogged bit, we set the unlogged bit (the object is unlogged/mature) VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC .mark_byte_as_unlogged::(object, Ordering::Relaxed); From 12112d24328cbc91b94ee99de02c45208d67d3bf Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Mon, 25 Aug 2025 04:19:07 +0000 Subject: [PATCH 21/59] Fix style check --- src/plan/concurrent/immix/global.rs | 2 -- src/policy/largeobjectspace.rs | 2 -- 2 files changed, 4 deletions(-) diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 892e7a6cd5..7301e4a816 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -273,8 +273,6 @@ impl Plan for ConcurrentImmix { impl ConcurrentImmix { pub fn new(args: CreateGeneralPlanArgs) -> Self { - use crate::vm::ObjectModel; - let spec = crate::util::metadata::extract_side_metadata(&[ *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC, ]); diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index a091178ca0..8b64d82c78 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -289,8 +289,6 @@ impl LargeObjectSpace { } pub fn set_side_log_bits(&self) { - use crate::util::object_enum::ClosureObjectEnumerator; - debug_assert!(self.treadmill.is_from_space_empty()); debug_assert!(self.treadmill.is_nursery_empty()); // debug_assert!(self.common.needs_satb); From 77330fe8c20269e80775af6dca8a90fbb1355302 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Fri, 22 Aug 2025 01:18:37 +0000 Subject: [PATCH 22/59] Use Concurrent bucket for concurrent work --- .../concurrent/concurrent_marking_work.rs | 4 +- src/plan/concurrent/immix/global.rs | 11 +- src/plan/global.rs | 2 + src/scheduler/gc_work.rs | 2 +- src/scheduler/scheduler.rs | 218 ++++++++++-------- src/scheduler/work_bucket.rs | 110 ++++++--- src/scheduler/worker.rs | 4 +- 7 files changed, 214 insertions(+), 137 deletions(-) diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index 6620b766d2..94fe3ae974 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -245,7 +245,7 @@ impl + PlanTraceObject> ProcessEdg let w = ConcurrentTraceObjects::::new(root_objects.clone(), mmtk); match pause { - Pause::InitialMark => worker.scheduler().postpone(w), + Pause::InitialMark => worker.scheduler().work_buckets[WorkBucketStage::Concurrent].add_no_notify(w), _ => unreachable!(), } @@ -258,7 +258,7 @@ impl + PlanTraceObject> ProcessEdg let w = ConcurrentTraceObjects::::new(root_objects.clone(), self.mmtk()); match pause { - Pause::InitialMark => 
worker.scheduler().postpone(w), + Pause::InitialMark => worker.scheduler().work_buckets[WorkBucketStage::Concurrent].add_no_notify(w), _ => unreachable!(), } } diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 7301e4a816..69aaae6bbc 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -263,6 +263,7 @@ impl Plan for ConcurrentImmix { self.set_concurrent_marking_state(false); } } + // scheduler.work_buckets[WorkBucketStage::Concurrent].close(); info!("{:?} start", pause); } @@ -362,16 +363,16 @@ impl ConcurrentImmix { fn disable_unnecessary_buckets(&'static self, scheduler: &GCWorkScheduler, pause: Pause) { if pause == Pause::InitialMark { - scheduler.work_buckets[WorkBucketStage::Closure].set_as_disabled(); + // scheduler.work_buckets[WorkBucketStage::Closure].set_as_disabled(); scheduler.work_buckets[WorkBucketStage::WeakRefClosure].set_as_disabled(); scheduler.work_buckets[WorkBucketStage::FinalRefClosure].set_as_disabled(); + scheduler.work_buckets[WorkBucketStage::SoftRefClosure].set_as_disabled(); scheduler.work_buckets[WorkBucketStage::PhantomRefClosure].set_as_disabled(); } - scheduler.work_buckets[WorkBucketStage::TPinningClosure].set_as_disabled(); - scheduler.work_buckets[WorkBucketStage::PinningRootsTrace].set_as_disabled(); - scheduler.work_buckets[WorkBucketStage::VMRefClosure].set_as_disabled(); + // scheduler.work_buckets[WorkBucketStage::TPinningClosure].set_as_disabled(); + // scheduler.work_buckets[WorkBucketStage::PinningRootsTrace].set_as_disabled(); + // scheduler.work_buckets[WorkBucketStage::VMRefClosure].set_as_disabled(); scheduler.work_buckets[WorkBucketStage::VMRefForwarding].set_as_disabled(); - scheduler.work_buckets[WorkBucketStage::SoftRefClosure].set_as_disabled(); scheduler.work_buckets[WorkBucketStage::CalculateForwarding].set_as_disabled(); scheduler.work_buckets[WorkBucketStage::SecondRoots].set_as_disabled(); scheduler.work_buckets[WorkBucketStage::RefForwarding].set_as_disabled(); diff --git a/src/plan/global.rs b/src/plan/global.rs index fd85ca73d8..3f3da061e0 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -366,6 +366,7 @@ pub struct BasePlan { pub(crate) global_state: Arc, pub options: Arc, pub gc_trigger: Arc>, + pub scheduler: Arc>, // Spaces in base plan #[cfg(feature = "code_space")] @@ -568,6 +569,7 @@ impl BasePlan { global_state: args.global_args.state.clone(), gc_trigger: args.global_args.gc_trigger, options: args.global_args.options, + scheduler: args.global_args.scheduler, } } diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index d7070badd8..b3274f8860 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -229,7 +229,7 @@ impl GCWork for StopMutators { } }); trace!("stop_all_mutators end"); - mmtk.scheduler.set_in_gc_pause(true); + // mmtk.scheduler.set_in_gc_pause(true); mmtk.get_plan().notify_mutators_paused(&mmtk.scheduler); mmtk.scheduler.notify_mutators_paused(mmtk); mmtk.scheduler.work_buckets[WorkBucketStage::Prepare].add(ScanVMSpecificRoots::::new()); diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index 7f9a771569..00096cbda7 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -32,11 +32,11 @@ pub struct GCWorkScheduler { /// How to assign the affinity of each GC thread. Specified by the user. 
    affinity: AffinityKind,

-    pub(super) postponed_concurrent_work:
-        spin::RwLock>>>,
-    pub(super) postponed_concurrent_work_prioritized:
-        spin::RwLock>>>,
-    in_gc_pause: std::sync::atomic::AtomicBool,
+    // pub(super) postponed_concurrent_work:
+    //     spin::RwLock>>>,
+    // pub(super) postponed_concurrent_work_prioritized:
+    //     spin::RwLock>>>,
+    // in_gc_pause: std::sync::atomic::AtomicBool,
 }

 // FIXME: GCWorkScheduler should be naturally Sync, but we cannot remove this `impl` yet.
@@ -51,8 +51,8 @@ impl GCWorkScheduler {

         // Create work buckets for workers.
         let mut work_buckets = EnumMap::from_fn(|stage| {
-            let active = stage == WorkBucketStage::Unconstrained;
-            WorkBucket::new(active, worker_monitor.clone())
+            let open = stage == WorkBucketStage::Unconstrained || stage == WorkBucketStage::Concurrent;
+            WorkBucket::new(stage, open, worker_monitor.clone())
         });

         work_buckets[WorkBucketStage::Unconstrained].enable_prioritized_queue();
@@ -67,14 +67,21 @@ impl GCWorkScheduler {
             // The first STW stage (Prepare) will be opened when the world stopped
             // (i.e. when all mutators are suspended).
             if stage != WorkBucketStage::Unconstrained && stage != first_stw_stage {
-                // Other work packets will be opened after previous stages are done
-                // (i.e their buckets are drained and all workers parked).
                 let cur_stages = open_stages.clone();
-                work_buckets[stage].set_open_condition(
-                    move |scheduler: &GCWorkScheduler| {
-                        scheduler.are_buckets_drained(&cur_stages)
-                    },
-                );
+                if stage == WorkBucketStage::Concurrent {
+                    // The Concurrent bucket can always be opened; it is enabled explicitly
+                    // when there is concurrent work.
+                    work_buckets[stage].set_open_condition(|_| true);
+                    work_buckets[stage].set_as_disabled();
+                } else {
+                    // Other work buckets will be opened after previous stages are done
+                    // (i.e. their buckets are drained and all workers parked).
+                    work_buckets[stage].set_open_condition(
+                        move |scheduler: &GCWorkScheduler| {
+                            debug!("Check if {:?} can be opened? These need to be drained: {:?}", stage, &cur_stages);
+                            scheduler.are_buckets_drained(&cur_stages)
+                        },
+                    );
+                }
                 open_stages.push(stage);
             }
         }
@@ -85,43 +92,43 @@ impl GCWorkScheduler {
             worker_group,
             worker_monitor,
             affinity,
-            postponed_concurrent_work: spin::RwLock::new(crossbeam::deque::Injector::new()),
-            postponed_concurrent_work_prioritized: spin::RwLock::new(
-                crossbeam::deque::Injector::new(),
-            ),
-            in_gc_pause: std::sync::atomic::AtomicBool::new(false),
+            // postponed_concurrent_work: spin::RwLock::new(crossbeam::deque::Injector::new()),
+            // postponed_concurrent_work_prioritized: spin::RwLock::new(
+            //     crossbeam::deque::Injector::new(),
+            // ),
+            // in_gc_pause: std::sync::atomic::AtomicBool::new(false),
         })
     }

-    pub fn postpone(&self, w: impl GCWork) {
-        self.postponed_concurrent_work.read().push(Box::new(w))
-    }
+    // pub fn postpone(&self, w: impl GCWork) {
+    //     self.postponed_concurrent_work.read().push(Box::new(w))
+    // }

-    pub fn postpone_prioritized(&self, w: impl GCWork) {
-        self.postponed_concurrent_work_prioritized
-            .read()
-            .push(Box::new(w))
-    }
+    // pub fn postpone_prioritized(&self, w: impl GCWork) {
+    //     self.postponed_concurrent_work_prioritized
+    //         .read()
+    //         .push(Box::new(w))
+    // }

-    pub fn postpone_dyn(&self, w: Box>) {
-        self.postponed_concurrent_work.read().push(w)
-    }
+    // pub fn postpone_dyn(&self, w: Box>) {
+    //     self.postponed_concurrent_work.read().push(w)
+    // }

-    pub fn postpone_dyn_prioritized(&self, w: Box>) {
-        self.postponed_concurrent_work_prioritized.read().push(w)
-    }
+    // pub fn postpone_dyn_prioritized(&self, w: Box>) {
+    //     self.postponed_concurrent_work_prioritized.read().push(w)
+    // }

-    pub fn postpone_all(&self, ws: Vec>>) {
-        let postponed_concurrent_work = self.postponed_concurrent_work.read();
-        ws.into_iter()
-            .for_each(|w| postponed_concurrent_work.push(w));
-    }
+    // pub fn postpone_all(&self, ws: Vec>>) {
+    //     let postponed_concurrent_work = self.postponed_concurrent_work.read();
+    //     ws.into_iter()
+    //         .for_each(|w| postponed_concurrent_work.push(w));
+    // }

-    pub fn postpone_all_prioritized(&self, ws: Vec>>) {
-        let postponed_concurrent_work = self.postponed_concurrent_work_prioritized.read();
-        ws.into_iter()
-            .for_each(|w| postponed_concurrent_work.push(w));
-    }
+    // pub fn postpone_all_prioritized(&self, ws: Vec>>) {
+    //     let postponed_concurrent_work = self.postponed_concurrent_work_prioritized.read();
+    //     ws.into_iter()
+    //         .for_each(|w| postponed_concurrent_work.push(w));
+    // }

     pub fn num_workers(&self) -> usize {
         self.worker_group.as_ref().worker_count()
@@ -276,18 +283,25 @@ impl GCWorkScheduler {
     }

     fn are_buckets_drained(&self, buckets: &[WorkBucketStage]) -> bool {
-        buckets.iter().all(|&b| self.work_buckets[b].is_drained())
+        buckets.iter().all(|&b| self.work_buckets[b].is_disabled() || self.work_buckets[b].is_drained())
     }

-    pub fn all_buckets_empty(&self) -> bool {
-        self.work_buckets.values().all(|bucket| bucket.is_empty())
+    pub fn all_stw_buckets_empty(&self) -> bool {
+        self.work_buckets.values().filter(|bucket| bucket.stage.is_stw()).all(|bucket| if !bucket.is_empty() {
+            warn!("Work bucket {:?} is not empty but it is expected to be empty!", bucket.stage);
+            warn!("Queue: {:?}", bucket.queue);
+            warn!("PriorityQueue: {:?}", bucket.prioritized_queue);
+            false
+        } else {
+            true
+        })
     }

     /// Schedule "sentinel" work packets for all activated buckets.
     pub(crate) fn schedule_sentinels(&self) -> bool {
         let mut new_packets = false;
         for (id, work_bucket) in self.work_buckets.iter() {
-            if work_bucket.is_activated() && work_bucket.maybe_schedule_sentinel() {
+            if work_bucket.is_open() && work_bucket.maybe_schedule_sentinel() {
                 trace!("Scheduled sentinel packet into {:?}", id);
                 new_packets = true;
             }
@@ -302,6 +316,7 @@ impl GCWorkScheduler {
     ///
     /// Return true if there're any non-empty buckets updated.
     pub(crate) fn update_buckets(&self) -> bool {
+        debug!("update_buckets");
         let mut buckets_updated = false;
         let mut new_packets = false;
         for i in 0..WorkBucketStage::LENGTH {
@@ -310,6 +325,11 @@ impl GCWorkScheduler {
                 continue;
             }
             let bucket = &self.work_buckets[id];
+            if bucket.is_disabled() {
+                debug!("Work bucket {:?} is disabled. Skip.", id);
+                continue;
+            }
+            debug!("Checking if {:?} can be opened...", id);
             let bucket_opened = bucket.update(self);
             buckets_updated = buckets_updated || bucket_opened;
             if bucket_opened {
@@ -331,10 +351,10 @@ impl GCWorkScheduler {
         buckets_updated && new_packets
     }

-    pub fn deactivate_all(&self) {
+    pub fn deactivate_all_stw(&self) {
         self.work_buckets.iter().for_each(|(id, bkt)| {
-            if id != WorkBucketStage::Unconstrained {
-                bkt.deactivate();
+            if id.is_stw() {
+                bkt.close();
                 bkt.set_as_enabled();
             }
         });
@@ -343,18 +363,18 @@ impl GCWorkScheduler {
     pub fn reset_state(&self) {
         let first_stw_stage = WorkBucketStage::first_stw_stage();
         self.work_buckets.iter().for_each(|(id, bkt)| {
-            if id != WorkBucketStage::Unconstrained && id != first_stw_stage {
-                bkt.deactivate();
+            if id.is_stw() && id != first_stw_stage {
+                bkt.close();
                 bkt.set_as_enabled();
             }
         });
     }

-    pub fn debug_assert_all_buckets_deactivated(&self) {
+    pub fn debug_assert_all_stw_buckets_deactivated(&self) {
         if cfg!(debug_assertions) {
             self.work_buckets.iter().for_each(|(id, bkt)| {
-                if id != WorkBucketStage::Unconstrained {
-                    assert!(!bkt.is_activated());
+                if id.is_stw() {
+                    assert!(!bkt.is_open());
                 }
             });
         }
@@ -364,8 +384,9 @@ impl GCWorkScheduler {
     pub(crate) fn assert_all_activated_buckets_are_empty(&self) {
         let mut error_example = None;
         for (id, bucket) in self.work_buckets.iter() {
-            if bucket.is_activated() && !bucket.is_empty() {
-                error!("Work bucket {:?} is active but not empty!", id);
+            if !bucket.is_disabled() && bucket.is_open() && !bucket.is_empty() {
+                error!("Work bucket {:?} is not drained!", id);
+                error!("Queue: {:?}", bucket.queue);
                 // This error can be hard to reproduce.
                 // If an error happens in the release build where logs are turned off,
                 // we should show at least one abnormal bucket in the panic message
@@ -374,21 +395,14 @@ impl GCWorkScheduler {
             }
         }
         if let Some(id) = error_example {
-            panic!("Some active buckets (such as {:?}) are not empty.", id);
-        }
-    }
-
-    pub(super) fn set_in_gc_pause(&self, in_gc_pause: bool) {
-        self.in_gc_pause
-            .store(in_gc_pause, std::sync::atomic::Ordering::SeqCst);
-        for wb in self.work_buckets.values() {
-            wb.set_in_concurrent(!in_gc_pause);
-        }
-    }
+            panic!("Some buckets (such as {:?}) are not drained.", id);
+        }
+    }

-    pub fn in_concurrent(&self) -> bool {
-        !self.in_gc_pause.load(std::sync::atomic::Ordering::SeqCst)
-    }
+    // pub(super) fn set_in_gc_pause(&self, in_gc_pause: bool) {
+    //     self.in_gc_pause
+    //         .store(in_gc_pause, std::sync::atomic::Ordering::SeqCst);
+    // }

     /// Get a schedulable work packet without retry.
fn poll_schedulable_work_once(&self, worker: &GCWorker) -> Steal>> { @@ -587,15 +601,15 @@ impl GCWorkScheduler { fn on_gc_finished(&self, worker: &GCWorker) -> bool { // All GC workers must have parked by now. debug_assert!(!self.worker_group.has_designated_work()); - debug_assert!(self.all_buckets_empty()); + debug_assert!(self.all_stw_buckets_empty()); // Deactivate all work buckets to prepare for the next GC. - self.deactivate_all(); - self.debug_assert_all_buckets_deactivated(); + self.deactivate_all_stw(); + self.debug_assert_all_stw_buckets_deactivated(); let mmtk = worker.mmtk; - let (queue, pqueue) = self.schedule_postponed_concurrent_packets(); + // let (queue, pqueue) = self.schedule_postponed_concurrent_packets(); // Tell GC trigger that GC ended - this happens before we resume mutators. mmtk.gc_trigger.policy.on_gc_end(mmtk); @@ -650,9 +664,9 @@ impl GCWorkScheduler { // Reset the triggering information. mmtk.state.reset_collection_trigger(); - self.set_in_gc_pause(false); - let concurrent_work_scheduled = self.schedule_concurrent_packets(queue, pqueue); - self.debug_assert_all_buckets_deactivated(); + // self.set_in_gc_pause(false); + let concurrent_work_scheduled = self.schedule_concurrent_packets(); + self.debug_assert_all_stw_buckets_deactivated(); // Set to NotInGC after everything, and right before resuming mutators. mmtk.set_gc_status(GcStatus::NotInGC); @@ -680,7 +694,7 @@ impl GCWorkScheduler { pub fn notify_mutators_paused(&self, mmtk: &'static MMTK) { mmtk.gc_requester.clear_request(); let first_stw_bucket = &self.work_buckets[WorkBucketStage::first_stw_stage()]; - debug_assert!(!first_stw_bucket.is_activated()); + debug_assert!(!first_stw_bucket.is_open()); // Note: This is the only place where a bucket is opened without having all workers parked. // We usually require all workers to park before opening new buckets because otherwise // packets will be executed out of order. However, since `Prepare` is the first STW @@ -688,36 +702,46 @@ impl GCWorkScheduler { // cannot execute work packets out of order. This is not generally true if we are not // opening the first STW bucket. 
In the future, we should redesign the opening condition // of work buckets to make the synchronization more robust, - first_stw_bucket.activate(); + first_stw_bucket.open(); self.worker_monitor.notify_work_available(true); } - fn schedule_postponed_concurrent_packets(&self) -> (PostponeQueue, PostponeQueue) { - let queue = std::mem::take(&mut *self.postponed_concurrent_work.write()); - let pqueue = std::mem::take(&mut *self.postponed_concurrent_work_prioritized.write()); - (queue, pqueue) - } + // fn schedule_postponed_concurrent_packets(&self) -> (PostponeQueue, PostponeQueue) { + // let queue = std::mem::take(&mut *self.postponed_concurrent_work.write()); + // let pqueue = std::mem::take(&mut *self.postponed_concurrent_work_prioritized.write()); + // (queue, pqueue) + // } pub(super) fn schedule_concurrent_packets( &self, - queue: PostponeQueue, - pqueue: PostponeQueue, + // queue: PostponeQueue, + // pqueue: PostponeQueue, ) -> bool { // crate::MOVE_CONCURRENT_MARKING_TO_STW.store(false, Ordering::SeqCst); // crate::PAUSE_CONCURRENT_MARKING.store(false, Ordering::SeqCst); - let mut concurrent_work_scheduled = false; - if !queue.is_empty() { - let old_queue = self.work_buckets[WorkBucketStage::Unconstrained].replace_queue(queue); - debug_assert!(old_queue.is_empty()); - concurrent_work_scheduled = true; - } - if !pqueue.is_empty() { - let old_queue = - self.work_buckets[WorkBucketStage::Unconstrained].replace_queue_prioritized(pqueue); - debug_assert!(old_queue.is_empty()); - concurrent_work_scheduled = true; + // let mut concurrent_work_scheduled = false; + // if !queue.is_empty() { + // let old_queue = self.work_buckets[WorkBucketStage::Unconstrained].replace_queue(queue); + // debug_assert!(old_queue.is_empty()); + // concurrent_work_scheduled = true; + // } + // if !pqueue.is_empty() { + // let old_queue = + // self.work_buckets[WorkBucketStage::Unconstrained].replace_queue_prioritized(pqueue); + // debug_assert!(old_queue.is_empty()); + // concurrent_work_scheduled = true; + // } + // concurrent_work_scheduled + let concurrent_bucket = &self.work_buckets[WorkBucketStage::Concurrent]; + if !concurrent_bucket.is_empty() { + concurrent_bucket.set_as_enabled(); + concurrent_bucket.open(); + true + } else { + concurrent_bucket.set_as_disabled(); + concurrent_bucket.close(); + false } - concurrent_work_scheduled } pub fn wakeup_all_concurrent_workers(&self) { diff --git a/src/scheduler/work_bucket.rs b/src/scheduler/work_bucket.rs index 1a207f0d9e..87a0f4d6fa 100644 --- a/src/scheduler/work_bucket.rs +++ b/src/scheduler/work_bucket.rs @@ -6,7 +6,7 @@ use enum_map::Enum; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; -struct BucketQueue { +pub struct BucketQueue { // queue: Injector>>, queue: std::sync::RwLock>>>, } @@ -40,12 +40,53 @@ impl BucketQueue { } } +use std::fmt; +impl fmt::Debug for BucketQueue { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut items = Vec::new(); + + { + let queue = self.queue.write().unwrap(); + // Drain queue by stealing until empty + loop { + match queue.steal() { + crossbeam::deque::Steal::Success(work) => { + items.push(work); + } + crossbeam::deque::Steal::Retry => continue, + crossbeam::deque::Steal::Empty => break, + } + } + } + + // Format collected items (just type names or Debug, depending on GCWork) + let debug_items: Vec = items + .iter() + .map(|i| i.get_type_name().to_string()) // placeholder since GCWork isn’t Debug + .collect(); + + // Push items back into the queue + { + let queue = 
self.queue.write().unwrap();
+            for work in items {
+                queue.push(work);
+            }
+        }
+
+        f.debug_struct("BucketQueue")
+            .field("queue", &debug_items)
+            .finish()
+    }
+}
+
 pub type BucketOpenCondition = Box) -> bool) + Send>;

 pub struct WorkBucket {
-    active: AtomicBool,
-    queue: BucketQueue,
-    prioritized_queue: Option>,
+    // Whether this bucket has been opened.
+    open: AtomicBool,
+    pub stage: WorkBucketStage,
+    pub queue: BucketQueue,
+    pub prioritized_queue: Option>,
     monitor: Arc,
     can_open: Option>,
     /// After this bucket is activated and all pending work packets (including the packets in this
@@ -60,28 +101,27 @@ pub struct WorkBucket {
     /// recursively, such as ephemerons and Java-style SoftReference and finalizers. Sentinels
     /// can be used repeatedly to discover and process more such objects.
     sentinel: Mutex>>>,
-    in_concurrent: AtomicBool,
+
+    // A disabled work bucket behaves as if it does not exist in terms of scheduling,
+    // except that users can add work to a disabled bucket and enable it later to allow
+    // that work to be scheduled.
     disable: AtomicBool,
 }

 impl WorkBucket {
-    pub(crate) fn new(active: bool, monitor: Arc) -> Self {
+    pub(crate) fn new(stage: WorkBucketStage, open: bool, monitor: Arc) -> Self {
         Self {
-            active: AtomicBool::new(active),
+            open: AtomicBool::new(open),
+            stage,
             queue: BucketQueue::new(),
             prioritized_queue: None,
             monitor,
             can_open: None,
             sentinel: Mutex::new(None),
-            in_concurrent: AtomicBool::new(true),
             disable: AtomicBool::new(false),
         }
     }

-    pub fn set_in_concurrent(&self, in_concurrent: bool) {
-        self.in_concurrent.store(in_concurrent, Ordering::SeqCst);
-    }
-
     pub fn set_as_enabled(&self) {
         self.disable.store(false, Ordering::SeqCst)
     }
@@ -90,7 +130,7 @@ impl WorkBucket {
         self.disable.store(true, Ordering::SeqCst)
     }

-    pub fn disabled(&self) -> bool {
+    pub fn is_disabled(&self) -> bool {
         self.disable.load(Ordering::Relaxed)
     }
@@ -122,7 +162,7 @@ impl WorkBucket {
     fn notify_one_worker(&self) {
         // If the bucket is not activated, don't notify anyone.
-        if !self.is_activated() {
+        if !self.is_open() {
             return;
         }
         // Notify one if there're any parked workers.
@@ -131,20 +171,20 @@ impl WorkBucket {

     pub fn notify_all_workers(&self) {
         // If the bucket is not activated, don't notify anyone.
-        if !self.is_activated() {
+        if !self.is_open() {
             return;
         }
         // Notify all if there're any parked workers.
         self.monitor.notify_work_available(true);
     }

-    pub fn is_activated(&self) -> bool {
-        self.active.load(Ordering::SeqCst)
+    pub fn is_open(&self) -> bool {
+        self.open.load(Ordering::SeqCst)
     }

-    /// Enable the bucket
-    pub fn activate(&self) {
-        self.active.store(true, Ordering::SeqCst);
+    /// Open the bucket
+    pub fn open(&self) {
+        self.open.store(true, Ordering::SeqCst);
     }

     /// Test if the bucket is drained
@@ -158,13 +198,13 @@ impl WorkBucket {
     }

     pub fn is_drained(&self) -> bool {
-        self.is_activated() && self.is_empty()
+        self.is_disabled() || (self.is_open() && self.is_empty())
     }

-    /// Disable the bucket
-    pub fn deactivate(&self) {
-        debug_assert!(self.queue.is_empty(), "Bucket not drained before close");
-        self.active.store(false, Ordering::Relaxed);
+    /// Close the bucket
+    pub fn close(&self) {
+        debug_assert!(self.queue.is_empty(), "Bucket {:?} not drained before close", self.stage);
+        self.open.store(false, Ordering::Relaxed);
     }

     /// Add a work packet to this bucket
@@ -203,7 +243,7 @@ impl WorkBucket {
     /// Panic if this bucket cannot receive prioritized packets.
pub fn bulk_add_prioritized(&self, work_vec: Vec>>) { self.prioritized_queue.as_ref().unwrap().push_all(work_vec); - if self.is_activated() { + if self.is_open() { self.notify_all_workers(); } } @@ -214,14 +254,14 @@ impl WorkBucket { return; } self.queue.push_all(work_vec); - if self.is_activated() { + if self.is_open() { self.notify_all_workers(); } } /// Get a work packet from this bucket pub fn poll(&self, worker: &Worker>>) -> Steal>> { - if !self.is_activated() || self.is_empty() { + if self.is_disabled() || !self.is_open() || self.is_empty() { return Steal::Empty; } if let Some(prioritized_queue) = self.prioritized_queue.as_ref() { @@ -252,8 +292,9 @@ impl WorkBucket { pub fn update(&self, scheduler: &GCWorkScheduler) -> bool { if let Some(can_open) = self.can_open.as_ref() { - if !self.is_activated() && can_open(scheduler) { - self.activate(); + if !self.is_open() && can_open(scheduler) { + debug!("Opening work bucket: {:?}", self.stage); + self.open(); return true; } } @@ -262,7 +303,7 @@ impl WorkBucket { pub fn maybe_schedule_sentinel(&self) -> bool { debug_assert!( - self.is_activated(), + self.is_open(), "Attempted to schedule sentinel work while bucket is not open" ); let maybe_sentinel = { @@ -287,6 +328,7 @@ impl WorkBucket { pub enum WorkBucketStage { /// This bucket is always open. Unconstrained, + Concurrent, /// Preparation work. Plans, spaces, GC workers, mutators, etc. should be prepared for GC at /// this stage. Prepare, @@ -346,6 +388,14 @@ pub enum WorkBucketStage { impl WorkBucketStage { /// The first stop-the-world bucket. pub fn first_stw_stage() -> Self { - WorkBucketStage::from_usize(1) + WorkBucketStage::Prepare + } + + pub fn is_stw(&self) -> bool { + !self.is_concurrent() + } + + pub fn is_concurrent(&self) -> bool { + matches!(self, WorkBucketStage::Unconstrained | WorkBucketStage::Concurrent) } } diff --git a/src/scheduler/worker.rs b/src/scheduler/worker.rs index 005ee95e1a..8d7361573c 100644 --- a/src/scheduler/worker.rs +++ b/src/scheduler/worker.rs @@ -157,7 +157,7 @@ impl GCWorker { /// If the bucket is activated, the packet will be pushed to the local queue, otherwise it will be /// pushed to the global bucket with a higher priority. pub fn add_work_prioritized(&mut self, bucket: WorkBucketStage, work: impl GCWork) { - if !self.scheduler().work_buckets[bucket].is_activated() + if !self.scheduler().work_buckets[bucket].is_open() || self.local_work_buffer.len() >= Self::LOCALLY_CACHED_WORK_PACKETS { self.scheduler.work_buckets[bucket].add_prioritized(Box::new(work)); @@ -170,7 +170,7 @@ impl GCWorker { /// If the bucket is activated, the packet will be pushed to the local queue, otherwise it will be /// pushed to the global bucket. pub fn add_work(&mut self, bucket: WorkBucketStage, work: impl GCWork) { - if !self.scheduler().work_buckets[bucket].is_activated() + if !self.scheduler().work_buckets[bucket].is_open() || self.local_work_buffer.len() >= Self::LOCALLY_CACHED_WORK_PACKETS { self.scheduler.work_buckets[bucket].add(work); From bdcf723cc58dc4d268cc794b816e8bdf9694fa91 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 26 Aug 2025 00:53:35 +0000 Subject: [PATCH 23/59] Re-enable weak ref buckets. Calculate allocated pages using used pages. 
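The concurrent-marking trigger is rebuilt around a snapshot of used pages taken at
the end of each GC, rather than a counter bumped on every block/LOS allocation. A
minimal sketch of the resulting heuristic (accessor names as in the diff below;
the half-heap threshold mirrors `get_total_pages() >> 1`):

    fn should_start_concurrent_marking<VM: VMBinding>(plan: &ConcurrentImmix<VM>) -> bool {
        // Pages allocated since the last GC, estimated as the used-page delta.
        let used_now = plan.get_used_pages();
        let used_after_last_gc = plan.base().global_state.get_used_pages_after_last_gc();
        let allocated = used_now.saturating_sub(used_after_last_gc);
        // Kick off concurrent marking once half of the heap has been consumed.
        allocated > (plan.get_total_pages() >> 1)
    }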
--- src/global_state.rs | 14 ++++++++++-- src/plan/concurrent/immix/global.rs | 33 +++++++++++++++-------------- src/policy/immix/immixspace.rs | 8 ------- src/policy/largeobjectspace.rs | 4 ---- src/scheduler/scheduler.rs | 3 +++ 5 files changed, 32 insertions(+), 30 deletions(-) diff --git a/src/global_state.rs b/src/global_state.rs index 317caf0c81..b270d94924 100644 --- a/src/global_state.rs +++ b/src/global_state.rs @@ -49,7 +49,8 @@ pub struct GlobalState { pub(crate) malloc_bytes: AtomicUsize, /// This stores the live bytes and the used bytes (by pages) for each space in last GC. This counter is only updated in the GC release phase. pub(crate) live_bytes_in_last_gc: AtomicRefCell>, - pub(crate) concurrent_marking_threshold: AtomicUsize, + /// The number of used pages at the end of the last GC. This can be used to estimate how many pages we have allocated since last GC. + pub(crate) used_pages_after_last_gc: AtomicUsize, } impl GlobalState { @@ -185,6 +186,15 @@ impl GlobalState { pub(crate) fn decrease_malloc_bytes_by(&self, size: usize) { self.malloc_bytes.fetch_sub(size, Ordering::SeqCst); } + + pub(crate) fn set_used_pages_after_last_gc(&self, pages: usize) { + self.used_pages_after_last_gc + .store(pages, Ordering::Relaxed); + } + + pub(crate) fn get_used_pages_after_last_gc(&self) -> usize { + self.used_pages_after_last_gc.load(Ordering::Relaxed) + } } impl Default for GlobalState { @@ -207,7 +217,7 @@ impl Default for GlobalState { #[cfg(feature = "malloc_counted_size")] malloc_bytes: AtomicUsize::new(0), live_bytes_in_last_gc: AtomicRefCell::new(HashMap::new()), - concurrent_marking_threshold: AtomicUsize::new(0), + used_pages_after_last_gc: AtomicUsize::new(0), } } } diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 448197a3a1..6a020b3fb5 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -85,13 +85,15 @@ impl Plan for ConcurrentImmix { return true; } let threshold = self.get_total_pages() >> 1; - let concurrent_marking_threshold = self + let used_pages_after_last_gc = self .common .base .global_state - .concurrent_marking_threshold - .load(Ordering::Acquire); - if !concurrent_marking_in_progress && concurrent_marking_threshold > threshold { + .get_used_pages_after_last_gc(); + let used_pages_now = self.get_used_pages(); + let allocated = used_pages_now.saturating_sub(used_pages_after_last_gc); + if !concurrent_marking_in_progress && allocated > threshold { + info!("Allocated {allocated} pages since last GC ({used_pages_now} - {used_pages_after_last_gc} > {threshold}): Do concurrent marking"); debug_assert!(crate::concurrent_marking_packets_drained()); debug_assert!(!self.concurrent_marking_in_progress()); let prev_pause = self.previous_pause(); @@ -195,12 +197,6 @@ impl Plan for ConcurrentImmix { self.immix_space.release(true); } } - // reset the concurrent marking page counting - self.common() - .base - .global_state - .concurrent_marking_threshold - .store(0, Ordering::Release); } fn end_of_gc(&mut self, _tls: VMWorkerThread) { @@ -368,16 +364,21 @@ impl ConcurrentImmix { scheduler.work_buckets[WorkBucketStage::FinalRefClosure].set_enabled(false); scheduler.work_buckets[WorkBucketStage::SoftRefClosure].set_enabled(false); scheduler.work_buckets[WorkBucketStage::PhantomRefClosure].set_enabled(false); + } else { + scheduler.work_buckets[WorkBucketStage::WeakRefClosure].set_enabled(true); + scheduler.work_buckets[WorkBucketStage::FinalRefClosure].set_enabled(true); + 
scheduler.work_buckets[WorkBucketStage::SoftRefClosure].set_enabled(true); + scheduler.work_buckets[WorkBucketStage::PhantomRefClosure].set_enabled(true); } // scheduler.work_buckets[WorkBucketStage::TPinningClosure].set_as_disabled(); // scheduler.work_buckets[WorkBucketStage::PinningRootsTrace].set_as_disabled(); // scheduler.work_buckets[WorkBucketStage::VMRefClosure].set_as_disabled(); - scheduler.work_buckets[WorkBucketStage::VMRefForwarding].set_enabled(false); - scheduler.work_buckets[WorkBucketStage::CalculateForwarding].set_enabled(false); - scheduler.work_buckets[WorkBucketStage::SecondRoots].set_enabled(false); - scheduler.work_buckets[WorkBucketStage::RefForwarding].set_enabled(false); - scheduler.work_buckets[WorkBucketStage::FinalizableForwarding].set_enabled(false); - scheduler.work_buckets[WorkBucketStage::Compact].set_enabled(false); + // scheduler.work_buckets[WorkBucketStage::VMRefForwarding].set_enabled(false); + // scheduler.work_buckets[WorkBucketStage::CalculateForwarding].set_enabled(false); + // scheduler.work_buckets[WorkBucketStage::SecondRoots].set_enabled(false); + // scheduler.work_buckets[WorkBucketStage::RefForwarding].set_enabled(false); + // scheduler.work_buckets[WorkBucketStage::FinalizableForwarding].set_enabled(false); + // scheduler.work_buckets[WorkBucketStage::Compact].set_enabled(false); } pub(crate) fn schedule_concurrent_marking_initial_pause( diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index d11b04af0f..cabc2aefc0 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -602,10 +602,6 @@ impl ImmixSpace { self.chunk_map.set_allocated(block.chunk(), true); self.lines_consumed .fetch_add(Block::LINES, Ordering::SeqCst); - self.common() - .global_state - .concurrent_marking_threshold - .fetch_add(Block::PAGES, Ordering::Relaxed); Some(block) } @@ -632,10 +628,6 @@ impl ImmixSpace { self.lines_consumed.fetch_add(lines_delta, Ordering::SeqCst); block.init(copy); - self.common() - .global_state - .concurrent_marking_threshold - .fetch_add(Block::PAGES, Ordering::Relaxed); return Some(block); } else { return None; diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index 8b64d82c78..9635f57026 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -407,10 +407,6 @@ impl LargeObjectSpace { pages: usize, alloc_options: AllocationOptions, ) -> Address { - self.common() - .global_state - .concurrent_marking_threshold - .fetch_add(pages, Ordering::Relaxed); self.acquire(tls, pages, alloc_options) } diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index 34b6fa9520..d3a9688463 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -652,6 +652,9 @@ impl GCWorkScheduler { } } + mmtk.state + .set_used_pages_after_last_gc(mmtk.get_plan().get_used_pages()); + #[cfg(feature = "extreme_assertions")] if crate::util::slot_logger::should_check_duplicate_slots(mmtk.get_plan()) { // reset the logging info at the end of each GC From 1368ac55be6089933445cdf442d3458b6f5a9c64 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 26 Aug 2025 04:05:59 +0000 Subject: [PATCH 24/59] Remove NUM_CONCURRENT_TRACING_PACKETS. Fix issues about enabling packets. 
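With the global packet counter gone, the scheduler itself becomes the source of
truth for marking progress: concurrent marking is finished exactly when the
Concurrent bucket is drained. A sketch of the check this commit switches to (a
bucket counts as drained when it is disabled, or open with an empty queue):

    fn concurrent_marking_done<VM: VMBinding>(scheduler: &GCWorkScheduler<VM>) -> bool {
        // No ConcurrentTraceObjects packets remain queued in the Concurrent bucket.
        scheduler.work_buckets[WorkBucketStage::Concurrent].is_drained()
    }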
--- src/lib.rs | 7 -- .../concurrent/concurrent_marking_work.rs | 8 -- src/plan/concurrent/immix/global.rs | 111 ++++++------------ tools/tracing/timeline/capture.bt | 4 - 4 files changed, 35 insertions(+), 95 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index e1a7b627e5..7a77ea6c65 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -32,7 +32,6 @@ extern crate static_assertions; extern crate probe; mod mmtk; -use std::sync::atomic::AtomicUsize; pub use mmtk::MMTKBuilder; pub(crate) use mmtk::MMAPPER; @@ -53,9 +52,3 @@ pub mod vm; pub use crate::plan::{ AllocationSemantics, BarrierSelector, Mutator, MutatorContext, ObjectQueue, Plan, }; - -static NUM_CONCURRENT_TRACING_PACKETS: AtomicUsize = AtomicUsize::new(0); - -fn concurrent_marking_packets_drained() -> bool { - crate::NUM_CONCURRENT_TRACING_PACKETS.load(std::sync::atomic::Ordering::SeqCst) == 0 -} diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index d8b79e1048..7af3d4e212 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -12,7 +12,6 @@ use crate::{ vm::*, MMTK, }; -use atomic::Ordering; use std::ops::{Deref, DerefMut}; pub struct ConcurrentTraceObjects + PlanTraceObject> { @@ -31,9 +30,6 @@ impl + PlanTraceObject> pub fn new(objects: Vec, mmtk: &'static MMTK) -> Self { let plan = mmtk.get_plan().downcast_ref::
<P>
().unwrap(); - let old_value = crate::NUM_CONCURRENT_TRACING_PACKETS.fetch_add(1, Ordering::SeqCst); - let new_value = old_value + 1; - probe!(mmtk, num_concurrent_tracing_packets_change, new_value); Self { plan, @@ -139,10 +135,6 @@ impl + PlanTraceObject> GCWork iterations ); self.flush(); - - let old_value = crate::NUM_CONCURRENT_TRACING_PACKETS.fetch_sub(1, Ordering::SeqCst); - let new_value = old_value - 1; - probe!(mmtk, num_concurrent_tracing_packets_change, new_value); } } diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 6a020b3fb5..47cfc41708 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -80,21 +80,21 @@ impl Plan for ConcurrentImmix { let concurrent_marking_in_progress = self.concurrent_marking_in_progress(); - if concurrent_marking_in_progress && crate::concurrent_marking_packets_drained() { + if concurrent_marking_in_progress + && self.common.base.scheduler.work_buckets[WorkBucketStage::Concurrent].is_drained() + { self.gc_cause.store(GCCause::FinalMark, Ordering::Release); return true; } let threshold = self.get_total_pages() >> 1; - let used_pages_after_last_gc = self - .common - .base - .global_state - .get_used_pages_after_last_gc(); + let used_pages_after_last_gc = self.common.base.global_state.get_used_pages_after_last_gc(); let used_pages_now = self.get_used_pages(); let allocated = used_pages_now.saturating_sub(used_pages_after_last_gc); if !concurrent_marking_in_progress && allocated > threshold { info!("Allocated {allocated} pages since last GC ({used_pages_now} - {used_pages_after_last_gc} > {threshold}): Do concurrent marking"); - debug_assert!(crate::concurrent_marking_packets_drained()); + debug_assert!( + self.common.base.scheduler.work_buckets[WorkBucketStage::Concurrent].is_empty() + ); debug_assert!(!self.concurrent_marking_in_progress()); let prev_pause = self.previous_pause(); debug_assert!(prev_pause.is_none() || prev_pause.unwrap() != Pause::InitialMark); @@ -130,8 +130,8 @@ impl Plan for ConcurrentImmix { if pause == Pause::Full { self.current_pause .store(Some(Pause::Full), Ordering::SeqCst); - - Self::schedule_immix_full_heap_collection::< + self.set_ref_closure_buckets_enabled(true); + crate::plan::immix::global::Immix::schedule_immix_full_heap_collection::< ConcurrentImmix, ConcurrentImmixSTWGCWorkContext, ConcurrentImmixSTWGCWorkContext, @@ -292,6 +292,16 @@ impl ConcurrentImmix { mut plan_args: CreateSpecificPlanArgs, space_args: ImmixSpaceArgs, ) -> Self { + // These buckets are not used in an Immix plan. We can simply disable them. + // TODO: We should be more systmatic on this, and disable unnecessary buckets for other plans as well. + let scheduler = &plan_args.global_args.scheduler; + scheduler.work_buckets[WorkBucketStage::VMRefForwarding].set_enabled(false); + scheduler.work_buckets[WorkBucketStage::CalculateForwarding].set_enabled(false); + scheduler.work_buckets[WorkBucketStage::SecondRoots].set_enabled(false); + scheduler.work_buckets[WorkBucketStage::RefForwarding].set_enabled(false); + scheduler.work_buckets[WorkBucketStage::FinalizableForwarding].set_enabled(false); + scheduler.work_buckets[WorkBucketStage::Compact].set_enabled(false); + let immix = ConcurrentImmix { immix_space: ImmixSpace::new( plan_args.get_normal_space_args("immix", true, false, VMRequest::discontiguous()), @@ -310,75 +320,24 @@ impl ConcurrentImmix { immix } - /// Schedule a full heap immix collection. 
This method is used by immix/genimmix/stickyimmix - /// to schedule a full heap collection. A plan must call set_collection_kind and set_gc_status before this method. - pub(crate) fn schedule_immix_full_heap_collection< - PlanType: Plan, - FastContext: GCWorkContext, - DefragContext: GCWorkContext, - >( - plan: &'static DefragContext::PlanType, - immix_space: &ImmixSpace, - scheduler: &GCWorkScheduler, - ) -> bool { - let in_defrag = immix_space.decide_whether_to_defrag( - plan.base().global_state.is_emergency_collection(), - true, - plan.base() - .global_state - .cur_collection_attempts - .load(Ordering::SeqCst), - plan.base().global_state.is_user_triggered_collection(), - *plan.base().options.full_heap_system_gc, - ); - - if in_defrag { - scheduler.schedule_common_work::(plan); - } else { - scheduler.schedule_common_work::(plan); - } - in_defrag - } - fn select_collection_kind(&self) -> Pause { - let emergency = self.base().global_state.is_emergency_collection(); - let user_triggered = self.base().global_state.is_user_triggered_collection(); - let concurrent_marking_in_progress = self.concurrent_marking_in_progress(); - let concurrent_marking_packets_drained = crate::concurrent_marking_packets_drained(); - - if emergency || user_triggered { - return Pause::Full; - } else if !concurrent_marking_in_progress && concurrent_marking_packets_drained { - return Pause::InitialMark; - } else if concurrent_marking_in_progress && concurrent_marking_packets_drained { - return Pause::FinalMark; + match self.gc_cause.load(Ordering::Acquire) { + GCCause::FullHeap => Pause::Full, + GCCause::InitialMark => Pause::InitialMark, + GCCause::FinalMark => Pause::FinalMark, + GCCause::Unknown => { + panic!("Collection kind is not set when scheduling a collection"); + } } - - Pause::Full } - fn disable_unnecessary_buckets(&'static self, scheduler: &GCWorkScheduler, pause: Pause) { - if pause == Pause::InitialMark { - // scheduler.work_buckets[WorkBucketStage::Closure].set_as_disabled(); - scheduler.work_buckets[WorkBucketStage::WeakRefClosure].set_enabled(false); - scheduler.work_buckets[WorkBucketStage::FinalRefClosure].set_enabled(false); - scheduler.work_buckets[WorkBucketStage::SoftRefClosure].set_enabled(false); - scheduler.work_buckets[WorkBucketStage::PhantomRefClosure].set_enabled(false); - } else { - scheduler.work_buckets[WorkBucketStage::WeakRefClosure].set_enabled(true); - scheduler.work_buckets[WorkBucketStage::FinalRefClosure].set_enabled(true); - scheduler.work_buckets[WorkBucketStage::SoftRefClosure].set_enabled(true); - scheduler.work_buckets[WorkBucketStage::PhantomRefClosure].set_enabled(true); - } - // scheduler.work_buckets[WorkBucketStage::TPinningClosure].set_as_disabled(); - // scheduler.work_buckets[WorkBucketStage::PinningRootsTrace].set_as_disabled(); - // scheduler.work_buckets[WorkBucketStage::VMRefClosure].set_as_disabled(); - // scheduler.work_buckets[WorkBucketStage::VMRefForwarding].set_enabled(false); - // scheduler.work_buckets[WorkBucketStage::CalculateForwarding].set_enabled(false); - // scheduler.work_buckets[WorkBucketStage::SecondRoots].set_enabled(false); - // scheduler.work_buckets[WorkBucketStage::RefForwarding].set_enabled(false); - // scheduler.work_buckets[WorkBucketStage::FinalizableForwarding].set_enabled(false); - // scheduler.work_buckets[WorkBucketStage::Compact].set_enabled(false); + fn set_ref_closure_buckets_enabled(&self, do_closure: bool) { + let scheduler = &self.common.base.scheduler; + 
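// Toggle every reference-closure bucket as a group: the initial-mark pause
+ // turns these off (reference processing is deferred until marking finishes),
+ // and the final-mark and full-heap pauses turn them back on.
+ 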
scheduler.work_buckets[WorkBucketStage::VMRefClosure].set_enabled(do_closure); + scheduler.work_buckets[WorkBucketStage::WeakRefClosure].set_enabled(do_closure); + scheduler.work_buckets[WorkBucketStage::FinalRefClosure].set_enabled(do_closure); + scheduler.work_buckets[WorkBucketStage::SoftRefClosure].set_enabled(do_closure); + scheduler.work_buckets[WorkBucketStage::PhantomRefClosure].set_enabled(do_closure); } pub(crate) fn schedule_concurrent_marking_initial_pause( @@ -387,7 +346,7 @@ impl ConcurrentImmix { ) { use crate::scheduler::gc_work::Prepare; - self.disable_unnecessary_buckets(scheduler, Pause::InitialMark); + self.set_ref_closure_buckets_enabled(false); scheduler.work_buckets[WorkBucketStage::Unconstrained].add_prioritized(Box::new( StopMutators::>>::new_args( @@ -400,7 +359,7 @@ impl ConcurrentImmix { } fn schedule_concurrent_marking_final_pause(&'static self, scheduler: &GCWorkScheduler) { - self.disable_unnecessary_buckets(scheduler, Pause::FinalMark); + self.set_ref_closure_buckets_enabled(true); scheduler.work_buckets[WorkBucketStage::Unconstrained].add_prioritized(Box::new( StopMutators::>>::new_args( diff --git a/tools/tracing/timeline/capture.bt b/tools/tracing/timeline/capture.bt index 4ed38bfd9b..b898d96737 100644 --- a/tools/tracing/timeline/capture.bt +++ b/tools/tracing/timeline/capture.bt @@ -136,10 +136,6 @@ usdt:$MMTK:mmtk:add_schedule_collection_packet { printf("add_schedule_collection_packet,i,%d,%lu\n", tid, nsecs); } -usdt:$MMTK:mmtk:num_concurrent_tracing_packets_change { - printf("num_concurrent_tracing_packets_change,C,%d,%lu,%lu\n", tid, nsecs, arg0); -} - usdt:$MMTK:mmtk:finalization { if (@enable_print) { printf("finalization,meta,%d,%lu,%lu,%lu,%lu,%lu\n", tid, nsecs, arg0, arg1, arg2, arg3); From f906db33863f2f0e8f09992cac9d471fcd053240 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 26 Aug 2025 04:20:24 +0000 Subject: [PATCH 25/59] Dont use TRACE_KIND_FAST in ConcurrentTraceObjects --- src/plan/concurrent/barrier.rs | 21 +++-- .../concurrent/concurrent_marking_work.rs | 78 +++++++++++-------- src/plan/concurrent/immix/global.rs | 20 ++--- src/plan/concurrent/immix/mutator.rs | 1 + 4 files changed, 73 insertions(+), 47 deletions(-) diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs index a26ee50444..af9a2d3506 100644 --- a/src/plan/concurrent/barrier.rs +++ b/src/plan/concurrent/barrier.rs @@ -2,6 +2,7 @@ use std::sync::atomic::Ordering; use super::{concurrent_marking_work::ProcessModBufSATB, Pause}; use crate::plan::global::PlanTraceObject; +use crate::policy::gc_work::TraceKind; use crate::{ plan::{barriers::BarrierSemantics, concurrent::global::ConcurrentPlan, VectorQueue}, scheduler::WorkBucketStage, @@ -13,14 +14,20 @@ use crate::{ MMTK, }; -pub struct SATBBarrierSemantics + PlanTraceObject> { +pub struct SATBBarrierSemantics< + VM: VMBinding, + P: ConcurrentPlan + PlanTraceObject, + const KIND: TraceKind, +> { mmtk: &'static MMTK, satb: VectorQueue, refs: VectorQueue, plan: &'static P, } -impl + PlanTraceObject> SATBBarrierSemantics { +impl + PlanTraceObject, const KIND: TraceKind> + SATBBarrierSemantics +{ pub fn new(mmtk: &'static MMTK) -> Self { Self { mmtk, @@ -66,7 +73,8 @@ impl + PlanTraceObject> SATBBarrie debug_assert_ne!(self.plan.current_pause(), Some(Pause::InitialMark)); WorkBucketStage::Closure }; - self.mmtk.scheduler.work_buckets[bucket].add(ProcessModBufSATB::::new(satb)); + self.mmtk.scheduler.work_buckets[bucket] + .add(ProcessModBufSATB::::new(satb)); } else { let _ = self.satb.take(); }; @@ -84,7 
+92,8 @@ impl + PlanTraceObject> SATBBarrie debug_assert_ne!(self.plan.current_pause(), Some(Pause::InitialMark)); WorkBucketStage::Closure }; - self.mmtk.scheduler.work_buckets[bucket].add(ProcessModBufSATB::::new(nodes)); + self.mmtk.scheduler.work_buckets[bucket] + .add(ProcessModBufSATB::::new(nodes)); } } @@ -94,8 +103,8 @@ impl + PlanTraceObject> SATBBarrie } } -impl + PlanTraceObject> BarrierSemantics - for SATBBarrierSemantics +impl + PlanTraceObject, const KIND: TraceKind> + BarrierSemantics for SATBBarrierSemantics { type VM = VM; diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index 7af3d4e212..c82ab0b1fb 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -2,7 +2,7 @@ use crate::plan::concurrent::global::ConcurrentPlan; use crate::plan::concurrent::Pause; use crate::plan::PlanTraceObject; use crate::plan::VectorQueue; -use crate::policy::immix::TRACE_KIND_FAST; +use crate::policy::gc_work::TraceKind; use crate::scheduler::gc_work::{ScanObjects, SlotOf}; use crate::util::ObjectReference; use crate::vm::slot::Slot; @@ -14,7 +14,11 @@ use crate::{ }; use std::ops::{Deref, DerefMut}; -pub struct ConcurrentTraceObjects + PlanTraceObject> { +pub struct ConcurrentTraceObjects< + VM: VMBinding, + P: ConcurrentPlan + PlanTraceObject, + const KIND: TraceKind, +> { plan: &'static P, // objects to mark and scan objects: Option>, @@ -23,8 +27,8 @@ pub struct ConcurrentTraceObjects + Pl worker: *mut GCWorker, } -impl + PlanTraceObject> - ConcurrentTraceObjects +impl + PlanTraceObject, const KIND: TraceKind> + ConcurrentTraceObjects { const SATB_BUFFER_SIZE: usize = 8192; @@ -55,9 +59,9 @@ impl + PlanTraceObject> } fn trace_object(&mut self, object: ObjectReference) -> ObjectReference { - let new_object = - self.plan - .trace_object::(self, object, self.worker()); + let new_object = self + .plan + .trace_object::(self, object, self.worker()); // No copying should happen. 
debug_assert_eq!(object, new_object); object @@ -83,8 +87,8 @@ impl + PlanTraceObject> } } -impl + PlanTraceObject> ObjectQueue - for ConcurrentTraceObjects +impl + PlanTraceObject, const KIND: TraceKind> + ObjectQueue for ConcurrentTraceObjects { fn enqueue(&mut self, object: ObjectReference) { debug_assert!( @@ -96,13 +100,13 @@ impl + PlanTraceObject> ObjectQueu } } -unsafe impl + PlanTraceObject> Send - for ConcurrentTraceObjects +unsafe impl + PlanTraceObject, const KIND: TraceKind> + Send for ConcurrentTraceObjects { } -impl + PlanTraceObject> GCWork - for ConcurrentTraceObjects +impl + PlanTraceObject, const KIND: TraceKind> + GCWork for ConcurrentTraceObjects { fn do_work(&mut self, worker: &mut GCWorker, _mmtk: &'static MMTK) { self.worker = worker; @@ -138,17 +142,23 @@ impl + PlanTraceObject> GCWork } } -pub struct ProcessModBufSATB + PlanTraceObject> { +pub struct ProcessModBufSATB< + VM: VMBinding, + P: ConcurrentPlan + PlanTraceObject, + const KIND: TraceKind, +> { nodes: Option>, _p: std::marker::PhantomData<(VM, P)>, } -unsafe impl + PlanTraceObject> Send - for ProcessModBufSATB +unsafe impl + PlanTraceObject, const KIND: TraceKind> + Send for ProcessModBufSATB { } -impl + PlanTraceObject> ProcessModBufSATB { +impl + PlanTraceObject, const KIND: TraceKind> + ProcessModBufSATB +{ pub fn new(nodes: Vec) -> Self { Self { nodes: Some(nodes), @@ -157,8 +167,8 @@ impl + PlanTraceObject> ProcessMod } } -impl + PlanTraceObject> GCWork - for ProcessModBufSATB +impl + PlanTraceObject, const KIND: TraceKind> + GCWork for ProcessModBufSATB { fn do_work(&mut self, worker: &mut GCWorker, mmtk: &'static MMTK) { let mut w = if let Some(nodes) = self.nodes.take() { @@ -166,7 +176,7 @@ impl + PlanTraceObject> GCWork return; } - ConcurrentTraceObjects::::new(nodes, mmtk) + ConcurrentTraceObjects::::new(nodes, mmtk) } else { return; }; @@ -174,18 +184,22 @@ impl + PlanTraceObject> GCWork } } -pub struct ProcessRootSlots + PlanTraceObject> { +pub struct ProcessRootSlots< + VM: VMBinding, + P: ConcurrentPlan + PlanTraceObject, + const KIND: TraceKind, +> { base: ProcessEdgesBase, _p: std::marker::PhantomData
<P>
, } -unsafe impl + PlanTraceObject> Send - for ProcessRootSlots +unsafe impl + PlanTraceObject, const KIND: TraceKind> + Send for ProcessRootSlots { } -impl + PlanTraceObject> ProcessEdgesWork - for ProcessRootSlots +impl + PlanTraceObject, const KIND: TraceKind> + ProcessEdgesWork for ProcessRootSlots { type VM = VM; type ScanObjectsWorkType = ScanObjects; @@ -234,7 +248,8 @@ impl + PlanTraceObject> ProcessEdg // create the packet let worker = self.worker(); let mmtk = self.mmtk(); - let w = ConcurrentTraceObjects::::new(root_objects.clone(), mmtk); + let w = + ConcurrentTraceObjects::::new(root_objects.clone(), mmtk); match pause { Pause::InitialMark => worker.scheduler().work_buckets @@ -249,7 +264,8 @@ impl + PlanTraceObject> ProcessEdg } if !root_objects.is_empty() { let worker = self.worker(); - let w = ConcurrentTraceObjects::::new(root_objects.clone(), self.mmtk()); + let w = + ConcurrentTraceObjects::::new(root_objects.clone(), self.mmtk()); match pause { Pause::InitialMark => worker.scheduler().work_buckets @@ -266,8 +282,8 @@ impl + PlanTraceObject> ProcessEdg } } -impl + PlanTraceObject> Deref - for ProcessRootSlots +impl + PlanTraceObject, const KIND: TraceKind> Deref + for ProcessRootSlots { type Target = ProcessEdgesBase; fn deref(&self) -> &Self::Target { @@ -275,8 +291,8 @@ impl + PlanTraceObject> Deref } } -impl + PlanTraceObject> DerefMut - for ProcessRootSlots +impl + PlanTraceObject, const KIND: TraceKind> + DerefMut for ProcessRootSlots { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.base diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 47cfc41708..7884d33823 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -348,11 +348,11 @@ impl ConcurrentImmix { self.set_ref_closure_buckets_enabled(false); - scheduler.work_buckets[WorkBucketStage::Unconstrained].add_prioritized(Box::new( - StopMutators::>>::new_args( - Pause::InitialMark, - ), - )); + scheduler.work_buckets[WorkBucketStage::Unconstrained].add_prioritized( + Box::new(StopMutators::< + ConcurrentImmixGCWorkContext>, + >::new_args(Pause::InitialMark)), + ); scheduler.work_buckets[WorkBucketStage::Prepare].add(Prepare::< ConcurrentImmixGCWorkContext>, >::new(self)); @@ -361,11 +361,11 @@ impl ConcurrentImmix { fn schedule_concurrent_marking_final_pause(&'static self, scheduler: &GCWorkScheduler) { self.set_ref_closure_buckets_enabled(true); - scheduler.work_buckets[WorkBucketStage::Unconstrained].add_prioritized(Box::new( - StopMutators::>>::new_args( - Pause::FinalMark, - ), - )); + scheduler.work_buckets[WorkBucketStage::Unconstrained].add_prioritized( + Box::new(StopMutators::< + ConcurrentImmixGCWorkContext>, + >::new_args(Pause::FinalMark)), + ); scheduler.work_buckets[WorkBucketStage::Release].add(Release::< ConcurrentImmixGCWorkContext>, diff --git a/src/plan/concurrent/immix/mutator.rs b/src/plan/concurrent/immix/mutator.rs index b8074544fe..65352757a6 100644 --- a/src/plan/concurrent/immix/mutator.rs +++ b/src/plan/concurrent/immix/mutator.rs @@ -82,6 +82,7 @@ pub fn create_concurrent_immix_mutator( .barrier(Box::new(SATBBarrier::new(SATBBarrierSemantics::< VM, ConcurrentImmix, + { crate::policy::immix::TRACE_KIND_FAST }, >::new(mmtk)))) .build() } From 4e5c7724f84c0fa8b0f2a7ffc7c3bec8ad189124 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 26 Aug 2025 04:49:55 +0000 Subject: [PATCH 26/59] Fix docs --- src/plan/barriers.rs | 4 +- src/plan/concurrent/barrier.rs | 7 ++- .../concurrent/concurrent_marking_work.rs | 
22 +++++---- src/plan/concurrent/immix/mutator.rs | 2 +- src/plan/global.rs | 2 + src/plan/tracing.rs | 48 ++++--------------- src/util/address.rs | 5 -- 7 files changed, 33 insertions(+), 57 deletions(-) diff --git a/src/plan/barriers.rs b/src/plan/barriers.rs index e81b3166eb..fe1bff7a3b 100644 --- a/src/plan/barriers.rs +++ b/src/plan/barriers.rs @@ -19,8 +19,10 @@ use downcast_rs::Downcast; pub enum BarrierSelector { /// No barrier is used. NoBarrier, - /// Object remembering barrier is used. + /// Object remembering psot-write barrier is used. ObjectBarrier, + /// Object remembering pre-write barrier with weak reference loading barrier. + // TODO: We might be able to generalize this to object remembering pre-write barrier. SATBBarrier, } diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs index af9a2d3506..9caeba0cdf 100644 --- a/src/plan/concurrent/barrier.rs +++ b/src/plan/concurrent/barrier.rs @@ -3,6 +3,7 @@ use std::sync::atomic::Ordering; use super::{concurrent_marking_work::ProcessModBufSATB, Pause}; use crate::plan::global::PlanTraceObject; use crate::policy::gc_work::TraceKind; +use crate::util::VMMutatorThread; use crate::{ plan::{barriers::BarrierSemantics, concurrent::global::ConcurrentPlan, VectorQueue}, scheduler::WorkBucketStage, @@ -20,6 +21,7 @@ pub struct SATBBarrierSemantics< const KIND: TraceKind, > { mmtk: &'static MMTK, + tls: VMMutatorThread, satb: VectorQueue, refs: VectorQueue, plan: &'static P, @@ -28,9 +30,10 @@ pub struct SATBBarrierSemantics< impl + PlanTraceObject, const KIND: TraceKind> SATBBarrierSemantics { - pub fn new(mmtk: &'static MMTK) -> Self { + pub fn new(mmtk: &'static MMTK, tls: VMMutatorThread) -> Self { Self { mmtk, + tls, satb: VectorQueue::default(), refs: VectorQueue::default(), plan: mmtk.get_plan().downcast_ref::
<P>
().unwrap(), @@ -154,7 +157,7 @@ impl + PlanTraceObject, const KIND } fn object_probable_write_slow(&mut self, obj: ObjectReference) { - obj.iterate_fields::(|s| { + crate::plan::tracing::SlotIterator::::iterate_fields(obj, self.tls.0, |s| { self.enqueue_node(Some(obj), s, None); }); } diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index c82ab0b1fb..a65b74b5cc 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -74,16 +74,20 @@ impl + PlanTraceObject, const KIND } fn scan_and_enqueue(&mut self, object: ObjectReference) { - object.iterate_fields::(|s| { - let Some(t) = s.load() else { - return; - }; + crate::plan::tracing::SlotIterator::::iterate_fields( + object, + self.worker().tls.0, + |s| { + let Some(t) = s.load() else { + return; + }; - self.next_objects.push(t); - if self.next_objects.len() > Self::SATB_BUFFER_SIZE { - self.flush(); - } - }); + self.next_objects.push(t); + if self.next_objects.len() > Self::SATB_BUFFER_SIZE { + self.flush(); + } + }, + ); } } diff --git a/src/plan/concurrent/immix/mutator.rs b/src/plan/concurrent/immix/mutator.rs index 65352757a6..ba3459a21d 100644 --- a/src/plan/concurrent/immix/mutator.rs +++ b/src/plan/concurrent/immix/mutator.rs @@ -83,6 +83,6 @@ pub fn create_concurrent_immix_mutator( VM, ConcurrentImmix, { crate::policy::immix::TRACE_KIND_FAST }, - >::new(mmtk)))) + >::new(mmtk, mutator_tls)))) .build() } diff --git a/src/plan/global.rs b/src/plan/global.rs index 3f3da061e0..503ee8268f 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -186,6 +186,8 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast { None } + /// Return a reference to `ConcurrentPlan` to allow + /// access methods specific to concurrent plans if the plan is a concurrent plan. fn concurrent( &self, ) -> Option<&dyn crate::plan::concurrent::global::ConcurrentPlan> { diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index fac6ff0da8..f7156193e2 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -147,55 +147,25 @@ impl Drop for ObjectsClosure<'_, E> { } } -struct SlotIteratorImpl { +/// For iterating over the slots of an object. +pub struct SlotIterator { f: F, - // should_discover_references: bool, - // should_claim_clds: bool, - // should_follow_clds: bool, _p: PhantomData, } -impl SlotVisitor for SlotIteratorImpl { +impl SlotVisitor for SlotIterator { fn visit_slot(&mut self, slot: VM::VMSlot) { (self.f)(slot); } } -pub struct SlotIterator { - _p: PhantomData, -} - -impl SlotIterator { - pub fn iterate( - o: ObjectReference, - // should_discover_references: bool, - // should_claim_clds: bool, - // should_follow_clds: bool, - f: impl FnMut(VM::VMSlot), - // klass: Option

, - ) { - let mut x = SlotIteratorImpl:: { - f, - // should_discover_references, - // should_claim_clds, - // should_follow_clds, - _p: PhantomData, - }; - // if let Some(klass) = klass { - // >::scan_object_with_klass( - // VMWorkerThread(VMThread::UNINITIALIZED), - // o, - // &mut x, - // klass, - // ); - // } else { - // >::scan_object( - // VMWorkerThread(VMThread::UNINITIALIZED), - // o, - // &mut x, - // ); - // } +impl SlotIterator { + /// Iterate over the slots of an object by applying a function to each slot. + pub fn iterate_fields(o: ObjectReference, _tls: VMThread, f: F) { + let mut x = SlotIterator:: { f, _p: PhantomData }; >::scan_object( + // FIXME: We should use tls from the arguments. + // See https://github.com/mmtk/mmtk-core/issues/1375 VMWorkerThread(VMThread::UNINITIALIZED), o, &mut x, diff --git a/src/util/address.rs b/src/util/address.rs index 45192157d0..c87a5d3abb 100644 --- a/src/util/address.rs +++ b/src/util/address.rs @@ -465,7 +465,6 @@ mod tests { } } -use crate::plan::SlotIterator; use crate::vm::VMBinding; /// `ObjectReference` represents address for an object. Compared with `Address`, operations allowed @@ -700,10 +699,6 @@ impl ObjectReference { pub fn is_sane(self) -> bool { unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_sane() } - - pub fn iterate_fields(self, f: F) { - SlotIterator::::iterate(self, f) - } } /// allows print Address as upper-case hex value From 47708b7e3919e2f81b16c0140e6c00945620a27d Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 26 Aug 2025 05:05:19 +0000 Subject: [PATCH 27/59] Allow defrag for STW full heap collection --- src/plan/concurrent/immix/global.rs | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 7884d33823..fa8ad7b0b5 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -44,6 +44,8 @@ enum GCCause { FinalMark, } +/// A concurrent Immix plan. The plan supports concurrent collection (strictly non-moving) and STW full heap collection (which may do defrag). +/// The concurrent GC consists of two STW pauses (initial mark and final mark) with concurrent marking in between. #[derive(HasSpaces, PlanTraceObject)] pub struct ConcurrentImmix { #[post_scan] @@ -62,7 +64,7 @@ pub struct ConcurrentImmix { /// The plan constraints for the immix plan. pub const CONCURRENT_IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { // If we disable moving in Immix, this is a non-moving plan. - moves_objects: false, + moves_objects: !cfg!(feature = "immix_non_moving"), // Max immix object size is half of a block. max_non_los_default_alloc_bytes: crate::policy::immix::MAX_IMMIX_OBJECT_SIZE, needs_prepare_mutator: true, @@ -274,24 +276,17 @@ impl ConcurrentImmix { *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC, ]); - let plan_args = CreateSpecificPlanArgs { + let mut plan_args = CreateSpecificPlanArgs { global_args: args, constraints: &CONCURRENT_IMMIX_CONSTRAINTS, global_side_metadata_specs: SideMetadataContext::new_global_specs(&spec), }; - Self::new_with_args( - plan_args, - ImmixSpaceArgs { - mixed_age: false, - never_move_objects: true, - }, - ) - } - pub fn new_with_args( - mut plan_args: CreateSpecificPlanArgs, - space_args: ImmixSpaceArgs, - ) -> Self { + let immix_args = ImmixSpaceArgs { + mixed_age: false, + never_move_objects: false, + }; + // These buckets are not used in an Immix plan. We can simply disable them. 
// TODO: We should be more systmatic on this, and disable unnecessary buckets for other plans as well. let scheduler = &plan_args.global_args.scheduler; @@ -305,7 +300,7 @@ impl ConcurrentImmix { let immix = ConcurrentImmix { immix_space: ImmixSpace::new( plan_args.get_normal_space_args("immix", true, false, VMRequest::discontiguous()), - space_args, + immix_args, ), common: CommonPlan::new(plan_args), last_gc_was_defrag: AtomicBool::new(false), From 35cf25ad47a4c09d24b6e9da0effd97116678d15 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 26 Aug 2025 05:29:06 +0000 Subject: [PATCH 28/59] Put generated concurrent work to the concurrent bucket. Don't need priority queue for unconstrained. --- src/plan/concurrent/barrier.rs | 4 ++-- .../concurrent/concurrent_marking_work.rs | 2 +- src/plan/concurrent/immix/global.rs | 20 +++++++++---------- src/scheduler/scheduler.rs | 2 -- 4 files changed, 13 insertions(+), 15 deletions(-) diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs index 9caeba0cdf..eb37cf8ed6 100644 --- a/src/plan/concurrent/barrier.rs +++ b/src/plan/concurrent/barrier.rs @@ -71,7 +71,7 @@ impl + PlanTraceObject, const KIND if self.should_create_satb_packets() { let satb = self.satb.take(); let bucket = if self.plan.concurrent_work_in_progress() { - WorkBucketStage::Unconstrained + WorkBucketStage::Concurrent } else { debug_assert_ne!(self.plan.current_pause(), Some(Pause::InitialMark)); WorkBucketStage::Closure @@ -90,7 +90,7 @@ impl + PlanTraceObject, const KIND // debug_assert!(self.should_create_satb_packets()); let nodes = self.refs.take(); let bucket = if self.plan.concurrent_work_in_progress() { - WorkBucketStage::Unconstrained + WorkBucketStage::Concurrent } else { debug_assert_ne!(self.plan.current_pause(), Some(Pause::InitialMark)); WorkBucketStage::Closure diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index a65b74b5cc..cd1dc71e81 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -54,7 +54,7 @@ impl + PlanTraceObject, const KIND let objects = self.next_objects.take(); let worker = self.worker(); let w = Self::new(objects, worker.mmtk); - worker.add_work(WorkBucketStage::Unconstrained, w); + worker.add_work(WorkBucketStage::Concurrent, w); } } diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index fa8ad7b0b5..b3ef636ac1 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -343,11 +343,11 @@ impl ConcurrentImmix { self.set_ref_closure_buckets_enabled(false); - scheduler.work_buckets[WorkBucketStage::Unconstrained].add_prioritized( - Box::new(StopMutators::< - ConcurrentImmixGCWorkContext>, - >::new_args(Pause::InitialMark)), - ); + scheduler.work_buckets[WorkBucketStage::Unconstrained].add(StopMutators::< + ConcurrentImmixGCWorkContext>, + >::new_args( + Pause::InitialMark + )); scheduler.work_buckets[WorkBucketStage::Prepare].add(Prepare::< ConcurrentImmixGCWorkContext>, >::new(self)); @@ -356,11 +356,11 @@ impl ConcurrentImmix { fn schedule_concurrent_marking_final_pause(&'static self, scheduler: &GCWorkScheduler) { self.set_ref_closure_buckets_enabled(true); - scheduler.work_buckets[WorkBucketStage::Unconstrained].add_prioritized( - Box::new(StopMutators::< - ConcurrentImmixGCWorkContext>, - >::new_args(Pause::FinalMark)), - ); + scheduler.work_buckets[WorkBucketStage::Unconstrained].add(StopMutators::< + ConcurrentImmixGCWorkContext>, + 
>::new_args( + Pause::FinalMark + )); scheduler.work_buckets[WorkBucketStage::Release].add(Release::< ConcurrentImmixGCWorkContext>, diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index d3a9688463..0274b8c782 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -51,8 +51,6 @@ impl GCWorkScheduler { WorkBucket::new(stage, worker_monitor.clone()) }); - work_buckets[WorkBucketStage::Unconstrained].enable_prioritized_queue(); - // Set the open condition of each bucket. { let mut open_stages: Vec = vec![WorkBucketStage::FIRST_STW_STAGE]; From 26537168389bc415b5b5c407e476faa80fa4163a Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 26 Aug 2025 23:29:31 +0000 Subject: [PATCH 29/59] Properly call post_scan_object in ConcurrentTraceObject --- src/plan/concurrent/concurrent_marking_work.rs | 1 + src/policy/immix/mod.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index cd1dc71e81..6e5a269daa 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -88,6 +88,7 @@ impl + PlanTraceObject, const KIND } }, ); + self.plan.post_scan_object(object); } } diff --git a/src/policy/immix/mod.rs b/src/policy/immix/mod.rs index 7243832b3a..d5895e9470 100644 --- a/src/policy/immix/mod.rs +++ b/src/policy/immix/mod.rs @@ -16,4 +16,4 @@ pub const BLOCK_ONLY: bool = false; /// Mark lines when scanning objects. /// Otherwise, do it at mark time. -pub const MARK_LINE_AT_SCAN_TIME: bool = false; +pub const MARK_LINE_AT_SCAN_TIME: bool = true; From 9cdcb7aa10d6ae3dc1f777c2d9ee2e24ba118340 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Tue, 26 Aug 2025 23:48:27 +0000 Subject: [PATCH 30/59] Remove the use of Pause in StopMutators --- src/plan/concurrent/immix/global.rs | 8 ++------ src/plan/concurrent/mod.rs | 3 +++ src/plan/mod.rs | 1 - src/scheduler/gc_work.rs | 23 +++++++++++++++-------- 4 files changed, 20 insertions(+), 15 deletions(-) diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index b3ef636ac1..f72b478d1c 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -345,9 +345,7 @@ impl ConcurrentImmix { scheduler.work_buckets[WorkBucketStage::Unconstrained].add(StopMutators::< ConcurrentImmixGCWorkContext>, - >::new_args( - Pause::InitialMark - )); + >::new_no_scan_roots()); scheduler.work_buckets[WorkBucketStage::Prepare].add(Prepare::< ConcurrentImmixGCWorkContext>, >::new(self)); @@ -358,9 +356,7 @@ impl ConcurrentImmix { scheduler.work_buckets[WorkBucketStage::Unconstrained].add(StopMutators::< ConcurrentImmixGCWorkContext>, - >::new_args( - Pause::FinalMark - )); + >::new_no_scan_roots()); scheduler.work_buckets[WorkBucketStage::Release].add(Release::< ConcurrentImmixGCWorkContext>, diff --git a/src/plan/concurrent/mod.rs b/src/plan/concurrent/mod.rs index 692ed99d03..a2a95cf307 100644 --- a/src/plan/concurrent/mod.rs +++ b/src/plan/concurrent/mod.rs @@ -6,6 +6,9 @@ pub mod immix; use bytemuck::NoUninit; +/// The pause type for a concurrent GC phase. +// TODO: This is probably not be general enough for all the concurrent plans. +// TODO: We could consider moving this to specific plans later. 
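+// The variants map to the pause kinds scheduled by concurrent immix:
+// `InitialMark` and `FinalMark` bracket the concurrent marking phase, while
+// `Full` is an ordinary stop-the-world collection.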
#[repr(u8)] #[derive(Debug, PartialEq, Eq, Copy, Clone, NoUninit)] pub enum Pause { diff --git a/src/plan/mod.rs b/src/plan/mod.rs index 062c5c9cb9..bbd2fee815 100644 --- a/src/plan/mod.rs +++ b/src/plan/mod.rs @@ -19,7 +19,6 @@ pub use barriers::BarrierSelector; pub(crate) mod gc_requester; mod global; -pub(crate) use concurrent::Pause; pub(crate) use global::create_gc_worker_context; pub(crate) use global::create_mutator; pub(crate) use global::create_plan; diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index b3274f8860..32ce29d567 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -2,7 +2,6 @@ use super::work_bucket::WorkBucketStage; use super::*; use crate::global_state::GcStatus; use crate::plan::ObjectsClosure; -use crate::plan::Pause; use crate::plan::VectorObjectQueue; use crate::util::*; use crate::vm::slot::Slot; @@ -193,21 +192,28 @@ impl GCWork for ReleaseCollector { /// TODO: Smaller work granularity #[derive(Default)] pub struct StopMutators { - pause: Pause, + /// If this is true, we skip creating [`ScanMutatorRoots`] work packets for mutators. + /// By default, this is false. + skip_mutator_roots: bool, + /// Flush mutators once they are stopped. By default this is false. [`ScanMutatorRoots`] will flush mutators. + flush_mutator: bool, phantom: PhantomData, } impl StopMutators { pub fn new() -> Self { Self { - pause: Pause::Full, + skip_mutator_roots: false, + flush_mutator: false, phantom: PhantomData, } } - pub fn new_args(pause: Pause) -> Self { + /// Create a `StopMutators` work packet that does not create `ScanMutatorRoots` work packets for mutators, and will simply flush mutators. + pub fn new_no_scan_roots() -> Self { Self { - pause, + skip_mutator_roots: true, + flush_mutator: true, phantom: PhantomData, } } @@ -221,11 +227,12 @@ impl GCWork for StopMutators { // TODO: The stack scanning work won't start immediately, as the `Prepare` bucket is not opened yet (the bucket is opened in notify_mutators_paused). // Should we push to Unconstrained instead? 
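// The two flags below replace the previous Pause-based branch: the default
// new() still schedules ScanMutatorRoots for every mutator, while
// new_no_scan_roots() only flushes each mutator.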
- if self.pause != Pause::FinalMark { + if self.flush_mutator { + mutator.flush(); + } + if !self.skip_mutator_roots { mmtk.scheduler.work_buckets[WorkBucketStage::Prepare] .add(ScanMutatorRoots::(mutator)); - } else { - mutator.flush(); } }); trace!("stop_all_mutators end"); From d43c9e5af87d27d5d00ff84ea9db8a3711629ad5 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 27 Aug 2025 04:35:57 +0000 Subject: [PATCH 31/59] Use normal StopMutators for initial marking --- src/plan/concurrent/immix/global.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index f72b478d1c..728fc200d5 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -345,7 +345,7 @@ impl ConcurrentImmix { scheduler.work_buckets[WorkBucketStage::Unconstrained].add(StopMutators::< ConcurrentImmixGCWorkContext>, - >::new_no_scan_roots()); + >::new()); scheduler.work_buckets[WorkBucketStage::Prepare].add(Prepare::< ConcurrentImmixGCWorkContext>, >::new(self)); @@ -354,6 +354,7 @@ impl ConcurrentImmix { fn schedule_concurrent_marking_final_pause(&'static self, scheduler: &GCWorkScheduler) { self.set_ref_closure_buckets_enabled(true); + // Skip root scanning in the final mark scheduler.work_buckets[WorkBucketStage::Unconstrained].add(StopMutators::< ConcurrentImmixGCWorkContext>, >::new_no_scan_roots()); From e3163d8bbb77c701b28e6e53ae75faf5727166d2 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 27 Aug 2025 05:05:08 +0000 Subject: [PATCH 32/59] Remove Collection::set_concurrent_marking_state. Introduce active for barriers. --- src/plan/barriers.rs | 31 +++++++++++++++++++++++++-- src/plan/concurrent/immix/global.rs | 10 +++++---- src/plan/concurrent/immix/mutator.rs | 32 ++++++++++++++++++++++++++-- src/plan/mod.rs | 2 ++ src/vm/collection.rs | 3 --- 5 files changed, 67 insertions(+), 11 deletions(-) diff --git a/src/plan/barriers.rs b/src/plan/barriers.rs index fe1bff7a3b..eda01f3588 100644 --- a/src/plan/barriers.rs +++ b/src/plan/barriers.rs @@ -46,6 +46,17 @@ impl BarrierSelector { /// As a performance optimization, the binding may also choose to port the fast-path to the VM side, /// and call the slow-path (`object_reference_write_slow`) only if necessary. pub trait Barrier: 'static + Send + Downcast { + /// Check if the barrier is active. For barriers that are always active, this always returns true. + fn is_active(&self) -> bool { + true + } + + /// Set the barrier active or inactive. For barriers that are always active, this should not be called. + fn set_active(&mut self, _val: bool) { + unreachable!() + } + + /// Flush thread-local states like buffers or remembered sets. fn flush(&mut self) {} /// Weak reference loading barrier. A mutator should call this when loading from a weak @@ -274,13 +285,19 @@ impl Barrier for ObjectBarrier { } pub struct SATBBarrier { + // This only affects the reference load barrier. 
+ active: bool, semantics: S, } impl SATBBarrier { pub fn new(semantics: S) -> Self { - Self { semantics } + Self { + active: false, + semantics, + } } + fn object_is_unlogged(&self, object: ObjectReference) -> bool { // unsafe { S::UNLOG_BIT_SPEC.load::(object, None) != 0 } S::UNLOG_BIT_SPEC.load_atomic::(object, None, Ordering::SeqCst) != 0 @@ -288,12 +305,22 @@ impl SATBBarrier { } impl Barrier for SATBBarrier { + fn set_active(&mut self, val: bool) { + self.active = val; + } + + fn is_active(&self) -> bool { + self.active + } + fn flush(&mut self) { self.semantics.flush(); } fn load_weak_reference(&mut self, o: ObjectReference) { - self.semantics.load_weak_reference(o) + if self.active { + self.semantics.load_weak_reference(o) + } } fn object_reference_clone_pre(&mut self, obj: ObjectReference) { diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 728fc200d5..62bda4b175 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -398,10 +398,6 @@ impl ConcurrentImmix { fn set_concurrent_marking_state(&self, active: bool) { use crate::plan::global::HasSpaces; - use crate::vm::Collection; - - // Update the binding about concurrent marking - ::VMCollection::set_concurrent_marking_state(active); // Tell the spaces to allocate new objects as live let allocate_object_as_live = active; @@ -412,6 +408,12 @@ impl ConcurrentImmix { // Store the state. self.concurrent_marking_active .store(active, Ordering::SeqCst); + + // We also set SATB barrier as active -- this is done in Mutator prepare/release. + } + + pub(super) fn is_concurrent_marking_active(&self) -> bool { + self.concurrent_marking_active.load(Ordering::SeqCst) } fn previous_pause(&self) -> Option { diff --git a/src/plan/concurrent/immix/mutator.rs b/src/plan/concurrent/immix/mutator.rs index ba3459a21d..daf68b3b71 100644 --- a/src/plan/concurrent/immix/mutator.rs +++ b/src/plan/concurrent/immix/mutator.rs @@ -1,6 +1,7 @@ use crate::plan::barriers::SATBBarrier; use crate::plan::concurrent::barrier::SATBBarrierSemantics; use crate::plan::concurrent::immix::ConcurrentImmix; +use crate::plan::concurrent::Pause; use crate::plan::mutator_context::create_allocator_mapping; use crate::plan::mutator_context::create_space_mapping; @@ -20,6 +21,10 @@ pub fn concurrent_immix_mutator_release( mutator: &mut Mutator, _tls: VMWorkerThread, ) { + // Release is not scheduled for initial mark pause + let current_pause = mutator.plan.concurrent().unwrap().current_pause().unwrap(); + debug_assert_ne!(current_pause, Pause::InitialMark); + let immix_allocator = unsafe { mutator .allocators @@ -28,12 +33,22 @@ pub fn concurrent_immix_mutator_release( .downcast_mut::>() .unwrap(); immix_allocator.reset(); + + // Deactivate SATB + if current_pause == Pause::Full || current_pause == Pause::FinalMark { + debug!("Deactivate SATB barrier active for {:?}", mutator as *mut _); + mutator.barrier.set_active(false); + } } pub fn concurent_immix_mutator_prepare( mutator: &mut Mutator, _tls: VMWorkerThread, ) { + // Prepare is not scheduled for final mark pause + let current_pause = mutator.plan.concurrent().unwrap().current_pause().unwrap(); + debug_assert_ne!(current_pause, Pause::FinalMark); + let immix_allocator = unsafe { mutator .allocators @@ -42,6 +57,12 @@ pub fn concurent_immix_mutator_prepare( .downcast_mut::>() .unwrap(); immix_allocator.reset(); + + // Activate SATB + if current_pause == Pause::InitialMark { + debug!("Activate SATB barrier active for {:?}", mutator as *mut _); + 
mutator.barrier.set_active(true); + } } pub(in crate::plan) const RESERVED_ALLOCATORS: ReservedAllocators = ReservedAllocators { @@ -78,11 +99,18 @@ pub fn create_concurrent_immix_mutator( }; let builder = MutatorBuilder::new(mutator_tls, mmtk, config); - builder + let mut mutator = builder .barrier(Box::new(SATBBarrier::new(SATBBarrierSemantics::< VM, ConcurrentImmix, { crate::policy::immix::TRACE_KIND_FAST }, >::new(mmtk, mutator_tls)))) - .build() + .build(); + + // Set barrier active, based on whether concurrent marking is in progress + mutator + .barrier + .set_active(immix.is_concurrent_marking_active()); + + mutator } diff --git a/src/plan/mod.rs b/src/plan/mod.rs index bbd2fee815..aa46e686f9 100644 --- a/src/plan/mod.rs +++ b/src/plan/mod.rs @@ -15,6 +15,8 @@ mod barriers; pub use barriers::BarrierSelector; +pub use barriers::ObjectBarrier; +pub use barriers::SATBBarrier; pub(crate) mod gc_requester; diff --git a/src/vm/collection.rs b/src/vm/collection.rs index 98121317b9..16e87eebe0 100644 --- a/src/vm/collection.rs +++ b/src/vm/collection.rs @@ -162,7 +162,4 @@ pub trait Collection { fn create_gc_trigger() -> Box> { unimplemented!() } - - /// Inform the VM of concurrent marking status - fn set_concurrent_marking_state(_active: bool) {} } From 49661317c89bc5e1c84b8afbc77a4be7a65b8d6c Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 27 Aug 2025 05:45:22 +0000 Subject: [PATCH 33/59] Remove the GCCause type --- src/plan/concurrent/immix/global.rs | 35 ++++++++--------------------- 1 file changed, 9 insertions(+), 26 deletions(-) diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 62bda4b175..19b14ffda6 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -35,15 +35,6 @@ use enum_map::EnumMap; use mmtk_macros::{HasSpaces, PlanTraceObject}; -#[derive(Debug, Clone, Copy, bytemuck::NoUninit, PartialEq, Eq)] -#[repr(u8)] -enum GCCause { - Unknown, - FullHeap, - InitialMark, - FinalMark, -} - /// A concurrent Immix plan. The plan supports concurrent collection (strictly non-moving) and STW full heap collection (which may do defrag). /// The concurrent GC consists of two STW pauses (initial mark and final mark) with concurrent marking in between. 
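/// A typical cycle as scheduled below: the InitialMark pause activates the SATB barrier and scans roots;
/// ConcurrentTraceObjects packets then drain in the Concurrent bucket while mutators run;
/// the FinalMark pause flushes SATB buffers, processes weak references and releases the spaces.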
#[derive(HasSpaces, PlanTraceObject)] @@ -57,7 +48,7 @@ pub struct ConcurrentImmix { last_gc_was_defrag: AtomicBool, current_pause: Atomic>, previous_pause: Atomic>, - gc_cause: Atomic, + gc_cause: Atomic>, concurrent_marking_active: AtomicBool, } @@ -76,7 +67,7 @@ pub const CONCURRENT_IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { impl Plan for ConcurrentImmix { fn collection_required(&self, space_full: bool, _space: Option>) -> bool { if self.base().collection_required(self, space_full) { - self.gc_cause.store(GCCause::FullHeap, Ordering::Release); + self.gc_cause.store(Some(Pause::Full), Ordering::Release); return true; } @@ -85,7 +76,8 @@ impl Plan for ConcurrentImmix { if concurrent_marking_in_progress && self.common.base.scheduler.work_buckets[WorkBucketStage::Concurrent].is_drained() { - self.gc_cause.store(GCCause::FinalMark, Ordering::Release); + self.gc_cause + .store(Some(Pause::FinalMark), Ordering::Release); return true; } let threshold = self.get_total_pages() >> 1; @@ -100,7 +92,8 @@ impl Plan for ConcurrentImmix { debug_assert!(!self.concurrent_marking_in_progress()); let prev_pause = self.previous_pause(); debug_assert!(prev_pause.is_none() || prev_pause.unwrap() != Pause::InitialMark); - self.gc_cause.store(GCCause::InitialMark, Ordering::Release); + self.gc_cause + .store(Some(Pause::InitialMark), Ordering::Release); return true; } false @@ -129,9 +122,8 @@ impl Plan for ConcurrentImmix { fn schedule_collection(&'static self, scheduler: &GCWorkScheduler) { let pause = self.select_collection_kind(); + self.current_pause.store(Some(pause), Ordering::SeqCst); if pause == Pause::Full { - self.current_pause - .store(Some(Pause::Full), Ordering::SeqCst); self.set_ref_closure_buckets_enabled(true); crate::plan::immix::global::Immix::schedule_immix_full_heap_collection::< ConcurrentImmix, @@ -139,8 +131,6 @@ impl Plan for ConcurrentImmix { ConcurrentImmixSTWGCWorkContext, >(self, &self.immix_space, scheduler); } else { - // Set current pause kind - self.current_pause.store(Some(pause), Ordering::SeqCst); // Schedule work match pause { Pause::InitialMark => self.schedule_concurrent_marking_initial_pause(scheduler), @@ -306,7 +296,7 @@ impl ConcurrentImmix { last_gc_was_defrag: AtomicBool::new(false), current_pause: Atomic::new(None), previous_pause: Atomic::new(None), - gc_cause: Atomic::new(GCCause::Unknown), + gc_cause: Atomic::new(None), concurrent_marking_active: AtomicBool::new(false), }; @@ -316,14 +306,7 @@ impl ConcurrentImmix { } fn select_collection_kind(&self) -> Pause { - match self.gc_cause.load(Ordering::Acquire) { - GCCause::FullHeap => Pause::Full, - GCCause::InitialMark => Pause::InitialMark, - GCCause::FinalMark => Pause::FinalMark, - GCCause::Unknown => { - panic!("Collection kind is not set when scheduling a collection"); - } - } + self.gc_cause.load(Ordering::Acquire).take().unwrap() } fn set_ref_closure_buckets_enabled(&self, do_closure: bool) { From c70c03f900cc3915445c3ef58327a18d3f89a02f Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 27 Aug 2025 06:17:14 +0000 Subject: [PATCH 34/59] Cleanup --- src/plan/barriers.rs | 6 +- src/plan/concurrent/barrier.rs | 1 - .../concurrent/concurrent_marking_work.rs | 38 ++++------ src/plan/concurrent/immix/global.rs | 38 ++++------ src/plan/global.rs | 8 -- src/plan/mod.rs | 1 + src/plan/plan_constraints.rs | 3 +- src/policy/immix/immixspace.rs | 18 ----- src/policy/largeobjectspace.rs | 21 ------ src/scheduler/scheduler.rs | 75 +------------------ 10 files changed, 39 insertions(+), 170 deletions(-) diff --git 
a/src/plan/barriers.rs b/src/plan/barriers.rs index eda01f3588..c973905710 100644 --- a/src/plan/barriers.rs +++ b/src/plan/barriers.rs @@ -200,6 +200,7 @@ pub struct ObjectBarrier { } impl ObjectBarrier { + /// Create a new ObjectBarrier with the given semantics. pub fn new(semantics: S) -> Self { Self { semantics } } @@ -284,13 +285,16 @@ impl Barrier for ObjectBarrier { } } +/// A SATB (Snapshot-At-The-Beginning) barrier implementation. +/// This barrier is basically a pre-write object barrier with a weak reference loading barrier. pub struct SATBBarrier { - // This only affects the reference load barrier. + /// This only affects the weak reference load barrier. active: bool, semantics: S, } impl SATBBarrier { + /// Create a new SATBBarrier with the given semantics. pub fn new(semantics: S) -> Self { Self { active: false, diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs index eb37cf8ed6..a0ab577cc9 100644 --- a/src/plan/concurrent/barrier.rs +++ b/src/plan/concurrent/barrier.rs @@ -87,7 +87,6 @@ impl + PlanTraceObject, const KIND #[cold] fn flush_weak_refs(&mut self) { if !self.refs.is_empty() { - // debug_assert!(self.should_create_satb_packets()); let nodes = self.refs.take(); let bucket = if self.plan.concurrent_work_in_progress() { WorkBucketStage::Concurrent diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index 6e5a269daa..0cb5a4e09c 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -203,6 +203,18 @@ unsafe impl + PlanTraceObject, con { } +impl + PlanTraceObject, const KIND: TraceKind> + ProcessRootSlots +{ + fn create_and_schedule_concurrent_trace_objects_work(&self, objects: &Vec) { + let worker = self.worker(); + let mmtk = self.mmtk(); + let w = ConcurrentTraceObjects::::new(objects.clone(), mmtk); + + worker.scheduler().work_buckets[WorkBucketStage::Concurrent].add_no_notify(w); + } +} + impl + PlanTraceObject, const KIND: TraceKind> ProcessEdgesWork for ProcessRootSlots { @@ -243,6 +255,7 @@ impl + PlanTraceObject, const KIND if pause == Pause::FinalMark { return; } + debug_assert_eq!(pause, Pause::InitialMark); let mut root_objects = Vec::with_capacity(Self::CAPACITY); if !self.slots.is_empty() { let slots = std::mem::take(&mut self.slots); @@ -250,34 +263,13 @@ impl + PlanTraceObject, const KIND if let Some(object) = slot.load() { root_objects.push(object); if root_objects.len() == Self::CAPACITY { - // create the packet - let worker = self.worker(); - let mmtk = self.mmtk(); - let w = - ConcurrentTraceObjects::::new(root_objects.clone(), mmtk); - - match pause { - Pause::InitialMark => worker.scheduler().work_buckets - [WorkBucketStage::Concurrent] - .add_no_notify(w), - _ => unreachable!(), - } - + self.create_and_schedule_concurrent_trace_objects_work(&root_objects); root_objects.clear(); } } } if !root_objects.is_empty() { - let worker = self.worker(); - let w = - ConcurrentTraceObjects::::new(root_objects.clone(), self.mmtk()); - - match pause { - Pause::InitialMark => worker.scheduler().work_buckets - [WorkBucketStage::Concurrent] - .add_no_notify(w), - _ => unreachable!(), - } + self.create_and_schedule_concurrent_trace_objects_work(&root_objects); } } } diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 19b14ffda6..4e0326c2b7 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -52,7 +52,7 @@ pub struct ConcurrentImmix { 
concurrent_marking_active: AtomicBool, } -/// The plan constraints for the immix plan. +/// The plan constraints for the concurrent immix plan. pub const CONCURRENT_IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { // If we disable moving in Immix, this is a non-moving plan. moves_objects: !cfg!(feature = "immix_non_moving"), @@ -123,20 +123,20 @@ impl Plan for ConcurrentImmix { fn schedule_collection(&'static self, scheduler: &GCWorkScheduler) { let pause = self.select_collection_kind(); self.current_pause.store(Some(pause), Ordering::SeqCst); - if pause == Pause::Full { - self.set_ref_closure_buckets_enabled(true); - crate::plan::immix::global::Immix::schedule_immix_full_heap_collection::< - ConcurrentImmix, - ConcurrentImmixSTWGCWorkContext, - ConcurrentImmixSTWGCWorkContext, - >(self, &self.immix_space, scheduler); - } else { - // Schedule work - match pause { - Pause::InitialMark => self.schedule_concurrent_marking_initial_pause(scheduler), - Pause::FinalMark => self.schedule_concurrent_marking_final_pause(scheduler), - _ => unreachable!(), + + match pause { + Pause::Full => { + // Ref closure buckets are disabled by initial mark, and need to be re-enabled for full GC before + // we reuse the normal Immix scheduling. + self.set_ref_closure_buckets_enabled(true); + crate::plan::immix::global::Immix::schedule_immix_full_heap_collection::< + ConcurrentImmix, + ConcurrentImmixSTWGCWorkContext, + ConcurrentImmixSTWGCWorkContext, + >(self, &self.immix_space, scheduler); } + Pause::InitialMark => self.schedule_concurrent_marking_initial_pause(scheduler), + Pause::FinalMark => self.schedule_concurrent_marking_final_pause(scheduler), } } @@ -155,10 +155,7 @@ impl Plan for ConcurrentImmix { ); } Pause::InitialMark => { - // init prepare has to be executed first, otherwise, los objects will not be - // dealt with properly - // self.common.initial_pause_prepare(); - // self.immix_space.initial_pause_prepare(); + // Bulk set log bits so SATB barrier will be triggered on the existing objects. if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() { self.common.set_side_log_bits(); self.immix_space.set_side_log_bits(); @@ -178,14 +175,12 @@ impl Plan for ConcurrentImmix { match pause { Pause::InitialMark => (), Pause::Full | Pause::FinalMark => { + // Bulk clear log bits so SATB barrier will not be triggered.
if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() { self.immix_space.clear_side_log_bits(); self.common.clear_side_log_bits(); } - // self.immix_space.final_pause_release(); - // self.common.final_pause_release(); self.common.release(tls, true); - // release the collected region self.immix_space.release(true); } } @@ -251,7 +246,6 @@ impl Plan for ConcurrentImmix { self.set_concurrent_marking_state(false); } } - // scheduler.work_buckets[WorkBucketStage::Concurrent].close(); info!("{:?} start", pause); } diff --git a/src/plan/global.rs b/src/plan/global.rs index 503ee8268f..0572cdfe5c 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -739,14 +739,6 @@ impl CommonPlan { + self.base.get_used_pages() } - // pub fn initial_pause_prepare(&mut self) { - // self.los.initial_pause_prepare(); - // } - - // pub fn final_pause_release(&mut self) { - // self.los.final_pause_release(); - // } - pub fn prepare(&mut self, tls: VMWorkerThread, full_heap: bool) { self.immortal.prepare(); self.los.prepare(full_heap); diff --git a/src/plan/mod.rs b/src/plan/mod.rs index aa46e686f9..7f688a2578 100644 --- a/src/plan/mod.rs +++ b/src/plan/mod.rs @@ -55,6 +55,7 @@ mod nogc; mod pageprotect; mod semispace; +pub(crate) use concurrent::global::ConcurrentPlan; pub(crate) use generational::global::is_nursery_gc; pub(crate) use generational::global::GenerationalPlan; diff --git a/src/plan/plan_constraints.rs b/src/plan/plan_constraints.rs index df991b8df0..f86de5e05f 100644 --- a/src/plan/plan_constraints.rs +++ b/src/plan/plan_constraints.rs @@ -57,7 +57,6 @@ impl PlanConstraints { moves_objects: false, max_non_los_default_alloc_bytes: MAX_INT, max_non_los_copy_bytes: MAX_INT, - needs_log_bit: false, // As `LAZY_SWEEP` is true, needs_linear_scan is true for all the plans. This is strange. // https://github.com/mmtk/mmtk-core/issues/1027 tracks the issue. needs_linear_scan: crate::util::constants::SUPPORT_CARD_SCANNING @@ -66,7 +65,7 @@ impl PlanConstraints { // We may trace duplicate edges in mark sweep. If we use mark sweep as the non moving policy, it will be included in every may_trace_duplicate_edges: cfg!(feature = "marksweep_as_nonmoving"), needs_forward_after_liveness: false, - // needs_log_bit: false, + needs_log_bit: false, barrier: BarrierSelector::NoBarrier, // If we use mark sweep as non moving space, we need to prepare mutator. See [`common_prepare_func`]. 
needs_prepare_mutator: cfg!(feature = "marksweep_as_nonmoving"), diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index cabc2aefc0..dba4922093 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -417,24 +417,6 @@ impl ImmixSpace { &self.scheduler } - // pub fn initial_pause_prepare(&mut self) { - // // make sure all allocated blocks have unlog bit set during initial mark - // if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC { - // for chunk in self.chunk_map.all_chunks() { - // side.bset_metadata(chunk.start(), Chunk::BYTES); - // } - // } - // } - - // pub fn final_pause_release(&mut self) { - // // clear the unlog bit so that during normal mutator phase, stab barrier is effectively disabled (all objects are considered as logged and thus no slow path will be taken) - // if let MetadataSpec::OnSide(side) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC { - // for chunk in self.chunk_map.all_chunks() { - // side.bzero_metadata(chunk.start(), Chunk::BYTES); - // } - // } - // } - pub fn prepare(&mut self, major_gc: bool, plan_stats: Option) { if major_gc { // Update mark_state diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index 9635f57026..d92b0ba0ee 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -291,35 +291,14 @@ impl LargeObjectSpace { pub fn set_side_log_bits(&self) { debug_assert!(self.treadmill.is_from_space_empty()); debug_assert!(self.treadmill.is_nursery_empty()); - // debug_assert!(self.common.needs_satb); let mut enumator = ClosureObjectEnumerator::<_, VM>::new(|object| { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); }); self.treadmill.enumerate_objects(&mut enumator); } - // pub fn initial_pause_prepare(&self) { - // // use crate::util::object_enum::ClosureObjectEnumerator; - - // // debug_assert!(self.treadmill.is_from_space_empty()); - // // debug_assert!(self.treadmill.is_nursery_empty()); - // // debug_assert!(self.common.needs_satb); - // // let mut enumator = ClosureObjectEnumerator::<_, VM>::new(|object| { - // // VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); - // // }); - // // self.treadmill.enumerate_objects(&mut enumator); - // } - - // pub fn final_pause_release(&self) { - // // let mut enumator = ClosureObjectEnumerator::<_, VM>::new(|object| { - // // VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.clear::(object, Ordering::SeqCst); - // // }); - // // self.treadmill.enumerate_objects(&mut enumator); - // } - pub fn prepare(&mut self, full_heap: bool) { if full_heap { - // debug_assert!(self.treadmill.is_from_space_empty()); self.mark_state = MARK_BIT - self.mark_state; } self.treadmill.flip(full_heap); diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index 0274b8c782..7ae2b5a2c0 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -29,11 +29,6 @@ pub struct GCWorkScheduler { pub(crate) worker_monitor: Arc, /// How to assign the affinity of each GC thread. Specified by the user. affinity: AffinityKind, - // pub(super) postponed_concurrent_work: - // spin::RwLock>>>, - // pub(super) postponed_concurrent_work_prioritized: - // spin::RwLock>>>, - // in_gc_pause: std::sync::atomic::AtomicBool, } // FIXME: GCWorkScheduler should be naturally Sync, but we cannot remove this `impl` yet. 
@@ -79,44 +74,9 @@ impl GCWorkScheduler { worker_group, worker_monitor, affinity, - // postponed_concurrent_work: spin::RwLock::new(crossbeam::deque::Injector::new()), - // postponed_concurrent_work_prioritized: spin::RwLock::new( - // crossbeam::deque::Injector::new(), - // ), - // in_gc_pause: std::sync::atomic::AtomicBool::new(false), }) } - // pub fn postpone(&self, w: impl GCWork) { - // self.postponed_concurrent_work.read().push(Box::new(w)) - // } - - // pub fn postpone_prioritized(&self, w: impl GCWork) { - // self.postponed_concurrent_work_prioritized - // .read() - // .push(Box::new(w)) - // } - - // pub fn postpone_dyn(&self, w: Box>) { - // self.postponed_concurrent_work.read().push(w) - // } - - // pub fn postpone_dyn_prioritized(&self, w: Box>) { - // self.postponed_concurrent_work_prioritized.read().push(w) - // } - - // pub fn postpone_all(&self, ws: Vec>>) { - // let postponed_concurrent_work = self.postponed_concurrent_work.read(); - // ws.into_iter() - // .for_each(|w| postponed_concurrent_work.push(w)); - // } - - // pub fn postpone_all_prioritized(&self, ws: Vec>>) { - // let postponed_concurrent_work = self.postponed_concurrent_work_prioritized.read(); - // ws.into_iter() - // .for_each(|w| postponed_concurrent_work.push(w)); - // } - pub fn num_workers(&self) -> usize { self.worker_group.as_ref().worker_count() } @@ -393,11 +353,6 @@ impl GCWorkScheduler { } } - // pub(super) fn set_in_gc_pause(&self, in_gc_pause: bool) { - // self.in_gc_pause - // .store(in_gc_pause, std::sync::atomic::Ordering::SeqCst); - // } - /// Get a schedulable work packet without retry. fn poll_schedulable_work_once(&self, worker: &GCWorker) -> Steal>> { let mut should_retry = false; @@ -603,8 +558,6 @@ impl GCWorkScheduler { let mmtk = worker.mmtk; - // let (queue, pqueue) = self.schedule_postponed_concurrent_packets(); - // Tell GC trigger that GC ended - this happens before we resume mutators. mmtk.gc_trigger.policy.on_gc_end(mmtk); @@ -661,7 +614,6 @@ impl GCWorkScheduler { // Reset the triggering information. 
mmtk.state.reset_collection_trigger(); - // self.set_in_gc_pause(false); let concurrent_work_scheduled = self.schedule_concurrent_packets(); self.debug_assert_all_stw_buckets_closed(); @@ -703,32 +655,7 @@ impl GCWorkScheduler { self.worker_monitor.notify_work_available(true); } - // fn schedule_postponed_concurrent_packets(&self) -> (PostponeQueue, PostponeQueue) { - // let queue = std::mem::take(&mut *self.postponed_concurrent_work.write()); - // let pqueue = std::mem::take(&mut *self.postponed_concurrent_work_prioritized.write()); - // (queue, pqueue) - // } - - pub(super) fn schedule_concurrent_packets( - &self, - // queue: PostponeQueue, - // pqueue: PostponeQueue, - ) -> bool { - // crate::MOVE_CONCURRENT_MARKING_TO_STW.store(false, Ordering::SeqCst); - // crate::PAUSE_CONCURRENT_MARKING.store(false, Ordering::SeqCst); - // let mut concurrent_work_scheduled = false; - // if !queue.is_empty() { - // let old_queue = self.work_buckets[WorkBucketStage::Unconstrained].replace_queue(queue); - // debug_assert!(old_queue.is_empty()); - // concurrent_work_scheduled = true; - // } - // if !pqueue.is_empty() { - // let old_queue = - // self.work_buckets[WorkBucketStage::Unconstrained].replace_queue_prioritized(pqueue); - // debug_assert!(old_queue.is_empty()); - // concurrent_work_scheduled = true; - // } - // concurrent_work_scheduled + pub(super) fn schedule_concurrent_packets(&self) -> bool { let concurrent_bucket = &self.work_buckets[WorkBucketStage::Concurrent]; if !concurrent_bucket.is_empty() { concurrent_bucket.set_enabled(true); From c79fe8e2380b3e63da9625474ac767ad887e59f3 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 27 Aug 2025 06:32:45 +0000 Subject: [PATCH 35/59] Fix style check --- src/plan/concurrent/concurrent_marking_work.rs | 9 +++++---- src/plan/mod.rs | 1 - 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index 0cb5a4e09c..c1bd9f3757 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -206,7 +206,7 @@ unsafe impl + PlanTraceObject, con impl + PlanTraceObject, const KIND: TraceKind> ProcessRootSlots { - fn create_and_schedule_concurrent_trace_objects_work(&self, objects: &Vec) { + fn create_and_schedule_concurrent_trace_objects_work(&self, objects: Vec) { let worker = self.worker(); let mmtk = self.mmtk(); let w = ConcurrentTraceObjects::::new(objects.clone(), mmtk); @@ -263,13 +263,14 @@ impl + PlanTraceObject, const KIND if let Some(object) = slot.load() { root_objects.push(object); if root_objects.len() == Self::CAPACITY { - self.create_and_schedule_concurrent_trace_objects_work(&root_objects); - root_objects.clear(); + self.create_and_schedule_concurrent_trace_objects_work( + root_objects.drain(..).collect(), + ); } } } if !root_objects.is_empty() { - self.create_and_schedule_concurrent_trace_objects_work(&root_objects); + self.create_and_schedule_concurrent_trace_objects_work(root_objects); } } } diff --git a/src/plan/mod.rs b/src/plan/mod.rs index 7f688a2578..aa46e686f9 100644 --- a/src/plan/mod.rs +++ b/src/plan/mod.rs @@ -55,7 +55,6 @@ mod nogc; mod pageprotect; mod semispace; -pub(crate) use concurrent::global::ConcurrentPlan; pub(crate) use generational::global::is_nursery_gc; pub(crate) use generational::global::GenerationalPlan; From a23bfbba802a1ee452cd3672d747fd7026b30e96 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Wed, 27 Aug 2025 23:44:05 +0000 Subject: [PATCH 36/59] Fix style check --- 
src/plan/concurrent/concurrent_marking_work.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index c1bd9f3757..7069815213 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -263,9 +263,9 @@ impl + PlanTraceObject, const KIND if let Some(object) = slot.load() { root_objects.push(object); if root_objects.len() == Self::CAPACITY { - self.create_and_schedule_concurrent_trace_objects_work( - root_objects.drain(..).collect(), - ); + let mut buffer = Vec::with_capacity(Self::CAPACITY); + std::mem::swap(&mut buffer, &mut root_objects); + self.create_and_schedule_concurrent_trace_objects_work(buffer); } } } From 3e076a917bf426d283a89156780dca6f66c7743e Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Thu, 28 Aug 2025 17:37:39 +1200 Subject: [PATCH 37/59] Fix style check --- src/plan/concurrent/immix/global.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 4e0326c2b7..263578b179 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -300,7 +300,7 @@ impl ConcurrentImmix { } fn select_collection_kind(&self) -> Pause { - self.gc_cause.load(Ordering::Acquire).take().unwrap() + self.gc_cause.load(Ordering::Acquire).unwrap() } fn set_ref_closure_buckets_enabled(&self, do_closure: bool) { From 553227a81dd73368e749e30e2cf5d7457fcc9f68 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 28 Aug 2025 15:20:23 +0800 Subject: [PATCH 38/59] Remove object_reference_clone_pre It is currently a no-op and not needed for the SATB barrier. It was intended for implementing OpenJDK's object cloning pre barrier. We will reintroduce it into mmtk-core when we implement a plan that needs such a barrier, and we need to design our API in a VM-agnostic way. --- src/plan/barriers.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/plan/barriers.rs b/src/plan/barriers.rs index c973905710..9758d52801 100644 --- a/src/plan/barriers.rs +++ b/src/plan/barriers.rs @@ -119,8 +119,6 @@ pub trait Barrier: 'static + Send + Downcast { self.memory_region_copy_post(src, dst); } - fn object_reference_clone_pre(&mut self, _obj: ObjectReference) {} - /// Full pre-barrier for array copy fn memory_region_copy_pre(&mut self, _src: VM::VMMemorySlice, _dst: VM::VMMemorySlice) {} @@ -190,8 +188,6 @@ pub trait BarrierSemantics: 'static + Send { fn object_probable_write_slow(&mut self, _obj: ObjectReference) {} fn load_weak_reference(&mut self, _o: ObjectReference) {} - - fn object_reference_clone_pre(&mut self, _obj: ObjectReference) {} } /// Generic object barrier with a type argument defining it's slow-path behaviour. @@ -327,10 +323,6 @@ impl Barrier for SATBBarrier { } } - fn object_reference_clone_pre(&mut self, obj: ObjectReference) { - self.semantics.object_reference_clone_pre(obj); - } - fn object_probable_write(&mut self, obj: ObjectReference) { self.semantics.object_probable_write_slow(obj); } From 8ad9ed88a06a12b1bbbe04d62c8718fee87578e5 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 28 Aug 2025 16:54:24 +0800 Subject: [PATCH 39/59] Remove the gc_cause field. We instead use a boolean field `should_do_full_gc` to tell if the user or the `collection_required` method thinks it is time to do a full GC. We let `schedule_collection` decide the actual pause type.
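
For reference, the pause selection in `schedule_collection` now reduces to the following (a sketch that mirrors the diff below; no new names are introduced here):

    let pause = if self.concurrent_marking_in_progress() {
        // An in-progress concurrent marking cycle must be finished by a FinalMark pause.
        Pause::FinalMark
    } else if self.should_do_full_gc.load(Ordering::SeqCst) {
        // The user or `collection_required` decided it is time for a full GC.
        Pause::Full
    } else {
        // Otherwise, start a new concurrent marking cycle.
        Pause::InitialMark
    };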
--- src/plan/concurrent/immix/global.rs | 32 ++++++++++++++++------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 263578b179..4e3345ee4d 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -48,7 +48,7 @@ pub struct ConcurrentImmix { last_gc_was_defrag: AtomicBool, current_pause: Atomic>, previous_pause: Atomic>, - gc_cause: Atomic>, + should_do_full_gc: AtomicBool, concurrent_marking_active: AtomicBool, } @@ -67,7 +67,8 @@ pub const CONCURRENT_IMMIX_CONSTRAINTS: PlanConstraints = PlanConstraints { impl Plan for ConcurrentImmix { fn collection_required(&self, space_full: bool, _space: Option>) -> bool { if self.base().collection_required(self, space_full) { - self.gc_cause.store(Some(Pause::Full), Ordering::Release); + self.should_do_full_gc.store(true, Ordering::Release); + info!("Triggering full GC"); return true; } @@ -76,10 +77,12 @@ impl Plan for ConcurrentImmix { if concurrent_marking_in_progress && self.common.base.scheduler.work_buckets[WorkBucketStage::Concurrent].is_drained() { - self.gc_cause - .store(Some(Pause::FinalMark), Ordering::Release); + // After the Concurrent bucket is drained during concurrent marking, + // we trigger the FinalMark pause at the next poll() site (here). + // FIXME: Immediately trigger FinalMark when the Concurrent bucket is drained. return true; } + let threshold = self.get_total_pages() >> 1; let used_pages_after_last_gc = self.common.base.global_state.get_used_pages_after_last_gc(); let used_pages_now = self.get_used_pages(); @@ -90,10 +93,7 @@ impl Plan for ConcurrentImmix { self.common.base.scheduler.work_buckets[WorkBucketStage::Concurrent].is_empty() ); debug_assert!(!self.concurrent_marking_in_progress()); - let prev_pause = self.previous_pause(); - debug_assert!(prev_pause.is_none() || prev_pause.unwrap() != Pause::InitialMark); - self.gc_cause - .store(Some(Pause::InitialMark), Ordering::Release); + debug_assert_ne!(self.previous_pause(), Some(Pause::InitialMark)); return true; } false @@ -121,7 +121,14 @@ impl Plan for ConcurrentImmix { } fn schedule_collection(&'static self, scheduler: &GCWorkScheduler) { - let pause = self.select_collection_kind(); + let pause = if self.concurrent_marking_in_progress() { + Pause::FinalMark + } else if self.should_do_full_gc.load(Ordering::SeqCst) { + Pause::Full + } else { + Pause::InitialMark + }; + self.current_pause.store(Some(pause), Ordering::SeqCst); match pause { @@ -196,6 +203,7 @@ impl Plan for ConcurrentImmix { } self.previous_pause.store(Some(pause), Ordering::SeqCst); self.current_pause.store(None, Ordering::SeqCst); + self.should_do_full_gc.store(false, Ordering::SeqCst); info!("{:?} end", pause); } @@ -290,7 +298,7 @@ impl ConcurrentImmix { last_gc_was_defrag: AtomicBool::new(false), current_pause: Atomic::new(None), previous_pause: Atomic::new(None), - gc_cause: Atomic::new(None), + should_do_full_gc: AtomicBool::new(false), concurrent_marking_active: AtomicBool::new(false), }; @@ -299,10 +307,6 @@ impl ConcurrentImmix { immix } - fn select_collection_kind(&self) -> Pause { - self.gc_cause.load(Ordering::Acquire).unwrap() - } - fn set_ref_closure_buckets_enabled(&self, do_closure: bool) { let scheduler = &self.common.base.scheduler; scheduler.work_buckets[WorkBucketStage::VMRefClosure].set_enabled(do_closure); From acea83aa0425191e7ad849705d233030eaa6aef3 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 28 Aug 2025 17:55:48 +0800 Subject: 
[PATCH 40/59] Postpone full GC after FinalMark Currently it is unsafe to skip FinalMark and go directly to Full GC. We add a comment for that, and postpone full GC request to the next GC after FinalMark. --- src/plan/concurrent/immix/global.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 4e3345ee4d..487956a627 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -122,6 +122,9 @@ impl Plan for ConcurrentImmix { fn schedule_collection(&'static self, scheduler: &GCWorkScheduler) { let pause = if self.concurrent_marking_in_progress() { + // FIXME: Currently it is unsafe to bypass `FinalMark` and go directly from `InitialMark` to `Full`. + // It is related to defragmentation. See https://github.com/mmtk/mmtk-core/issues/1357 for more details. + // We currently force `FinalMark` to happen if the last pause is `InitialMark`. Pause::FinalMark } else if self.should_do_full_gc.load(Ordering::SeqCst) { Pause::Full @@ -203,7 +206,14 @@ impl Plan for ConcurrentImmix { } self.previous_pause.store(Some(pause), Ordering::SeqCst); self.current_pause.store(None, Ordering::SeqCst); - self.should_do_full_gc.store(false, Ordering::SeqCst); + if pause != Pause::FinalMark { + self.should_do_full_gc.store(false, Ordering::SeqCst); + } else { + // FIXME: Currently it is unsafe to trigger full GC during concurrent marking. + // See `Self::schedule_collection`. + // We keep the value of `self.should_do_full_gc` so that if full GC is triggered, + // the next GC will be full GC. + } info!("{:?} end", pause); } From 70ef1db7e064d8517b5ba47419cea007940ed864 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Tue, 9 Sep 2025 22:03:11 +0800 Subject: [PATCH 41/59] Change active to SATBBarrier-specific We remove the `Barrier::active()` property, and add a property `SATBBarrier::weak_ref_barrier_enabled` that is specific to `SATBBarrier`. --- src/plan/barriers.rs | 29 +++++++-------------------- src/plan/concurrent/immix/mutator.rs | 30 ++++++++++++++++++++-------- 2 files changed, 29 insertions(+), 30 deletions(-) diff --git a/src/plan/barriers.rs b/src/plan/barriers.rs index 9758d52801..9a77f2e25d 100644 --- a/src/plan/barriers.rs +++ b/src/plan/barriers.rs @@ -46,16 +46,6 @@ impl BarrierSelector { /// As a performance optimization, the binding may also choose to port the fast-path to the VM side, /// and call the slow-path (`object_reference_write_slow`) only if necessary. pub trait Barrier: 'static + Send + Downcast { - /// Check if the barrier is active. For barriers that are always active, this always returns true. - fn is_active(&self) -> bool { - true - } - - /// Set the barrier active or inactive. For barriers that are always active, this should not be called. - fn set_active(&mut self, _val: bool) { - unreachable!() - } - /// Flush thread-local states like buffers or remembered sets. fn flush(&mut self) {} @@ -284,8 +274,7 @@ impl Barrier for ObjectBarrier { /// A SATB (Snapshot-At-The-Beginning) barrier implementation. /// This barrier is basically a pre-write object barrier with a weak reference loading barrier. pub struct SATBBarrier { - /// This only affects the weak reference load barrier. - active: bool, + weak_ref_barrier_enabled: bool, semantics: S, } @@ -293,11 +282,15 @@ impl SATBBarrier { /// Create a new SATBBarrier with the given semantics. 
pub fn new(semantics: S) -> Self { Self { - active: false, + weak_ref_barrier_enabled: false, semantics, } } + pub(crate) fn set_weak_ref_barrier_enabled(&mut self, value: bool) { + self.weak_ref_barrier_enabled = value; + } + fn object_is_unlogged(&self, object: ObjectReference) -> bool { // unsafe { S::UNLOG_BIT_SPEC.load::(object, None) != 0 } S::UNLOG_BIT_SPEC.load_atomic::(object, None, Ordering::SeqCst) != 0 @@ -305,20 +298,12 @@ impl SATBBarrier { } impl Barrier for SATBBarrier { - fn set_active(&mut self, val: bool) { - self.active = val; - } - - fn is_active(&self) -> bool { - self.active - } - fn flush(&mut self) { self.semantics.flush(); } fn load_weak_reference(&mut self, o: ObjectReference) { - if self.active { + if self.weak_ref_barrier_enabled { self.semantics.load_weak_reference(o) } } diff --git a/src/plan/concurrent/immix/mutator.rs b/src/plan/concurrent/immix/mutator.rs index daf68b3b71..291c63e72f 100644 --- a/src/plan/concurrent/immix/mutator.rs +++ b/src/plan/concurrent/immix/mutator.rs @@ -17,6 +17,11 @@ use crate::vm::VMBinding; use crate::MMTK; use enum_map::EnumMap; +type BarrierSemanticsType = + SATBBarrierSemantics, { crate::policy::immix::TRACE_KIND_FAST }>; + +type BarrierType = SATBBarrier>; + pub fn concurrent_immix_mutator_release( mutator: &mut Mutator, _tls: VMWorkerThread, @@ -37,7 +42,11 @@ pub fn concurrent_immix_mutator_release( // Deactivate SATB if current_pause == Pause::Full || current_pause == Pause::FinalMark { debug!("Deactivate SATB barrier active for {:?}", mutator as *mut _); - mutator.barrier.set_active(false); + mutator + .barrier + .downcast_mut::>() + .unwrap() + .set_weak_ref_barrier_enabled(false); } } @@ -61,7 +70,11 @@ pub fn concurent_immix_mutator_prepare( // Activate SATB if current_pause == Pause::InitialMark { debug!("Activate SATB barrier active for {:?}", mutator as *mut _); - mutator.barrier.set_active(true); + mutator + .barrier + .downcast_mut::>() + .unwrap() + .set_weak_ref_barrier_enabled(true); } } @@ -100,17 +113,18 @@ pub fn create_concurrent_immix_mutator( let builder = MutatorBuilder::new(mutator_tls, mmtk, config); let mut mutator = builder - .barrier(Box::new(SATBBarrier::new(SATBBarrierSemantics::< - VM, - ConcurrentImmix, - { crate::policy::immix::TRACE_KIND_FAST }, - >::new(mmtk, mutator_tls)))) + .barrier(Box::new(SATBBarrier::new(BarrierSemanticsType::::new( + mmtk, + mutator_tls, + )))) .build(); // Set barrier active, based on whether concurrent marking is in progress mutator .barrier - .set_active(immix.is_concurrent_marking_active()); + .downcast_mut::>() + .unwrap() + .set_weak_ref_barrier_enabled(immix.is_concurrent_marking_active()); mutator } From 6dfde0a1a998084d32f9a1b7ff8409979f7bebb4 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 10 Sep 2025 11:33:23 +0800 Subject: [PATCH 42/59] Label SlotIterator for refactoring --- src/plan/concurrent/barrier.rs | 2 +- .../concurrent/concurrent_marking_work.rs | 2 +- src/plan/mod.rs | 2 +- src/plan/tracing.rs | 32 ++++++++----------- 4 files changed, 17 insertions(+), 21 deletions(-) diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs index a0ab577cc9..0bd8995564 100644 --- a/src/plan/concurrent/barrier.rs +++ b/src/plan/concurrent/barrier.rs @@ -156,7 +156,7 @@ impl + PlanTraceObject, const KIND } fn object_probable_write_slow(&mut self, obj: ObjectReference) { - crate::plan::tracing::SlotIterator::::iterate_fields(obj, self.tls.0, |s| { + crate::plan::tracing::SlotIterator::::iterate_fields(obj, self.tls.0, |s| { 
self.enqueue_node(Some(obj), s, None); }); } diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index 7069815213..fca994a7bc 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -74,7 +74,7 @@ impl + PlanTraceObject, const KIND } fn scan_and_enqueue(&mut self, object: ObjectReference) { - crate::plan::tracing::SlotIterator::::iterate_fields( + crate::plan::tracing::SlotIterator::::iterate_fields( object, self.worker().tls.0, |s| { diff --git a/src/plan/mod.rs b/src/plan/mod.rs index aa46e686f9..85e69cf200 100644 --- a/src/plan/mod.rs +++ b/src/plan/mod.rs @@ -39,7 +39,7 @@ pub use plan_constraints::PlanConstraints; pub(crate) use plan_constraints::DEFAULT_PLAN_CONSTRAINTS; mod tracing; -pub use tracing::{ObjectQueue, ObjectsClosure, SlotIterator, VectorObjectQueue, VectorQueue}; +pub use tracing::{ObjectQueue, ObjectsClosure, VectorObjectQueue, VectorQueue}; /// Generational plans (with a copying nursery) mod generational; diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index f7156193e2..20f965c254 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -148,27 +148,23 @@ impl Drop for ObjectsClosure<'_, E> { } /// For iterating over the slots of an object. -pub struct SlotIterator { - f: F, +/// +/// FIXME: This type iterates slots, but all of its current use cases only care about the values in the slots. +/// And it currently only works if the object supports slot enqueuing (i.e. `Scanning::scan_object` is implemented). +/// We may refactor the interface according to https://github.com/mmtk/mmtk-core/issues/1375 +pub(crate) struct SlotIterator { _p: PhantomData, } -impl SlotVisitor for SlotIterator { - fn visit_slot(&mut self, slot: VM::VMSlot) { - (self.f)(slot); - } -} - -impl SlotIterator { +impl SlotIterator { /// Iterate over the slots of an object by applying a function to each slot. - pub fn iterate_fields(o: ObjectReference, _tls: VMThread, f: F) { - let mut x = SlotIterator:: { f, _p: PhantomData }; - >::scan_object( - // FIXME: We should use tls from the arguments. - // See https://github.com/mmtk/mmtk-core/issues/1375 - VMWorkerThread(VMThread::UNINITIALIZED), - o, - &mut x, - ); + pub fn iterate_fields(object: ObjectReference, _tls: VMThread, mut f: F) { + // FIXME: We should use tls from the arguments. + // See https://github.com/mmtk/mmtk-core/issues/1375 + let fake_tls = VMWorkerThread(VMThread::UNINITIALIZED); + if !>::support_slot_enqueuing(fake_tls, object) { + panic!("SlotIterator::iterate_fields cannot be used on objects that don't support slot-enqueuing"); + } + >::scan_object(fake_tls, object, &mut f); } } From a33ba5aeda9de178f8b07fb997b83a4f4a19bc94 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 10 Sep 2025 12:41:55 +0800 Subject: [PATCH 43/59] Fix some comments --- src/plan/barriers.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/plan/barriers.rs b/src/plan/barriers.rs index 9a77f2e25d..5a0bf59493 100644 --- a/src/plan/barriers.rs +++ b/src/plan/barriers.rs @@ -19,7 +19,7 @@ use downcast_rs::Downcast; pub enum BarrierSelector { /// No barrier is used. NoBarrier, - /// Object remembering psot-write barrier is used. + /// Object remembering post-write barrier is used. ObjectBarrier, /// Object remembering pre-write barrier with weak reference loading barrier. // TODO: We might be able to generalize this to object remembering pre-write barrier. 
@@ -177,6 +177,7 @@ pub trait BarrierSemantics: 'static + Send { /// Object will probably be modified fn object_probable_write_slow(&mut self, _obj: ObjectReference) {} + /// Loading from a weak reference field fn load_weak_reference(&mut self, _o: ObjectReference) {} } @@ -292,7 +293,6 @@ impl SATBBarrier { } fn object_is_unlogged(&self, object: ObjectReference) -> bool { - // unsafe { S::UNLOG_BIT_SPEC.load::(object, None) != 0 } S::UNLOG_BIT_SPEC.load_atomic::(object, None, Ordering::SeqCst) != 0 } } From d7ae8e229d8b00fff7e1672055893a1710ac4aa9 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 10 Sep 2025 12:42:26 +0800 Subject: [PATCH 44/59] No longer expose concrete barriers to the VM binding. --- src/plan/mod.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/plan/mod.rs b/src/plan/mod.rs index 85e69cf200..d0c56dea0f 100644 --- a/src/plan/mod.rs +++ b/src/plan/mod.rs @@ -15,8 +15,6 @@ mod barriers; pub use barriers::BarrierSelector; -pub use barriers::ObjectBarrier; -pub use barriers::SATBBarrier; pub(crate) mod gc_requester; From 8acb5af9c42e3f4af7e1ade2a1b4d79e00e2a206 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 10 Sep 2025 12:59:03 +0800 Subject: [PATCH 45/59] Comments and formatting --- src/plan/concurrent/global.rs | 3 +++ src/plan/concurrent/mod.rs | 6 ++++++ src/scheduler/gc_work.rs | 1 - src/scheduler/scheduler.rs | 1 + 4 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/plan/concurrent/global.rs b/src/plan/concurrent/global.rs index a2b06f67c0..bcfbb40d2d 100644 --- a/src/plan/concurrent/global.rs +++ b/src/plan/concurrent/global.rs @@ -1,7 +1,10 @@ use crate::plan::concurrent::Pause; use crate::plan::Plan; +/// Trait for a concurrent plan. pub trait ConcurrentPlan: Plan { + /// Return `true` if concurrent work (such as concurrent marking) is in progress. fn concurrent_work_in_progress(&self) -> bool; + /// Return the current pause kind. `None` if not in a pause. fn current_pause(&self) -> Option; } diff --git a/src/plan/concurrent/mod.rs b/src/plan/concurrent/mod.rs index a2a95cf307..1c0a5b6a09 100644 --- a/src/plan/concurrent/mod.rs +++ b/src/plan/concurrent/mod.rs @@ -12,8 +12,14 @@ use bytemuck::NoUninit; #[repr(u8)] #[derive(Debug, PartialEq, Eq, Copy, Clone, NoUninit)] pub enum Pause { + /// A whole GC (including root scanning, closure, releasing, etc.) happening in a single pause. + /// + /// Not to be confused with a "full-heap" GC in generational collectors. `Pause::Full` can also + /// refer to a nursery GC that happens in a single pause. Full = 1, + /// The initial pause before concurrent marking. InitialMark, + /// The pause after concurrent marking. FinalMark, } diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 32ce29d567..bfc14cb7ec 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -236,7 +236,6 @@ impl GCWork for StopMutators { } }); trace!("stop_all_mutators end"); - // mmtk.scheduler.set_in_gc_pause(true); mmtk.get_plan().notify_mutators_paused(&mmtk.scheduler); mmtk.scheduler.notify_mutators_paused(mmtk); mmtk.scheduler.work_buckets[WorkBucketStage::Prepare].add(ScanVMSpecificRoots::::new()); diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index 7ae2b5a2c0..facc14f8d5 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -611,6 +611,7 @@ impl GCWorkScheduler { // reset the logging info at the end of each GC mmtk.slot_logger.reset(); } + // Reset the triggering information.
mmtk.state.reset_collection_trigger(); From 273144941cef098b975ef15f42d4a059677e7062 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 10 Sep 2025 13:00:17 +0800 Subject: [PATCH 46/59] Remove dead code. We currently don't have the concept of "concurrent workers". --- src/scheduler/scheduler.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/scheduler/scheduler.rs b/src/scheduler/scheduler.rs index facc14f8d5..38d387b3c4 100644 --- a/src/scheduler/scheduler.rs +++ b/src/scheduler/scheduler.rs @@ -668,8 +668,4 @@ impl GCWorkScheduler { false } } - - pub fn wakeup_all_concurrent_workers(&self) { - self.worker_monitor.notify_work_available(true); - } } From ab75860c586ea516206f73275e7925968f2e4112 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 10 Sep 2025 13:13:00 +0800 Subject: [PATCH 47/59] Extract method `eager_mark_lines` --- src/util/alloc/immix_allocator.rs | 47 +++++++++++++++---------------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs index 597b1db4ff..a3da9e1182 100644 --- a/src/util/alloc/immix_allocator.rs +++ b/src/util/alloc/immix_allocator.rs @@ -7,7 +7,7 @@ use crate::policy::immix::ImmixSpace; use crate::policy::space::Space; use crate::util::alloc::allocator::get_maximum_aligned_size; use crate::util::alloc::Allocator; -use crate::util::linear_scan::Region; +use crate::util::linear_scan::{Region, RegionIterator}; use crate::util::opaque_pointer::VMThread; use crate::util::rust_util::unlikely; use crate::util::Address; @@ -267,18 +267,7 @@ impl ImmixAllocator { }; // mark objects if concurrent marking is active if self.immix_space().should_allocate_as_live() { - let state = self - .space - .line_mark_state - .load(std::sync::atomic::Ordering::Acquire); - - for line in - crate::util::linear_scan::RegionIterator::::new(start_line, end_line) - { - line.mark(state); - } - - Line::initialize_mark_table_as_marked::(start_line..end_line); + self.eager_mark_lines(start_line, end_line); } return true; } else { @@ -322,17 +311,7 @@ impl ImmixAllocator { .bzero_metadata(block.start(), crate::policy::immix::block::Block::BYTES); // mark objects if concurrent marking is active if self.immix_space().should_allocate_as_live() { - let state = self - .space - .line_mark_state - .load(std::sync::atomic::Ordering::Acquire); - for line in block.lines() { - line.mark(state); - } - - Line::initialize_mark_table_as_marked::( - block.start_line()..block.end_line(), - ); + self.eager_mark_lines(block.start_line(), block.end_line()); } if self.request_for_large { self.large_bump_pointer.cursor = block.start(); @@ -346,6 +325,26 @@ impl ImmixAllocator { } } + /// Eagerly set all line mark states and all side mark bits in the gap. + /// + /// This is useful during concurrent marking. By doing this, the GC workers running concurrently + /// will conservatively consider all objects that will be bump-allocated in the gap as live, and + /// the mutator doesn't need to explicitly mark bump-allocated objects in the fast path. + fn eager_mark_lines(&mut self, start_line: Line, end_line: Line) { + debug_assert!(self.immix_space().should_allocate_as_live()); + + let state = self + .space + .line_mark_state + .load(std::sync::atomic::Ordering::Acquire); + + for line in RegionIterator::::new(start_line, end_line) { + line.mark(state); + } + + Line::initialize_mark_table_as_marked::(start_line..end_line); + } + /// Return whether the TLAB has been exhausted and we need to acquire a new block.
Assumes that /// the buffer limits have been restored using [`ImmixAllocator::restore_limit_for_stress`]. /// Note that this function may implicitly change the limits of the allocator. From d96d2e550fccda9468a30e41263584b4e3750326 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 10 Sep 2025 13:15:33 +0800 Subject: [PATCH 48/59] Minor change --- src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 7a77ea6c65..afe094885f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -32,7 +32,6 @@ extern crate static_assertions; extern crate probe; mod mmtk; - pub use mmtk::MMTKBuilder; pub(crate) use mmtk::MMAPPER; pub use mmtk::MMTK; From 4e1db4147c931a977a2266860878efeb1e89407e Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 10 Sep 2025 14:00:00 +0800 Subject: [PATCH 49/59] Record pause kind in eBPF trace --- src/plan/concurrent/immix/global.rs | 2 ++ tools/tracing/timeline/capture.bt | 4 ++++ tools/tracing/timeline/visualize.py | 16 ++++++++++++++++ 3 files changed, 22 insertions(+) diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 487956a627..473bfb5c7c 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -134,6 +134,8 @@ impl Plan for ConcurrentImmix { self.current_pause.store(Some(pause), Ordering::SeqCst); + probe!(mmtk, concurrent_pause_determined, pause as usize); + match pause { Pause::Full => { // Ref closure buckets is disabled by initial mark, and needs to be re-enabled for full GC before diff --git a/tools/tracing/timeline/capture.bt b/tools/tracing/timeline/capture.bt index b898d96737..19fd13d381 100644 --- a/tools/tracing/timeline/capture.bt +++ b/tools/tracing/timeline/capture.bt @@ -136,6 +136,10 @@ usdt:$MMTK:mmtk:add_schedule_collection_packet { printf("add_schedule_collection_packet,i,%d,%lu\n", tid, nsecs); } +usdt:$MMTK:mmtk:concurrent_pause_determined { + printf("concurrent_pause_determined,meta,%d,%lu,%lu\n", tid, nsecs, arg0); +} + usdt:$MMTK:mmtk:finalization { if (@enable_print) { printf("finalization,meta,%d,%lu,%lu,%lu,%lu,%lu\n", tid, nsecs, arg0, arg1, arg2, arg3); diff --git a/tools/tracing/timeline/visualize.py b/tools/tracing/timeline/visualize.py index e06d89bb73..1d9effc4c5 100755 --- a/tools/tracing/timeline/visualize.py +++ b/tools/tracing/timeline/visualize.py @@ -22,6 +22,11 @@ class Semantics(Enum): WEAK = 1 PHANTOM = 2 +class Pause(Enum): + FULL = 1 + INITIAL_MARK = 2 + FINAL_MARK = 3 + def get_args(): parser = argparse.ArgumentParser( description=""" @@ -275,6 +280,17 @@ def enrich_meta(self, name, tid, ts, gc, wp, args): } } + case "concurrent_pause_determined": + pause_int = int(args[0]) + if pause_int in Pause: + pause = Pause(pause_int).name + else: + pause = f"(Unknown:{pause_int})" + + gc["args"] |= { + "pause": pause, + } + case "sweep_chunk": wp["args"] |= { "allocated_blocks": int(args[0]), From c9d49716ee84ad7345e00594f660e721fa73b383 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 10 Sep 2025 15:01:25 +0800 Subject: [PATCH 50/59] Move FIXME out of doc comment --- src/plan/tracing.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index 20f965c254..792e142c76 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -148,10 +148,9 @@ impl Drop for ObjectsClosure<'_, E> { } /// For iterating over the slots of an object. -/// -/// FIXME: This type iterates slots, but all of its current use cases only care about the values in the slots. 
-/// And it currently only works if the object supports slot enqueuing (i.e. `Scanning::scan_object` is implemented). -/// We may refactor the interface according to https://github.com/mmtk/mmtk-core/issues/1375 +// FIXME: This type iterates slots, but all of its current use cases only care about the values in the slots. +// And it currently only works if the object supports slot enqueuing (i.e. `Scanning::scan_object` is implemented). +// We may refactor the interface according to pub(crate) struct SlotIterator { _p: PhantomData, } From ad0d88ec7b4286d86437c22505871093c14246d6 Mon Sep 17 00:00:00 2001 From: Yi Lin Date: Thu, 11 Sep 2025 05:59:52 +0000 Subject: [PATCH 51/59] Remove duplicate clear/set_side_log_bits from policies --- src/policy/copyspace.rs | 14 -------------- src/policy/immix/immixspace.rs | 14 -------------- src/policy/immortalspace.rs | 14 -------------- src/policy/largeobjectspace.rs | 16 ---------------- src/policy/marksweepspace/native_ms/global.rs | 14 -------------- 5 files changed, 72 deletions(-) diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs index 31e911a803..e34c799c7e 100644 --- a/src/policy/copyspace.rs +++ b/src/policy/copyspace.rs @@ -226,20 +226,6 @@ impl CopySpace { self.from_space.store(false, Ordering::SeqCst); } - pub fn clear_side_log_bits(&self) { - let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); - for (start, size) in self.pr.iterate_allocated_regions() { - log_bit.bzero_metadata(start, size); - } - } - - pub fn set_side_log_bits(&self) { - let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); - for (start, size) in self.pr.iterate_allocated_regions() { - log_bit.bset_metadata(start, size); - } - } - fn is_from_space(&self) -> bool { self.from_space.load(Ordering::SeqCst) } diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index dba4922093..bc3a585eff 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -527,20 +527,6 @@ impl ImmixSpace { did_defrag } - pub fn clear_side_log_bits(&self) { - let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); - for chunk in self.chunk_map.all_chunks() { - log_bit.bzero_metadata(chunk.start(), Chunk::BYTES); - } - } - - pub fn set_side_log_bits(&self) { - let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); - for chunk in self.chunk_map.all_chunks() { - log_bit.bset_metadata(chunk.start(), Chunk::BYTES); - } - } - /// Generate chunk sweep tasks fn generate_sweep_tasks(&self) -> Vec>> { self.defrag.mark_histograms.lock().clear(); diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs index b708a405ee..325ddc1c81 100644 --- a/src/policy/immortalspace.rs +++ b/src/policy/immortalspace.rs @@ -187,20 +187,6 @@ impl ImmortalSpace { self.mark_state.on_global_release::(); } - pub fn clear_side_log_bits(&self) { - let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); - for (start, size) in self.pr.iterate_allocated_regions() { - log_bit.bzero_metadata(start, size); - } - } - - pub fn set_side_log_bits(&self) { - let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); - for (start, size) in self.pr.iterate_allocated_regions() { - log_bit.bset_metadata(start, size); - } - } - pub fn trace_object( &self, queue: &mut Q, diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index 6909c2dea3..2316a0634c 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -283,22 
+283,6 @@ impl LargeObjectSpace { } } - pub fn clear_side_log_bits(&self) { - let mut enumator = ClosureObjectEnumerator::<_, VM>::new(|object| { - VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.clear::(object, Ordering::SeqCst); - }); - self.treadmill.enumerate_objects(&mut enumator); - } - - pub fn set_side_log_bits(&self) { - debug_assert!(self.treadmill.is_from_space_empty()); - debug_assert!(self.treadmill.is_nursery_empty()); - let mut enumator = ClosureObjectEnumerator::<_, VM>::new(|object| { - VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); - }); - self.treadmill.enumerate_objects(&mut enumator); - } - pub fn prepare(&mut self, full_heap: bool) { if full_heap { self.mark_state = MARK_BIT - self.mark_state; diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index fdeaf53c40..93697e8b04 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -433,20 +433,6 @@ impl MarkSweepSpace { .bulk_add(work_packets); } - pub fn clear_side_log_bits(&self) { - let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); - for chunk in self.chunk_map.all_chunks() { - log_bit.bzero_metadata(chunk.start(), Chunk::BYTES); - } - } - - pub fn set_side_log_bits(&self) { - let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec(); - for chunk in self.chunk_map.all_chunks() { - log_bit.bset_metadata(chunk.start(), Chunk::BYTES); - } - } - pub fn release(&mut self) { let num_mutators = VM::VMActivePlan::number_of_mutators(); // all ReleaseMutator work packets plus the ReleaseMarkSweepSpace packet From be33efef43aec1a7f8bdeac0c89835969b4e554c Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 11 Sep 2025 18:45:02 +0800 Subject: [PATCH 52/59] Set and clear side unlog bits in parallel. 
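
Instead of bulk-setting or bulk-clearing the side unlog bits synchronously inside `prepare`/`release`, plans now pass an `UnlogBitsOperation` down to the spaces, and `CommonPlan` schedules dedicated work packets (`SetCommonPlanUnlogBits` / `ClearCommonPlanUnlogBits`) into the Prepare/Release buckets, so the bit manipulation can run in parallel with other GC work.

A sketch of the operation type as it is used in this patch (the actual definition lives in the new `src/util/metadata/log_bit.rs`, which is not shown here; the derive list is an illustrative assumption):

    /// What to do with the side unlog bits while preparing or releasing (sketch).
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub enum UnlogBitsOperation {
        /// Leave the unlog bits untouched.
        NoOp,
        /// Bulk-set the unlog bits, e.g. at InitialMark, so the SATB barrier
        /// fires for existing objects.
        BulkSet,
        /// Bulk-clear the unlog bits, e.g. at FinalMark or in a full GC, so
        /// the SATB barrier slow path is no longer taken.
        BulkClear,
    }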
--- src/plan/concurrent/immix/global.rs | 45 ++++++++++++++++++--------- src/plan/gc_work.rs | 32 +++++++++++++++++++ src/plan/generational/immix/global.rs | 22 ++++++++++--- src/plan/global.rs | 25 +++++++++++++-- src/plan/immix/global.rs | 38 +++++++++++++++++----- src/plan/mod.rs | 2 ++ src/plan/sticky/immix/global.rs | 29 ++++++++++++----- src/policy/immix/immixspace.rs | 45 ++++++++++++++++----------- src/policy/largeobjectspace.rs | 2 -- src/util/metadata/log_bit.rs | 30 ++++++++++++++++++ 10 files changed, 214 insertions(+), 56 deletions(-) create mode 100644 src/plan/gc_work.rs diff --git a/src/plan/concurrent/immix/global.rs b/src/plan/concurrent/immix/global.rs index 473bfb5c7c..118132f46d 100644 --- a/src/plan/concurrent/immix/global.rs +++ b/src/plan/concurrent/immix/global.rs @@ -11,6 +11,7 @@ use crate::plan::immix::mutator::ALLOCATOR_MAPPING; use crate::plan::AllocationSemantics; use crate::plan::Plan; use crate::plan::PlanConstraints; +use crate::policy::immix::defrag::StatsForDefrag; use crate::policy::immix::ImmixSpaceArgs; use crate::policy::immix::TRACE_KIND_DEFRAG; use crate::policy::immix::TRACE_KIND_FAST; @@ -23,6 +24,7 @@ use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; use crate::util::heap::gc_trigger::SpaceStats; use crate::util::heap::VMRequest; +use crate::util::metadata::log_bit::UnlogBitsOperation; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::vm::ObjectModel; use crate::vm::VMBinding; @@ -163,20 +165,23 @@ impl Plan for ConcurrentImmix { self.common.prepare(tls, true); self.immix_space.prepare( true, - Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), + Some(StatsForDefrag::new(self)), + // Ignore unlog bits in full GCs because unlog bits should be all 0. + UnlogBitsOperation::NoOp, ); } Pause::InitialMark => { - // Bulk set log bits so SATB barrier will be triggered on the existing objects. - if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() { - self.common.set_side_log_bits(); - self.immix_space.set_side_log_bits(); - } - self.common.prepare(tls, true); self.immix_space.prepare( true, - Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), + Some(StatsForDefrag::new(self)), + // Bulk set log bits so SATB barrier will be triggered on the existing objects. + UnlogBitsOperation::BulkSet, ); + + self.common.prepare(tls, true); + // Bulk set log bits so SATB barrier will be triggered on the existing objects. + self.common + .schedule_unlog_bits_op(UnlogBitsOperation::BulkSet); } Pause::FinalMark => (), } @@ -187,13 +192,25 @@ impl Plan for ConcurrentImmix { match pause { Pause::InitialMark => (), Pause::Full | Pause::FinalMark => { - // Bulk clear log bits so SATB barrier will not be triggered. - if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() { - self.immix_space.clear_side_log_bits(); - self.common.clear_side_log_bits(); - } + self.immix_space.release( + true, + // Bulk clear log bits so SATB barrier will not be triggered. + UnlogBitsOperation::BulkClear, + ); + self.common.release(tls, true); - self.immix_space.release(true); + + if pause == Pause::FinalMark { + // Bulk clear log bits so SATB barrier will not be triggered. + self.common + .schedule_unlog_bits_op(UnlogBitsOperation::BulkClear); + } else { + // Full pauses didn't set unlog bits in the first place, + // so there is no need to clear them. + // TODO: Currently InitialMark must be followed by a FinalMark. 
+ // If we allow upgrading a concurrent GC to a full STW GC, + // we will need to clear the unlog bits at an appropriate place. + } } } } diff --git a/src/plan/gc_work.rs b/src/plan/gc_work.rs new file mode 100644 index 0000000000..4998d1101c --- /dev/null +++ b/src/plan/gc_work.rs @@ -0,0 +1,32 @@ +//! This module holds work packets for `CommonPlan` and `BasePlan`, or other work packets not +//! directly related to scheduling. + +use crate::{plan::global::CommonPlan, scheduler::GCWork, vm::VMBinding}; + +pub(super) struct SetCommonPlanUnlogBits { + pub common_plan: &'static CommonPlan, +} + +impl GCWork for SetCommonPlanUnlogBits { + fn do_work( + &mut self, + _worker: &mut crate::scheduler::GCWorker, + _mmtk: &'static crate::MMTK, + ) { + self.common_plan.set_side_log_bits(); + } +} + +pub(super) struct ClearCommonPlanUnlogBits { + pub common_plan: &'static CommonPlan, +} + +impl GCWork for ClearCommonPlanUnlogBits { + fn do_work( + &mut self, + _worker: &mut crate::scheduler::GCWorker, + _mmtk: &'static crate::MMTK, + ) { + self.common_plan.clear_side_log_bits(); + } +} diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index 41e7a70768..02e9df1b9f 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -10,6 +10,7 @@ use crate::plan::AllocationSemantics; use crate::plan::Plan; use crate::plan::PlanConstraints; use crate::policy::gc_work::TraceKind; +use crate::policy::immix::defrag::StatsForDefrag; use crate::policy::immix::ImmixSpace; use crate::policy::immix::ImmixSpaceArgs; use crate::policy::immix::{TRACE_KIND_DEFRAG, TRACE_KIND_FAST}; @@ -20,6 +21,7 @@ use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; use crate::util::heap::gc_trigger::SpaceStats; use crate::util::heap::VMRequest; +use crate::util::metadata::log_bit::UnlogBitsOperation; use crate::util::Address; use crate::util::ObjectReference; use crate::util::VMWorkerThread; @@ -129,13 +131,15 @@ impl Plan for GenImmix { let full_heap = !self.gen.is_current_gc_nursery(); self.gen.prepare(tls); if full_heap { - if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() { - self.immix_space.clear_side_log_bits(); - } self.immix_space.prepare( full_heap, - Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), + Some(StatsForDefrag::new(self)), + // Bulk clear unlog bits so that we will reconstruct them. + UnlogBitsOperation::BulkClear, ); + } else { + // We don't do anything special to unlog bits during nursery GC + // because ProcessModBuf will set the unlog bits back. } } @@ -143,8 +147,16 @@ impl Plan for GenImmix { let full_heap = !self.gen.is_current_gc_nursery(); self.gen.release(tls); if full_heap { - self.immix_space.release(full_heap); + self.immix_space.release( + full_heap, + // We reconstructed unlog bits during tracing. Keep them. + UnlogBitsOperation::NoOp, + ); + } else { + // We don't do anything special to unlog bits during nursery GC + // because ProcessModBuf has set the unlog bits back.
} + self.last_gc_was_full_heap .store(full_heap, Ordering::Relaxed); } diff --git a/src/plan/global.rs b/src/plan/global.rs index 0572cdfe5c..5cb6ce9bb9 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -3,6 +3,7 @@ use super::PlanConstraints; use crate::global_state::GlobalState; use crate::mmtk::MMTK; +use crate::plan::gc_work::{ClearCommonPlanUnlogBits, SetCommonPlanUnlogBits}; use crate::plan::tracing::ObjectQueue; use crate::plan::Mutator; use crate::policy::immortalspace::ImmortalSpace; @@ -19,6 +20,7 @@ use crate::util::heap::layout::Mmapper; use crate::util::heap::layout::VMMap; use crate::util::heap::HeapMeta; use crate::util::heap::VMRequest; +use crate::util::metadata::log_bit::UnlogBitsOperation; use crate::util::metadata::side_metadata::SideMetadataSanity; use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::util::options::Options; @@ -753,6 +755,25 @@ impl CommonPlan { self.base.release(tls, full_heap) } + pub(crate) fn schedule_unlog_bits_op(&mut self, unlog_bits_op: UnlogBitsOperation) { + if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() { + // # Safety: CommonPlan reference is always valid within this collection cycle. + let common_plan = unsafe { &*(self as *const CommonPlan) }; + + match unlog_bits_op { + UnlogBitsOperation::NoOp => {} + UnlogBitsOperation::BulkSet => { + self.base.scheduler.work_buckets[WorkBucketStage::Prepare] + .add(SetCommonPlanUnlogBits { common_plan }); + } + UnlogBitsOperation::BulkClear => { + self.base.scheduler.work_buckets[WorkBucketStage::Release] + .add(ClearCommonPlanUnlogBits { common_plan }); + } + } + } + } + pub fn clear_side_log_bits(&self) { self.immortal.clear_side_log_bits(); self.los.clear_side_log_bits(); @@ -807,7 +828,7 @@ impl CommonPlan { } else if #[cfg(feature = "marksweep_as_nonmoving")] { self.nonmoving.prepare(_full_heap); } else { - self.nonmoving.prepare(_full_heap, None); + self.nonmoving.prepare(_full_heap, None, UnlogBitsOperation::NoOp); } } } @@ -819,7 +840,7 @@ impl CommonPlan { } else if #[cfg(feature = "marksweep_as_nonmoving")] { self.nonmoving.prepare(_full_heap); } else { - self.nonmoving.release(_full_heap); + self.nonmoving.release(_full_heap, UnlogBitsOperation::NoOp); } } } diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 136db58c43..f64d463ab4 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -15,6 +15,7 @@ use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; use crate::util::heap::gc_trigger::SpaceStats; use crate::util::heap::VMRequest; +use crate::util::metadata::log_bit::UnlogBitsOperation; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::vm::VMBinding; use crate::{policy::immix::ImmixSpace, util::opaque_pointer::VMWorkerThread}; @@ -84,17 +85,11 @@ impl Plan for Immix { } fn prepare(&mut self, tls: VMWorkerThread) { - self.common.prepare(tls, true); - self.immix_space.prepare( - true, - Some(crate::policy::immix::defrag::StatsForDefrag::new(self)), - ); + self.prepare_inner(tls, UnlogBitsOperation::NoOp) } fn release(&mut self, tls: VMWorkerThread) { - self.common.release(tls, true); - // release the collected region - self.immix_space.release(true); + self.release_inner(tls, UnlogBitsOperation::NoOp); } fn end_of_gc(&mut self, tls: VMWorkerThread) { @@ -208,4 +203,31 @@ impl Immix { pub(in crate::plan) fn set_last_gc_was_defrag(&self, defrag: bool, order: Ordering) { self.last_gc_was_defrag.store(defrag, order) } + + /// Prepare with unlog-bit operation. 
+    /// Some Immix-derived plans may need to set/clear unlog bits when preparing.
+    pub(in crate::plan) fn prepare_inner(
+        &mut self,
+        tls: VMWorkerThread,
+        unlog_bits_op: UnlogBitsOperation,
+    ) {
+        self.common.prepare(tls, true);
+        self.immix_space.prepare(
+            true,
+            Some(crate::policy::immix::defrag::StatsForDefrag::new(self)),
+            unlog_bits_op,
+        );
+    }
+
+    /// Release with unlog-bit operation.
+    /// Some Immix-derived plans may need to set/clear unlog bits when releasing.
+    pub(in crate::plan) fn release_inner(
+        &mut self,
+        tls: VMWorkerThread,
+        unlog_bits_op: UnlogBitsOperation,
+    ) {
+        self.common.release(tls, true);
+        // release the collected region
+        self.immix_space.release(true, unlog_bits_op);
+    }
 }
diff --git a/src/plan/mod.rs b/src/plan/mod.rs
index d0c56dea0f..ea7455e9eb 100644
--- a/src/plan/mod.rs
+++ b/src/plan/mod.rs
@@ -18,6 +18,8 @@ pub use barriers::BarrierSelector;
 
 pub(crate) mod gc_requester;
 
+mod gc_work;
+
 mod global;
 pub(crate) use global::create_gc_worker_context;
 pub(crate) use global::create_mutator;
diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs
index 7dcd83ad3b..a3cac41c61 100644
--- a/src/plan/sticky/immix/global.rs
+++ b/src/plan/sticky/immix/global.rs
@@ -6,6 +6,7 @@ use crate::plan::immix;
 use crate::plan::PlanConstraints;
 use crate::policy::gc_work::TraceKind;
 use crate::policy::gc_work::TRACE_KIND_TRANSITIVE_PIN;
+use crate::policy::immix::defrag::StatsForDefrag;
 use crate::policy::immix::ImmixSpace;
 use crate::policy::immix::TRACE_KIND_FAST;
 use crate::policy::sft::SFT;
@@ -14,6 +15,7 @@ use crate::util::copy::CopyConfig;
 use crate::util::copy::CopySelector;
 use crate::util::copy::CopySemantics;
 use crate::util::heap::gc_trigger::SpaceStats;
+use crate::util::metadata::log_bit::UnlogBitsOperation;
 use crate::util::metadata::side_metadata::SideMetadataContext;
 use crate::util::statistics::counter::EventCounter;
 use crate::vm::ObjectModel;
@@ -118,24 +120,37 @@ impl Plan for StickyImmix {
             // Prepare both large object space and immix space
             self.immix.immix_space.prepare(
                 false,
-                Some(crate::policy::immix::defrag::StatsForDefrag::new(self)),
+                Some(StatsForDefrag::new(self)),
+                // We don't do anything special to unlog bits during nursery GC
+                // because ProcessModBuf will set the unlog bits back.
+                UnlogBitsOperation::NoOp,
             );
             self.immix.common.los.prepare(false);
         } else {
             self.full_heap_gc_count.lock().unwrap().inc();
-            if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() {
-                self.immix.immix_space.clear_side_log_bits();
-            }
-            self.immix.prepare(tls);
+            self.immix.prepare_inner(
+                tls,
+                // We will reconstruct unlog bits during tracing.
+                UnlogBitsOperation::BulkClear,
+            );
         }
     }
 
     fn release(&mut self, tls: crate::util::VMWorkerThread) {
         if self.is_current_gc_nursery() {
-            self.immix.immix_space.release(false);
+            self.immix.immix_space.release(
+                false,
+                // We don't do anything special to unlog bits during nursery GC
+                // because ProcessModBuf has set the unlog bits back.
+                UnlogBitsOperation::NoOp,
+            );
             self.immix.common.los.release(false);
         } else {
-            self.immix.release(tls);
+            self.immix.release_inner(
+                tls,
+                // We reconstructed unlog bits during tracing. Keep them.
+                UnlogBitsOperation::NoOp,
+            );
         }
     }
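Taken together, the prepare/release changes in this patch amount to a small per-pause policy for the side unlog bits. The sketch below restates that policy as a hypothetical helper; it is exposition only, not mmtk-core API, and the toy enums stand in for the `Pause` and `UnlogBitsOperation` types introduced by this series.

// Condensed restatement of the ConcurrentImmix unlog-bit policy (illustrative only).
#[derive(Clone, Copy, Debug)]
enum Pause { Full, InitialMark, FinalMark }

#[derive(Clone, Copy, Debug)]
enum UnlogBitsOp { NoOp, BulkSet, BulkClear }

/// Returns (op applied when preparing, op applied when releasing) the immix space.
fn immix_unlog_ops(pause: Pause) -> (UnlogBitsOp, UnlogBitsOp) {
    match pause {
        // Unlog bits are all 0 going into a full GC, so prepare does nothing;
        // release still bulk-clears them so the SATB barrier stays quiet.
        Pause::Full => (UnlogBitsOp::NoOp, UnlogBitsOp::BulkClear),
        // Bulk set the bits so the SATB barrier fires on all existing objects.
        Pause::InitialMark => (UnlogBitsOp::BulkSet, UnlogBitsOp::NoOp),
        // Concurrent marking is finished; clear the bits to disable the barrier.
        Pause::FinalMark => (UnlogBitsOp::NoOp, UnlogBitsOp::BulkClear),
    }
}

Nursery collections of the generational plans deliberately map to NoOp on both sides: ProcessModBuf restores the unlog bits of remembered objects, so no bulk operation is needed.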
diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs
index dba4922093..18c4045352 100644
--- a/src/policy/immix/immixspace.rs
+++ b/src/policy/immix/immixspace.rs
@@ -14,6 +14,7 @@ use crate::util::heap::chunk_map::*;
 use crate::util::heap::BlockPageResource;
 use crate::util::heap::PageResource;
 use crate::util::linear_scan::{Region, RegionIterator};
+use crate::util::metadata::log_bit::UnlogBitsOperation;
 use crate::util::metadata::side_metadata::SideMetadataSpec;
 #[cfg(feature = "vo_bit")]
 use crate::util::metadata::vo_bit;
@@ -197,6 +198,9 @@ impl Space for ImmixSpace {
     }
 
     fn clear_side_log_bits(&self) {
+        // Remove the following warning if we have a legitimate use case.
+        warn!("ImmixSpace::clear_side_log_bits is single-threaded. Consider clearing side metadata in per-chunk work packets.");
+
         let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec();
         for chunk in self.chunk_map.all_chunks() {
             log_bit.bzero_metadata(chunk.start(), Chunk::BYTES);
@@ -204,6 +208,9 @@ impl Space for ImmixSpace {
     }
 
     fn set_side_log_bits(&self) {
+        // Remove the following warning if we have a legitimate use case.
+        warn!("ImmixSpace::set_side_log_bits is single-threaded. Consider setting side metadata in per-chunk work packets.");
+
         let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec();
         for chunk in self.chunk_map.all_chunks() {
             log_bit.bset_metadata(chunk.start(), Chunk::BYTES);
@@ -417,7 +424,12 @@ impl ImmixSpace {
         &self.scheduler
     }
 
-    pub fn prepare(&mut self, major_gc: bool, plan_stats: Option) {
+    pub(crate) fn prepare(
+        &mut self,
+        major_gc: bool,
+        plan_stats: Option,
+        unlog_bits_op: UnlogBitsOperation,
+    ) {
         if major_gc {
             // Update mark_state
             if VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.is_on_side() {
@@ -445,6 +457,7 @@ impl ImmixSpace {
                 } else {
                     None
                 },
+                unlog_bits_op,
             })
         });
         self.scheduler().work_buckets[WorkBucketStage::Prepare].bulk_add(work_packets);
@@ -496,7 +509,7 @@ impl ImmixSpace {
     }
 
     /// Release for the immix space.
-    pub fn release(&mut self, major_gc: bool) {
+    pub(crate) fn release(&mut self, major_gc: bool, unlog_bits_op: UnlogBitsOperation) {
         if major_gc {
             // Update line_unavail_state for hole searching after this GC.
             if !super::BLOCK_ONLY {
@@ -511,7 +524,7 @@ impl ImmixSpace {
             self.reusable_blocks.reset();
         }
         // Sweep chunks and blocks
-        let work_packets = self.generate_sweep_tasks();
+        let work_packets = self.generate_sweep_tasks(unlog_bits_op);
         self.scheduler().work_buckets[WorkBucketStage::Release].bulk_add(work_packets);
 
         self.lines_consumed.store(0, Ordering::Relaxed);
@@ -527,22 +540,8 @@ impl ImmixSpace {
         did_defrag
     }
 
-    pub fn clear_side_log_bits(&self) {
-        let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec();
-        for chunk in self.chunk_map.all_chunks() {
-            log_bit.bzero_metadata(chunk.start(), Chunk::BYTES);
-        }
-    }
-
-    pub fn set_side_log_bits(&self) {
-        let log_bit = VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.extract_side_spec();
-        for chunk in self.chunk_map.all_chunks() {
-            log_bit.bset_metadata(chunk.start(), Chunk::BYTES);
-        }
-    }
-
     /// Generate chunk sweep tasks
-    fn generate_sweep_tasks(&self) -> Vec>> {
+    fn generate_sweep_tasks(&self, unlog_bits_op: UnlogBitsOperation) -> Vec>> {
         self.defrag.mark_histograms.lock().clear();
         // # Safety: ImmixSpace reference is always valid within this collection cycle.
let space = unsafe { &*(self as *const Self) }; @@ -554,6 +553,7 @@ impl ImmixSpace { Box::new(SweepChunk { space, chunk, + unlog_bits_op, epilogue: epilogue.clone(), }) }); @@ -923,6 +923,7 @@ pub struct PrepareBlockState { pub space: &'static ImmixSpace, pub chunk: Chunk, pub defrag_threshold: Option, + pub unlog_bits_op: UnlogBitsOperation, } impl PrepareBlockState { @@ -967,6 +968,9 @@ impl GCWork for PrepareBlockState { debug_assert!(!block.get_state().is_reusable()); debug_assert_ne!(block.get_state(), BlockState::Marked); } + + self.unlog_bits_op + .execute::(self.chunk.start(), Chunk::BYTES); } } @@ -974,6 +978,7 @@ impl GCWork for PrepareBlockState { struct SweepChunk { space: &'static ImmixSpace, chunk: Chunk, + unlog_bits_op: UnlogBitsOperation, /// A destructor invoked when all `SweepChunk` packets are finished. epilogue: Arc>, } @@ -1034,6 +1039,10 @@ impl GCWork for SweepChunk { self.space.chunk_map.set_allocated(self.chunk, false) } self.space.defrag.add_completed_mark_histogram(histogram); + + self.unlog_bits_op + .execute::(self.chunk.start(), Chunk::BYTES); + self.epilogue.finish_one_work_packet(); } } diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index 6909c2dea3..b1ec06f166 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -291,8 +291,6 @@ impl LargeObjectSpace { } pub fn set_side_log_bits(&self) { - debug_assert!(self.treadmill.is_from_space_empty()); - debug_assert!(self.treadmill.is_nursery_empty()); let mut enumator = ClosureObjectEnumerator::<_, VM>::new(|object| { VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.mark_as_unlogged::(object, Ordering::SeqCst); }); diff --git a/src/util/metadata/log_bit.rs b/src/util/metadata/log_bit.rs index a5a9b8644f..6ea012acbd 100644 --- a/src/util/metadata/log_bit.rs +++ b/src/util/metadata/log_bit.rs @@ -1,4 +1,6 @@ +use crate::util::Address; use crate::util::ObjectReference; +use crate::vm::ObjectModel; use crate::vm::VMBinding; use crate::vm::VMGlobalLogBitSpec; use std::sync::atomic::Ordering; @@ -38,3 +40,31 @@ impl VMGlobalLogBitSpec { self.load_atomic::(object, None, order) == 1 } } + +/// This specifies what to do to the global side unlog bits in various functions or work packets. +#[derive(Clone, Copy, PartialEq, Eq)] +pub(crate) enum UnlogBitsOperation { + /// Do nothing. + NoOp, + /// Bulk set unlog bits to all 1s. + BulkSet, + /// Bulk clear unlog bits to all 0s. + BulkClear, +} + +impl UnlogBitsOperation { + /// Run the specified operation on the address range from `start` to `start + size`. 
+ pub(crate) fn execute(&self, start: Address, size: usize) { + if let MetadataSpec::OnSide(ref unlog_bits) = *VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC { + match self { + UnlogBitsOperation::NoOp => {} + UnlogBitsOperation::BulkSet => { + unlog_bits.bset_metadata(start, size); + } + UnlogBitsOperation::BulkClear => { + unlog_bits.bzero_metadata(start, size); + } + } + } + } +} From 2ee86afae445bd4ea6809e861d99991ee6c05bfe Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 11 Sep 2025 18:58:26 +0800 Subject: [PATCH 53/59] Remove visualization of unused work packet --- tools/tracing/timeline/visualize.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tools/tracing/timeline/visualize.py b/tools/tracing/timeline/visualize.py index 1d9effc4c5..24c7bd9338 100755 --- a/tools/tracing/timeline/visualize.py +++ b/tools/tracing/timeline/visualize.py @@ -164,12 +164,6 @@ def enrich_event(self, name, ph, tid, ts, result, args): case "gcrequester_request": result["tid"] = 1 - case "num_concurrent_tracing_packets_change": - result["name"] = "Concurrent tracing packets" - result["args"] |= { - "number": int(args[0]), - } - case _: if self.enrich_event_extra is not None: # Call ``enrich_event_extra`` in the extension script if defined. From 0c3d9f69f1e0fd9d03d7c09d370942e50d9db51d Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Fri, 12 Sep 2025 07:54:52 +0800 Subject: [PATCH 54/59] Remove unnecessary unlog bit clearing --- src/plan/generational/global.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs index 61fe31b836..54e94a6334 100644 --- a/src/plan/generational/global.rs +++ b/src/plan/generational/global.rs @@ -74,9 +74,6 @@ impl CommonGenPlan { /// Release Gen. This should be called by a single thread in GC release work. pub fn release(&mut self, tls: VMWorkerThread) { let full_heap = !self.is_current_gc_nursery(); - if VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_on_side() { - self.nursery.clear_side_log_bits(); - } self.common.release(tls, full_heap); self.nursery.release(); } From f76e9547c31c94259411d007abfac1c584fd9fa0 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Fri, 12 Sep 2025 08:06:49 +0800 Subject: [PATCH 55/59] Move `eager_mark_lines` to `Line`. --- src/policy/immix/line.rs | 13 ++++++++++++ src/util/alloc/immix_allocator.rs | 34 ++++++++++--------------------- 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/src/policy/immix/line.rs b/src/policy/immix/line.rs index f48ea7d271..68f1fbdb3b 100644 --- a/src/policy/immix/line.rs +++ b/src/policy/immix/line.rs @@ -93,4 +93,17 @@ impl Line { std::ptr::write_bytes(start, 0xffu8, bytes); } } + + /// Eagerly mark mark all line mark states and all side mark bits in the gap. + /// + /// This is useful during concurrent marking. By doing this, the GC workers running concurrently + /// will conservatively consider all objects that will be bump-allocated in the gap as live, and + /// the mutator doesn't need to explicitly mark bump-allocated objects in the fast path. 
+ pub fn eager_mark_lines(line_mark_state: u8, start_line: Line, end_line: Line) { + for line in RegionIterator::::new(start_line, end_line) { + line.mark(line_mark_state); + } + + Line::initialize_mark_table_as_marked::(start_line..end_line); + } } diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs index a3da9e1182..4ee77a44e4 100644 --- a/src/util/alloc/immix_allocator.rs +++ b/src/util/alloc/immix_allocator.rs @@ -7,7 +7,7 @@ use crate::policy::immix::ImmixSpace; use crate::policy::space::Space; use crate::util::alloc::allocator::get_maximum_aligned_size; use crate::util::alloc::Allocator; -use crate::util::linear_scan::{Region, RegionIterator}; +use crate::util::linear_scan::Region; use crate::util::opaque_pointer::VMThread; use crate::util::rust_util::unlikely; use crate::util::Address; @@ -267,7 +267,11 @@ impl ImmixAllocator { }; // mark objects if concurrent marking is active if self.immix_space().should_allocate_as_live() { - self.eager_mark_lines(start_line, end_line); + let state = self + .space + .line_mark_state + .load(std::sync::atomic::Ordering::Acquire); + Line::eager_mark_lines::(state, start_line, end_line); } return true; } else { @@ -311,7 +315,11 @@ impl ImmixAllocator { .bzero_metadata(block.start(), crate::policy::immix::block::Block::BYTES); // mark objects if concurrent marking is active if self.immix_space().should_allocate_as_live() { - self.eager_mark_lines(block.start_line(), block.end_line()); + let state = self + .space + .line_mark_state + .load(std::sync::atomic::Ordering::Acquire); + Line::eager_mark_lines::(state, block.start_line(), block.end_line()); } if self.request_for_large { self.large_bump_pointer.cursor = block.start(); @@ -325,26 +333,6 @@ impl ImmixAllocator { } } - /// Eagerly mark mark all line mark states and all side mark bits in the gap. - /// - /// This is useful during concurrent marking. By doing this, the GC workers running concurrently - /// will conservatively consider all objects that will be bump-allocated in the gap as live, and - /// the mutator doesn't need to explicitly mark bump-allocated objects in the fast path. - fn eager_mark_lines(&mut self, start_line: Line, end_line: Line) { - debug_assert!(self.immix_space().should_allocate_as_live()); - - let state = self - .space - .line_mark_state - .load(std::sync::atomic::Ordering::Acquire); - - for line in RegionIterator::::new(start_line, end_line) { - line.mark(state); - } - - Line::initialize_mark_table_as_marked::(start_line..end_line); - } - /// Return whether the TLAB has been exhausted and we need to acquire a new block. Assumes that /// the buffer limits have been restored using [`ImmixAllocator::restore_limit_for_stress`]. /// Note that this function may implicitly change the limits of the allocator. 
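The patch above centralizes the gap-marking logic in `Line`. Conceptually, eager marking covers every line of a freshly claimed allocation gap so that concurrent markers treat anything bump-allocated there as live. A standalone sketch of that idea follows; it uses toy types, not mmtk-core code, and assumes the 256-byte Immix line size rather than reading `Line::BYTES`.

// Toy illustration of eager line marking during concurrent marking.
const LOG_LINE_BYTES: usize = 8; // assumption: 2^8 = 256 bytes per line

/// Mark every line overlapping the allocation gap [start, end) with `state`.
/// A concurrent marker then conservatively treats objects bump-allocated in
/// the gap as live, so the mutator's allocation fast path stays mark-free.
fn eager_mark_gap(line_marks: &mut [u8], state: u8, start: usize, end: usize) {
    let first_line = start >> LOG_LINE_BYTES;
    // Round the exclusive end address up so a partially used last line is covered.
    let last_line = (end + (1 << LOG_LINE_BYTES) - 1) >> LOG_LINE_BYTES;
    for line in first_line..last_line {
        line_marks[line] = state;
    }
}

The real `Line::eager_mark_lines` additionally bulk-sets the side mark bits for the same range via `initialize_mark_table_as_marked`, which is why moving it next to the line mark state code keeps both halves of the invariant in one place.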
From b7b79e4ca69ba0b37380c070e74fb87ad0b86cd4 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Fri, 12 Sep 2025 08:21:18 +0800 Subject: [PATCH 56/59] Just use bset_metadata --- src/policy/immix/line.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/src/policy/immix/line.rs b/src/policy/immix/line.rs index 68f1fbdb3b..7d1d5f6d06 100644 --- a/src/policy/immix/line.rs +++ b/src/policy/immix/line.rs @@ -2,7 +2,7 @@ use std::ops::Range; use super::block::Block; use crate::util::linear_scan::{Region, RegionIterator}; -use crate::util::metadata::side_metadata::{address_to_meta_address, SideMetadataSpec}; +use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::{ util::{Address, ObjectReference}, vm::*, @@ -86,12 +86,10 @@ impl Line { pub fn initialize_mark_table_as_marked(lines: Range) { let meta = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.extract_side_spec(); - let start: *mut u8 = address_to_meta_address(meta, lines.start.start()).to_mut_ptr(); - let limit: *mut u8 = address_to_meta_address(meta, lines.end.start()).to_mut_ptr(); - unsafe { - let bytes = limit.offset_from(start) as usize; - std::ptr::write_bytes(start, 0xffu8, bytes); - } + let start = lines.start.start(); + let limit = lines.end.start(); + let size = limit - start; + meta.bset_metadata(start, size); } /// Eagerly mark mark all line mark states and all side mark bits in the gap. From 26f74c13b477a6115e9b0dfeff9118032b24a1b4 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Fri, 12 Sep 2025 08:36:15 +0800 Subject: [PATCH 57/59] Code style and comments --- src/policy/immix/line.rs | 17 ++++++++++------- src/util/alloc/immix_allocator.rs | 15 +++++---------- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/src/policy/immix/line.rs b/src/policy/immix/line.rs index 7d1d5f6d06..9c78037c7e 100644 --- a/src/policy/immix/line.rs +++ b/src/policy/immix/line.rs @@ -84,6 +84,11 @@ impl Line { marked_lines } + /// Bulk set the local mark bits of a line range. + /// + /// This is useful during concurrent marking. By doing this, the GC workers running concurrently + /// will conservatively consider all objects that will be bump-allocated in the gap as live, and + /// the mutator doesn't need to explicitly mark bump-allocated objects in the fast path. pub fn initialize_mark_table_as_marked(lines: Range) { let meta = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.extract_side_spec(); let start = lines.start.start(); @@ -92,16 +97,14 @@ impl Line { meta.bset_metadata(start, size); } - /// Eagerly mark mark all line mark states and all side mark bits in the gap. + /// Eagerly mark all line mark states and all side mark bits in the gap. /// - /// This is useful during concurrent marking. By doing this, the GC workers running concurrently - /// will conservatively consider all objects that will be bump-allocated in the gap as live, and - /// the mutator doesn't need to explicitly mark bump-allocated objects in the fast path. - pub fn eager_mark_lines(line_mark_state: u8, start_line: Line, end_line: Line) { - for line in RegionIterator::::new(start_line, end_line) { + /// Useful during concurrent marking. 
+ pub fn eager_mark_lines(line_mark_state: u8, lines: Range) { + for line in RegionIterator::::new(lines.start, lines.end) { line.mark(line_mark_state); } - Line::initialize_mark_table_as_marked::(start_line..end_line); + Line::initialize_mark_table_as_marked::(lines); } } diff --git a/src/util/alloc/immix_allocator.rs b/src/util/alloc/immix_allocator.rs index 4ee77a44e4..eb2e5235fa 100644 --- a/src/util/alloc/immix_allocator.rs +++ b/src/util/alloc/immix_allocator.rs @@ -1,3 +1,4 @@ +use std::sync::atomic::Ordering; use std::sync::Arc; use super::allocator::{align_allocation_no_fill, fill_alignment_gap, AllocatorContext}; @@ -267,11 +268,8 @@ impl ImmixAllocator { }; // mark objects if concurrent marking is active if self.immix_space().should_allocate_as_live() { - let state = self - .space - .line_mark_state - .load(std::sync::atomic::Ordering::Acquire); - Line::eager_mark_lines::(state, start_line, end_line); + let state = self.space.line_mark_state.load(Ordering::Acquire); + Line::eager_mark_lines::(state, start_line..end_line); } return true; } else { @@ -315,11 +313,8 @@ impl ImmixAllocator { .bzero_metadata(block.start(), crate::policy::immix::block::Block::BYTES); // mark objects if concurrent marking is active if self.immix_space().should_allocate_as_live() { - let state = self - .space - .line_mark_state - .load(std::sync::atomic::Ordering::Acquire); - Line::eager_mark_lines::(state, block.start_line(), block.end_line()); + let state = self.space.line_mark_state.load(Ordering::Acquire); + Line::eager_mark_lines::(state, block.start_line()..block.end_line()); } if self.request_for_large { self.large_bump_pointer.cursor = block.start(); From 5b5552b70cb8fd8527e994f401e5a0fb84caf744 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Fri, 12 Sep 2025 08:51:32 +0800 Subject: [PATCH 58/59] Extract bulk_set_line_mark_states --- src/policy/immix/line.rs | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/policy/immix/line.rs b/src/policy/immix/line.rs index 9c78037c7e..a8d2e9686e 100644 --- a/src/policy/immix/line.rs +++ b/src/policy/immix/line.rs @@ -86,9 +86,9 @@ impl Line { /// Bulk set the local mark bits of a line range. /// - /// This is useful during concurrent marking. By doing this, the GC workers running concurrently - /// will conservatively consider all objects that will be bump-allocated in the gap as live, and - /// the mutator doesn't need to explicitly mark bump-allocated objects in the fast path. + /// This is useful during concurrent marking. By doing this, concurrent marking will + /// conservatively consider all objects allocated in the line range as live, and the mutator + /// doesn't need to explicitly mark bump-allocated objects in the fast path. pub fn initialize_mark_table_as_marked(lines: Range) { let meta = VM::VMObjectModel::LOCAL_MARK_BIT_SPEC.extract_side_spec(); let start = lines.start.start(); @@ -97,14 +97,18 @@ impl Line { meta.bset_metadata(start, size); } - /// Eagerly mark all line mark states and all side mark bits in the gap. - /// - /// Useful during concurrent marking. - pub fn eager_mark_lines(line_mark_state: u8, lines: Range) { + /// Bulk set line mark states. + pub fn bulk_set_line_mark_states(line_mark_state: u8, lines: Range) { for line in RegionIterator::::new(lines.start, lines.end) { line.mark(line_mark_state); } + } - Line::initialize_mark_table_as_marked::(lines); + /// Eagerly mark all line mark states and all side mark bits in the gap. + /// + /// Useful during concurrent marking. 
+    pub fn eager_mark_lines(line_mark_state: u8, lines: Range) {
+        Self::bulk_set_line_mark_states(line_mark_state, lines.clone());
+        Self::initialize_mark_table_as_marked::(lines);
     }
 }
From 815cc172a0422c21ab8cf4922c856a3883446ec7 Mon Sep 17 00:00:00 2001
From: Kunshan Wang
Date: Fri, 12 Sep 2025 09:35:11 +0800
Subject: [PATCH 59/59] Make the `-e` option of `capture.py` work for ConcurrentImmix

---
 tools/tracing/timeline/capture.bt | 21 +++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)

diff --git a/tools/tracing/timeline/capture.bt b/tools/tracing/timeline/capture.bt
index 19fd13d381..953ce72d2e 100644
--- a/tools/tracing/timeline/capture.bt
+++ b/tools/tracing/timeline/capture.bt
@@ -2,6 +2,7 @@
 BEGIN {
     @harness = $HARNESS;
     @gc_count = 0;
+    @is_initial_mark = 0;
 
     if (!@harness) {
         //always collect data
@@ -30,7 +31,6 @@ usdt:$MMTK:mmtk:harness_end {
 
 usdt:$MMTK:mmtk:gc_start {
     printf("GC,B,%d,%lu\n", tid, nsecs);
-    @gc_count += 1;
     // bpftrace warns that signed `%` operator may have undefiend behavior.
     if ((uint64)@gc_count % $EVERY == 0 && @stats_enabled) {
         @enable_print = 1;
@@ -41,6 +41,14 @@ usdt:$MMTK:mmtk:gc_start {
 
 usdt:$MMTK:mmtk:gc_end {
     printf("GC,E,%d,%lu\n", tid, nsecs);
+
+    // We don't increment the GC count at the end of an InitialMark pause, so that we always
+    // visualize both InitialMark and FinalMark or neither.
+    // FIXME: mmtk-core should emit distinct events for GC end and pause end.
+    if (!@is_initial_mark) {
+        @gc_count += 1;
+    }
+
+    @is_initial_mark = 0;
 }
 
 usdt:$MMTK:mmtk:bucket_opened {
@@ -125,19 +133,24 @@ usdt:$MMTK:mmtk:sweep_chunk {
 }
 
 usdt:$MMTK:mmtk:concurrent_trace_objects {
+    if (@enable_print) {
         printf("concurrent_trace_objects,meta,%d,%lu,%lu,%lu,%lu\n", tid, nsecs, arg0, arg1, arg2);
+    }
 }
 
 usdt:$MMTK:mmtk:gcrequester_request {
-	printf("gcrequester_request,i,%d,%lu\n", tid, nsecs);
+    printf("gcrequester_request,i,%d,%lu\n", tid, nsecs);
 }
 
 usdt:$MMTK:mmtk:add_schedule_collection_packet {
-	printf("add_schedule_collection_packet,i,%d,%lu\n", tid, nsecs);
+    printf("add_schedule_collection_packet,i,%d,%lu\n", tid, nsecs);
 }
 
 usdt:$MMTK:mmtk:concurrent_pause_determined {
-	printf("concurrent_pause_determined,meta,%d,%lu,%lu\n", tid, nsecs, arg0);
+    printf("concurrent_pause_determined,meta,%d,%lu,%lu\n", tid, nsecs, arg0);
+    if (arg0 == 2) { // InitialMark
+        @is_initial_mark = 1;
+    }
 }
 
 usdt:$MMTK:mmtk:finalization {
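The `@is_initial_mark` bookkeeping above makes the `-e N` sampling option treat an InitialMark/FinalMark pair as a single GC, so a concurrent cycle is either captured whole or skipped whole. For clarity, the same state machine can be restated in Rust; this is an illustrative model of the bpftrace maps, not code from the repository, and it assumes `every >= 1`.

// Illustrative restatement of capture.bt's sampling state machine.
struct Sampler {
    gc_count: u64,        // @gc_count
    every: u64,           // $EVERY, from the -e option
    is_initial_mark: bool, // @is_initial_mark
}

impl Sampler {
    fn on_pause_determined(&mut self, pause_code: u64) {
        // 2 == InitialMark, matching the `arg0 == 2` check in capture.bt.
        if pause_code == 2 {
            self.is_initial_mark = true;
        }
    }

    fn on_gc_end(&mut self) {
        // Only count a GC when the ending pause is not an InitialMark, so an
        // InitialMark..FinalMark cycle advances the counter exactly once.
        if !self.is_initial_mark {
            self.gc_count += 1;
        }
        self.is_initial_mark = false;
    }

    fn should_sample(&self) -> bool {
        self.gc_count % self.every == 0
    }
}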