From 33512a7f772615ac10502c07e7e7948415015f1b Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 17 Dec 2025 19:52:25 +0800 Subject: [PATCH 01/10] Value-based API (maybe not a good idea) --- src/plan/concurrent/barrier.rs | 7 +++- .../concurrent/concurrent_marking_work.rs | 1 + src/plan/tracing.rs | 11 +++-- src/policy/compressor/compressorspace.rs | 24 +++++++---- src/scheduler/gc_work.rs | 8 +++- src/vm/mod.rs | 1 + src/vm/scanning.rs | 42 +++++++++++++++++++ 7 files changed, 81 insertions(+), 13 deletions(-) diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs index 0bd8995564..a803a4fd4c 100644 --- a/src/plan/concurrent/barrier.rs +++ b/src/plan/concurrent/barrier.rs @@ -4,6 +4,7 @@ use super::{concurrent_marking_work::ProcessModBufSATB, Pause}; use crate::plan::global::PlanTraceObject; use crate::policy::gc_work::TraceKind; use crate::util::VMMutatorThread; +use crate::vm::RefScanPolicy; use crate::{ plan::{barriers::BarrierSemantics, concurrent::global::ConcurrentPlan, VectorQueue}, scheduler::WorkBucketStage, @@ -156,7 +157,11 @@ impl + PlanTraceObject, const KIND } fn object_probable_write_slow(&mut self, obj: ObjectReference) { - crate::plan::tracing::SlotIterator::::iterate_fields(obj, self.tls.0, |s| { + // Note: the SATB barrier happens during strong closure computation, so it is a chance for + // the VM binding to discover weak references. The VM may choose to conservatively treat + // weak references as strong during concurrent GC, which is allowed by MMTk. 
+ let policy: RefScanPolicy = RefScanPolicy::StrongClosure; + crate::plan::tracing::SlotIterator::::iterate_fields(obj, policy, self.tls.0, |s| { self.enqueue_node(Some(obj), s, None); }); } diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index fca994a7bc..fc090fc18a 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -76,6 +76,7 @@ impl + PlanTraceObject, const KIND fn scan_and_enqueue(&mut self, object: ObjectReference) { crate::plan::tracing::SlotIterator::::iterate_fields( object, + RefScanPolicy::StrongClosure, self.worker().tls.0, |s| { let Some(t) = s.load() else { diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index 792e142c76..960ca7cac2 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -6,7 +6,7 @@ use std::marker::PhantomData; use crate::scheduler::gc_work::{ProcessEdgesWork, SlotOf}; use crate::scheduler::{GCWorker, WorkBucketStage, EDGES_WORK_BUFFER_SIZE}; use crate::util::{ObjectReference, VMThread, VMWorkerThread}; -use crate::vm::{Scanning, SlotVisitor, VMBinding}; +use crate::vm::{RefScanPolicy, Scanning, SlotVisitor, VMBinding}; /// This trait represents an object queue to enqueue objects during tracing. pub trait ObjectQueue { @@ -157,13 +157,18 @@ pub(crate) struct SlotIterator { impl SlotIterator { /// Iterate over the slots of an object by applying a function to each slot. - pub fn iterate_fields(object: ObjectReference, _tls: VMThread, mut f: F) { + pub fn iterate_fields( + object: ObjectReference, + policy: RefScanPolicy, + _tls: VMThread, + mut f: F, + ) { // FIXME: We should use tls from the arguments. 
// See https://github.com/mmtk/mmtk-core/issues/1375 let fake_tls = VMWorkerThread(VMThread::UNINITIALIZED); if !>::support_slot_enqueuing(fake_tls, object) { panic!("SlotIterator::iterate_fields cannot be used on objects that don't support slot-enqueuing"); } - >::scan_object(fake_tls, object, &mut f); + >::scan_object(fake_tls, object, policy, &mut f); } } diff --git a/src/policy/compressor/compressorspace.rs b/src/policy/compressor/compressorspace.rs index adf5746b6e..24318c8658 100644 --- a/src/policy/compressor/compressorspace.rs +++ b/src/policy/compressor/compressorspace.rs @@ -336,15 +336,23 @@ impl CompressorSpace { fn update_references(&self, worker: &mut GCWorker, object: ObjectReference) { if VM::VMScanning::support_slot_enqueuing(worker.tls, object) { - VM::VMScanning::scan_object(worker.tls, object, &mut |s: VM::VMSlot| { - if let Some(o) = s.load() { - s.store(self.forward(o, false)); - } - }); + VM::VMScanning::scan_object( + worker.tls, + object, + RefScanPolicy::RefUpdate, + &mut |s: VM::VMSlot| { + if let Some(o) = s.load() { + s.store(self.forward(o, false)); + } + }, + ); } else { - VM::VMScanning::scan_object_and_trace_edges(worker.tls, object, &mut |o| { - self.forward(o, false) - }); + VM::VMScanning::scan_object_and_trace_edges( + worker.tls, + object, + RefScanPolicy::RefUpdate, + &mut |o| self.forward(o, false), + ); } } diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index d9f3f1657f..5504a8af44 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -867,7 +867,12 @@ pub trait ScanObjectsWork: GCWork + Sized { if ::VMScanning::support_slot_enqueuing(tls, object) { trace!("Scan object (slot) {}", object); // If an object supports slot-enqueuing, we enqueue its slots. 
- ::VMScanning::scan_object(tls, object, &mut closure); + ::VMScanning::scan_object( + tls, + object, + RefScanPolicy::StrongClosure, + &mut closure, + ); self.post_scan_object(object); } else { // If an object does not support slot-enqueuing, we have to use @@ -899,6 +904,7 @@ pub trait ScanObjectsWork: GCWork + Sized { ::VMScanning::scan_object_and_trace_edges( tls, object, + RefScanPolicy::StrongClosure, object_tracer, ); self.post_scan_object(object); diff --git a/src/vm/mod.rs b/src/vm/mod.rs index 2ff244f6e4..887b42cc2a 100644 --- a/src/vm/mod.rs +++ b/src/vm/mod.rs @@ -30,6 +30,7 @@ pub use self::reference_glue::Finalizable; pub use self::reference_glue::ReferenceGlue; pub use self::scanning::ObjectTracer; pub use self::scanning::ObjectTracerContext; +pub use self::scanning::RefScanPolicy; pub use self::scanning::RootsWorkFactory; pub use self::scanning::Scanning; pub use self::scanning::SlotVisitor; diff --git a/src/vm/scanning.rs b/src/vm/scanning.rs index 3a87fc4260..2119580a65 100644 --- a/src/vm/scanning.rs +++ b/src/vm/scanning.rs @@ -43,6 +43,46 @@ impl ObjectReference> ObjectTracer for F { } } +/// This type specifies how object-scanning functions ([`Scanning::scan_object`] and +/// [`Scanning::scan_object_and_trace_edges`]) should handle strong and weak reference fields. +/// +/// Note that it is the VM and the VM binding that ultimately decides *which* reference is strong +/// and *which* reference is weak. Particularly, the VM binding is allowed to conservatively report +/// weak references as strong. For example, +/// +/// - A VM binding can report all weak references as strong during nursery collections or +/// concurrent collections to avoid expensive weak reference processing. +/// - The VM binding of a JVM (e.g. mmtk-openjdk) can report the weak reference field in +/// `SoftReference` as strong during non-emergency GCs, and weak during emergency GCs. 
+pub enum RefScanPolicy { + /// An object is scanned during the strong transitive closure stage. The VM binding should + /// visit fields that contain strong references using the slot visitor or object tracer + /// callbacks. + /// + /// As described in the [Porting Guide][pg-weakref], if a VM binding chooses to discover weak + /// reference fields during tracing, the VM binding should record the object, the fields, the + /// field values, and/or any other relevant data in VM-specific ways during the execution of + /// object-scanning functions. If the VM binding chooses not to discover weak reference fields + /// this way, it can ignore weak fields. + /// + /// [pg-weakref]: https://docs.mmtk.io/portingguide/concerns/weakref.html#identifying-weak-references + StrongClosure, + /// An object is scanned to update its references after objects are moved or after the new + /// addresses of objects have been calculated. The VM binding should visit all reference fields + /// of an object, regardless whether they are holding strong or weak reference. + RefUpdate, + /// Instruct the VM binding to visit all fields of an object, both strong and weak, without any + /// hints about the MMTk's intention to call the object-scanning function. + All, + /// Instruct the VM binding to visit all strong fields, without any hints about the MMTk's + /// intention to call the object-scanning function. Particularly, the VM binding should not + /// discover weak references as suggested by [`RefScanPolicy::StrongClosure`]. + StrongOnly, + /// Instruct the VM binding to visit all weak fields, without any hints about the MMTk's + /// intention to call the object-scanning function. + WeakOnly, +} + /// An `ObjectTracerContext` gives a GC worker temporary access to an `ObjectTracer`, allowing /// the GC worker to trace objects. 
This trait is intended to abstract out the implementation /// details of tracing objects, enqueuing objects, and creating work packets that expand the @@ -193,6 +233,7 @@ pub trait Scanning { fn scan_object>( tls: VMWorkerThread, object: ObjectReference, + policy: RefScanPolicy, slot_visitor: &mut SV, ); @@ -218,6 +259,7 @@ pub trait Scanning { fn scan_object_and_trace_edges( _tls: VMWorkerThread, _object: ObjectReference, + _policy: RefScanPolicy, _object_tracer: &mut OT, ) { unreachable!("scan_object_and_trace_edges() will not be called when support_slot_enqueuing() is always true.") From 2bd86dc53b0891daa99c5d0bcd5e07164cc4529c Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 17 Dec 2025 23:19:57 +0800 Subject: [PATCH 02/10] Use type parameter instead of value parameter. The VM binding only needs to know the `RefScanPolicy` trait but not its implementations. --- src/plan/concurrent/barrier.rs | 21 ++++--- .../concurrent/concurrent_marking_work.rs | 4 +- src/plan/tracing.rs | 5 +- src/policy/compressor/compressorspace.rs | 7 +-- src/scheduler/gc_work.rs | 7 +-- src/util/mod.rs | 2 + src/util/ref_scan_policy.rs | 63 +++++++++++++++++++ src/vm/scanning.rs | 41 ++++-------- 8 files changed, 99 insertions(+), 51 deletions(-) create mode 100644 src/util/ref_scan_policy.rs diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs index a803a4fd4c..ca7709a57a 100644 --- a/src/plan/concurrent/barrier.rs +++ b/src/plan/concurrent/barrier.rs @@ -3,8 +3,8 @@ use std::sync::atomic::Ordering; use super::{concurrent_marking_work::ProcessModBufSATB, Pause}; use crate::plan::global::PlanTraceObject; use crate::policy::gc_work::TraceKind; +use crate::util::ref_scan_policy::StrongOnly; use crate::util::VMMutatorThread; -use crate::vm::RefScanPolicy; use crate::{ plan::{barriers::BarrierSemantics, concurrent::global::ConcurrentPlan, VectorQueue}, scheduler::WorkBucketStage, @@ -157,12 +157,17 @@ impl + PlanTraceObject, const KIND } fn 
object_probable_write_slow(&mut self, obj: ObjectReference) { - // Note: the SATB barrier happens during strong closure computation, so it is a chance for - // the VM binding to discover weak references. The VM may choose to conservatively treat - // weak references as strong during concurrent GC, which is allowed by MMTk. - let policy: RefScanPolicy = RefScanPolicy::StrongClosure; - crate::plan::tracing::SlotIterator::::iterate_fields(obj, policy, self.tls.0, |s| { - self.enqueue_node(Some(obj), s, None); - }); + // Note: the purpose of the SATB barrier is to ensure all *strongly reachable* objects at + // the beginning of the trace will eventually be marked and scanned. Therefore, we use the + // `StrongOnly` here to enqueue children of strong fields. The current `obj` will + // eventually be scanned by the `ConcurrentTraceObjects` work packet using the + // `StrongClosure` policy, either during the concurrent tracing, or during `FinalMark`. + crate::plan::tracing::SlotIterator::::iterate_fields::<_, StrongOnly>( + obj, + self.tls.0, + |s| { + self.enqueue_node(Some(obj), s, None); + }, + ); } } diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index fc090fc18a..76a1743e7e 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -4,6 +4,7 @@ use crate::plan::PlanTraceObject; use crate::plan::VectorQueue; use crate::policy::gc_work::TraceKind; use crate::scheduler::gc_work::{ScanObjects, SlotOf}; +use crate::util::ref_scan_policy::StrongClosure; use crate::util::ObjectReference; use crate::vm::slot::Slot; use crate::{ @@ -74,9 +75,8 @@ impl + PlanTraceObject, const KIND } fn scan_and_enqueue(&mut self, object: ObjectReference) { - crate::plan::tracing::SlotIterator::::iterate_fields( + crate::plan::tracing::SlotIterator::::iterate_fields::<_, StrongClosure>( object, - RefScanPolicy::StrongClosure, self.worker().tls.0, |s| { let Some(t) = s.load() 
else { diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index 960ca7cac2..65597b4daa 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -157,9 +157,8 @@ pub(crate) struct SlotIterator { impl SlotIterator { /// Iterate over the slots of an object by applying a function to each slot. - pub fn iterate_fields( + pub fn iterate_fields( object: ObjectReference, - policy: RefScanPolicy, _tls: VMThread, mut f: F, ) { @@ -169,6 +168,6 @@ impl SlotIterator { if !>::support_slot_enqueuing(fake_tls, object) { panic!("SlotIterator::iterate_fields cannot be used on objects that don't support slot-enqueuing"); } - >::scan_object(fake_tls, object, policy, &mut f); + >::scan_object::<_, R>(fake_tls, object, &mut f); } } diff --git a/src/policy/compressor/compressorspace.rs b/src/policy/compressor/compressorspace.rs index 24318c8658..0bf293d330 100644 --- a/src/policy/compressor/compressorspace.rs +++ b/src/policy/compressor/compressorspace.rs @@ -14,6 +14,7 @@ use crate::util::metadata::extract_side_metadata; use crate::util::metadata::vo_bit; use crate::util::metadata::MetadataSpec; use crate::util::object_enum::{self, ObjectEnumerator}; +use crate::util::ref_scan_policy::RefUpdate; use crate::util::{Address, ObjectReference}; use crate::vm::slot::Slot; use crate::MMTK; @@ -336,10 +337,9 @@ impl CompressorSpace { fn update_references(&self, worker: &mut GCWorker, object: ObjectReference) { if VM::VMScanning::support_slot_enqueuing(worker.tls, object) { - VM::VMScanning::scan_object( + VM::VMScanning::scan_object::<_, RefUpdate>( worker.tls, object, - RefScanPolicy::RefUpdate, &mut |s: VM::VMSlot| { if let Some(o) = s.load() { s.store(self.forward(o, false)); @@ -347,10 +347,9 @@ impl CompressorSpace { }, ); } else { - VM::VMScanning::scan_object_and_trace_edges( + VM::VMScanning::scan_object_and_trace_edges::<_, RefUpdate>( worker.tls, object, - RefScanPolicy::RefUpdate, &mut |o| self.forward(o, false), ); } diff --git a/src/scheduler/gc_work.rs 
b/src/scheduler/gc_work.rs index 5504a8af44..e3bd4e501e 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -3,6 +3,7 @@ use super::*; use crate::global_state::GcStatus; use crate::plan::ObjectsClosure; use crate::plan::VectorObjectQueue; +use crate::util::ref_scan_policy::StrongClosure; use crate::util::*; use crate::vm::slot::Slot; use crate::vm::*; @@ -867,10 +868,9 @@ pub trait ScanObjectsWork: GCWork + Sized { if ::VMScanning::support_slot_enqueuing(tls, object) { trace!("Scan object (slot) {}", object); // If an object supports slot-enqueuing, we enqueue its slots. - ::VMScanning::scan_object( + ::VMScanning::scan_object::<_, StrongClosure>( tls, object, - RefScanPolicy::StrongClosure, &mut closure, ); self.post_scan_object(object); @@ -901,10 +901,9 @@ pub trait ScanObjectsWork: GCWork + Sized { // Scan objects and trace their outgoing edges at the same time. for object in scan_later.iter().copied() { trace!("Scan object (node) {}", object); - ::VMScanning::scan_object_and_trace_edges( + ::VMScanning::scan_object_and_trace_edges::<_, StrongClosure>( tls, object, - RefScanPolicy::StrongClosure, object_tracer, ); self.post_scan_object(object); diff --git a/src/util/mod.rs b/src/util/mod.rs index d22c29a2e3..88c2087d94 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -58,6 +58,8 @@ pub(crate) mod object_enum; pub(crate) mod object_forwarding; /// Reference processing implementation. pub(crate) mod reference_processor; +/// RefScanPolicy implementations. +pub(crate) mod ref_scan_policy; /// Utilities funcitons for Rust pub(crate) mod rust_util; /// Sanity checker for GC. diff --git a/src/util/ref_scan_policy.rs b/src/util/ref_scan_policy.rs new file mode 100644 index 0000000000..1cf5b5359e --- /dev/null +++ b/src/util/ref_scan_policy.rs @@ -0,0 +1,63 @@ +//! This module holds common reference scanning policies used in MMTk core. + +use crate::vm::RefScanPolicy; + +/// An object is scanned during the strong transitive closure stage. 
The VM binding should +/// visit fields that contain strong references using the slot visitor or object tracer +/// callbacks. +/// +/// As described in the [Porting Guide][pg-weakref], if a VM binding chooses to discover weak +/// reference fields during tracing, the VM binding should record the object, the fields, the +/// field values, and/or any other relevant data in VM-specific ways during the execution of +/// object-scanning functions. If the VM binding chooses not to discover weak reference fields +/// this way, it can ignore weak fields. +/// +/// [pg-weakref]: https://docs.mmtk.io/portingguide/concerns/weakref.html#identifying-weak-references +pub struct StrongClosure; + +impl RefScanPolicy for StrongClosure { + const SHOULD_VISIT_STRONG: bool = true; + const SHOULD_VISIT_WEAK: bool = false; + const SHOULD_DISCOVER_WEAK: bool = true; +} + +/// An object is scanned to update its references after objects are moved or after the new +/// addresses of objects have been calculated. The VM binding should visit all reference fields +/// of an object, regardless of whether they are holding strong or weak references. +pub struct RefUpdate; + +impl RefScanPolicy for RefUpdate { + const SHOULD_VISIT_STRONG: bool = true; + const SHOULD_VISIT_WEAK: bool = false; + const SHOULD_DISCOVER_WEAK: bool = false; +} + +/// Instruct the VM binding to visit all fields of an object, both strong and weak, without any +/// hints about the MMTk's intention to call the object-scanning function. +pub struct All; +impl RefScanPolicy for All { + const SHOULD_VISIT_STRONG: bool = true; + const SHOULD_VISIT_WEAK: bool = true; + const SHOULD_DISCOVER_WEAK: bool = false; +} + +/// Instruct the VM binding to visit all strong fields, without any hints about the MMTk's +/// intention to call the object-scanning function. Particularly, the VM binding should not +/// discover weak references as suggested by [`RefScanPolicy::StrongClosure`]. 
+pub struct StrongOnly; + +impl RefScanPolicy for StrongOnly { + const SHOULD_VISIT_STRONG: bool = true; + const SHOULD_VISIT_WEAK: bool = false; + const SHOULD_DISCOVER_WEAK: bool = false; +} + +/// Instruct the VM binding to visit all weak fields, without any hints about the MMTk's +/// intention to call the object-scanning function. +pub struct WeakOnly; + +impl RefScanPolicy for WeakOnly { + const SHOULD_VISIT_STRONG: bool = false; + const SHOULD_VISIT_WEAK: bool = true; + const SHOULD_DISCOVER_WEAK: bool = false; +} diff --git a/src/vm/scanning.rs b/src/vm/scanning.rs index 2119580a65..66d2578c21 100644 --- a/src/vm/scanning.rs +++ b/src/vm/scanning.rs @@ -54,33 +54,16 @@ impl ObjectReference> ObjectTracer for F { /// concurrent collections to avoid expensive weak reference processing. /// - The VM binding of a JVM (e.g. mmtk-openjdk) can report the weak reference field in /// `SoftReference` as strong during non-emergency GCs, and weak during emergency GCs. -pub enum RefScanPolicy { - /// An object is scanned during the strong transitive closure stage. The VM binding should - /// visit fields that contain strong references using the slot visitor or object tracer +pub trait RefScanPolicy { + /// True if the reference scanning function should visit strong reference fields in the object using /// callbacks. - /// - /// As described in the [Porting Guide][pg-weakref], if a VM binding chooses to discover weak - /// reference fields during tracing, the VM binding should record the object, the fields, the - /// field values, and/or any other relevant data in VM-specific ways during the execution of - /// object-scanning functions. If the VM binding chooses not to discover weak reference fields - /// this way, it can ignore weak fields. 
- /// - /// [pg-weakref]: https://docs.mmtk.io/portingguide/concerns/weakref.html#identifying-weak-references - StrongClosure, - /// An object is scanned to update its references after objects are moved or after the new - /// addresses of objects have been calculated. The VM binding should visit all reference fields - /// of an object, regardless whether they are holding strong or weak reference. - RefUpdate, - /// Instruct the VM binding to visit all fields of an object, both strong and weak, without any - /// hints about the MMTk's intention to call the object-scanning function. - All, - /// Instruct the VM binding to visit all strong fields, without any hints about the MMTk's - /// intention to call the object-scanning function. Particularly, the VM binding should not - /// discover weak references as suggested by [`RefScanPolicy::StrongClosure`]. - StrongOnly, - /// Instruct the VM binding to visit all weak fields, without any hints about the MMTk's - /// intention to call the object-scanning function. - WeakOnly, + const SHOULD_VISIT_STRONG: bool; + /// True if the reference scanning function should visit weak reference fields in the object using + /// callbacks. + const SHOULD_VISIT_WEAK: bool; + /// True if the reference scanning function should discover weak reference fields in VM-specific + /// ways. + const SHOULD_DISCOVER_WEAK: bool; } /// An `ObjectTracerContext` gives a GC worker temporary access to an `ObjectTracer`, allowing @@ -230,10 +213,9 @@ pub trait Scanning { /// * `tls`: The VM-specific thread-local storage for the current worker. /// * `object`: The object to be scanned. /// * `slot_visitor`: Called back for each field. - fn scan_object>( + fn scan_object, R: RefScanPolicy>( tls: VMWorkerThread, object: ObjectReference, - policy: RefScanPolicy, slot_visitor: &mut SV, ); @@ -256,10 +238,9 @@ pub trait Scanning { /// * `tls`: The VM-specific thread-local storage for the current worker. /// * `object`: The object to be scanned. 
/// * `object_tracer`: Called back for the object reference held in each field. - fn scan_object_and_trace_edges( + fn scan_object_and_trace_edges( _tls: VMWorkerThread, _object: ObjectReference, - _policy: RefScanPolicy, _object_tracer: &mut OT, ) { unreachable!("scan_object_and_trace_edges() will not be called when support_slot_enqueuing() is always true.") From 27a34e4e7bfb57cd14f98c01b36d4ea730bc6cd0 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 19 Jan 2026 17:11:30 +0800 Subject: [PATCH 03/10] Update comments --- src/plan/concurrent/barrier.rs | 14 +++++++++----- src/util/ref_scan_policy.rs | 24 ++++++++++++++---------- 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs index ca7709a57a..25bcceeebf 100644 --- a/src/plan/concurrent/barrier.rs +++ b/src/plan/concurrent/barrier.rs @@ -157,11 +157,15 @@ impl + PlanTraceObject, const KIND } fn object_probable_write_slow(&mut self, obj: ObjectReference) { - // Note: the purpose of the SATB barrier is to ensure all *strongly reachable* objects at - // the beginning of the trace will eventually be marked and scanned. Therefore, we use the - // `StrongOnly` here to enqueue children of strong fields. The current `obj` will - // eventually be scanned by the `ConcurrentTraceObjects` work packet using the - // `StrongClosure` policy, either during the concurrent tracing, or during `FinalMark`. + // Note: The SATB barrier ensures all *strongly reachable* objects from roots at the + // beginning of a trace (i.e. the SATB) will eventually be marked. To do this, the SATB + // barrier enqueues the current children of *strong fields*, but it doesn't mark the current + // object or its children. Instead, the marking and scanning will happen in the + // `ConcurrentTraceObjects` work packet which is executed either during the concurrent + // tracing, or during `FinalMark`. 
For this reason, the barrier itself is not the right + // time to do "reference discovery" because we only discover references of objects + // determined to be live. Therefore, we use `StrongOnly` here and only visit children of + // strong fields. crate::plan::tracing::SlotIterator::::iterate_fields::<_, StrongOnly>( obj, self.tls.0, diff --git a/src/util/ref_scan_policy.rs b/src/util/ref_scan_policy.rs index 1cf5b5359e..aa0c7d7550 100644 --- a/src/util/ref_scan_policy.rs +++ b/src/util/ref_scan_policy.rs @@ -2,17 +2,20 @@ use crate::vm::RefScanPolicy; -/// An object is scanned during the strong transitive closure stage. The VM binding should -/// visit fields that contain strong references using the slot visitor or object tracer -/// callbacks. +#[allow(unused)] // For doc comments. +use crate::vm::{ObjectTracer,SlotVisitor}; + +/// An object is scanned during the strong transitive closure stage. The VM binding should visit +/// fields that contain strong references using the [`SlotVisitor`] or [`ObjectTracer`] callbacks. /// -/// As described in the [Porting Guide][pg-weakref], if a VM binding chooses to discover weak -/// reference fields during tracing, the VM binding should record the object, the fields, the -/// field values, and/or any other relevant data in VM-specific ways during the execution of -/// object-scanning functions. If the VM binding chooses not to discover weak reference fields -/// this way, it can ignore weak fields. +/// The VM binding should not visit weak reference fields using the [`SlotVisitor`] or +/// [`ObjectTracer`] callbacks. If a VM binding chooses to discover weak references during tracing, +/// it should record relevant information (e.g. the current object, its fields, etc.) in VM-specific +/// data structures, as described in the [Porting Guide][pg-weakref]. If the VM binding chooses not +/// to discover weak reference fields this way, it can ignore weak fields. 
/// -/// [pg-weakref]: https://docs.mmtk.io/portingguide/concerns/weakref.html#identifying-weak-references +/// [pg-weakref]: +/// https://docs.mmtk.io/portingguide/concerns/weakref.html#identifying-weak-references pub struct StrongClosure; impl RefScanPolicy for StrongClosure { @@ -35,6 +38,7 @@ impl RefScanPolicy for RefUpdate { /// Instruct the VM binding to visit all fields of an object, both strong and weak, without any /// hints about the MMTk's intention to call the object-scanning function. pub struct All; + impl RefScanPolicy for All { const SHOULD_VISIT_STRONG: bool = true; const SHOULD_VISIT_WEAK: bool = true; @@ -43,7 +47,7 @@ impl RefScanPolicy for All { /// Instruct the VM binding to visit all strong fields, without any hints about the MMTk's /// intention to call the object-scanning function. Particularly, the VM binding should not -/// discover weak references as suggested by [`RefScanPolicy::StrongClosure`]. +/// discover weak references as [`StrongClosure`] implies. pub struct StrongOnly; impl RefScanPolicy for StrongOnly { From 022658b51ed0a1a40d02facd8d956fb2c3760b44 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 19 Jan 2026 17:12:18 +0800 Subject: [PATCH 04/10] Remove the `SHOULD_` prefix to make it terser --- src/util/mod.rs | 4 ++-- src/util/ref_scan_policy.rs | 32 ++++++++++++++++---------------- src/vm/scanning.rs | 6 +++--- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/util/mod.rs b/src/util/mod.rs index 88c2087d94..6a76b8d7a3 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -56,10 +56,10 @@ pub(crate) mod logger; pub(crate) mod object_enum; /// Forwarding word in object copying. pub(crate) mod object_forwarding; -/// Reference processing implementation. -pub(crate) mod reference_processor; /// RefScanPolicy implementations. pub(crate) mod ref_scan_policy; +/// Reference processing implementation. 
+pub(crate) mod reference_processor; /// Utilities funcitons for Rust pub(crate) mod rust_util; /// Sanity checker for GC. diff --git a/src/util/ref_scan_policy.rs b/src/util/ref_scan_policy.rs index aa0c7d7550..8b21e5f3fe 100644 --- a/src/util/ref_scan_policy.rs +++ b/src/util/ref_scan_policy.rs @@ -3,7 +3,7 @@ use crate::vm::RefScanPolicy; #[allow(unused)] // For doc comments. -use crate::vm::{ObjectTracer,SlotVisitor}; +use crate::vm::{ObjectTracer, SlotVisitor}; /// An object is scanned during the strong transitive closure stage. The VM binding should visit /// fields that contain strong references using the [`SlotVisitor`] or [`ObjectTracer`] callbacks. @@ -19,9 +19,9 @@ use crate::vm::{ObjectTracer,SlotVisitor}; pub struct StrongClosure; impl RefScanPolicy for StrongClosure { - const SHOULD_VISIT_STRONG: bool = true; - const SHOULD_VISIT_WEAK: bool = false; - const SHOULD_DISCOVER_WEAK: bool = true; + const VISIT_STRONG: bool = true; + const VISIT_WEAK: bool = false; + const DISCOVER_WEAK: bool = true; } /// An object is scanned to update its references after objects are moved or after the new @@ -30,9 +30,9 @@ impl RefScanPolicy for StrongClosure { pub struct RefUpdate; impl RefScanPolicy for RefUpdate { - const SHOULD_VISIT_STRONG: bool = true; - const SHOULD_VISIT_WEAK: bool = false; - const SHOULD_DISCOVER_WEAK: bool = false; + const VISIT_STRONG: bool = true; + const VISIT_WEAK: bool = false; + const DISCOVER_WEAK: bool = false; } /// Instruct the VM binding to visit all fields of an object, both strong and weak, without any @@ -40,9 +40,9 @@ impl RefScanPolicy for RefUpdate { pub struct All; impl RefScanPolicy for All { - const SHOULD_VISIT_STRONG: bool = true; - const SHOULD_VISIT_WEAK: bool = true; - const SHOULD_DISCOVER_WEAK: bool = false; + const VISIT_STRONG: bool = true; + const VISIT_WEAK: bool = true; + const DISCOVER_WEAK: bool = false; } /// Instruct the VM binding to visit all strong fields, without any hints about the MMTk's @@ -51,9 +51,9 
@@ impl RefScanPolicy for All { pub struct StrongOnly; impl RefScanPolicy for StrongOnly { - const SHOULD_VISIT_STRONG: bool = true; - const SHOULD_VISIT_WEAK: bool = false; - const SHOULD_DISCOVER_WEAK: bool = false; + const VISIT_STRONG: bool = true; + const VISIT_WEAK: bool = false; + const DISCOVER_WEAK: bool = false; } /// Instruct the VM binding to visit all weak fields, without any hints about the MMTk's @@ -61,7 +61,7 @@ impl RefScanPolicy for StrongOnly { pub struct WeakOnly; impl RefScanPolicy for WeakOnly { - const SHOULD_VISIT_STRONG: bool = false; - const SHOULD_VISIT_WEAK: bool = true; - const SHOULD_DISCOVER_WEAK: bool = false; + const VISIT_STRONG: bool = false; + const VISIT_WEAK: bool = true; + const DISCOVER_WEAK: bool = false; } diff --git a/src/vm/scanning.rs b/src/vm/scanning.rs index 66d2578c21..32d541f880 100644 --- a/src/vm/scanning.rs +++ b/src/vm/scanning.rs @@ -57,13 +57,13 @@ impl ObjectReference> ObjectTracer for F { pub trait RefScanPolicy { /// True if the reference scanning function should visit strong reference fields in the object using /// callbacks. - const SHOULD_VISIT_STRONG: bool; + const VISIT_STRONG: bool; /// True if the reference scanning function should visit weak reference fields in the object using /// callbacks. - const SHOULD_VISIT_WEAK: bool; + const VISIT_WEAK: bool; /// True if the reference scanning function should discover weak reference fields in VM-specific /// ways. - const SHOULD_DISCOVER_WEAK: bool; + const DISCOVER_WEAK: bool; } /// An `ObjectTracerContext` gives a GC worker temporary access to an `ObjectTracer`, allowing From 7af35398380d33cf301bf4350ced2b5a5b595e7e Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 21 Jan 2026 16:31:41 +0800 Subject: [PATCH 05/10] Rename StrongClosure to Closure "StrongClosure" and "StrongOnly" are too similar. 
--- src/plan/concurrent/concurrent_marking_work.rs | 4 ++-- src/scheduler/gc_work.rs | 6 +++--- src/util/ref_scan_policy.rs | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index 76a1743e7e..2355202a0b 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -4,7 +4,7 @@ use crate::plan::PlanTraceObject; use crate::plan::VectorQueue; use crate::policy::gc_work::TraceKind; use crate::scheduler::gc_work::{ScanObjects, SlotOf}; -use crate::util::ref_scan_policy::StrongClosure; +use crate::util::ref_scan_policy::Closure; use crate::util::ObjectReference; use crate::vm::slot::Slot; use crate::{ @@ -75,7 +75,7 @@ impl + PlanTraceObject, const KIND } fn scan_and_enqueue(&mut self, object: ObjectReference) { - crate::plan::tracing::SlotIterator::::iterate_fields::<_, StrongClosure>( + crate::plan::tracing::SlotIterator::::iterate_fields::<_, Closure>( object, self.worker().tls.0, |s| { diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index e3bd4e501e..d1baf3cfbe 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -3,7 +3,7 @@ use super::*; use crate::global_state::GcStatus; use crate::plan::ObjectsClosure; use crate::plan::VectorObjectQueue; -use crate::util::ref_scan_policy::StrongClosure; +use crate::util::ref_scan_policy::Closure; use crate::util::*; use crate::vm::slot::Slot; use crate::vm::*; @@ -868,7 +868,7 @@ pub trait ScanObjectsWork: GCWork + Sized { if ::VMScanning::support_slot_enqueuing(tls, object) { trace!("Scan object (slot) {}", object); // If an object supports slot-enqueuing, we enqueue its slots. - ::VMScanning::scan_object::<_, StrongClosure>( + ::VMScanning::scan_object::<_, Closure>( tls, object, &mut closure, @@ -901,7 +901,7 @@ pub trait ScanObjectsWork: GCWork + Sized { // Scan objects and trace their outgoing edges at the same time. 
for object in scan_later.iter().copied() { trace!("Scan object (node) {}", object); - ::VMScanning::scan_object_and_trace_edges::<_, StrongClosure>( + ::VMScanning::scan_object_and_trace_edges::<_, Closure>( tls, object, object_tracer, diff --git a/src/util/ref_scan_policy.rs b/src/util/ref_scan_policy.rs index 8b21e5f3fe..d3d3eea852 100644 --- a/src/util/ref_scan_policy.rs +++ b/src/util/ref_scan_policy.rs @@ -16,9 +16,9 @@ use crate::vm::{ObjectTracer, SlotVisitor}; /// /// [pg-weakref]: /// https://docs.mmtk.io/portingguide/concerns/weakref.html#identifying-weak-references -pub struct StrongClosure; +pub struct Closure; -impl RefScanPolicy for StrongClosure { +impl RefScanPolicy for Closure { const VISIT_STRONG: bool = true; const VISIT_WEAK: bool = false; const DISCOVER_WEAK: bool = true; From 787689dc55c976a69ed1d8649f4fa74b44aa3be8 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 21 Jan 2026 16:34:29 +0800 Subject: [PATCH 06/10] Use impl in scan_object{,_and_trace_edges} This makes the type parameter implicit and make it unnecessary to write `<_, Closure>`. --- src/plan/concurrent/barrier.rs | 2 +- src/plan/concurrent/concurrent_marking_work.rs | 2 +- src/plan/tracing.rs | 6 +++--- src/policy/compressor/compressorspace.rs | 16 ++++++---------- src/scheduler/gc_work.rs | 4 ++-- src/vm/scanning.rs | 8 ++++---- 6 files changed, 17 insertions(+), 21 deletions(-) diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs index 25bcceeebf..48aae42fa7 100644 --- a/src/plan/concurrent/barrier.rs +++ b/src/plan/concurrent/barrier.rs @@ -166,7 +166,7 @@ impl + PlanTraceObject, const KIND // time to do "reference discovery" because we only discover references of objects // determined to be live. Therefore, we use `StrongOnly` here and only visit children of // strong fields. 
- crate::plan::tracing::SlotIterator::::iterate_fields::<_, StrongOnly>( + crate::plan::tracing::SlotIterator::::iterate_fields::( obj, self.tls.0, |s| { diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index 2355202a0b..e0351b3102 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -75,7 +75,7 @@ impl + PlanTraceObject, const KIND } fn scan_and_enqueue(&mut self, object: ObjectReference) { - crate::plan::tracing::SlotIterator::::iterate_fields::<_, Closure>( + crate::plan::tracing::SlotIterator::::iterate_fields::( object, self.worker().tls.0, |s| { diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index 65597b4daa..0cc4304c41 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -157,10 +157,10 @@ pub(crate) struct SlotIterator { impl SlotIterator { /// Iterate over the slots of an object by applying a function to each slot. - pub fn iterate_fields( + pub fn iterate_fields( object: ObjectReference, _tls: VMThread, - mut f: F, + mut f: impl FnMut(VM::VMSlot), ) { // FIXME: We should use tls from the arguments. 
// See https://github.com/mmtk/mmtk-core/issues/1375 @@ -168,6 +168,6 @@ impl SlotIterator { if !>::support_slot_enqueuing(fake_tls, object) { panic!("SlotIterator::iterate_fields cannot be used on objects that don't support slot-enqueuing"); } - >::scan_object::<_, R>(fake_tls, object, &mut f); + >::scan_object::(fake_tls, object, &mut f); } } diff --git a/src/policy/compressor/compressorspace.rs b/src/policy/compressor/compressorspace.rs index a94bb7b62b..a3c47be0d4 100644 --- a/src/policy/compressor/compressorspace.rs +++ b/src/policy/compressor/compressorspace.rs @@ -342,17 +342,13 @@ impl CompressorSpace { fn update_references(&self, worker: &mut GCWorker, object: ObjectReference) { if VM::VMScanning::support_slot_enqueuing(worker.tls, object) { - VM::VMScanning::scan_object::<_, RefUpdate>( - worker.tls, - object, - &mut |s: VM::VMSlot| { - if let Some(o) = s.load() { - s.store(self.forward(o, false)); - } - }, - ); + VM::VMScanning::scan_object::(worker.tls, object, &mut |s: VM::VMSlot| { + if let Some(o) = s.load() { + s.store(self.forward(o, false)); + } + }); } else { - VM::VMScanning::scan_object_and_trace_edges::<_, RefUpdate>( + VM::VMScanning::scan_object_and_trace_edges::( worker.tls, object, &mut |o| self.forward(o, false), diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index d1baf3cfbe..53dce1c197 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -868,7 +868,7 @@ pub trait ScanObjectsWork: GCWork + Sized { if ::VMScanning::support_slot_enqueuing(tls, object) { trace!("Scan object (slot) {}", object); // If an object supports slot-enqueuing, we enqueue its slots. - ::VMScanning::scan_object::<_, Closure>( + ::VMScanning::scan_object::( tls, object, &mut closure, @@ -901,7 +901,7 @@ pub trait ScanObjectsWork: GCWork + Sized { // Scan objects and trace their outgoing edges at the same time. 
for object in scan_later.iter().copied() { trace!("Scan object (node) {}", object); - ::VMScanning::scan_object_and_trace_edges::<_, Closure>( + ::VMScanning::scan_object_and_trace_edges::( tls, object, object_tracer, diff --git a/src/vm/scanning.rs b/src/vm/scanning.rs index 32d541f880..0aaca0ed38 100644 --- a/src/vm/scanning.rs +++ b/src/vm/scanning.rs @@ -213,10 +213,10 @@ pub trait Scanning { /// * `tls`: The VM-specific thread-local storage for the current worker. /// * `object`: The object to be scanned. /// * `slot_visitor`: Called back for each field. - fn scan_object, R: RefScanPolicy>( + fn scan_object( tls: VMWorkerThread, object: ObjectReference, - slot_visitor: &mut SV, + slot_visitor: &mut impl SlotVisitor, ); /// Delegated scanning of a object, visiting each reference field encountered, and tracing the @@ -238,10 +238,10 @@ pub trait Scanning { /// * `tls`: The VM-specific thread-local storage for the current worker. /// * `object`: The object to be scanned. /// * `object_tracer`: Called back for the object reference held in each field. - fn scan_object_and_trace_edges( + fn scan_object_and_trace_edges( _tls: VMWorkerThread, _object: ObjectReference, - _object_tracer: &mut OT, + _object_tracer: &mut impl ObjectTracer, ) { unreachable!("scan_object_and_trace_edges() will not be called when support_slot_enqueuing() is always true.") } From f561760084caaabb567b2fa8d972c1936bf8f7d5 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Wed, 21 Jan 2026 16:39:06 +0800 Subject: [PATCH 07/10] Qualify `ref_scan_policy` to reduce ambiguity. Otherwise `Closure` may be ambiguous. This also makes it looks like the `EnumName::Variant` style. 
--- src/plan/concurrent/barrier.rs | 4 ++-- src/plan/concurrent/concurrent_marking_work.rs | 4 ++-- src/policy/compressor/compressorspace.rs | 18 +++++++++++------- src/scheduler/gc_work.rs | 12 +++++------- 4 files changed, 20 insertions(+), 18 deletions(-) diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs index 48aae42fa7..f1d63c33e9 100644 --- a/src/plan/concurrent/barrier.rs +++ b/src/plan/concurrent/barrier.rs @@ -3,7 +3,7 @@ use std::sync::atomic::Ordering; use super::{concurrent_marking_work::ProcessModBufSATB, Pause}; use crate::plan::global::PlanTraceObject; use crate::policy::gc_work::TraceKind; -use crate::util::ref_scan_policy::StrongOnly; +use crate::util::ref_scan_policy; use crate::util::VMMutatorThread; use crate::{ plan::{barriers::BarrierSemantics, concurrent::global::ConcurrentPlan, VectorQueue}, @@ -166,7 +166,7 @@ impl + PlanTraceObject, const KIND // time to do "reference discovery" because we only discover references of objects // determined to be live. Therefore, we use `StrongOnly` here and only visit children of // strong fields. 
- crate::plan::tracing::SlotIterator::::iterate_fields::( + crate::plan::tracing::SlotIterator::::iterate_fields::( obj, self.tls.0, |s| { diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index e0351b3102..d27ecaad42 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -4,7 +4,7 @@ use crate::plan::PlanTraceObject; use crate::plan::VectorQueue; use crate::policy::gc_work::TraceKind; use crate::scheduler::gc_work::{ScanObjects, SlotOf}; -use crate::util::ref_scan_policy::Closure; +use crate::util::ref_scan_policy; use crate::util::ObjectReference; use crate::vm::slot::Slot; use crate::{ @@ -75,7 +75,7 @@ impl + PlanTraceObject, const KIND } fn scan_and_enqueue(&mut self, object: ObjectReference) { - crate::plan::tracing::SlotIterator::::iterate_fields::( + crate::plan::tracing::SlotIterator::::iterate_fields::( object, self.worker().tls.0, |s| { diff --git a/src/policy/compressor/compressorspace.rs b/src/policy/compressor/compressorspace.rs index a3c47be0d4..4da716adcb 100644 --- a/src/policy/compressor/compressorspace.rs +++ b/src/policy/compressor/compressorspace.rs @@ -14,7 +14,7 @@ use crate::util::metadata::extract_side_metadata; use crate::util::metadata::vo_bit; use crate::util::metadata::MetadataSpec; use crate::util::object_enum::{self, ObjectEnumerator}; -use crate::util::ref_scan_policy::RefUpdate; +use crate::util::ref_scan_policy; use crate::util::{Address, ObjectReference}; use crate::vm::slot::Slot; use crate::MMTK; @@ -342,13 +342,17 @@ impl CompressorSpace { fn update_references(&self, worker: &mut GCWorker, object: ObjectReference) { if VM::VMScanning::support_slot_enqueuing(worker.tls, object) { - VM::VMScanning::scan_object::(worker.tls, object, &mut |s: VM::VMSlot| { - if let Some(o) = s.load() { - s.store(self.forward(o, false)); - } - }); + VM::VMScanning::scan_object::( + worker.tls, + object, + &mut |s: VM::VMSlot| { + 
if let Some(o) = s.load() { + s.store(self.forward(o, false)); + } + }, + ); } else { - VM::VMScanning::scan_object_and_trace_edges::( + VM::VMScanning::scan_object_and_trace_edges::( worker.tls, object, &mut |o| self.forward(o, false), diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 53dce1c197..3d9f1d8359 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -3,7 +3,7 @@ use super::*; use crate::global_state::GcStatus; use crate::plan::ObjectsClosure; use crate::plan::VectorObjectQueue; -use crate::util::ref_scan_policy::Closure; +use crate::util::ref_scan_policy; use crate::util::*; use crate::vm::slot::Slot; use crate::vm::*; @@ -868,7 +868,7 @@ pub trait ScanObjectsWork: GCWork + Sized { if ::VMScanning::support_slot_enqueuing(tls, object) { trace!("Scan object (slot) {}", object); // If an object supports slot-enqueuing, we enqueue its slots. - ::VMScanning::scan_object::( + ::VMScanning::scan_object::( tls, object, &mut closure, @@ -901,11 +901,9 @@ pub trait ScanObjectsWork: GCWork + Sized { // Scan objects and trace their outgoing edges at the same time. for object in scan_later.iter().copied() { trace!("Scan object (node) {}", object); - ::VMScanning::scan_object_and_trace_edges::( - tls, - object, - object_tracer, - ); + ::VMScanning::scan_object_and_trace_edges::< + ref_scan_policy::Closure, + >(tls, object, object_tracer); self.post_scan_object(object); } }); From 8143a891ac333c865ac5b7bc18ffa860ae7b161e Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 22 Jan 2026 13:01:16 +0800 Subject: [PATCH 08/10] Let scan_object take VMThread parameter We change the `tls` from `VMWorkerThread` to `VMThread` because mutator threads can also scan objects in write barriers. 
--- src/plan/tracing.rs | 11 ++++------- src/policy/compressor/compressorspace.rs | 6 +++--- src/scheduler/gc_work.rs | 6 +++--- src/util/opaque_pointer.rs | 14 ++++++++++++++ src/vm/scanning.rs | 7 ++++--- 5 files changed, 28 insertions(+), 16 deletions(-) diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index 0cc4304c41..e6e100f207 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -5,7 +5,7 @@ use std::marker::PhantomData; use crate::scheduler::gc_work::{ProcessEdgesWork, SlotOf}; use crate::scheduler::{GCWorker, WorkBucketStage, EDGES_WORK_BUFFER_SIZE}; -use crate::util::{ObjectReference, VMThread, VMWorkerThread}; +use crate::util::{ObjectReference, VMThread}; use crate::vm::{RefScanPolicy, Scanning, SlotVisitor, VMBinding}; /// This trait represents an object queue to enqueue objects during tracing. @@ -159,15 +159,12 @@ impl SlotIterator { /// Iterate over the slots of an object by applying a function to each slot. pub fn iterate_fields( object: ObjectReference, - _tls: VMThread, + tls: VMThread, mut f: impl FnMut(VM::VMSlot), ) { - // FIXME: We should use tls from the arguments. 
- // See https://github.com/mmtk/mmtk-core/issues/1375 - let fake_tls = VMWorkerThread(VMThread::UNINITIALIZED); - if !>::support_slot_enqueuing(fake_tls, object) { + if !>::support_slot_enqueuing(tls, object) { panic!("SlotIterator::iterate_fields cannot be used on objects that don't support slot-enqueuing"); } - >::scan_object::(fake_tls, object, &mut f); + >::scan_object::(tls, object, &mut f); } } diff --git a/src/policy/compressor/compressorspace.rs b/src/policy/compressor/compressorspace.rs index 4da716adcb..f8780e9507 100644 --- a/src/policy/compressor/compressorspace.rs +++ b/src/policy/compressor/compressorspace.rs @@ -341,9 +341,9 @@ impl CompressorSpace { } fn update_references(&self, worker: &mut GCWorker, object: ObjectReference) { - if VM::VMScanning::support_slot_enqueuing(worker.tls, object) { + if VM::VMScanning::support_slot_enqueuing(worker.tls.into(), object) { VM::VMScanning::scan_object::( - worker.tls, + worker.tls.into(), object, &mut |s: VM::VMSlot| { if let Some(o) = s.load() { @@ -353,7 +353,7 @@ impl CompressorSpace { ); } else { VM::VMScanning::scan_object_and_trace_edges::( - worker.tls, + worker.tls.into(), object, &mut |o| self.forward(o, false), ); diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index 3d9f1d8359..8fa430c90d 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -865,11 +865,11 @@ pub trait ScanObjectsWork: GCWork + Sized { } for object in objects_to_scan.iter().copied() { - if ::VMScanning::support_slot_enqueuing(tls, object) { + if ::VMScanning::support_slot_enqueuing(tls.into(), object) { trace!("Scan object (slot) {}", object); // If an object supports slot-enqueuing, we enqueue its slots. 
::VMScanning::scan_object::( - tls, + tls.into(), object, &mut closure, ); @@ -903,7 +903,7 @@ pub trait ScanObjectsWork: GCWork + Sized { trace!("Scan object (node) {}", object); ::VMScanning::scan_object_and_trace_edges::< ref_scan_policy::Closure, - >(tls, object, object_tracer); + >(tls.into(), object, object_tracer); self.post_scan_object(object); } }); diff --git a/src/util/opaque_pointer.rs b/src/util/opaque_pointer.rs index 1a16b98c04..368244a83c 100644 --- a/src/util/opaque_pointer.rs +++ b/src/util/opaque_pointer.rs @@ -59,9 +59,23 @@ impl VMThread { #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct VMMutatorThread(pub VMThread); +/// Allow unchecked explicit conversion from [VMMutatorThread] to [VMThread] +impl From for VMThread { + fn from(value: VMMutatorThread) -> Self { + value.0 + } +} + /// A VMWorkerThread is a VMThread that is associates with a [`crate::scheduler::GCWorker`]. /// When a VMWorkerThread is used as an argument or a field of a type, it generally means /// the function or the functions for the type is executed in the context of the mutator thread. #[repr(transparent)] #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct VMWorkerThread(pub VMThread); + +/// Allow unchecked explicit conversion from [VMWorkerThread] to [VMThread] +impl From for VMThread { + fn from(value: VMWorkerThread) -> Self { + value.0 + } +} diff --git a/src/vm/scanning.rs b/src/vm/scanning.rs index 0aaca0ed38..3c3839dbdf 100644 --- a/src/vm/scanning.rs +++ b/src/vm/scanning.rs @@ -1,6 +1,7 @@ use crate::plan::Mutator; use crate::scheduler::GCWorker; use crate::util::ObjectReference; +use crate::util::VMThread; use crate::util::VMWorkerThread; use crate::vm::slot::Slot; use crate::vm::VMBinding; @@ -191,7 +192,7 @@ pub trait Scanning { /// Arguments: /// * `tls`: The VM-specific thread-local storage for the current worker. /// * `object`: The object to be scanned. 
- fn support_slot_enqueuing(_tls: VMWorkerThread, _object: ObjectReference) -> bool { + fn support_slot_enqueuing(_tls: VMThread, _object: ObjectReference) -> bool { true } @@ -214,7 +215,7 @@ pub trait Scanning { /// * `object`: The object to be scanned. /// * `slot_visitor`: Called back for each field. fn scan_object( - tls: VMWorkerThread, + tls: VMThread, object: ObjectReference, slot_visitor: &mut impl SlotVisitor, ); @@ -239,7 +240,7 @@ pub trait Scanning { /// * `object`: The object to be scanned. /// * `object_tracer`: Called back for the object reference held in each field. fn scan_object_and_trace_edges( - _tls: VMWorkerThread, + _tls: VMThread, _object: ObjectReference, _object_tracer: &mut impl ObjectTracer, ) { From 84e9987b495e41d82525b10619fdf0e969b76dd6 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 22 Jan 2026 16:55:07 +0800 Subject: [PATCH 09/10] Fix mock tests --- docs/dummyvm/src/scanning.rs | 7 +++-- src/util/test_util/mock_vm.rs | 29 ++++++++----------- .../mock_test_doc_weakref_code_example.rs | 8 ++--- 3 files changed, 20 insertions(+), 24 deletions(-) diff --git a/docs/dummyvm/src/scanning.rs b/docs/dummyvm/src/scanning.rs index 0465acbabc..3c65e8e74b 100644 --- a/docs/dummyvm/src/scanning.rs +++ b/docs/dummyvm/src/scanning.rs @@ -2,6 +2,7 @@ use crate::DummyVM; use crate::DummyVMSlot; use mmtk::util::opaque_pointer::*; use mmtk::util::ObjectReference; +use mmtk::vm::RefScanPolicy; use mmtk::vm::RootsWorkFactory; use mmtk::vm::Scanning; use mmtk::vm::SlotVisitor; @@ -21,10 +22,10 @@ impl Scanning for VMScanning { fn scan_vm_specific_roots(_tls: VMWorkerThread, _factory: impl RootsWorkFactory) { unimplemented!() } - fn scan_object>( - _tls: VMWorkerThread, + fn scan_object( + _tls: VMThread, _object: ObjectReference, - _slot_visitor: &mut SV, + _slot_visitor: &mut impl SlotVisitor, ) { unimplemented!() } diff --git a/src/util/test_util/mock_vm.rs b/src/util/test_util/mock_vm.rs index 7dc97b1448..1660502d69 100644 --- 
a/src/util/test_util/mock_vm.rs +++ b/src/util/test_util/mock_vm.rs @@ -15,6 +15,7 @@ use crate::vm::object_model::specs::*; use crate::vm::GCThreadContext; use crate::vm::ObjectTracer; use crate::vm::ObjectTracerContext; +use crate::vm::RefScanPolicy; use crate::vm::RootsWorkFactory; use crate::vm::SlotVisitor; use crate::vm::VMBinding; @@ -243,23 +244,17 @@ pub struct MockVM { pub weakref_get_referent: MockMethod>, pub weakref_enqueue_references: MockMethod<(&'static [ObjectReference], VMWorkerThread), ()>, // scanning - pub support_slot_enqueuing: MockMethod<(VMWorkerThread, ObjectReference), bool>, + pub support_slot_enqueuing: MockMethod<(VMThread, ObjectReference), bool>, pub scan_object: MockMethod< ( - VMWorkerThread, + VMThread, ObjectReference, &'static mut dyn SlotVisitor<::VMSlot>, ), (), >, - pub scan_object_and_trace_edges: MockMethod< - ( - VMWorkerThread, - ObjectReference, - &'static mut dyn ObjectTracer, - ), - (), - >, + pub scan_object_and_trace_edges: + MockMethod<(VMThread, ObjectReference, &'static mut dyn ObjectTracer), ()>, pub scan_roots_in_mutator_thread: Box, pub scan_vm_specific_roots: Box, pub notify_initial_thread_scan_complete: MockMethod<(bool, VMWorkerThread), ()>, @@ -552,13 +547,13 @@ impl crate::vm::ReferenceGlue for MockVM { } impl crate::vm::Scanning for MockVM { - fn support_slot_enqueuing(tls: VMWorkerThread, object: ObjectReference) -> bool { + fn support_slot_enqueuing(tls: VMThread, object: ObjectReference) -> bool { mock!(support_slot_enqueuing(tls, object)) } - fn scan_object::VMSlot>>( - tls: VMWorkerThread, + fn scan_object( + tls: VMThread, object: ObjectReference, - slot_visitor: &mut SV, + slot_visitor: &mut impl SlotVisitor<::VMSlot>, ) { mock!(scan_object( tls, @@ -566,10 +561,10 @@ impl crate::vm::Scanning for MockVM { lifetime!(slot_visitor as &mut dyn SlotVisitor<::VMSlot>) )) } - fn scan_object_and_trace_edges( - tls: VMWorkerThread, + fn scan_object_and_trace_edges( + tls: VMThread, object: ObjectReference, 
- object_tracer: &mut OT, + object_tracer: &mut impl ObjectTracer, ) { mock!(scan_object_and_trace_edges( tls, diff --git a/src/vm/tests/mock_tests/mock_test_doc_weakref_code_example.rs b/src/vm/tests/mock_tests/mock_test_doc_weakref_code_example.rs index 218c461b72..86045c48ae 100644 --- a/src/vm/tests/mock_tests/mock_test_doc_weakref_code_example.rs +++ b/src/vm/tests/mock_tests/mock_test_doc_weakref_code_example.rs @@ -5,7 +5,7 @@ use crate::{ scheduler::GCWorker, util::ObjectReference, - vm::{ObjectTracer, ObjectTracerContext, Scanning, VMBinding}, + vm::{ObjectTracer, ObjectTracerContext, RefScanPolicy, Scanning, VMBinding}, }; use super::mock_test_prelude::MockVM; @@ -65,10 +65,10 @@ impl Scanning for VMScanning { // Methods after this are placeholders. We only ensure they compile. - fn scan_object::VMSlot>>( - _tls: crate::util::VMWorkerThread, + fn scan_object( + _tls: crate::util::VMThread, _object: ObjectReference, - _slot_visitor: &mut SV, + _slot_visitor: &mut impl crate::vm::SlotVisitor<::VMSlot>, ) { unimplemented!() } From 292725af734fe9fb03601eb2ba57bc94b9c94391 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Thu, 22 Jan 2026 17:26:43 +0800 Subject: [PATCH 10/10] Fix doc comment link --- src/util/ref_scan_policy.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util/ref_scan_policy.rs b/src/util/ref_scan_policy.rs index d3d3eea852..d4b34a33d5 100644 --- a/src/util/ref_scan_policy.rs +++ b/src/util/ref_scan_policy.rs @@ -47,7 +47,7 @@ impl RefScanPolicy for All { /// Instruct the VM binding to visit all strong fields, without any hints about the MMTk's /// intention to call the object-scanning function. Particularly, the VM binding should not -/// discover weak references as [`StrongClosure`] implies. +/// discover weak references which the [`Closure`] policy implies. pub struct StrongOnly; impl RefScanPolicy for StrongOnly {