diff --git a/docs/dummyvm/src/scanning.rs b/docs/dummyvm/src/scanning.rs index 0465acbabc..3c65e8e74b 100644 --- a/docs/dummyvm/src/scanning.rs +++ b/docs/dummyvm/src/scanning.rs @@ -2,6 +2,7 @@ use crate::DummyVM; use crate::DummyVMSlot; use mmtk::util::opaque_pointer::*; use mmtk::util::ObjectReference; +use mmtk::vm::RefScanPolicy; use mmtk::vm::RootsWorkFactory; use mmtk::vm::Scanning; use mmtk::vm::SlotVisitor; @@ -21,10 +22,10 @@ impl Scanning for VMScanning { fn scan_vm_specific_roots(_tls: VMWorkerThread, _factory: impl RootsWorkFactory) { unimplemented!() } - fn scan_object>( - _tls: VMWorkerThread, + fn scan_object( + _tls: VMThread, _object: ObjectReference, - _slot_visitor: &mut SV, + _slot_visitor: &mut impl SlotVisitor, ) { unimplemented!() } diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs index 0bd8995564..f1d63c33e9 100644 --- a/src/plan/concurrent/barrier.rs +++ b/src/plan/concurrent/barrier.rs @@ -3,6 +3,7 @@ use std::sync::atomic::Ordering; use super::{concurrent_marking_work::ProcessModBufSATB, Pause}; use crate::plan::global::PlanTraceObject; use crate::policy::gc_work::TraceKind; +use crate::util::ref_scan_policy; use crate::util::VMMutatorThread; use crate::{ plan::{barriers::BarrierSemantics, concurrent::global::ConcurrentPlan, VectorQueue}, @@ -156,8 +157,21 @@ impl + PlanTraceObject, const KIND } fn object_probable_write_slow(&mut self, obj: ObjectReference) { - crate::plan::tracing::SlotIterator::::iterate_fields(obj, self.tls.0, |s| { - self.enqueue_node(Some(obj), s, None); - }); + // Note: The SATB barrier ensures all *strongly reachable* objects from roots at the + // beginning of a trace (i.e. the SATB) will eventually be marked. To do this, the SATB + // barrier enqueues the current children of *strong fields*, but it doesn't mark the current + // object or its children. 
Instead, the marking and scanning will happen in the + // `ConcurrentTraceObjects` work packet which is executed either during the concurrent + // tracing, or during `FinalMark`. For this reason, the barrier itself is not the right + // time to do "reference discovery" because we only discover references of objects + // determined to be live. Therefore, we use `StrongOnly` here and only visit children of + // strong fields. + crate::plan::tracing::SlotIterator::::iterate_fields::( + obj, + self.tls.0, + |s| { + self.enqueue_node(Some(obj), s, None); + }, + ); } } diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs index fca994a7bc..d27ecaad42 100644 --- a/src/plan/concurrent/concurrent_marking_work.rs +++ b/src/plan/concurrent/concurrent_marking_work.rs @@ -4,6 +4,7 @@ use crate::plan::PlanTraceObject; use crate::plan::VectorQueue; use crate::policy::gc_work::TraceKind; use crate::scheduler::gc_work::{ScanObjects, SlotOf}; +use crate::util::ref_scan_policy; use crate::util::ObjectReference; use crate::vm::slot::Slot; use crate::{ @@ -74,7 +75,7 @@ impl + PlanTraceObject, const KIND } fn scan_and_enqueue(&mut self, object: ObjectReference) { - crate::plan::tracing::SlotIterator::::iterate_fields( + crate::plan::tracing::SlotIterator::::iterate_fields::( object, self.worker().tls.0, |s| { diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs index 792e142c76..e6e100f207 100644 --- a/src/plan/tracing.rs +++ b/src/plan/tracing.rs @@ -5,8 +5,8 @@ use std::marker::PhantomData; use crate::scheduler::gc_work::{ProcessEdgesWork, SlotOf}; use crate::scheduler::{GCWorker, WorkBucketStage, EDGES_WORK_BUFFER_SIZE}; -use crate::util::{ObjectReference, VMThread, VMWorkerThread}; -use crate::vm::{Scanning, SlotVisitor, VMBinding}; +use crate::util::{ObjectReference, VMThread}; +use crate::vm::{RefScanPolicy, Scanning, SlotVisitor, VMBinding}; /// This trait represents an object queue to enqueue objects during tracing. 
pub trait ObjectQueue { @@ -157,13 +157,14 @@ pub(crate) struct SlotIterator { impl SlotIterator { /// Iterate over the slots of an object by applying a function to each slot. - pub fn iterate_fields(object: ObjectReference, _tls: VMThread, mut f: F) { - // FIXME: We should use tls from the arguments. - // See https://github.com/mmtk/mmtk-core/issues/1375 - let fake_tls = VMWorkerThread(VMThread::UNINITIALIZED); - if !>::support_slot_enqueuing(fake_tls, object) { + pub fn iterate_fields( + object: ObjectReference, + tls: VMThread, + mut f: impl FnMut(VM::VMSlot), + ) { + if !>::support_slot_enqueuing(tls, object) { panic!("SlotIterator::iterate_fields cannot be used on objects that don't support slot-enqueuing"); } - >::scan_object(fake_tls, object, &mut f); + >::scan_object::(tls, object, &mut f); } } diff --git a/src/policy/compressor/compressorspace.rs b/src/policy/compressor/compressorspace.rs index bb22ecbeec..f8780e9507 100644 --- a/src/policy/compressor/compressorspace.rs +++ b/src/policy/compressor/compressorspace.rs @@ -14,6 +14,7 @@ use crate::util::metadata::extract_side_metadata; use crate::util::metadata::vo_bit; use crate::util::metadata::MetadataSpec; use crate::util::object_enum::{self, ObjectEnumerator}; +use crate::util::ref_scan_policy; use crate::util::{Address, ObjectReference}; use crate::vm::slot::Slot; use crate::MMTK; @@ -340,16 +341,22 @@ impl CompressorSpace { } fn update_references(&self, worker: &mut GCWorker, object: ObjectReference) { - if VM::VMScanning::support_slot_enqueuing(worker.tls, object) { - VM::VMScanning::scan_object(worker.tls, object, &mut |s: VM::VMSlot| { - if let Some(o) = s.load() { - s.store(self.forward(o, false)); - } - }); + if VM::VMScanning::support_slot_enqueuing(worker.tls.into(), object) { + VM::VMScanning::scan_object::( + worker.tls.into(), + object, + &mut |s: VM::VMSlot| { + if let Some(o) = s.load() { + s.store(self.forward(o, false)); + } + }, + ); } else { - 
VM::VMScanning::scan_object_and_trace_edges(worker.tls, object, &mut |o| { - self.forward(o, false) - }); + VM::VMScanning::scan_object_and_trace_edges::( + worker.tls.into(), + object, + &mut |o| self.forward(o, false), + ); } } diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs index d9f3f1657f..8fa430c90d 100644 --- a/src/scheduler/gc_work.rs +++ b/src/scheduler/gc_work.rs @@ -3,6 +3,7 @@ use super::*; use crate::global_state::GcStatus; use crate::plan::ObjectsClosure; use crate::plan::VectorObjectQueue; +use crate::util::ref_scan_policy; use crate::util::*; use crate::vm::slot::Slot; use crate::vm::*; @@ -864,10 +865,14 @@ pub trait ScanObjectsWork: GCWork + Sized { } for object in objects_to_scan.iter().copied() { - if ::VMScanning::support_slot_enqueuing(tls, object) { + if ::VMScanning::support_slot_enqueuing(tls.into(), object) { trace!("Scan object (slot) {}", object); // If an object supports slot-enqueuing, we enqueue its slots. - ::VMScanning::scan_object(tls, object, &mut closure); + ::VMScanning::scan_object::( + tls.into(), + object, + &mut closure, + ); self.post_scan_object(object); } else { // If an object does not support slot-enqueuing, we have to use @@ -896,11 +901,9 @@ pub trait ScanObjectsWork: GCWork + Sized { // Scan objects and trace their outgoing edges at the same time. for object in scan_later.iter().copied() { trace!("Scan object (node) {}", object); - ::VMScanning::scan_object_and_trace_edges( - tls, - object, - object_tracer, - ); + ::VMScanning::scan_object_and_trace_edges::< + ref_scan_policy::Closure, + >(tls.into(), object, object_tracer); self.post_scan_object(object); } }); diff --git a/src/util/mod.rs b/src/util/mod.rs index d22c29a2e3..6a76b8d7a3 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -56,6 +56,8 @@ pub(crate) mod logger; pub(crate) mod object_enum; /// Forwarding word in object copying. pub(crate) mod object_forwarding; +/// RefScanPolicy implementations. 
+pub(crate) mod ref_scan_policy; /// Reference processing implementation. pub(crate) mod reference_processor; /// Utilities funcitons for Rust diff --git a/src/util/opaque_pointer.rs b/src/util/opaque_pointer.rs index 1a16b98c04..368244a83c 100644 --- a/src/util/opaque_pointer.rs +++ b/src/util/opaque_pointer.rs @@ -59,9 +59,23 @@ impl VMThread { #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct VMMutatorThread(pub VMThread); +/// Allow unchecked explicit conversion from [VMMutatorThread] to [VMThread] +impl From for VMThread { + fn from(value: VMMutatorThread) -> Self { + value.0 + } +} + /// A VMWorkerThread is a VMThread that is associates with a [`crate::scheduler::GCWorker`]. /// When a VMWorkerThread is used as an argument or a field of a type, it generally means /// the function or the functions for the type is executed in the context of the mutator thread. #[repr(transparent)] #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct VMWorkerThread(pub VMThread); + +/// Allow unchecked explicit conversion from [VMWorkerThread] to [VMThread] +impl From for VMThread { + fn from(value: VMWorkerThread) -> Self { + value.0 + } +} diff --git a/src/util/ref_scan_policy.rs b/src/util/ref_scan_policy.rs new file mode 100644 index 0000000000..d4b34a33d5 --- /dev/null +++ b/src/util/ref_scan_policy.rs @@ -0,0 +1,67 @@ +//! This module holds common reference scanning policies used in MMTk core. + +use crate::vm::RefScanPolicy; + +#[allow(unused)] // For doc comments. +use crate::vm::{ObjectTracer, SlotVisitor}; + +/// An object is scanned during the strong transitive closure stage. The VM binding should visit +/// fields that contain strong references using the [`SlotVisitor`] or [`ObjectTracer`] callbacks. +/// +/// The VM binding should not visit weak reference fields using the [`SlotVisitor`] or +/// [`ObjectTracer`] callbacks. If a VM binding chooses to discover weak references during tracing, +/// it should record relevant information (e.g. 
the current object, its fields, etc.) in VM-specific
+/// data structures, as described in the [Porting Guide][pg-weakref]. If the VM binding chooses not
+/// to discover weak reference fields this way, it can ignore weak fields.
+///
+/// [pg-weakref]:
+/// https://docs.mmtk.io/portingguide/concerns/weakref.html#identifying-weak-references
+pub struct Closure;
+
+impl RefScanPolicy for Closure {
+    const VISIT_STRONG: bool = true;
+    const VISIT_WEAK: bool = false;
+    const DISCOVER_WEAK: bool = true;
+}
+
+/// An object is scanned to update its references after objects are moved or after the new
+/// addresses of objects have been calculated. The VM binding should visit all reference fields
+/// of an object, regardless of whether they are holding strong or weak references.
+pub struct RefUpdate;
+
+impl RefScanPolicy for RefUpdate {
+    const VISIT_STRONG: bool = true;
+    const VISIT_WEAK: bool = true;
+    const DISCOVER_WEAK: bool = false;
+}
+
+/// Instruct the VM binding to visit all fields of an object, both strong and weak, without any
+/// hints about the MMTk's intention to call the object-scanning function.
+pub struct All;
+
+impl RefScanPolicy for All {
+    const VISIT_STRONG: bool = true;
+    const VISIT_WEAK: bool = true;
+    const DISCOVER_WEAK: bool = false;
+}
+
+/// Instruct the VM binding to visit all strong fields, without any hints about the MMTk's
+/// intention to call the object-scanning function. Particularly, the VM binding should not
+/// discover weak references which the [`Closure`] policy implies.
+pub struct StrongOnly;
+
+impl RefScanPolicy for StrongOnly {
+    const VISIT_STRONG: bool = true;
+    const VISIT_WEAK: bool = false;
+    const DISCOVER_WEAK: bool = false;
+}
+
+/// Instruct the VM binding to visit all weak fields, without any hints about the MMTk's
+/// intention to call the object-scanning function.
+pub struct WeakOnly; + +impl RefScanPolicy for WeakOnly { + const VISIT_STRONG: bool = false; + const VISIT_WEAK: bool = true; + const DISCOVER_WEAK: bool = false; +} diff --git a/src/util/test_util/mock_vm.rs b/src/util/test_util/mock_vm.rs index 7dc97b1448..1660502d69 100644 --- a/src/util/test_util/mock_vm.rs +++ b/src/util/test_util/mock_vm.rs @@ -15,6 +15,7 @@ use crate::vm::object_model::specs::*; use crate::vm::GCThreadContext; use crate::vm::ObjectTracer; use crate::vm::ObjectTracerContext; +use crate::vm::RefScanPolicy; use crate::vm::RootsWorkFactory; use crate::vm::SlotVisitor; use crate::vm::VMBinding; @@ -243,23 +244,17 @@ pub struct MockVM { pub weakref_get_referent: MockMethod>, pub weakref_enqueue_references: MockMethod<(&'static [ObjectReference], VMWorkerThread), ()>, // scanning - pub support_slot_enqueuing: MockMethod<(VMWorkerThread, ObjectReference), bool>, + pub support_slot_enqueuing: MockMethod<(VMThread, ObjectReference), bool>, pub scan_object: MockMethod< ( - VMWorkerThread, + VMThread, ObjectReference, &'static mut dyn SlotVisitor<::VMSlot>, ), (), >, - pub scan_object_and_trace_edges: MockMethod< - ( - VMWorkerThread, - ObjectReference, - &'static mut dyn ObjectTracer, - ), - (), - >, + pub scan_object_and_trace_edges: + MockMethod<(VMThread, ObjectReference, &'static mut dyn ObjectTracer), ()>, pub scan_roots_in_mutator_thread: Box, pub scan_vm_specific_roots: Box, pub notify_initial_thread_scan_complete: MockMethod<(bool, VMWorkerThread), ()>, @@ -552,13 +547,13 @@ impl crate::vm::ReferenceGlue for MockVM { } impl crate::vm::Scanning for MockVM { - fn support_slot_enqueuing(tls: VMWorkerThread, object: ObjectReference) -> bool { + fn support_slot_enqueuing(tls: VMThread, object: ObjectReference) -> bool { mock!(support_slot_enqueuing(tls, object)) } - fn scan_object::VMSlot>>( - tls: VMWorkerThread, + fn scan_object( + tls: VMThread, object: ObjectReference, - slot_visitor: &mut SV, + slot_visitor: &mut impl SlotVisitor<::VMSlot>, ) 
{ mock!(scan_object( tls, @@ -566,10 +561,10 @@ impl crate::vm::Scanning for MockVM { lifetime!(slot_visitor as &mut dyn SlotVisitor<::VMSlot>) )) } - fn scan_object_and_trace_edges( - tls: VMWorkerThread, + fn scan_object_and_trace_edges( + tls: VMThread, object: ObjectReference, - object_tracer: &mut OT, + object_tracer: &mut impl ObjectTracer, ) { mock!(scan_object_and_trace_edges( tls, diff --git a/src/vm/mod.rs b/src/vm/mod.rs index 2ff244f6e4..887b42cc2a 100644 --- a/src/vm/mod.rs +++ b/src/vm/mod.rs @@ -30,6 +30,7 @@ pub use self::reference_glue::Finalizable; pub use self::reference_glue::ReferenceGlue; pub use self::scanning::ObjectTracer; pub use self::scanning::ObjectTracerContext; +pub use self::scanning::RefScanPolicy; pub use self::scanning::RootsWorkFactory; pub use self::scanning::Scanning; pub use self::scanning::SlotVisitor; diff --git a/src/vm/scanning.rs b/src/vm/scanning.rs index 3a87fc4260..3c3839dbdf 100644 --- a/src/vm/scanning.rs +++ b/src/vm/scanning.rs @@ -1,6 +1,7 @@ use crate::plan::Mutator; use crate::scheduler::GCWorker; use crate::util::ObjectReference; +use crate::util::VMThread; use crate::util::VMWorkerThread; use crate::vm::slot::Slot; use crate::vm::VMBinding; @@ -43,6 +44,29 @@ impl ObjectReference> ObjectTracer for F { } } +/// This type specifies how object-scanning functions ([`Scanning::scan_object`] and +/// [`Scanning::scan_object_and_trace_edges`]) should handle strong and weak reference fields. +/// +/// Note that it is the VM and the VM binding that ultimately decides *which* reference is strong +/// and *which* reference is weak. Particularly, the VM binding is allowed to conservatively report +/// weak references as strong. For example, +/// +/// - A VM binding can report all weak references as strong during nursery collections or +/// concurrent collections to avoid expensive weak reference processing. +/// - The VM binding of a JVM (e.g. 
mmtk-openjdk) can report the weak reference field in +/// `SoftReference` as strong during non-emergency GCs, and weak during emergency GCs. +pub trait RefScanPolicy { + /// True if the reference scanning function should visit strong reference fields in the object using + /// callbacks. + const VISIT_STRONG: bool; + /// True if the reference scanning function should visit weak reference fields in the object using + /// callbacks. + const VISIT_WEAK: bool; + /// True if the reference scanning function should discover weak reference fields in VM-specific + /// ways. + const DISCOVER_WEAK: bool; +} + /// An `ObjectTracerContext` gives a GC worker temporary access to an `ObjectTracer`, allowing /// the GC worker to trace objects. This trait is intended to abstract out the implementation /// details of tracing objects, enqueuing objects, and creating work packets that expand the @@ -168,7 +192,7 @@ pub trait Scanning { /// Arguments: /// * `tls`: The VM-specific thread-local storage for the current worker. /// * `object`: The object to be scanned. - fn support_slot_enqueuing(_tls: VMWorkerThread, _object: ObjectReference) -> bool { + fn support_slot_enqueuing(_tls: VMThread, _object: ObjectReference) -> bool { true } @@ -190,10 +214,10 @@ pub trait Scanning { /// * `tls`: The VM-specific thread-local storage for the current worker. /// * `object`: The object to be scanned. /// * `slot_visitor`: Called back for each field. - fn scan_object>( - tls: VMWorkerThread, + fn scan_object( + tls: VMThread, object: ObjectReference, - slot_visitor: &mut SV, + slot_visitor: &mut impl SlotVisitor, ); /// Delegated scanning of a object, visiting each reference field encountered, and tracing the @@ -215,10 +239,10 @@ pub trait Scanning { /// * `tls`: The VM-specific thread-local storage for the current worker. /// * `object`: The object to be scanned. /// * `object_tracer`: Called back for the object reference held in each field. 
- fn scan_object_and_trace_edges( - _tls: VMWorkerThread, + fn scan_object_and_trace_edges( + _tls: VMThread, _object: ObjectReference, - _object_tracer: &mut OT, + _object_tracer: &mut impl ObjectTracer, ) { unreachable!("scan_object_and_trace_edges() will not be called when support_slot_enqueuing() is always true.") } diff --git a/src/vm/tests/mock_tests/mock_test_doc_weakref_code_example.rs b/src/vm/tests/mock_tests/mock_test_doc_weakref_code_example.rs index 218c461b72..86045c48ae 100644 --- a/src/vm/tests/mock_tests/mock_test_doc_weakref_code_example.rs +++ b/src/vm/tests/mock_tests/mock_test_doc_weakref_code_example.rs @@ -5,7 +5,7 @@ use crate::{ scheduler::GCWorker, util::ObjectReference, - vm::{ObjectTracer, ObjectTracerContext, Scanning, VMBinding}, + vm::{ObjectTracer, ObjectTracerContext, RefScanPolicy, Scanning, VMBinding}, }; use super::mock_test_prelude::MockVM; @@ -65,10 +65,10 @@ impl Scanning for VMScanning { // Methods after this are placeholders. We only ensure they compile. - fn scan_object::VMSlot>>( - _tls: crate::util::VMWorkerThread, + fn scan_object( + _tls: crate::util::VMThread, _object: ObjectReference, - _slot_visitor: &mut SV, + _slot_visitor: &mut impl crate::vm::SlotVisitor<::VMSlot>, ) { unimplemented!() }