diff --git a/src/plan/concurrent/barrier.rs b/src/plan/concurrent/barrier.rs
index 0bd8995564..ca7709a57a 100644
--- a/src/plan/concurrent/barrier.rs
+++ b/src/plan/concurrent/barrier.rs
@@ -3,6 +3,7 @@ use std::sync::atomic::Ordering;
 use super::{concurrent_marking_work::ProcessModBufSATB, Pause};
 use crate::plan::global::PlanTraceObject;
 use crate::policy::gc_work::TraceKind;
+use crate::util::ref_scan_policy::StrongOnly;
 use crate::util::VMMutatorThread;
 use crate::{
     plan::{barriers::BarrierSemantics, concurrent::global::ConcurrentPlan, VectorQueue},
@@ -156,8 +157,17 @@ impl<VM: VMBinding, P: ConcurrentPlan<VM = VM> + PlanTraceObject<VM>, const KIND
     }
 
     fn object_probable_write_slow(&mut self, obj: ObjectReference) {
-        crate::plan::tracing::SlotIterator::<VM>::iterate_fields(obj, self.tls.0, |s| {
-            self.enqueue_node(Some(obj), s, None);
-        });
+        // Note: the purpose of the SATB barrier is to ensure that all *strongly reachable*
+        // objects at the beginning of the trace will eventually be marked and scanned.
+        // Therefore, we use `StrongOnly` here to enqueue the children of strong fields. The
+        // current `obj` will eventually be scanned by the `ConcurrentTraceObjects` work packet
+        // using the `StrongClosure` policy, either during concurrent tracing or during `FinalMark`.
+        crate::plan::tracing::SlotIterator::<VM>::iterate_fields::<_, StrongOnly>(
+            obj,
+            self.tls.0,
+            |s| {
+                self.enqueue_node(Some(obj), s, None);
+            },
+        );
     }
 }
diff --git a/src/plan/concurrent/concurrent_marking_work.rs b/src/plan/concurrent/concurrent_marking_work.rs
index fca994a7bc..76a1743e7e 100644
--- a/src/plan/concurrent/concurrent_marking_work.rs
+++ b/src/plan/concurrent/concurrent_marking_work.rs
@@ -4,6 +4,7 @@ use crate::plan::PlanTraceObject;
 use crate::plan::VectorQueue;
 use crate::policy::gc_work::TraceKind;
 use crate::scheduler::gc_work::{ScanObjects, SlotOf};
+use crate::util::ref_scan_policy::StrongClosure;
 use crate::util::ObjectReference;
 use crate::vm::slot::Slot;
 use crate::{
@@ -74,7 +75,7 @@ impl<VM: VMBinding, P: Plan<VM = VM> + PlanTraceObject<VM>, const KIND
     }
 
     fn scan_and_enqueue(&mut self, object: ObjectReference) {
-        crate::plan::tracing::SlotIterator::<VM>::iterate_fields(
+        crate::plan::tracing::SlotIterator::<VM>::iterate_fields::<_, StrongClosure>(
             object,
             self.worker().tls.0,
             |s| {
diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs
index 792e142c76..65597b4daa 100644
--- a/src/plan/tracing.rs
+++ b/src/plan/tracing.rs
@@ -6,7 +6,7 @@ use std::marker::PhantomData;
 use crate::scheduler::gc_work::{ProcessEdgesWork, SlotOf};
 use crate::scheduler::{GCWorker, WorkBucketStage, EDGES_WORK_BUFFER_SIZE};
 use crate::util::{ObjectReference, VMThread, VMWorkerThread};
-use crate::vm::{Scanning, SlotVisitor, VMBinding};
+use crate::vm::{RefScanPolicy, Scanning, SlotVisitor, VMBinding};
 
 /// This trait represents an object queue to enqueue objects during tracing.
 pub trait ObjectQueue {
@@ -157,13 +157,17 @@ pub(crate) struct SlotIterator<VM: VMBinding> {
 
 impl<VM: VMBinding> SlotIterator<VM> {
     /// Iterate over the slots of an object by applying a function to each slot.
-    pub fn iterate_fields<F: FnMut(VM::VMSlot)>(object: ObjectReference, _tls: VMThread, mut f: F) {
+    pub fn iterate_fields<F: FnMut(VM::VMSlot), R: RefScanPolicy>(
+        object: ObjectReference,
+        _tls: VMThread,
+        mut f: F,
+    ) {
         // FIXME: We should use tls from the arguments.
         // See https://github.com/mmtk/mmtk-core/issues/1375
         let fake_tls = VMWorkerThread(VMThread::UNINITIALIZED);
         if !<VM::VMScanning as Scanning<VM>>::support_slot_enqueuing(fake_tls, object) {
             panic!("SlotIterator::iterate_fields cannot be used on objects that don't support slot-enqueuing");
         }
-        <VM::VMScanning as Scanning<VM>>::scan_object(fake_tls, object, &mut f);
+        <VM::VMScanning as Scanning<VM>>::scan_object::<_, R>(fake_tls, object, &mut f);
     }
 }
diff --git a/src/policy/compressor/compressorspace.rs b/src/policy/compressor/compressorspace.rs
index adf5746b6e..0bf293d330 100644
--- a/src/policy/compressor/compressorspace.rs
+++ b/src/policy/compressor/compressorspace.rs
@@ -14,6 +14,7 @@ use crate::util::metadata::extract_side_metadata;
 use crate::util::metadata::vo_bit;
 use crate::util::metadata::MetadataSpec;
 use crate::util::object_enum::{self, ObjectEnumerator};
+use crate::util::ref_scan_policy::RefUpdate;
 use crate::util::{Address, ObjectReference};
 use crate::vm::slot::Slot;
 use crate::MMTK;
@@ -336,15 +337,21 @@ impl<VM: VMBinding> CompressorSpace<VM> {
 
     fn update_references(&self, worker: &mut GCWorker<VM>, object: ObjectReference) {
         if VM::VMScanning::support_slot_enqueuing(worker.tls, object) {
-            VM::VMScanning::scan_object(worker.tls, object, &mut |s: VM::VMSlot| {
-                if let Some(o) = s.load() {
-                    s.store(self.forward(o, false));
-                }
-            });
+            VM::VMScanning::scan_object::<_, RefUpdate>(
+                worker.tls,
+                object,
+                &mut |s: VM::VMSlot| {
+                    if let Some(o) = s.load() {
+                        s.store(self.forward(o, false));
+                    }
+                },
+            );
         } else {
-            VM::VMScanning::scan_object_and_trace_edges(worker.tls, object, &mut |o| {
-                self.forward(o, false)
-            });
+            VM::VMScanning::scan_object_and_trace_edges::<_, RefUpdate>(
+                worker.tls,
+                object,
+                &mut |o| self.forward(o, false),
+            );
         }
     }
 
diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs
index d9f3f1657f..e3bd4e501e 100644
--- a/src/scheduler/gc_work.rs
+++ b/src/scheduler/gc_work.rs
@@ -3,6 +3,7 @@ use super::*;
 use crate::global_state::GcStatus;
 use crate::plan::ObjectsClosure;
 use crate::plan::VectorObjectQueue;
+use crate::util::ref_scan_policy::StrongClosure;
 use crate::util::*;
 use crate::vm::slot::Slot;
 use crate::vm::*;
@@ -867,7 +868,11 @@ pub trait ScanObjectsWork<VM: VMBinding>: GCWork<VM> + Sized {
             if <VM as VMBinding>::VMScanning::support_slot_enqueuing(tls, object) {
                 trace!("Scan object (slot) {}", object);
                 // If an object supports slot-enqueuing, we enqueue its slots.
-                <VM as VMBinding>::VMScanning::scan_object(tls, object, &mut closure);
+                <VM as VMBinding>::VMScanning::scan_object::<_, StrongClosure>(
+                    tls,
+                    object,
+                    &mut closure,
+                );
                 self.post_scan_object(object);
             } else {
                 // If an object does not support slot-enqueuing, we have to use
@@ -896,7 +901,7 @@ pub trait ScanObjectsWork<VM: VMBinding>: GCWork<VM> + Sized {
             // Scan objects and trace their outgoing edges at the same time.
             for object in scan_later.iter().copied() {
                 trace!("Scan object (node) {}", object);
-                <VM as VMBinding>::VMScanning::scan_object_and_trace_edges(
+                <VM as VMBinding>::VMScanning::scan_object_and_trace_edges::<_, StrongClosure>(
                     tls,
                     object,
                     object_tracer,
diff --git a/src/util/mod.rs b/src/util/mod.rs
index d22c29a2e3..88c2087d94 100644
--- a/src/util/mod.rs
+++ b/src/util/mod.rs
@@ -58,6 +58,8 @@ pub(crate) mod object_enum;
 pub(crate) mod object_forwarding;
 /// Reference processing implementation.
 pub(crate) mod reference_processor;
+/// RefScanPolicy implementations.
+pub(crate) mod ref_scan_policy;
 /// Utilities funcitons for Rust
 pub(crate) mod rust_util;
 /// Sanity checker for GC.
diff --git a/src/util/ref_scan_policy.rs b/src/util/ref_scan_policy.rs
new file mode 100644
index 0000000000..1cf5b5359e
--- /dev/null
+++ b/src/util/ref_scan_policy.rs
@@ -0,0 +1,63 @@
+//! This module holds common reference scanning policies used in MMTk core.
+
+use crate::vm::RefScanPolicy;
+
+/// An object is scanned during the strong transitive closure stage. The VM binding should
+/// visit fields that contain strong references using the slot-visitor or object-tracer
+/// callbacks.
+///
+/// As described in the [Porting Guide][pg-weakref], if a VM binding chooses to discover weak
+/// reference fields during tracing, it should record the object, the fields, the field
+/// values, and/or any other relevant data in VM-specific ways during the execution of the
+/// object-scanning functions. If the VM binding chooses not to discover weak reference
+/// fields this way, it can ignore weak fields.
+///
+/// [pg-weakref]: https://docs.mmtk.io/portingguide/concerns/weakref.html#identifying-weak-references
+pub struct StrongClosure;
+
+impl RefScanPolicy for StrongClosure {
+    const SHOULD_VISIT_STRONG: bool = true;
+    const SHOULD_VISIT_WEAK: bool = false;
+    const SHOULD_DISCOVER_WEAK: bool = true;
+}
+
+/// An object is scanned to update its references after objects have been moved or after the
+/// new addresses of objects have been calculated. The VM binding should visit all reference
+/// fields of an object, regardless of whether they hold strong or weak references.
+pub struct RefUpdate;
+
+impl RefScanPolicy for RefUpdate {
+    const SHOULD_VISIT_STRONG: bool = true;
+    const SHOULD_VISIT_WEAK: bool = true;
+    const SHOULD_DISCOVER_WEAK: bool = false;
+}
+
+/// Instructs the VM binding to visit all fields of an object, both strong and weak, without
+/// any hint about MMTk's intention when calling the object-scanning function.
+pub struct All;
+impl RefScanPolicy for All {
+    const SHOULD_VISIT_STRONG: bool = true;
+    const SHOULD_VISIT_WEAK: bool = true;
+    const SHOULD_DISCOVER_WEAK: bool = false;
+}
+
+/// Instructs the VM binding to visit all strong fields, without any hint about MMTk's
+/// intention when calling the object-scanning function. In particular, the VM binding should
+/// not discover weak references as suggested by [`StrongClosure`].
+pub struct StrongOnly;
+
+impl RefScanPolicy for StrongOnly {
+    const SHOULD_VISIT_STRONG: bool = true;
+    const SHOULD_VISIT_WEAK: bool = false;
+    const SHOULD_DISCOVER_WEAK: bool = false;
+}
+
+/// Instructs the VM binding to visit all weak fields, without any hint about MMTk's
+/// intention when calling the object-scanning function.
+pub struct WeakOnly;
+
+impl RefScanPolicy for WeakOnly {
+    const SHOULD_VISIT_STRONG: bool = false;
+    const SHOULD_VISIT_WEAK: bool = true;
+    const SHOULD_DISCOVER_WEAK: bool = false;
+}
diff --git a/src/vm/mod.rs b/src/vm/mod.rs
index 2ff244f6e4..887b42cc2a 100644
--- a/src/vm/mod.rs
+++ b/src/vm/mod.rs
@@ -30,6 +30,7 @@ pub use self::reference_glue::Finalizable;
 pub use self::reference_glue::ReferenceGlue;
 pub use self::scanning::ObjectTracer;
 pub use self::scanning::ObjectTracerContext;
+pub use self::scanning::RefScanPolicy;
 pub use self::scanning::RootsWorkFactory;
 pub use self::scanning::Scanning;
 pub use self::scanning::SlotVisitor;
diff --git a/src/vm/scanning.rs b/src/vm/scanning.rs
index 3a87fc4260..66d2578c21 100644
--- a/src/vm/scanning.rs
+++ b/src/vm/scanning.rs
@@ -43,6 +43,29 @@ impl<F: FnMut(ObjectReference) -> ObjectReference> ObjectTracer for F {
     }
 }
 
+/// This trait specifies how the object-scanning functions ([`Scanning::scan_object`] and
+/// [`Scanning::scan_object_and_trace_edges`]) should handle strong and weak reference fields.
+///
+/// Note that it is the VM and the VM binding that ultimately decide *which* references are
+/// strong and *which* are weak. In particular, the VM binding is allowed to conservatively
+/// report weak references as strong. For example,
+///
+/// - A VM binding can report all weak references as strong during nursery collections or
+///   concurrent collections to avoid expensive weak reference processing.
+/// - The VM binding of a JVM (e.g. mmtk-openjdk) can report the weak reference field in
+///   `SoftReference` as strong during non-emergency GCs, and weak during emergency GCs.
+pub trait RefScanPolicy {
+    /// True if the object-scanning function should visit strong reference fields in the
+    /// object using callbacks.
+    const SHOULD_VISIT_STRONG: bool;
+    /// True if the object-scanning function should visit weak reference fields in the
+    /// object using callbacks.
+    const SHOULD_VISIT_WEAK: bool;
+    /// True if the object-scanning function should discover weak reference fields in
+    /// VM-specific ways.
+    const SHOULD_DISCOVER_WEAK: bool;
+}
+
 /// An `ObjectTracerContext` gives a GC worker temporary access to an `ObjectTracer`, allowing
 /// the GC worker to trace objects. This trait is intended to abstract out the implementation
 /// details of tracing objects, enqueuing objects, and creating work packets that expand the
@@ -190,7 +213,7 @@ pub trait Scanning<VM: VMBinding> {
     /// * `tls`: The VM-specific thread-local storage for the current worker.
     /// * `object`: The object to be scanned.
     /// * `slot_visitor`: Called back for each field.
-    fn scan_object<SV: SlotVisitor<VM::VMSlot>>(
+    fn scan_object<SV: SlotVisitor<VM::VMSlot>, R: RefScanPolicy>(
        tls: VMWorkerThread,
        object: ObjectReference,
        slot_visitor: &mut SV,
@@ -215,7 +238,7 @@ pub trait Scanning<VM: VMBinding> {
     /// * `tls`: The VM-specific thread-local storage for the current worker.
     /// * `object`: The object to be scanned.
     /// * `object_tracer`: Called back for the object reference held in each field.
-    fn scan_object_and_trace_edges<OT: ObjectTracer>(
+    fn scan_object_and_trace_edges<OT: ObjectTracer, R: RefScanPolicy>(
        _tls: VMWorkerThread,
        _object: ObjectReference,
        _object_tracer: &mut OT,
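
For reference, the following self-contained sketch illustrates how a VM binding is expected to consume these compile-time flags. It deliberately avoids the mmtk crate: the `RefScanPolicy` trait and the two policies mirror the definitions in the patch, while `Obj`, its slot layout, and the `visit_slot`/`discover_weak` callbacks are hypothetical stand-ins for the binding's real object model and MMTk's slot-visitor callback.

// Standalone sketch; assumes a toy object model, not the real mmtk APIs.
trait RefScanPolicy {
    const SHOULD_VISIT_STRONG: bool;
    const SHOULD_VISIT_WEAK: bool;
    const SHOULD_DISCOVER_WEAK: bool;
}

/// Strong transitive closure: visit strong fields, discover (but don't visit) weak ones.
struct StrongClosure;
impl RefScanPolicy for StrongClosure {
    const SHOULD_VISIT_STRONG: bool = true;
    const SHOULD_VISIT_WEAK: bool = false;
    const SHOULD_DISCOVER_WEAK: bool = true;
}

/// Reference updating after compaction: every reference field holds an address to fix up.
struct RefUpdate;
impl RefScanPolicy for RefUpdate {
    const SHOULD_VISIT_STRONG: bool = true;
    const SHOULD_VISIT_WEAK: bool = true;
    const SHOULD_DISCOVER_WEAK: bool = false;
}

/// Toy object layout: slot indices of strong fields, plus an optional weak field.
struct Obj {
    strong_fields: Vec<usize>,
    weak_field: Option<usize>,
}

/// The shape of a binding's `Scanning::scan_object::<SV, R>` body. The policy is a
/// compile-time parameter, so each branch below is resolved per monomorphization.
fn scan_object<R: RefScanPolicy>(
    obj: &Obj,
    visit_slot: &mut impl FnMut(usize),
    discover_weak: &mut impl FnMut(usize),
) {
    if R::SHOULD_VISIT_STRONG {
        for &slot in &obj.strong_fields {
            visit_slot(slot);
        }
    }
    if let Some(weak) = obj.weak_field {
        if R::SHOULD_VISIT_WEAK {
            // e.g. RefUpdate: the weak slot also holds an address that must be updated.
            visit_slot(weak);
        } else if R::SHOULD_DISCOVER_WEAK {
            // e.g. StrongClosure: record the weak field for VM-side weak reference
            // processing instead of tracing through it.
            discover_weak(weak);
        }
        // Otherwise (e.g. StrongOnly in the SATB barrier): ignore the weak field.
    }
}

fn main() {
    let obj = Obj { strong_fields: vec![0, 1], weak_field: Some(2) };

    let (mut visited, mut discovered) = (Vec::new(), Vec::new());
    scan_object::<StrongClosure>(&obj, &mut |s| visited.push(s), &mut |s| discovered.push(s));
    assert_eq!(visited, vec![0, 1]);
    assert_eq!(discovered, vec![2]);

    let (mut visited, mut discovered) = (Vec::new(), Vec::new());
    scan_object::<RefUpdate>(&obj, &mut |s| visited.push(s), &mut |s| discovered.push(s));
    assert_eq!(visited, vec![0, 1, 2]);
    assert!(discovered.is_empty());
}

Because the policy is a type parameter rather than a runtime flag, each call site in the patch (e.g. `scan_object::<_, StrongClosure>` in `gc_work.rs` or `iterate_fields::<_, StrongOnly>` in the SATB barrier) monomorphizes into a scanning loop with no per-slot policy checks.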