diff --git a/src/plan/tracing.rs b/src/plan/tracing.rs
index dc4eaac6df..0d95a7225e 100644
--- a/src/plan/tracing.rs
+++ b/src/plan/tracing.rs
@@ -4,7 +4,7 @@
 use crate::scheduler::gc_work::{EdgeOf, ProcessEdgesWork};
 use crate::scheduler::{GCWorker, WorkBucketStage};
 use crate::util::ObjectReference;
-use crate::vm::EdgeVisitor;
+use crate::vm::{EdgeVisitor, VMBinding};
 
 /// This trait represents an object queue to enqueue objects during tracing.
 pub trait ObjectQueue {
@@ -20,15 +20,34 @@ pub type VectorObjectQueue = VectorQueue<ObjectReference>;
 pub struct VectorQueue<T> {
     /// Enqueued nodes.
     buffer: Vec<T>,
+    /// Capacity of the queue.
+    capacity: usize,
 }
 
 impl<T> VectorQueue<T> {
-    /// Reserve a capacity of this on first enqueue to avoid frequent resizing.
-    const CAPACITY: usize = 4096;
+    /// The default capacity of the queue.
+    const DEFAULT_CAPACITY: usize = 4096;
 
-    /// Create an empty `VectorObjectQueue`.
+    /// Create an empty `VectorObjectQueue` with default capacity.
     pub fn new() -> Self {
-        Self { buffer: Vec::new() }
+        Self::with_capacity(Self::DEFAULT_CAPACITY)
+    }
+
+    /// Create an empty `VectorObjectQueue` with a given capacity.
+    pub fn with_capacity(capacity: usize) -> Self {
+        Self {
+            buffer: Vec::new(),
+            capacity,
+        }
+    }
+
+    /// Create an empty `VectorObjectQueue` with an optionally given capacity.
+    pub fn with_capacity_opt(capacity_opt: Option<usize>) -> Self {
+        if let Some(capacity) = capacity_opt {
+            Self::with_capacity(capacity)
+        } else {
+            Self::new()
+        }
     }
 
     /// Return `true` if the queue is empty.
@@ -46,14 +65,14 @@ impl<T> VectorQueue<T> {
         self.buffer
     }
 
-    /// Check if the buffer size reaches `CAPACITY`.
+    /// Check if the buffer size reaches the capacity.
     pub fn is_full(&self) -> bool {
-        self.buffer.len() >= Self::CAPACITY
+        self.buffer.len() >= self.capacity
     }
 
     pub fn push(&mut self, v: T) {
         if self.buffer.is_empty() {
-            self.buffer.reserve(Self::CAPACITY);
+            self.buffer.reserve(self.capacity);
         }
         self.buffer.push(v);
     }
@@ -80,8 +99,9 @@ pub struct ObjectsClosure<'a, E: ProcessEdgesWork> {
 
 impl<'a, E: ProcessEdgesWork> ObjectsClosure<'a, E> {
     pub fn new(worker: &'a mut GCWorker<E::VM>, bucket: WorkBucketStage) -> Self {
+        let buffer = VectorQueue::with_capacity_opt(E::VM::override_scan_objects_packet_size());
         Self {
-            buffer: VectorQueue::new(),
+            buffer,
             worker,
             bucket,
         }
diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs
index a957d13a3f..118ff69c17 100644
--- a/src/scheduler/gc_work.rs
+++ b/src/scheduler/gc_work.rs
@@ -526,9 +526,10 @@ impl<VM: VMBinding> ProcessEdgesBase<VM> {
                 mmtk.edge_logger.log_edge(*edge);
             }
         }
+        let nodes = VectorObjectQueue::with_capacity_opt(VM::override_scan_objects_packet_size());
         Self {
             edges,
-            nodes: VectorObjectQueue::new(),
+            nodes,
             mmtk,
             worker: std::ptr::null_mut(),
             roots,
diff --git a/src/vm/mod.rs b/src/vm/mod.rs
index 7440cf2fae..0553a3fd30 100644
--- a/src/vm/mod.rs
+++ b/src/vm/mod.rs
@@ -71,4 +71,21 @@ where
     /// Note that MMTk does not attempt to do anything to align the cursor to this value, but
     /// it merely asserts with this constant.
     const ALLOC_END_ALIGNMENT: usize = 1;
+
+    /// Override the packet size of the `ScanObjects` work packet. If it returns `Some(size)`,
+    /// each `ScanObjects` work packet will contain at most `size` objects; otherwise the packet
+    /// size will be determined by mmtk-core.
+    ///
+    /// This method is used for working around a load-balance problem on some VMs that use
+    /// object-enqueuing tracing. The default packet size (4096) may be too large for some
+    /// workloads, in which case only a few work packets will be available and most GC workers
+    /// will be idle. The binding can reduce the packet size to increase the level of
+    /// parallelism. But if the packet size is too small, it will introduce extra scheduling
+    /// overhead for executing each work packet.
+    ///
+    /// TODO: We should support work stealing within work packets and make this workaround
+    /// unnecessary.
+    fn override_scan_objects_packet_size() -> Option<usize> {
+        None
+    }
 }
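
Binding-side usage, for illustration only (not part of this diff): a VM binding that observes poor GC parallelism with the default 4096-object `ScanObjects` packets could override the new hook in its `VMBinding` implementation. `MyVM` and the packet size of 512 below are hypothetical, and the binding's other `VMBinding` items are elided.

use mmtk::vm::VMBinding;

/// Hypothetical VM type used only for this sketch.
#[derive(Default)]
pub struct MyVM;

impl VMBinding for MyVM {
    // ... the binding's usual associated types (object model, scanning, etc.)
    // and constants would go here ...

    fn override_scan_objects_packet_size() -> Option<usize> {
        // Ask mmtk-core to cap each ScanObjects packet at 512 objects so that more
        // packets are available to otherwise idle workers. Returning None keeps the
        // default size chosen by mmtk-core.
        Some(512)
    }
}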