diff --git a/Cargo.toml b/Cargo.toml
index f5c90ce936..72dff56b08 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -145,8 +145,6 @@ sticky_immix_non_moving_nursery = []
 immix_stress_copying = []
 # Reduce block size for ImmixSpace. This mitigates fragmentation when defrag is disabled.
 immix_smaller_block = []
-# Zero the unmarked lines after a GC cycle in immix. This helps debug untraced objects.
-immix_zero_on_release = []
 
 # Run sanity GC
 sanity = []
@@ -160,6 +158,9 @@ nogc_no_zeroing = ["nogc_lock_free"]
 # Q: Why do we need this as a compile time flat? We can always set the number of GC threads through options.
 single_worker = []
 
+# Poison dead objects on release. Each space uses a different poison value. This helps debug untraced objects.
+poison_on_release = []
+
 # To run expensive comprehensive runtime checks, such as checking duplicate edges
 extreme_assertions = []
 
diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs
index 6b30f80117..d5a461299d 100644
--- a/src/policy/copyspace.rs
+++ b/src/policy/copyspace.rs
@@ -188,6 +188,11 @@ impl<VM: VMBinding> CopySpace<VM> {
     }
 
     pub fn release(&self) {
+        #[cfg(feature = "poison_on_release")]
+        for (start, size) in self.pr.iterate_allocated_regions() {
+            crate::util::memory::set(start, 0xbc, size);
+        }
+
         for (start, size) in self.pr.iterate_allocated_regions() {
             // Clear the forwarding bits if it is on the side.
             if let MetadataSpec::OnSide(side_forwarding_status_table) =
diff --git a/src/policy/immix/block.rs b/src/policy/immix/block.rs
index 85ed76ddef..9b592be871 100644
--- a/src/policy/immix/block.rs
+++ b/src/policy/immix/block.rs
@@ -248,8 +248,13 @@ impl Block {
                     holes += 1;
                 }
 
-                #[cfg(feature = "immix_zero_on_release")]
-                crate::util::memory::zero(line.start(), Line::BYTES);
+                // Cheeky trick to encode the released line into the poisoned value
+                #[cfg(feature = "poison_on_release")]
+                crate::util::memory::set_pattern(
+                    line.start(),
+                    (0xdeadbeef ^ line.start().as_usize()) & !0xff,
+                    Line::BYTES,
+                );
 
                 // We need to clear the pin bit if it is on the side, as this line can be reused
                 #[cfg(feature = "object_pinning")]
diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs
index 8906a50044..66631dd76f 100644
--- a/src/policy/largeobjectspace.rs
+++ b/src/policy/largeobjectspace.rs
@@ -288,8 +288,10 @@ impl<VM: VMBinding> LargeObjectSpace<VM> {
         let sweep = |object: ObjectReference| {
             #[cfg(feature = "vo_bit")]
             crate::util::metadata::vo_bit::unset_vo_bit(object);
-            self.pr
-                .release_pages(get_super_page(object.to_object_start::<VM>()));
+            let start = object.to_object_start::<VM>();
+            #[cfg(feature = "poison_on_release")]
+            crate::util::memory::set(start, 0xed, VM::VMObjectModel::get_current_size(object));
+            self.pr.release_pages(get_super_page(start));
         };
         if sweep_nursery {
             for object in self.treadmill.collect_nursery() {
diff --git a/src/scheduler/gc_work.rs b/src/scheduler/gc_work.rs
index 82be1f3561..a8657792d2 100644
--- a/src/scheduler/gc_work.rs
+++ b/src/scheduler/gc_work.rs
@@ -235,6 +235,11 @@ impl<E: ProcessEdgesWork> ObjectTracer for ProcessEdgesWorkTracer<E> {
     /// Forward the `trace_object` call to the underlying `ProcessEdgesWork`,
     /// and flush as soon as the underlying buffer of `process_edges_work` is full.
     fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
+        debug_assert!(
+            <E::VM as VMBinding>::VMObjectModel::is_object_sane(object),
+            "Object {:?} is not sane!",
+            object,
+        );
         let result = self.process_edges_work.trace_object(object);
         self.flush_if_full();
         result
@@ -693,6 +698,12 @@ impl<VM: VMBinding> ProcessEdgesWork for SFTProcessEdges<VM> {
     fn trace_object(&mut self, object: ObjectReference) -> ObjectReference {
         use crate::policy::sft::GCWorkerMutRef;
 
+        debug_assert!(
+            <VM as VMBinding>::VMObjectModel::is_object_sane(object),
+            "Object {:?} is not sane!",
+            object,
+        );
+
         // Erase type parameter
         let worker = GCWorkerMutRef::new(self.worker());
 
@@ -840,6 +851,11 @@ pub trait ScanObjectsWork<VM: VMBinding>: GCWork<VM> + Sized {
 
             if <VM as VMBinding>::VMScanning::support_slot_enqueuing(tls, object) {
                 trace!("Scan object (slot) {}", object);
+                debug_assert!(
+                    <VM as VMBinding>::VMObjectModel::is_object_sane(object),
+                    "Object {:?} is not sane!",
+                    object,
+                );
                 // If an object supports slot-enqueuing, we enqueue its slots.
                 <VM as VMBinding>::VMScanning::scan_object(tls, object, &mut closure);
                 self.post_scan_object(object);
@@ -977,6 +993,12 @@ impl<VM: VMBinding, P: PlanTraceObject<VM> + Plan<VM = VM>, const KIND: TraceKin
                 // Skip slots that are not holding an object reference.
                 return;
             };
+            debug_assert!(
+                <VM as VMBinding>::VMObjectModel::is_object_sane(object),
+                "Object {:?} from slot {:?} is not sane!",
+                object,
+                slot,
+            );
             let new_object = self.trace_object(object);
             if P::may_move_objects::<KIND>() && new_object != object {
                 slot.store(new_object);
diff --git a/src/util/memory.rs b/src/util/memory.rs
index 0e689be6b9..60256246fb 100644
--- a/src/util/memory.rs
+++ b/src/util/memory.rs
@@ -175,6 +175,48 @@ pub fn set(start: Address, val: u8, len: usize) {
     }
 }
 
+/// Set a range of memory to the given pattern. Similar to C++'s std::fill.
+pub fn set_pattern(start: Address, pattern: usize, len: usize) {
+    debug_assert!(std::mem::size_of::<usize>() <= len);
+    debug_assert!(len % std::mem::size_of::<usize>() == 0);
+
+    let end_addr = (start + len).to_mut_ptr::<usize>();
+    let mut current = start.to_mut_ptr::<usize>();
+    while current < end_addr {
+        unsafe {
+            *current = pattern;
+            current = current.add(1);
+        }
+    }
+}
+
+/// Dump RAM around a given address. Be careful when using this function as it may segfault
+/// for unmapped memory. ONLY use it for locations that are KNOWN to be broken AND
+/// allocated by MMTk.
+///
+/// # Safety
+/// Extremely unsafe for arbitrary addresses since they may not have been mapped. Only use
+/// this function for locations that are KNOWN to be broken AND allocated by MMTk.
+pub unsafe fn dump_ram_around_address(addr: Address, bytes: usize) -> String {
+    let mut string: String = String::new();
+    let end_addr = (addr + bytes).to_ptr::<usize>();
+    let mut current = (addr - bytes).to_ptr::<usize>();
+    while current < end_addr {
+        if current == addr.to_ptr::<usize>() {
+            string.push_str(" | ");
+        } else {
+            string.push(' ');
+        }
+        let s = current.read();
+        #[cfg(target_pointer_width = "64")]
+        string.push_str(format!("{:#018x}", s).as_str());
+        #[cfg(target_pointer_width = "32")]
+        string.push_str(format!("{:#010x}", s).as_str());
+        current = current.add(1);
+    }
+    string
+}
+
 /// Demand-zero mmap:
 /// This function mmaps the memory and guarantees to zero all mapped memory.
 /// This function WILL overwrite existing memory mapping. The user of this function
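
A note on the Immix line poison above, with a small sketch that is not part of the patch: set_pattern stores (0xdeadbeef ^ line.start()) with the low byte cleared, and XOR is its own inverse, so a poisoned word found in a crash dump (or in dump_ram_around_address output) can be decoded back to the line that was released. The decode_immix_poison helper and the sample address below are hypothetical.

    // Hypothetical helper, not part of the patch: recover the released line from a word
    // written by set_pattern(line.start(), (0xdeadbeef ^ line.start().as_usize()) & !0xff, ...).
    fn decode_immix_poison(poisoned_word: usize) -> usize {
        // XOR is its own inverse; the low byte was cleared before poisoning, so this yields
        // the line start with its low byte cleared (a no-op for line-aligned addresses).
        (poisoned_word ^ 0xdeadbeef) & !0xff
    }

    fn main() {
        // Hypothetical line start; real values come from a crash dump or a debugger session.
        let line_start: usize = 0x2000_4000;
        let poison = (0xdeadbeef ^ line_start) & !0xff;
        assert_eq!(decode_immix_poison(poison), line_start);
        println!("poison {:#x} decodes to line start {:#x}", poison, decode_immix_poison(poison));
    }

With the feature enabled, an unexpected 0xbc.. byte fill points at CopySpace and 0xed.. at the large object space, while a word that decodes to a plausible line address points at a released Immix line.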