Skip to content

Commit 1b9cfe4

Browse files
authored
Dump process map on mmap failure and fix typos (#1127)
1 parent 5a01555 commit 1b9cfe4

File tree

6 files changed

+33
-21
lines changed

6 files changed

+33
-21
lines changed

src/util/heap/layout/byte_map_mmapper.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ impl Mmapper for ByteMapMmapper {
8383
let start_chunk = Self::address_to_mmap_chunks_down(start);
8484
let end_chunk = Self::address_to_mmap_chunks_up(start + pages_to_bytes(pages));
8585
trace!(
86-
"Calling quanrantine_address_range with start={:?} and {} pages, {}-{}",
86+
"Calling quarantine_address_range with start={:?} and {} pages, {}-{}",
8787
start,
8888
pages,
8989
Self::mmap_chunks_to_address(start_chunk),

src/util/heap/layout/mmapper.rs

+4-4
Original file line numberDiff line numberDiff line change
@@ -121,11 +121,11 @@ impl MapState {
121121
MapState::Unmapped => mmap_noreserve(mmap_start, MMAP_CHUNK_BYTES, strategy),
122122
MapState::Quarantined => Ok(()),
123123
MapState::Mapped => {
124-
// If a chunk is mapped by us and we try to quanrantine it, we simply don't do anything.
124+
// If a chunk is mapped by us and we try to quarantine it, we simply don't do anything.
125125
// We allow this as it is possible to have a situation like this:
126-
// we have global side metdata S, and space A and B. We quanrantine memory X for S for A, then map
127-
// X for A, and then we quanrantine memory Y for S for B. It is possible that X and Y is the same chunk,
128-
// so the chunk is already mapped for A, and we try quanrantine it for B. We simply allow this transition.
126+
// we have global side metadata S, and space A and B. We quarantine memory X for S for A, then map
127+
// X for A, and then we quarantine memory Y for S for B. It is possible that X and Y is the same chunk,
128+
// so the chunk is already mapped for A, and we try quarantine it for B. We simply allow this transition.
129129
return Ok(());
130130
}
131131
MapState::Protected => panic!("Cannot quarantine protected memory"),

src/util/memory.rs

+14-4
Original file line numberDiff line numberDiff line change
@@ -147,6 +147,7 @@ pub fn handle_mmap_error<VM: VMBinding>(error: Error, tls: VMThread) -> ! {
147147
match error.kind() {
148148
// From Rust nightly 2021-05-12, we started to see Rust added this ErrorKind.
149149
ErrorKind::OutOfMemory => {
150+
eprintln!("{}", get_process_memory_maps());
150151
// Signal `MmapOutOfMemory`. Expect the VM to abort immediately.
151152
trace!("Signal MmapOutOfMemory!");
152153
VM::VMCollection::out_of_memory(tls, AllocationError::MmapOutOfMemory);
@@ -159,14 +160,18 @@ pub fn handle_mmap_error<VM: VMBinding>(error: Error, tls: VMThread) -> ! {
159160
if let Some(os_errno) = error.raw_os_error() {
160161
// If it is OOM, we invoke out_of_memory() through the VM interface.
161162
if os_errno == libc::ENOMEM {
163+
eprintln!("{}", get_process_memory_maps());
162164
// Signal `MmapOutOfMemory`. Expect the VM to abort immediately.
163165
trace!("Signal MmapOutOfMemory!");
164166
VM::VMCollection::out_of_memory(tls, AllocationError::MmapOutOfMemory);
165167
unreachable!()
166168
}
167169
}
168170
}
169-
ErrorKind::AlreadyExists => panic!("Failed to mmap, the address is already mapped. Should MMTk quanrantine the address range first?"),
171+
ErrorKind::AlreadyExists => {
172+
eprintln!("{}", get_process_memory_maps());
173+
panic!("Failed to mmap, the address is already mapped. Should MMTk quarantine the address range first?");
174+
}
170175
_ => {}
171176
}
172177
panic!("Unexpected mmap failure: {:?}", error)
@@ -228,9 +233,7 @@ fn wrap_libc_call<T: PartialEq>(f: &dyn Fn() -> T, expect: T) -> Result<()> {
228233

229234
/// Get the memory maps for the process. The returned string is a multi-line string.
230235
/// This is only meant to be used for debugging. For example, log process memory maps after detecting a clash.
231-
/// If we would need to parsable memory maps, I would suggest using a library instead which saves us the trouble to deal with portability.
232-
#[cfg(debug_assertions)]
233-
#[cfg(target_os = "linux")]
236+
#[cfg(any(target_os = "linux", target_os = "android"))]
234237
pub fn get_process_memory_maps() -> String {
235238
// print map
236239
use std::fs::File;
@@ -241,6 +244,13 @@ pub fn get_process_memory_maps() -> String {
241244
data
242245
}
243246

247+
/// Get the memory maps for the process. The returned string is a multi-line string.
248+
/// This is only meant to be used for debugging. For example, log process memory maps after detecting a clash.
249+
#[cfg(not(any(target_os = "linux", target_os = "android")))]
250+
pub fn get_process_memory_maps() -> String {
251+
"(process map unavailable)".to_string()
252+
}
253+
244254
/// Returns the total physical memory for the system in bytes.
245255
pub(crate) fn get_system_total_memory() -> u64 {
246256
// TODO: Note that if we want to get system info somewhere else in the future, we should

src/util/metadata/side_metadata/global.rs

+12-10
Original file line numberDiff line numberDiff line change
@@ -1124,7 +1124,7 @@ impl SideMetadataContext {
11241124
/// # Arguments
11251125
/// * `start` - The starting address of the source data.
11261126
/// * `size` - The size of the source data (in bytes).
1127-
/// * `no_reserve` - whether to invoke mmap with a noreserve flag (we use this flag to quanrantine address range)
1127+
/// * `no_reserve` - whether to invoke mmap with a noreserve flag (we use this flag to quarantine address range)
11281128
fn map_metadata_internal(&self, start: Address, size: usize, no_reserve: bool) -> Result<()> {
11291129
for spec in self.global.iter() {
11301130
match try_mmap_contiguous_metadata_space(start, size, spec, no_reserve) {
@@ -1137,15 +1137,17 @@ impl SideMetadataContext {
11371137
let mut lsize: usize = 0;
11381138

11391139
for spec in self.local.iter() {
1140-
// For local side metadata, we always have to reserve address space for all
1141-
// local metadata required by all policies in MMTk to be able to calculate a constant offset for each local metadata at compile-time
1142-
// (it's like assigning an ID to each policy).
1143-
// As the plan is chosen at run-time, we will never know which subset of policies will be used during run-time.
1144-
// We can't afford this much address space in 32-bits.
1140+
// For local side metadata, we always have to reserve address space for all local
1141+
// metadata required by all policies in MMTk to be able to calculate a constant offset
1142+
// for each local metadata at compile-time (it's like assigning an ID to each policy).
1143+
//
1144+
// As the plan is chosen at run-time, we will never know which subset of policies will
1145+
// be used during run-time. We can't afford this much address space in 32-bits.
11451146
// So, we switch to the chunk-based approach for this specific case.
11461147
//
1147-
// The global metadata is different in that for each plan, we can calculate its constant base addresses at compile-time.
1148-
// Using the chunk-based approach will need the same address space size as the current not-chunked approach.
1148+
// The global metadata is different in that for each plan, we can calculate its constant
1149+
// base addresses at compile-time. Using the chunk-based approach will need the same
1150+
// address space size as the current not-chunked approach.
11491151
#[cfg(target_pointer_width = "64")]
11501152
{
11511153
match try_mmap_contiguous_metadata_space(start, size, spec, no_reserve) {
@@ -1191,13 +1193,13 @@ impl SideMetadataContext {
11911193
debug_assert!(size % BYTES_IN_PAGE == 0);
11921194

11931195
for spec in self.global.iter() {
1194-
ensure_munmap_contiguos_metadata_space(start, size, spec);
1196+
ensure_munmap_contiguous_metadata_space(start, size, spec);
11951197
}
11961198

11971199
for spec in self.local.iter() {
11981200
#[cfg(target_pointer_width = "64")]
11991201
{
1200-
ensure_munmap_contiguos_metadata_space(start, size, spec);
1202+
ensure_munmap_contiguous_metadata_space(start, size, spec);
12011203
}
12021204
#[cfg(target_pointer_width = "32")]
12031205
{

src/util/metadata/side_metadata/helpers.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ pub(crate) fn ensure_munmap_metadata(start: Address, size: usize) {
3737
/// Unmaps a metadata space (`spec`) for the specified data address range (`start` and `size`)
3838
/// Returns the size in bytes that get munmapped.
3939
#[cfg(test)]
40-
pub(crate) fn ensure_munmap_contiguos_metadata_space(
40+
pub(crate) fn ensure_munmap_contiguous_metadata_space(
4141
start: Address,
4242
size: usize,
4343
spec: &SideMetadataSpec,

src/vm/tests/mock_tests/mock_test_handle_mmap_conflict.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ pub fn test_handle_mmap_conflict() {
2929
assert!(panic_res.is_err());
3030
let err = panic_res.err().unwrap();
3131
assert!(err.is::<&str>());
32-
assert_eq!(err.downcast_ref::<&str>().unwrap(), &"Failed to mmap, the address is already mapped. Should MMTk quanrantine the address range first?");
32+
assert_eq!(err.downcast_ref::<&str>().unwrap(), &"Failed to mmap, the address is already mapped. Should MMTk quarantine the address range first?");
3333
},
3434
no_cleanup,
3535
)

0 commit comments

Comments (0)