
Commit 223c9d7

Use wrapper for logging.
We no longer use the `max_level_trace` and `release_max_level_off` features of the `log` crate. This way, mmtk-core, as a library, no longer conflicts with users that need to set the log-level features of the `log` crate themselves.

We also no longer use the logging macros of the `log` crate directly. All logging now goes through wrapper macros in the `crate::util::log` module. Logs at the DEBUG and TRACE levels are disabled at compile time in release builds, and can be re-enabled with the "hot_log" Cargo feature. This lets us disable the logging statements on hot paths in mmtk-core only, without affecting log statements in other crates, and it also allows developers to enable these logs in release builds when debugging performance.
1 parent 618fde4 commit 223c9d7
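The wrapper module itself (`src/util/log.rs`) is not among the files shown below, because large commits hide some content. The following is therefore only a minimal sketch, assuming the macros forward to the renamed `the_log_crate` dependency and gate `debug!`/`trace!` on debug builds or the "hot_log" feature; the exact macro bodies and cfg conditions in the committed module may differ.

// Hypothetical sketch of the `crate::util::log` wrapper module (not the committed code).
// `the_log_crate` is the renamed `log` dependency from Cargo.toml.

/// INFO/WARN/ERROR are always forwarded to the `log` crate.
macro_rules! info {
    ($($arg:tt)*) => { ::the_log_crate::info!($($arg)*) };
}

/// DEBUG/TRACE sit on hot paths: keep them only in debug builds or when the
/// "hot_log" Cargo feature is enabled. `cfg!` is a compile-time constant, so
/// the whole branch is optimized away in ordinary release builds.
macro_rules! trace {
    ($($arg:tt)*) => {
        if cfg!(any(debug_assertions, feature = "hot_log")) {
            ::the_log_crate::trace!($($arg)*);
        }
    };
}

// Re-export so call sites can write `use crate::util::log;` and then `log::trace!(...)`.
pub(crate) use info;
pub(crate) use trace;

Call sites then use `use crate::util::log;` and `log::trace!(...)`, which is exactly the pattern visible in the diffs below.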


60 files changed: +466 -334 lines

Cargo.toml

Lines changed: 8 additions & 1 deletion
@@ -34,7 +34,6 @@ itertools = "0.12.0"
 jemalloc-sys = { version = "0.5.3", features = ["disable_initial_exec_tls"], optional = true }
 lazy_static = "1.1"
 libc = "0.2"
-log = { version = "0.4", features = ["max_level_trace", "release_max_level_off"] }
 memoffset = "0.9"
 mimalloc-sys = { version = "0.1.6", optional = true }
 # MMTk macros - we have to specify a version here in order to publish the crate, even though we use the dependency from a local path.
@@ -51,6 +50,9 @@ static_assertions = "1.1.0"
 strum = "0.26.2"
 strum_macros = "0.26.2"
 sysinfo = "0.30.9"
+# mmtk-core internally uses wrapper macros in the `mmtk::util::log` module.
+# We rename the crate so that programmers don't unintentially use the `log` crate directly.
+the_log_crate = { package = "log", version = "0.4"}

 [dev-dependencies]
 paste = "1.0.8"
@@ -171,6 +173,11 @@ count_live_bytes_in_gc = []
 # capture the type names of work packets.
 bpftrace_workaround = []

+# Enable verbose logs in release build. Those are placed on hot paths, so merely checking whether
+# logging is enabled may degrade performance. Currently such logs include `debug!` and `trace!`
+# levels.
+hot_log = []
+
 # Do not modify the following line - ci-common.sh matches it
 # -- Mutally exclusive features --
 # Only one feature from each group can be provided. Otherwise build will fail.
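Because mmtk-core no longer turns on `max_level_trace`/`release_max_level_off` itself, a VM binding that embeds mmtk-core can now cap log levels however it likes. A hypothetical binding's Cargo.toml might look like the following; the crate names and versions are illustrative, not part of this commit:

# Hypothetical downstream binding's Cargo.toml (illustrative only).
[dependencies]
mmtk = { git = "https://github.com/mmtk/mmtk-core.git" }  # rev/branch omitted here
# The binding chooses its own level caps; these are existing features of the `log` crate.
log = { version = "0.4", features = ["max_level_debug", "release_max_level_info"] }
env_logger = "0.11"  # or any other logger implementation the binding prefers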

src/global_state.rs

Lines changed: 2 additions & 1 deletion
@@ -1,3 +1,4 @@
+use crate::util::log;
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 use std::sync::Mutex;
 use std::time::Instant;
@@ -160,7 +161,7 @@ impl GlobalState {
     /// Increase the allocation bytes and return the current allocation bytes after increasing
     pub fn increase_allocation_bytes_by(&self, size: usize) -> usize {
         let old_allocation_bytes = self.allocation_bytes.fetch_add(size, Ordering::SeqCst);
-        trace!(
+        log::trace!(
             "Stress GC: old_allocation_bytes = {}, size = {}, allocation_bytes = {}",
             old_allocation_bytes,
             size,

src/lib.rs

Lines changed: 0 additions & 2 deletions
@@ -30,8 +30,6 @@
 #[macro_use]
 extern crate lazy_static;
 #[macro_use]
-extern crate log;
-#[macro_use]
 extern crate downcast_rs;
 #[macro_use]
 extern crate static_assertions;

src/memory_manager.rs

Lines changed: 14 additions & 12 deletions
@@ -20,6 +20,7 @@ use crate::scheduler::{GCWork, GCWorker};
 use crate::util::alloc::allocators::AllocatorSelector;
 use crate::util::constants::{LOG_BYTES_IN_PAGE, MIN_OBJECT_SIZE};
 use crate::util::heap::layout::vm_layout::vm_layout;
+use crate::util::log;
 use crate::util::opaque_pointer::*;
 use crate::util::{Address, ObjectReference};
 use crate::vm::slot::MemorySlice;
@@ -52,8 +53,8 @@ use crate::vm::VMBinding;
 /// * `builder`: The reference to a MMTk builder.
 pub fn mmtk_init<VM: VMBinding>(builder: &MMTKBuilder) -> Box<MMTK<VM>> {
     match crate::util::logger::try_init() {
-        Ok(_) => debug!("MMTk initialized the logger."),
-        Err(_) => debug!(
+        Ok(_) => log::debug!("MMTk initialized the logger."),
+        Err(_) => log::debug!(
             "MMTk failed to initialize the logger. Possibly a logger has been initialized by user."
         ),
     }
@@ -69,19 +70,20 @@ pub fn mmtk_init<VM: VMBinding>(builder: &MMTKBuilder) -> Box<MMTK<VM>> {
             if split[0] == "Threads:" {
                 let threads = split[1].parse::<i32>().unwrap();
                 if threads != 1 {
-                    warn!("Current process has {} threads, process-wide perf event measurement will only include child threads spawned from this thread", threads);
+                    log::warn!("Current process has {} threads, process-wide perf event measurement will only include child threads spawned from this thread", threads);
                 }
             }
         }
     }
     let mmtk = builder.build();

-    info!(
+    log::info!(
         "Initialized MMTk with {:?} ({:?})",
-        *mmtk.options.plan, *mmtk.options.gc_trigger
+        *mmtk.options.plan,
+        *mmtk.options.gc_trigger
     );
     #[cfg(feature = "extreme_assertions")]
-    warn!("The feature 'extreme_assertions' is enabled. MMTk will run expensive run-time checks. Slow performance should be expected.");
+    log::warn!("The feature 'extreme_assertions' is enabled. MMTk will run expensive run-time checks. Slow performance should be expected.");
     Box::new(mmtk)
 }

@@ -115,7 +117,7 @@ pub fn bind_mutator<VM: VMBinding>(

     const LOG_ALLOCATOR_MAPPING: bool = false;
     if LOG_ALLOCATOR_MAPPING {
-        info!("{:?}", mutator.config);
+        log::info!("{:?}", mutator.config);
     }
     mutator
 }
@@ -477,7 +479,7 @@ pub fn gc_poll<VM: VMBinding>(mmtk: &MMTK<VM>, tls: VMMutatorThread) {
     );

     if VM::VMCollection::is_collection_enabled() && mmtk.gc_trigger.poll(false, None) {
-        debug!("Collection required");
+        log::debug!("Collection required");
         assert!(mmtk.state.is_initialized(), "GC is not allowed here: collection is not initialized (did you call initialize_collection()?).");
         VM::VMCollection::block_for_gc(tls);
     }
@@ -764,7 +766,7 @@ pub fn add_finalizer<VM: VMBinding>(
     object: <VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType,
 ) {
     if *mmtk.options.no_finalizer {
-        warn!("add_finalizer() is called when no_finalizer = true");
+        log::warn!("add_finalizer() is called when no_finalizer = true");
     }

     mmtk.finalizable_processor.lock().unwrap().add(object);
@@ -823,7 +825,7 @@ pub fn get_finalized_object<VM: VMBinding>(
     mmtk: &'static MMTK<VM>,
 ) -> Option<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
     if *mmtk.options.no_finalizer {
-        warn!("get_finalized_object() is called when no_finalizer = true");
+        log::warn!("get_finalized_object() is called when no_finalizer = true");
     }

     mmtk.finalizable_processor
@@ -843,7 +845,7 @@ pub fn get_all_finalizers<VM: VMBinding>(
     mmtk: &'static MMTK<VM>,
 ) -> Vec<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
     if *mmtk.options.no_finalizer {
-        warn!("get_all_finalizers() is called when no_finalizer = true");
+        log::warn!("get_all_finalizers() is called when no_finalizer = true");
     }

     mmtk.finalizable_processor
@@ -863,7 +865,7 @@ pub fn get_finalizers_for<VM: VMBinding>(
     object: ObjectReference,
 ) -> Vec<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType> {
     if *mmtk.options.no_finalizer {
-        warn!("get_finalizers() is called when no_finalizer = true");
+        log::warn!("get_finalizers() is called when no_finalizer = true");
     }

     mmtk.finalizable_processor

src/mmtk.rs

Lines changed: 4 additions & 2 deletions
@@ -15,6 +15,7 @@ use crate::util::heap::gc_trigger::GCTrigger;
 use crate::util::heap::layout::vm_layout::VMLayout;
 use crate::util::heap::layout::{self, Mmapper, VMMap};
 use crate::util::heap::HeapMeta;
+use crate::util::log;
 use crate::util::opaque_pointer::*;
 use crate::util::options::Options;
 use crate::util::reference_processor::ReferenceProcessors;
@@ -25,6 +26,7 @@ use crate::util::slot_logger::SlotLogger;
 use crate::util::statistics::stats::Stats;
 use crate::vm::ReferenceGlue;
 use crate::vm::VMBinding;
+
 use std::cell::UnsafeCell;
 use std::default::Default;
 use std::sync::atomic::{AtomicBool, Ordering};
@@ -419,12 +421,12 @@ impl<VM: VMBinding> MMTK<VM> {
     ) -> bool {
         use crate::vm::Collection;
         if !self.get_plan().constraints().collects_garbage {
-            warn!("User attempted a collection request, but the plan can not do GC. The request is ignored.");
+            log::warn!("User attempted a collection request, but the plan can not do GC. The request is ignored.");
             return false;
         }

         if force || !*self.options.ignore_system_gc && VM::VMCollection::is_collection_enabled() {
-            info!("User triggering collection");
+            log::info!("User triggering collection");
             if exhaustive {
                 if let Some(gen) = self.get_plan().generational() {
                     gen.force_full_heap_collection();

src/plan/generational/global.rs

Lines changed: 8 additions & 7 deletions
@@ -9,6 +9,7 @@ use crate::scheduler::*;
 use crate::util::copy::CopySemantics;
 use crate::util::heap::gc_trigger::SpaceStats;
 use crate::util::heap::VMRequest;
+use crate::util::log;
 use crate::util::statistics::counter::EventCounter;
 use crate::util::Address;
 use crate::util::ObjectReference;
@@ -101,7 +102,7 @@ impl<VM: VMBinding> CommonGenPlan<VM> {
         let cur_nursery = self.nursery.reserved_pages();
         let max_nursery = self.common.base.gc_trigger.get_max_nursery_pages();
         let nursery_full = cur_nursery >= max_nursery;
-        trace!(
+        log::trace!(
             "nursery_full = {:?} (nursery = {}, max_nursery = {})",
             nursery_full,
             cur_nursery,
@@ -143,7 +144,7 @@ impl<VM: VMBinding> CommonGenPlan<VM> {
         // The conditions are complex, and it is easier to read if we put them to separate if blocks.
         #[allow(clippy::if_same_then_else, clippy::needless_bool)]
         let is_full_heap = if crate::plan::generational::FULL_NURSERY_GC {
-            trace!("full heap: forced full heap");
+            log::trace!("full heap: forced full heap");
             // For barrier overhead measurements, we always do full gc in nursery collections.
             true
         } else if self
@@ -154,7 +155,7 @@ impl<VM: VMBinding> CommonGenPlan<VM> {
             .load(Ordering::SeqCst)
             && *self.common.base.options.full_heap_system_gc
         {
-            trace!("full heap: user triggered");
+            log::trace!("full heap: user triggered");
             // User triggered collection, and we force full heap for user triggered collection
             true
         } else if self.next_gc_full_heap.load(Ordering::SeqCst)
@@ -166,7 +167,7 @@ impl<VM: VMBinding> CommonGenPlan<VM> {
             .load(Ordering::SeqCst)
             > 1
         {
-            trace!(
+            log::trace!(
                 "full heap: next_gc_full_heap = {}, cur_collection_attempts = {}",
                 self.next_gc_full_heap.load(Ordering::SeqCst),
                 self.common
@@ -178,7 +179,7 @@ impl<VM: VMBinding> CommonGenPlan<VM> {
             // Forces full heap collection
             true
         } else if Self::virtual_memory_exhausted(plan.generational().unwrap()) {
-            trace!("full heap: virtual memory exhausted");
+            log::trace!("full heap: virtual memory exhausted");
             true
         } else {
             // We use an Appel-style nursery. The default GC (even for a "heap-full" collection)
@@ -191,7 +192,7 @@ impl<VM: VMBinding> CommonGenPlan<VM> {

         self.gc_full_heap.store(is_full_heap, Ordering::SeqCst);

-        info!(
+        log::info!(
             "{}",
             if is_full_heap {
                 "Full heap GC"
@@ -265,7 +266,7 @@ impl<VM: VMBinding> CommonGenPlan<VM> {
         let available = plan.get_available_pages();
         let min_nursery = plan.base().gc_trigger.get_min_nursery_pages();
         let next_gc_full_heap = available < min_nursery;
-        trace!(
+        log::trace!(
             "next gc will be full heap? {}, available pages = {}, min nursery = {}",
             next_gc_full_heap,
             available,

src/plan/generational/immix/global.rs

Lines changed: 3 additions & 2 deletions
@@ -20,6 +20,7 @@ use crate::util::alloc::allocators::AllocatorSelector;
 use crate::util::copy::*;
 use crate::util::heap::gc_trigger::SpaceStats;
 use crate::util::heap::VMRequest;
+use crate::util::log;
 use crate::util::Address;
 use crate::util::ObjectReference;
 use crate::util::VMWorkerThread;
@@ -109,10 +110,10 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {
         probe!(mmtk, gen_full_heap, is_full_heap);

         if !is_full_heap {
-            info!("Nursery GC");
+            log::info!("Nursery GC");
             scheduler.schedule_common_work::<GenImmixNurseryGCWorkContext<VM>>(self);
         } else {
-            info!("Full heap GC");
+            log::info!("Full heap GC");
             crate::plan::immix::Immix::schedule_immix_full_heap_collection::<
                 GenImmix<VM>,
                 GenImmixMatureGCWorkContext<VM, TRACE_KIND_FAST>,

src/plan/global.rs

Lines changed: 13 additions & 12 deletions
@@ -19,6 +19,7 @@ use crate::util::heap::layout::Mmapper;
 use crate::util::heap::layout::VMMap;
 use crate::util::heap::HeapMeta;
 use crate::util::heap::VMRequest;
+use crate::util::log;
 use crate::util::metadata::side_metadata::SideMetadataSanity;
 use crate::util::metadata::side_metadata::SideMetadataSpec;
 use crate::util::options::Options;
@@ -233,7 +234,7 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast {
         let vm_live_pages = conversions::bytes_to_pages_up(vm_live_bytes);
         let total = used_pages + collection_reserve + vm_live_pages;

-        trace!(
+        log::trace!(
             "Reserved pages = {}, used pages: {}, collection reserve: {}, VM live pages: {}",
             total,
             used_pages,
@@ -267,7 +268,7 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast {
         // may be larger than the reserved pages before a GC, as we may end up using more memory for thread local
         // buffers for copy allocators).
         let available_pages = total_pages.saturating_sub(reserved_pages);
-        trace!(
+        log::trace!(
             "Total pages = {}, reserved pages = {}, available pages = {}",
             total_pages,
             reserved_pages,
@@ -491,25 +492,25 @@ impl<VM: VMBinding> BasePlan<VM> {
     ) -> ObjectReference {
         #[cfg(feature = "code_space")]
         if self.code_space.in_space(object) {
-            trace!("trace_object: object in code space");
+            log::trace!("trace_object: object in code space");
             return self.code_space.trace_object::<Q>(queue, object);
         }

         #[cfg(feature = "code_space")]
         if self.code_lo_space.in_space(object) {
-            trace!("trace_object: object in large code space");
+            log::trace!("trace_object: object in large code space");
             return self.code_lo_space.trace_object::<Q>(queue, object);
         }

         #[cfg(feature = "ro_space")]
         if self.ro_space.in_space(object) {
-            trace!("trace_object: object in ro_space space");
+            log::trace!("trace_object: object in ro_space space");
             return self.ro_space.trace_object(queue, object);
         }

         #[cfg(feature = "vm_space")]
         if self.vm_space.in_space(object) {
-            trace!("trace_object: object in boot space");
+            log::trace!("trace_object: object in boot space");
             return self.vm_space.trace_object(queue, object);
         }

@@ -545,18 +546,18 @@ impl<VM: VMBinding> BasePlan<VM> {
             &self.options,
         );
         if stress_force_gc {
-            debug!(
+            log::debug!(
                 "Stress GC: allocation_bytes = {}, stress_factor = {}",
                 self.global_state.allocation_bytes.load(Ordering::Relaxed),
                 *self.options.stress_factor
             );
-            debug!("Doing stress GC");
+            log::debug!("Doing stress GC");
             self.global_state
                 .allocation_bytes
                 .store(0, Ordering::SeqCst);
         }

-        debug!(
+        log::debug!(
             "self.get_reserved_pages()={}, self.get_total_pages()={}",
             plan.get_reserved_pages(),
             plan.get_total_pages()
@@ -622,15 +623,15 @@ impl<VM: VMBinding> CommonPlan<VM> {
         worker: &mut GCWorker<VM>,
     ) -> ObjectReference {
         if self.immortal.in_space(object) {
-            trace!("trace_object: object in immortal space");
+            log::trace!("trace_object: object in immortal space");
             return self.immortal.trace_object(queue, object);
         }
         if self.los.in_space(object) {
-            trace!("trace_object: object in los");
+            log::trace!("trace_object: object in los");
             return self.los.trace_object(queue, object);
         }
         if self.nonmoving.in_space(object) {
-            trace!("trace_object: object in nonmoving space");
+            log::trace!("trace_object: object in nonmoving space");
             return self.nonmoving.trace_object(queue, object);
         }
         self.base.trace_object::<Q>(queue, object, worker)

src/plan/pageprotect/global.rs

Lines changed: 2 additions & 1 deletion
@@ -10,6 +10,7 @@ use crate::scheduler::*;
 use crate::util::alloc::allocators::AllocatorSelector;
 use crate::util::heap::gc_trigger::SpaceStats;
 use crate::util::heap::VMRequest;
+use crate::util::log;
 use crate::util::metadata::side_metadata::SideMetadataContext;
 use crate::{plan::global::BasePlan, vm::VMBinding};
 use crate::{
@@ -86,7 +87,7 @@ impl<VM: VMBinding> Plan for PageProtect<VM> {
 impl<VM: VMBinding> PageProtect<VM> {
     pub fn new(args: CreateGeneralPlanArgs<VM>) -> Self {
         // Warn users that the plan may fail due to maximum mapping allowed.
-        warn!(
+        log::warn!(
             "PageProtect uses a high volume of memory mappings. \
             If you encounter failures in memory protect/unprotect in this plan,\
             consider increase the maximum mapping allowed by the OS{}.",
