
Commit c486ca1

Use a global static context in fuzzing, reducing overhead
1 parent: 940a51c

File tree: 1 file changed (+42, -2 lines)


secp256k1-sys/src/lib.rs

Lines changed: 42 additions & 2 deletions
@@ -669,6 +669,7 @@ impl<T> CPtr for [T] {
 #[cfg(fuzzing)]
 mod fuzz_dummy {
     use super::*;
+    use core::sync::atomic::{AtomicUsize, Ordering};
 
     #[cfg(rust_secp_no_symbol_renaming)] compile_error!("We do not support fuzzing with rust_secp_no_symbol_renaming");
 
@@ -678,16 +679,55 @@ mod fuzz_dummy {
         fn rustsecp256k1_v0_4_0_context_preallocated_clone(cx: *const Context, prealloc: *mut c_void) -> *mut Context;
     }
 
-    const CTX_SIZE: usize = 1024 * 1024 * 2;
+    #[cfg(feature = "lowmemory")]
+    const CTX_SIZE: usize = 1024 * 65;
+    #[cfg(not(feature = "lowmemory"))]
+    const CTX_SIZE: usize = 1024 * (1024 + 128);
     // Contexts
     pub unsafe fn secp256k1_context_preallocated_size(flags: c_uint) -> size_t {
         assert!(rustsecp256k1_v0_4_0_context_preallocated_size(flags) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
         CTX_SIZE
     }
+
+    static HAVE_PREALLOCATED_CONTEXT: AtomicUsize = AtomicUsize::new(0);
+    const HAVE_CONTEXT_NONE: usize = 0;
+    const HAVE_CONTEXT_WORKING: usize = 1;
+    const HAVE_CONTEXT_DONE: usize = 2;
+    static mut PREALLOCATED_CONTEXT: [u8; CTX_SIZE] = [0; CTX_SIZE];
     pub unsafe fn secp256k1_context_preallocated_create(prealloc: *mut c_void, flags: c_uint) -> *mut Context {
+        // While applications should generally avoid creating too many contexts, sometimes fuzzers
+        // perform tasks repeatedly which real applications may only do rarely. Thus, we want to
+        // avoid being overly slow here. We do so by having a static context and copying it into
+        // new buffers instead of recalculating it. Because we shouldn't rely on std, we use a
+        // simple hand-written OnceFlag built out of an atomic to gate the global static.
+        let mut have_ctx = HAVE_PREALLOCATED_CONTEXT.load(Ordering::Relaxed);
+        while have_ctx != HAVE_CONTEXT_DONE {
+            if have_ctx == HAVE_CONTEXT_NONE {
+                have_ctx = HAVE_PREALLOCATED_CONTEXT.swap(HAVE_CONTEXT_WORKING, Ordering::AcqRel);
+                if have_ctx == HAVE_CONTEXT_NONE {
+                    assert!(rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_START_SIGN | SECP256K1_START_VERIFY) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
+                    assert_eq!(rustsecp256k1_v0_4_0_context_preallocated_create(
+                        PREALLOCATED_CONTEXT[..].as_ptr() as *mut c_void,
+                        SECP256K1_START_SIGN | SECP256K1_START_VERIFY),
+                        PREALLOCATED_CONTEXT[..].as_ptr() as *mut Context);
+                    assert_eq!(HAVE_PREALLOCATED_CONTEXT.swap(HAVE_CONTEXT_DONE, Ordering::AcqRel),
+                        HAVE_CONTEXT_WORKING);
+                } else if have_ctx == HAVE_CONTEXT_DONE {
+                    // Another thread finished while we were swapping.
+                    HAVE_PREALLOCATED_CONTEXT.store(HAVE_CONTEXT_DONE, Ordering::Release);
+                }
+            } else {
+                // Another thread is building, just busy-loop until they're done.
+                assert_eq!(have_ctx, HAVE_CONTEXT_WORKING);
+                have_ctx = HAVE_PREALLOCATED_CONTEXT.load(Ordering::Acquire);
+                #[cfg(feature = "std")]
+                std::thread::yield_now();
+            }
+        }
+        ptr::copy_nonoverlapping(PREALLOCATED_CONTEXT[..].as_ptr(), prealloc as *mut u8, CTX_SIZE);
         let ptr = (prealloc as *mut u8).add(CTX_SIZE).sub(std::mem::size_of::<c_uint>());
         (ptr as *mut c_uint).write(flags);
-        rustsecp256k1_v0_4_0_context_preallocated_create(prealloc, flags)
+        prealloc as *mut Context
     }
     pub unsafe fn secp256k1_context_preallocated_clone_size(_cx: *const Context) -> size_t { CTX_SIZE }
     pub unsafe fn secp256k1_context_preallocated_clone(cx: *const Context, prealloc: *mut c_void) -> *mut Context {
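For readers unfamiliar with the pattern, a minimal, self-contained sketch of the same idea may help: a tri-state atomic flag (mirroring HAVE_CONTEXT_NONE / HAVE_CONTEXT_WORKING / HAVE_CONTEXT_DONE) gates one-time initialization of a global static, so the expensive setup runs once and every later call just reads the cached result. This is not code from the commit; the names STATE, CACHE and expensive_init are illustrative stand-ins for HAVE_PREALLOCATED_CONTEXT, PREALLOCATED_CONTEXT and the context-creation call, and the sketch uses std threads for the demo, whereas the shim only yields when the "std" feature is enabled.

use core::sync::atomic::{AtomicUsize, Ordering};

// Tri-state once-flag, mirroring HAVE_CONTEXT_NONE / WORKING / DONE in the patch.
const NONE: usize = 0;
const WORKING: usize = 1;
const DONE: usize = 2;

static STATE: AtomicUsize = AtomicUsize::new(NONE);
static mut CACHE: u64 = 0; // stand-in for PREALLOCATED_CONTEXT

// Stand-in for the expensive context construction we only want to run once.
fn expensive_init() -> u64 {
    (1..=1_000_000u64).sum()
}

// Returns the cached value, computing it exactly once even with concurrent callers.
// (The patch does the first load with Ordering::Relaxed; Acquire is used here so the
// fast path also synchronizes with the publishing store.)
fn get_cached() -> u64 {
    let mut state = STATE.load(Ordering::Acquire);
    while state != DONE {
        if state == NONE {
            // Try to claim the builder role.
            state = STATE.swap(WORKING, Ordering::AcqRel);
            if state == NONE {
                // We won the race: fill the static, then publish it.
                unsafe { CACHE = expensive_init() };
                assert_eq!(STATE.swap(DONE, Ordering::AcqRel), WORKING);
            } else if state == DONE {
                // Another thread finished while we were swapping; restore DONE.
                STATE.store(DONE, Ordering::Release);
            }
        } else {
            // Another thread is building; busy-loop (yielding) until it publishes.
            assert_eq!(state, WORKING);
            state = STATE.load(Ordering::Acquire);
            std::thread::yield_now();
        }
    }
    unsafe { CACHE }
}

fn main() {
    let expected: u64 = (1..=1_000_000).sum();
    let handles: Vec<_> = (0..4).map(|_| std::thread::spawn(get_cached)).collect();
    for handle in handles {
        assert_eq!(handle.join().unwrap(), expected);
    }
    println!("initialized once, observed by 4 threads");
}

The commit applies the same design: instead of calling rustsecp256k1_v0_4_0_context_preallocated_create on every fuzz iteration, the shim builds one fully enabled (SIGN | VERIFY) context into PREALLOCATED_CONTEXT the first time, then each subsequent call copies that buffer into the caller's preallocation, writes the requested flags into the final c_uint of the buffer, and returns the preallocation directly.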
