@@ -669,6 +669,7 @@ impl<T> CPtr for [T] {
669
669
#[ cfg( fuzzing) ]
670
670
mod fuzz_dummy {
671
671
use super :: * ;
672
+ use core:: sync:: atomic:: { AtomicUsize , Ordering } ;
672
673
673
674
#[ cfg( rust_secp_no_symbol_renaming) ] compile_error ! ( "We do not support fuzzing with rust_secp_no_symbol_renaming" ) ;
674
675
@@ -678,16 +679,55 @@ mod fuzz_dummy {
678
679
fn rustsecp256k1_v0_4_0_context_preallocated_clone ( cx : * const Context , prealloc : * mut c_void ) -> * mut Context ;
679
680
}
680
681
681
// Fixed size of every fake "preallocated context" buffer handed to callers.
// The asserts in secp256k1_context_preallocated_size / _create check that a
// real context plus the trailing c_uint flags word fits in this buffer.
#[cfg(feature = "lowmemory")]
const CTX_SIZE: usize = 1024 * 65;
#[cfg(not(feature = "lowmemory"))]
const CTX_SIZE: usize = 1024 * (1024 + 128);

// Contexts
pub unsafe fn secp256k1_context_preallocated_size ( flags : c_uint ) -> size_t {
684
688
assert ! ( rustsecp256k1_v0_4_0_context_preallocated_size( flags) + std:: mem:: size_of:: <c_uint>( ) <= CTX_SIZE ) ;
685
689
CTX_SIZE
686
690
}
691
// Hand-written once-flag guarding lazy initialization of the single global
// context below; built from an atomic because (per the comment in
// secp256k1_context_preallocated_create) this code shouldn't rely on std.
static HAVE_PREALLOCATED_CONTEXT: AtomicUsize = AtomicUsize::new(0);
const HAVE_CONTEXT_NONE: usize = 0;    // not yet built
const HAVE_CONTEXT_WORKING: usize = 1; // some thread is currently building it
const HAVE_CONTEXT_DONE: usize = 2;    // PREALLOCATED_CONTEXT holds a valid context
// Backing storage for the one real context that gets copied into every
// "newly created" context buffer.
static mut PREALLOCATED_CONTEXT: [u8; CTX_SIZE] = [0; CTX_SIZE];
/// Fuzzing stub for context creation: fills `prealloc` with a byte copy of a
/// lazily-initialized global context, then writes `flags` into the final
/// `c_uint` of the buffer (so the effective flags can be recovered later),
/// and returns `prealloc` itself as the context pointer.
pub unsafe fn secp256k1_context_preallocated_create(prealloc: *mut c_void, flags: c_uint) -> *mut Context {
    // While applications should generally avoid creating too many contexts, sometimes fuzzers
    // perform tasks repeatedly which real applications may only do rarely. Thus, we want to
    // avoid being overly slow here. We do so by having a static context and copying it into
    // new buffers instead of recalculating it. Because we shouldn't rely on std, we use a
    // simple hand-written OnceFlag built out of an atomic to gate the global static.
    let mut have_ctx = HAVE_PREALLOCATED_CONTEXT.load(Ordering::Relaxed);
    while have_ctx != HAVE_CONTEXT_DONE {
        if have_ctx == HAVE_CONTEXT_NONE {
            // Race to claim the builder role. AcqRel: we publish WORKING and
            // observe whatever state the previous owner published.
            have_ctx = HAVE_PREALLOCATED_CONTEXT.swap(HAVE_CONTEXT_WORKING, Ordering::AcqRel);
            if have_ctx == HAVE_CONTEXT_NONE {
                // We won the race: build the real context once, in place,
                // inside the global buffer. The asserts verify it fits and
                // that libsecp constructed it exactly where we asked.
                assert!(rustsecp256k1_v0_4_0_context_preallocated_size(SECP256K1_START_SIGN | SECP256K1_START_VERIFY) + std::mem::size_of::<c_uint>() <= CTX_SIZE);
                assert_eq!(rustsecp256k1_v0_4_0_context_preallocated_create(
                    PREALLOCATED_CONTEXT[..].as_ptr() as *mut c_void,
                    SECP256K1_START_SIGN | SECP256K1_START_VERIFY),
                    PREALLOCATED_CONTEXT[..].as_ptr() as *mut Context);
                // Publish DONE; the swap must still see our own WORKING marker.
                assert_eq!(HAVE_PREALLOCATED_CONTEXT.swap(HAVE_CONTEXT_DONE, Ordering::AcqRel),
                           HAVE_CONTEXT_WORKING);
            } else if have_ctx == HAVE_CONTEXT_DONE {
                // Another thread finished while we were swapping.
                HAVE_PREALLOCATED_CONTEXT.store(HAVE_CONTEXT_DONE, Ordering::Release);
            }
        } else {
            // Another thread is building, just busy-loop until they're done.
            assert_eq!(have_ctx, HAVE_CONTEXT_WORKING);
            have_ctx = HAVE_PREALLOCATED_CONTEXT.load(Ordering::Acquire);
            #[cfg(feature = "std")]
            std::thread::yield_now();
        }
    }
    // Copy the prebuilt context into the caller's buffer instead of building
    // a fresh one, then stash the caller's flags in the trailing c_uint.
    ptr::copy_nonoverlapping(PREALLOCATED_CONTEXT[..].as_ptr(), prealloc as *mut u8, CTX_SIZE);
    let ptr = (prealloc as *mut u8).add(CTX_SIZE).sub(std::mem::size_of::<c_uint>());
    (ptr as *mut c_uint).write(flags);
    prealloc as *mut Context
}
pub unsafe fn secp256k1_context_preallocated_clone_size ( _cx : * const Context ) -> size_t { CTX_SIZE }
693
733
pub unsafe fn secp256k1_context_preallocated_clone ( cx : * const Context , prealloc : * mut c_void ) -> * mut Context {
0 commit comments