@@ -11,11 +11,6 @@ use super::init_once::InitOnce;
11
11
use super :: vector_clock:: VClock ;
12
12
use crate :: * ;
13
13
14
- pub trait SyncId {
15
- fn from_u32 ( id : u32 ) -> Self ;
16
- fn to_u32 ( & self ) -> u32 ;
17
- }
18
-
19
14
/// We cannot use the `newtype_index!` macro because we have to use 0 as a
20
15
/// sentinel value meaning that the identifier is not assigned. This is because
21
16
/// the pthreads static initializers initialize memory with zeros (see the
@@ -27,16 +22,6 @@ macro_rules! declare_id {
27
22
#[ derive( Clone , Copy , Debug , PartialOrd , Ord , PartialEq , Eq , Hash ) ]
28
23
pub struct $name( std:: num:: NonZero <u32 >) ;
29
24
30
- impl $crate:: concurrency:: sync:: SyncId for $name {
31
- // Panics if `id == 0`.
32
- fn from_u32( id: u32 ) -> Self {
33
- Self ( std:: num:: NonZero :: new( id) . unwrap( ) )
34
- }
35
- fn to_u32( & self ) -> u32 {
36
- self . 0 . get( )
37
- }
38
- }
39
-
40
25
impl $crate:: VisitProvenance for $name {
41
26
fn visit_provenance( & self , _visit: & mut VisitWith <' _>) { }
42
27
}
@@ -55,12 +40,6 @@ macro_rules! declare_id {
55
40
usize :: try_from( self . 0 . get( ) - 1 ) . unwrap( )
56
41
}
57
42
}
58
-
59
- impl $name {
60
- pub fn to_u32_scalar( & self ) -> Scalar {
61
- Scalar :: from_u32( self . 0 . get( ) )
62
- }
63
- }
64
43
} ;
65
44
}
66
45
pub ( super ) use declare_id;
@@ -166,56 +145,6 @@ pub struct SynchronizationObjects {
166
145
// Private extension trait for local helper methods
167
146
impl < ' tcx > EvalContextExtPriv < ' tcx > for crate :: MiriInterpCx < ' tcx > { }
168
147
pub ( super ) trait EvalContextExtPriv < ' tcx > : crate :: MiriInterpCxExt < ' tcx > {
169
- /// Lazily initialize the ID of this Miri sync structure.
170
- /// If memory stores '0', that indicates uninit and we generate a new instance.
171
- /// Returns `None` if memory stores a non-zero invalid ID.
172
- ///
173
- /// `get_objs` must return the `IndexVec` that stores all the objects of this type.
174
- /// `create_obj` must create the new object if initialization is needed.
175
- #[ inline]
176
- fn get_or_create_id < Id : SyncId + Idx , T > (
177
- & mut self ,
178
- lock : & MPlaceTy < ' tcx > ,
179
- offset : u64 ,
180
- get_objs : impl for < ' a > Fn ( & ' a mut MiriInterpCx < ' tcx > ) -> & ' a mut IndexVec < Id , T > ,
181
- create_obj : impl for < ' a > FnOnce ( & ' a mut MiriInterpCx < ' tcx > ) -> InterpResult < ' tcx , T > ,
182
- ) -> InterpResult < ' tcx , Option < Id > > {
183
- let this = self . eval_context_mut ( ) ;
184
- let offset = Size :: from_bytes ( offset) ;
185
- assert ! ( lock. layout. size >= offset + this. machine. layouts. u32 . size) ;
186
- let id_place = lock. offset ( offset, this. machine . layouts . u32 , this) ?;
187
- let next_index = get_objs ( this) . next_index ( ) ;
188
-
189
- // Since we are lazy, this update has to be atomic.
190
- let ( old, success) = this
191
- . atomic_compare_exchange_scalar (
192
- & id_place,
193
- & ImmTy :: from_uint ( 0u32 , this. machine . layouts . u32 ) ,
194
- Scalar :: from_u32 ( next_index. to_u32 ( ) ) ,
195
- AtomicRwOrd :: Relaxed , // deliberately *no* synchronization
196
- AtomicReadOrd :: Relaxed ,
197
- false ,
198
- ) ?
199
- . to_scalar_pair ( ) ;
200
-
201
- interp_ok ( if success. to_bool ( ) . expect ( "compare_exchange's second return value is a bool" ) {
202
- // We set the in-memory ID to `next_index`, now also create this object in the machine
203
- // state.
204
- let obj = create_obj ( this) ?;
205
- let new_index = get_objs ( this) . push ( obj) ;
206
- assert_eq ! ( next_index, new_index) ;
207
- Some ( new_index)
208
- } else {
209
- let id = Id :: from_u32 ( old. to_u32 ( ) . expect ( "layout is u32" ) ) ;
210
- if get_objs ( this) . get ( id) . is_none ( ) {
211
- // The in-memory ID is invalid.
212
- None
213
- } else {
214
- Some ( id)
215
- }
216
- } )
217
- }
218
-
219
148
fn condvar_reacquire_mutex (
220
149
& mut self ,
221
150
mutex : MutexId ,
@@ -248,6 +177,80 @@ impl SynchronizationObjects {
248
177
/// Create a new condition variable and return its ID.
/// The new condvar starts out with no waiters.
pub fn condvar_create(&mut self) -> CondvarId {
    self.condvars.push(Default::default())
}
180
+
181
/// Create a new init-once primitive and return its ID.
/// The new object starts out in its default (uninitialized) state.
pub fn init_once_create(&mut self) -> InitOnceId {
    self.init_onces.push(Default::default())
}
184
+ }
185
+
186
impl<'tcx> AllocExtra<'tcx> {
    /// Look up the lazily-created sync object stored at `offset` in this allocation.
    /// Returns `None` if nothing is stored there, or if the stored object has a
    /// different type than `T` (the `downcast_ref` then fails).
    pub fn get_sync<T: 'static>(&self, offset: Size) -> Option<&T> {
        self.sync.get(&offset).and_then(|s| s.downcast_ref::<T>())
    }
}
191
+
192
+ /// We designate an `init` field in all primitives.
193
+ /// If `init` is set to this, we consider the primitive initialized.
194
+ pub const LAZY_INIT_COOKIE : u32 = 0xcafe_affe ;
195
+
196
+ /// Helper for lazily initialized `alloc_extra.sync` data:
197
+ /// this forces an immediate init.
198
+ pub fn lazy_sync_init < ' tcx , T : ' static + Copy > (
199
+ ecx : & mut MiriInterpCx < ' tcx > ,
200
+ primitive : & MPlaceTy < ' tcx > ,
201
+ init_offset : Size ,
202
+ data : T ,
203
+ ) -> InterpResult < ' tcx > {
204
+ let init_field = primitive. offset ( init_offset, ecx. machine . layouts . u32 , ecx) ?;
205
+
206
+ let ( alloc, offset, _) = ecx. ptr_get_alloc_id ( primitive. ptr ( ) , 0 ) ?;
207
+ let ( alloc_extra, _machine) = ecx. get_alloc_extra_mut ( alloc) ?;
208
+ alloc_extra. sync . insert ( offset, Box :: new ( data) ) ;
209
+ // Mark this as "initialized".
210
+ ecx. write_scalar_atomic (
211
+ Scalar :: from_u32 ( LAZY_INIT_COOKIE ) ,
212
+ & init_field,
213
+ AtomicWriteOrd :: Relaxed ,
214
+ ) ?;
215
+ interp_ok ( ( ) )
216
+ }
217
+
218
+ /// Helper for lazily initialized `alloc_extra.sync` data:
219
+ /// Checks if the primitive is initialized, and return its associated data if so.
220
+ /// Otherwise, return None.
221
+ pub fn lazy_sync_get_data < ' tcx , T : ' static + Copy > (
222
+ ecx : & mut MiriInterpCx < ' tcx > ,
223
+ primitive : & MPlaceTy < ' tcx > ,
224
+ init_offset : Size ,
225
+ name : & str ,
226
+ ) -> InterpResult < ' tcx , Option < T > > {
227
+ let init_field = primitive. offset ( init_offset, ecx. machine . layouts . u32 , ecx) ?;
228
+ // Check if this is already initialized. Needs to be atomic because we can race with another
229
+ // thread initializing. Needs to be an RMW operation to ensure we read the *latest* value.
230
+ // So we just try to replace MUTEX_INIT_COOKIE with itself.
231
+ let init_cookie = Scalar :: from_u32 ( LAZY_INIT_COOKIE ) ;
232
+ let ( _init, success) = ecx
233
+ . atomic_compare_exchange_scalar (
234
+ & init_field,
235
+ & ImmTy :: from_scalar ( init_cookie, ecx. machine . layouts . u32 ) ,
236
+ init_cookie,
237
+ AtomicRwOrd :: Acquire ,
238
+ AtomicReadOrd :: Acquire ,
239
+ /* can_fail_spuriously */ false ,
240
+ ) ?
241
+ . to_scalar_pair ( ) ;
242
+ if success. to_bool ( ) ? {
243
+ // If it is initialized, it must be found in the "sync primitive" table,
244
+ // or else it has been moved illegally.
245
+ let ( alloc, offset, _) = ecx. ptr_get_alloc_id ( primitive. ptr ( ) , 0 ) ?;
246
+ let alloc_extra = ecx. get_alloc_extra ( alloc) ?;
247
+ let data = alloc_extra
248
+ . get_sync :: < T > ( offset)
249
+ . ok_or_else ( || err_ub_format ! ( "`{name}` can't be moved after first use" ) ) ?;
250
+ interp_ok ( Some ( * data) )
251
+ } else {
252
+ interp_ok ( None )
253
+ }
251
254
}
252
255
253
256
// Public interface to synchronization primitives. Please note that in most
0 commit comments