Skip to content

Commit 3f111c1

Browse files
committed
Auto merge of #2114 - cbeuw:shim-rmw, r=RalfJung
Use atomic RMW for `{mutex, rwlock, cond, srwlock}_get_or_create_id` functions. This is required for #1963. `{mutex, rwlock, cond, srwlock}_get_or_create_id()` currently checks whether an ID field is 0 using an atomic read; if it is, it allocates a new ID and writes it in a separate atomic write. This is fine without weak memory. For instance, in `pthread_mutex_lock`, which may be called by two threads concurrently, only one thread can read 0, then create and write a new ID; the later-run thread will always see the newly created ID and never 0. ```rust fn pthread_mutex_lock(&mut self, mutex_op: &OpTy<'tcx, Tag>) -> InterpResult<'tcx, i32> { let this = self.eval_context_mut(); let kind = mutex_get_kind(this, mutex_op)?.check_init()?; let id = mutex_get_or_create_id(this, mutex_op)?; let active_thread = this.get_active_thread(); ``` However, with weak memory behaviour, both threads may read 0: the first thread has to see 0 because nothing else was ever written to the field, and the second thread is not guaranteed to observe the latest value, causing a duplicate mutex to be created and both threads to "successfully" acquire the lock at the same time. This is a pretty typical pattern requiring the use of atomic RMWs. An RMW *always* reads the latest value in a location, so only one thread can create the new mutex and its ID; all others scheduled later will see the new ID.
2 parents d33e7fc + 9e38dc4 commit 3f111c1

File tree

5 files changed

+170
-63
lines changed

5 files changed

+170
-63
lines changed

src/data_race.rs

Lines changed: 5 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -73,9 +73,9 @@ use rustc_middle::{mir, ty::layout::TyAndLayout};
7373
use rustc_target::abi::Size;
7474

7575
use crate::{
76-
AllocId, AllocRange, ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MemoryKind,
77-
MiriEvalContext, MiriEvalContextExt, MiriMemoryKind, OpTy, Pointer, RangeMap, Scalar,
78-
ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp, VectorIdx,
76+
AllocId, AllocRange, HelpersEvalContextExt, ImmTy, Immediate, InterpResult, MPlaceTy,
77+
MemoryKind, MiriEvalContext, MiriEvalContextExt, MiriMemoryKind, OpTy, Pointer, RangeMap,
78+
Scalar, ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp, VectorIdx,
7979
};
8080

8181
pub type AllocExtra = VClockAlloc;
@@ -450,12 +450,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
450450
atomic: AtomicReadOp,
451451
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
452452
let this = self.eval_context_ref();
453-
let op_place = this.deref_operand(op)?;
454-
let offset = Size::from_bytes(offset);
455-
456-
// Ensure that the following read at an offset is within bounds.
457-
assert!(op_place.layout.size >= offset + layout.size);
458-
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
453+
let value_place = this.deref_operand_and_offset(op, offset, layout)?;
459454
this.read_scalar_atomic(&value_place, atomic)
460455
}
461456

@@ -469,12 +464,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
469464
atomic: AtomicWriteOp,
470465
) -> InterpResult<'tcx> {
471466
let this = self.eval_context_mut();
472-
let op_place = this.deref_operand(op)?;
473-
let offset = Size::from_bytes(offset);
474-
475-
// Ensure that the following read at an offset is within bounds.
476-
assert!(op_place.layout.size >= offset + layout.size);
477-
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
467+
let value_place = this.deref_operand_and_offset(op, offset, layout)?;
478468
this.write_scalar_atomic(value.into(), &value_place, atomic)
479469
}
480470

src/helpers.rs

Lines changed: 17 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -597,18 +597,31 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
597597
}
598598
}
599599

600-
fn read_scalar_at_offset(
600+
/// Calculates the MPlaceTy given the offset and layout of an access on an operand
601+
fn deref_operand_and_offset(
601602
&self,
602603
op: &OpTy<'tcx, Tag>,
603604
offset: u64,
604605
layout: TyAndLayout<'tcx>,
605-
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
606+
) -> InterpResult<'tcx, MPlaceTy<'tcx, Tag>> {
606607
let this = self.eval_context_ref();
607608
let op_place = this.deref_operand(op)?;
608609
let offset = Size::from_bytes(offset);
609-
// Ensure that the following read at an offset is within bounds
610+
611+
// Ensure that the access is within bounds.
610612
assert!(op_place.layout.size >= offset + layout.size);
611613
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
614+
Ok(value_place)
615+
}
616+
617+
fn read_scalar_at_offset(
618+
&self,
619+
op: &OpTy<'tcx, Tag>,
620+
offset: u64,
621+
layout: TyAndLayout<'tcx>,
622+
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
623+
let this = self.eval_context_ref();
624+
let value_place = this.deref_operand_and_offset(op, offset, layout)?;
612625
this.read_scalar(&value_place.into())
613626
}
614627

@@ -620,11 +633,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
620633
layout: TyAndLayout<'tcx>,
621634
) -> InterpResult<'tcx, ()> {
622635
let this = self.eval_context_mut();
623-
let op_place = this.deref_operand(op)?;
624-
let offset = Size::from_bytes(offset);
625-
// Ensure that the following read at an offset is within bounds
626-
assert!(op_place.layout.size >= offset + layout.size);
627-
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
636+
let value_place = this.deref_operand_and_offset(op, offset, layout)?;
628637
this.write_scalar(value, &value_place.into())
629638
}
630639

src/shims/posix/sync.rs

Lines changed: 66 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -112,16 +112,28 @@ fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
112112
ecx: &mut MiriEvalContext<'mir, 'tcx>,
113113
mutex_op: &OpTy<'tcx, Tag>,
114114
) -> InterpResult<'tcx, MutexId> {
115-
let id = mutex_get_id(ecx, mutex_op)?.to_u32()?;
116-
if id == 0 {
117-
// 0 is a default value and also not a valid mutex id. Need to allocate
118-
// a new mutex.
119-
let id = ecx.mutex_create();
120-
mutex_set_id(ecx, mutex_op, id.to_u32_scalar())?;
121-
Ok(id)
122-
} else {
123-
Ok(MutexId::from_u32(id))
124-
}
115+
let value_place = ecx.deref_operand_and_offset(mutex_op, 4, ecx.machine.layouts.u32)?;
116+
117+
ecx.mutex_get_or_create(|ecx, next_id| {
118+
let (old, success) = ecx
119+
.atomic_compare_exchange_scalar(
120+
&value_place,
121+
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
122+
next_id.to_u32_scalar().into(),
123+
AtomicRwOp::Relaxed,
124+
AtomicReadOp::Relaxed,
125+
false,
126+
)?
127+
.to_scalar_pair()
128+
.expect("compare_exchange returns a scalar pair");
129+
130+
Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
131+
// Caller of the closure needs to allocate next_id
132+
None
133+
} else {
134+
Some(MutexId::from_u32(old.to_u32().expect("layout is u32")))
135+
})
136+
})
125137
}
126138

127139
// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
@@ -156,16 +168,28 @@ fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
156168
ecx: &mut MiriEvalContext<'mir, 'tcx>,
157169
rwlock_op: &OpTy<'tcx, Tag>,
158170
) -> InterpResult<'tcx, RwLockId> {
159-
let id = rwlock_get_id(ecx, rwlock_op)?.to_u32()?;
160-
if id == 0 {
161-
// 0 is a default value and also not a valid rwlock id. Need to allocate
162-
// a new read-write lock.
163-
let id = ecx.rwlock_create();
164-
rwlock_set_id(ecx, rwlock_op, id.to_u32_scalar())?;
165-
Ok(id)
166-
} else {
167-
Ok(RwLockId::from_u32(id))
168-
}
171+
let value_place = ecx.deref_operand_and_offset(rwlock_op, 4, ecx.machine.layouts.u32)?;
172+
173+
ecx.rwlock_get_or_create(|ecx, next_id| {
174+
let (old, success) = ecx
175+
.atomic_compare_exchange_scalar(
176+
&value_place,
177+
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
178+
next_id.to_u32_scalar().into(),
179+
AtomicRwOp::Relaxed,
180+
AtomicReadOp::Relaxed,
181+
false,
182+
)?
183+
.to_scalar_pair()
184+
.expect("compare_exchange returns a scalar pair");
185+
186+
Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
187+
// Caller of the closure needs to allocate next_id
188+
None
189+
} else {
190+
Some(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
191+
})
192+
})
169193
}
170194

171195
// pthread_condattr_t
@@ -228,16 +252,28 @@ fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
228252
ecx: &mut MiriEvalContext<'mir, 'tcx>,
229253
cond_op: &OpTy<'tcx, Tag>,
230254
) -> InterpResult<'tcx, CondvarId> {
231-
let id = cond_get_id(ecx, cond_op)?.to_u32()?;
232-
if id == 0 {
233-
// 0 is a default value and also not a valid conditional variable id.
234-
// Need to allocate a new id.
235-
let id = ecx.condvar_create();
236-
cond_set_id(ecx, cond_op, id.to_u32_scalar())?;
237-
Ok(id)
238-
} else {
239-
Ok(CondvarId::from_u32(id))
240-
}
255+
let value_place = ecx.deref_operand_and_offset(cond_op, 4, ecx.machine.layouts.u32)?;
256+
257+
ecx.condvar_get_or_create(|ecx, next_id| {
258+
let (old, success) = ecx
259+
.atomic_compare_exchange_scalar(
260+
&value_place,
261+
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
262+
next_id.to_u32_scalar().into(),
263+
AtomicRwOp::Relaxed,
264+
AtomicReadOp::Relaxed,
265+
false,
266+
)?
267+
.to_scalar_pair()
268+
.expect("compare_exchange returns a scalar pair");
269+
270+
Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
271+
// Caller of the closure needs to allocate next_id
272+
None
273+
} else {
274+
Some(CondvarId::from_u32(old.to_u32().expect("layout is u32")))
275+
})
276+
})
241277
}
242278

243279
fn cond_get_clock_id<'mir, 'tcx: 'mir>(

src/shims/windows/sync.rs

Lines changed: 22 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -7,16 +7,28 @@ fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>(
77
ecx: &mut MiriEvalContext<'mir, 'tcx>,
88
lock_op: &OpTy<'tcx, Tag>,
99
) -> InterpResult<'tcx, RwLockId> {
10-
let id = ecx.read_scalar_at_offset(lock_op, 0, ecx.machine.layouts.u32)?.to_u32()?;
11-
if id == 0 {
12-
// 0 is a default value and also not a valid rwlock id. Need to allocate
13-
// a new rwlock.
14-
let id = ecx.rwlock_create();
15-
ecx.write_scalar_at_offset(lock_op, 0, id.to_u32_scalar(), ecx.machine.layouts.u32)?;
16-
Ok(id)
17-
} else {
18-
Ok(RwLockId::from_u32(id))
19-
}
10+
let value_place = ecx.deref_operand_and_offset(lock_op, 0, ecx.machine.layouts.u32)?;
11+
12+
ecx.rwlock_get_or_create(|ecx, next_id| {
13+
let (old, success) = ecx
14+
.atomic_compare_exchange_scalar(
15+
&value_place,
16+
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
17+
next_id.to_u32_scalar().into(),
18+
AtomicRwOp::Relaxed,
19+
AtomicReadOp::Relaxed,
20+
false,
21+
)?
22+
.to_scalar_pair()
23+
.expect("compare_exchange returns a scalar pair");
24+
25+
Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
26+
// Caller of the closure needs to allocate next_id
27+
None
28+
} else {
29+
Some(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
30+
})
31+
})
2032
}
2133

2234
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}

src/sync.rs

Lines changed: 60 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -215,6 +215,24 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
215215
this.machine.threads.sync.mutexes.push(Default::default())
216216
}
217217

218+
#[inline]
219+
/// Provides the closure with the next MutexId. Creates that mutex if the closure returns None,
220+
/// otherwise returns the value from the closure
221+
fn mutex_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, MutexId>
222+
where
223+
F: FnOnce(&mut MiriEvalContext<'mir, 'tcx>, MutexId) -> InterpResult<'tcx, Option<MutexId>>,
224+
{
225+
let this = self.eval_context_mut();
226+
let next_index = this.machine.threads.sync.mutexes.next_index();
227+
if let Some(old) = existing(this, next_index)? {
228+
Ok(old)
229+
} else {
230+
let new_index = this.machine.threads.sync.mutexes.push(Default::default());
231+
assert_eq!(next_index, new_index);
232+
Ok(new_index)
233+
}
234+
}
235+
218236
#[inline]
219237
/// Get the id of the thread that currently owns this lock.
220238
fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId {
@@ -297,6 +315,27 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
297315
this.machine.threads.sync.rwlocks.push(Default::default())
298316
}
299317

318+
#[inline]
319+
/// Provides the closure with the next RwLockId. Creates that RwLock if the closure returns None,
320+
/// otherwise returns the value from the closure
321+
fn rwlock_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, RwLockId>
322+
where
323+
F: FnOnce(
324+
&mut MiriEvalContext<'mir, 'tcx>,
325+
RwLockId,
326+
) -> InterpResult<'tcx, Option<RwLockId>>,
327+
{
328+
let this = self.eval_context_mut();
329+
let next_index = this.machine.threads.sync.rwlocks.next_index();
330+
if let Some(old) = existing(this, next_index)? {
331+
Ok(old)
332+
} else {
333+
let new_index = this.machine.threads.sync.rwlocks.push(Default::default());
334+
assert_eq!(next_index, new_index);
335+
Ok(new_index)
336+
}
337+
}
338+
300339
#[inline]
301340
/// Check if locked.
302341
fn rwlock_is_locked(&self, id: RwLockId) -> bool {
@@ -445,6 +484,27 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
445484
this.machine.threads.sync.condvars.push(Default::default())
446485
}
447486

487+
#[inline]
488+
/// Provides the closure with the next CondvarId. Creates that Condvar if the closure returns None,
489+
/// otherwise returns the value from the closure
490+
fn condvar_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, CondvarId>
491+
where
492+
F: FnOnce(
493+
&mut MiriEvalContext<'mir, 'tcx>,
494+
CondvarId,
495+
) -> InterpResult<'tcx, Option<CondvarId>>,
496+
{
497+
let this = self.eval_context_mut();
498+
let next_index = this.machine.threads.sync.condvars.next_index();
499+
if let Some(old) = existing(this, next_index)? {
500+
Ok(old)
501+
} else {
502+
let new_index = this.machine.threads.sync.condvars.push(Default::default());
503+
assert_eq!(next_index, new_index);
504+
Ok(new_index)
505+
}
506+
}
507+
448508
#[inline]
449509
/// Is the conditional variable awaited?
450510
fn condvar_is_awaited(&mut self, id: CondvarId) -> bool {

0 commit comments

Comments
 (0)