Skip to content

Commit fbd7b2f

Browse files
committed
move atomic access alignment check to helper function and inside atomic access lib
1 parent 1a87926 commit fbd7b2f

File tree

2 files changed

+30
-60
lines changed

2 files changed

+30
-60
lines changed

src/concurrency/data_race.rs

Lines changed: 26 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ use std::{
4949
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
5050
use rustc_index::vec::{Idx, IndexVec};
5151
use rustc_middle::{mir, ty::layout::TyAndLayout};
52-
use rustc_target::abi::Size;
52+
use rustc_target::abi::{Align, Size};
5353

5454
use crate::*;
5555

@@ -463,13 +463,30 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
463463
this.write_scalar_atomic(value.into(), &value_place, atomic)
464464
}
465465

466+
/// Checks that an atomic access is legal at the given place.
467+
fn atomic_access_check(&self, place: &MPlaceTy<'tcx, Provenance>) -> InterpResult<'tcx> {
468+
let this = self.eval_context_ref();
469+
// Check alignment requirements. Atomics must always be aligned to their size,
470+
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
471+
// be 8-aligned).
472+
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
473+
this.check_ptr_access_align(
474+
place.ptr,
475+
place.layout.size,
476+
align,
477+
CheckInAllocMsg::MemoryAccessTest,
478+
)?;
479+
Ok(())
480+
}
481+
466482
/// Perform an atomic read operation at the memory location.
467483
fn read_scalar_atomic(
468484
&self,
469485
place: &MPlaceTy<'tcx, Provenance>,
470486
atomic: AtomicReadOrd,
471487
) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
472488
let this = self.eval_context_ref();
489+
this.atomic_access_check(place)?;
473490
// This will read from the last store in the modification order of this location. In case
474491
// weak memory emulation is enabled, this may not be the store we will pick to actually read from and return.
475492
// This is fine with StackedBorrow and race checks because they don't concern metadata on
@@ -490,6 +507,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
490507
atomic: AtomicWriteOrd,
491508
) -> InterpResult<'tcx> {
492509
let this = self.eval_context_mut();
510+
this.atomic_access_check(dest)?;
511+
493512
this.validate_overlapping_atomic(dest)?;
494513
this.allow_data_races_mut(move |this| this.write_scalar(val, &dest.into()))?;
495514
this.validate_atomic_store(dest, atomic)?;
@@ -511,6 +530,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
511530
atomic: AtomicRwOrd,
512531
) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
513532
let this = self.eval_context_mut();
533+
this.atomic_access_check(place)?;
514534

515535
this.validate_overlapping_atomic(place)?;
516536
let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
@@ -540,6 +560,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
540560
atomic: AtomicRwOrd,
541561
) -> InterpResult<'tcx, ScalarMaybeUninit<Provenance>> {
542562
let this = self.eval_context_mut();
563+
this.atomic_access_check(place)?;
543564

544565
this.validate_overlapping_atomic(place)?;
545566
let old = this.allow_data_races_mut(|this| this.read_scalar(&place.into()))?;
@@ -561,6 +582,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
561582
atomic: AtomicRwOrd,
562583
) -> InterpResult<'tcx, ImmTy<'tcx, Provenance>> {
563584
let this = self.eval_context_mut();
585+
this.atomic_access_check(place)?;
564586

565587
this.validate_overlapping_atomic(place)?;
566588
let old = this.allow_data_races_mut(|this| this.read_immediate(&place.into()))?;
@@ -604,6 +626,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
604626
) -> InterpResult<'tcx, Immediate<Provenance>> {
605627
use rand::Rng as _;
606628
let this = self.eval_context_mut();
629+
this.atomic_access_check(place)?;
607630

608631
this.validate_overlapping_atomic(place)?;
609632
// Failure ordering cannot be stronger than success ordering, therefore first attempt
@@ -1016,6 +1039,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
10161039
fn allow_data_races_ref<R>(&self, op: impl FnOnce(&MiriEvalContext<'mir, 'tcx>) -> R) -> R {
10171040
let this = self.eval_context_ref();
10181041
if let Some(data_race) = &this.machine.data_race {
1042+
assert!(!data_race.ongoing_action_data_race_free.get(), "cannot nest allow_data_races");
10191043
data_race.ongoing_action_data_race_free.set(true);
10201044
}
10211045
let result = op(this);
@@ -1035,6 +1059,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
10351059
) -> R {
10361060
let this = self.eval_context_mut();
10371061
if let Some(data_race) = &this.machine.data_race {
1062+
assert!(!data_race.ongoing_action_data_race_free.get(), "cannot nest allow_data_races");
10381063
data_race.ongoing_action_data_race_free.set(true);
10391064
}
10401065
let result = op(this);

src/shims/intrinsics/atomic.rs

Lines changed: 4 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,4 @@
11
use rustc_middle::{mir, mir::BinOp, ty};
2-
use rustc_target::abi::Align;
32

43
use crate::*;
54
use helpers::check_arg_count;
@@ -130,20 +129,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
130129
let [place] = check_arg_count(args)?;
131130
let place = this.deref_operand(place)?;
132131

133-
// make sure it fits into a scalar; otherwise it cannot be atomic
132+
// Perform atomic load.
134133
let val = this.read_scalar_atomic(&place, atomic)?;
135-
136-
// Check alignment requirements. Atomics must always be aligned to their size,
137-
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
138-
// be 8-aligned).
139-
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
140-
this.check_ptr_access_align(
141-
place.ptr,
142-
place.layout.size,
143-
align,
144-
CheckInAllocMsg::MemoryAccessTest,
145-
)?;
146-
// Perform regular access.
134+
// Perform regular store.
147135
this.write_scalar(val, dest)?;
148136
Ok(())
149137
}
@@ -157,19 +145,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
157145

158146
let [place, val] = check_arg_count(args)?;
159147
let place = this.deref_operand(place)?;
160-
let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
161-
162-
// Check alignment requirements. Atomics must always be aligned to their size,
163-
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
164-
// be 8-aligned).
165-
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
166-
this.check_ptr_access_align(
167-
place.ptr,
168-
place.layout.size,
169-
align,
170-
CheckInAllocMsg::MemoryAccessTest,
171-
)?;
172148

149+
// Perform regular load.
150+
let val = this.read_scalar(val)?;
173151
// Perform atomic store
174152
this.write_scalar_atomic(val, &place, atomic)?;
175153
Ok(())
@@ -220,17 +198,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
220198
span_bug!(this.cur_span(), "atomic arithmetic operation type mismatch");
221199
}
222200

223-
// Check alignment requirements. Atomics must always be aligned to their size,
224-
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
225-
// be 8-aligned).
226-
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
227-
this.check_ptr_access_align(
228-
place.ptr,
229-
place.layout.size,
230-
align,
231-
CheckInAllocMsg::MemoryAccessTest,
232-
)?;
233-
234201
match atomic_op {
235202
AtomicOp::Min => {
236203
let old = this.atomic_min_max_scalar(&place, rhs, true, atomic)?;
@@ -262,17 +229,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
262229
let place = this.deref_operand(place)?;
263230
let new = this.read_scalar(new)?;
264231

265-
// Check alignment requirements. Atomics must always be aligned to their size,
266-
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
267-
// be 8-aligned).
268-
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
269-
this.check_ptr_access_align(
270-
place.ptr,
271-
place.layout.size,
272-
align,
273-
CheckInAllocMsg::MemoryAccessTest,
274-
)?;
275-
276232
let old = this.atomic_exchange_scalar(&place, new, atomic)?;
277233
this.write_scalar(old, dest)?; // old value is returned
278234
Ok(())
@@ -293,17 +249,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
293249
let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
294250
let new = this.read_scalar(new)?;
295251

296-
// Check alignment requirements. Atomics must always be aligned to their size,
297-
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
298-
// be 8-aligned).
299-
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
300-
this.check_ptr_access_align(
301-
place.ptr,
302-
place.layout.size,
303-
align,
304-
CheckInAllocMsg::MemoryAccessTest,
305-
)?;
306-
307252
let old = this.atomic_compare_exchange_scalar(
308253
&place,
309254
&expect_old,

0 commit comments

Comments
 (0)