Skip to content

Commit e8123e5

Browse files
committed
Auto merge of #3296 - RalfJung:rustup, r=RalfJung
Rustup, more pthread tests
2 parents 877d78e + a0c0485 commit e8123e5

File tree

8 files changed

+205
-30
lines changed

8 files changed

+205
-30
lines changed

rust-version

+1-1
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
0cbef48150e1fab161b5fd147b57ceb3f9272a52
1+
b17491c8f6d555386104dfd82004c01bfef09c95

src/concurrency/sync.rs

+6-11
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ struct Mutex {
7070
lock_count: usize,
7171
/// The queue of threads waiting for this mutex.
7272
queue: VecDeque<ThreadId>,
73-
/// Data race handle, this tracks the happens-before
73+
/// Data race handle. This tracks the happens-before
7474
/// relationship between each mutex access. It is
7575
/// released to during unlock and acquired from during
7676
/// locking, and therefore stores the clock of the last
@@ -92,7 +92,7 @@ struct RwLock {
9292
writer_queue: VecDeque<ThreadId>,
9393
/// The queue of reader threads waiting for this lock.
9494
reader_queue: VecDeque<ThreadId>,
95-
/// Data race handle for writers, tracks the happens-before
95+
/// Data race handle for writers. Tracks the happens-before
9696
/// ordering between each write access to a rwlock and is updated
9797
/// after a sequence of concurrent readers to track the happens-
9898
/// before ordering between the set of previous readers and
@@ -101,7 +101,7 @@ struct RwLock {
101101
/// lock or the joined clock of the set of last threads to release
102102
/// shared reader locks.
103103
data_race: VClock,
104-
/// Data race handle for readers, this is temporary storage
104+
/// Data race handle for readers. This is temporary storage
105105
/// for the combined happens-before ordering between all
106106
/// concurrent readers and the next writer, and the value
107107
/// is stored to the main data_race variable once all
@@ -110,6 +110,7 @@ struct RwLock {
110110
/// must load the clock of the last write and must not
111111
/// add happens-before orderings between shared reader
112112
/// locks.
113+
/// This is only relevant when there is an active reader.
113114
data_race_reader: VClock,
114115
}
115116

@@ -485,6 +486,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
485486
Entry::Vacant(_) => return false, // we did not even own this lock
486487
}
487488
if let Some(data_race) = &this.machine.data_race {
489+
// Add this to the shared-release clock of all concurrent readers.
488490
data_race.validate_lock_release_shared(
489491
&mut rwlock.data_race_reader,
490492
reader,
@@ -539,20 +541,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriInterpCxExt<'mir, 'tcx> {
539541
}
540542
rwlock.writer = None;
541543
trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, expected_writer);
542-
// Release memory to both reader and writer vector clocks
543-
// since this writer happens-before both the union of readers once they are finished
544-
// and the next writer
544+
// Release memory to next lock holder.
545545
if let Some(data_race) = &this.machine.data_race {
546546
data_race.validate_lock_release(
547547
&mut rwlock.data_race,
548548
current_writer,
549549
current_span,
550550
);
551-
data_race.validate_lock_release(
552-
&mut rwlock.data_race_reader,
553-
current_writer,
554-
current_span,
555-
);
556551
}
557552
// The thread was a writer.
558553
//
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
#![feature(core_intrinsics)]
2+
#![feature(custom_mir)]
3+
4+
use std::intrinsics::mir::*;
5+
use std::num::NonZeroI32;
6+
7+
// We define our own option type so that we can control the variant indices.
8+
#[allow(unused)]
9+
enum Option<T> {
10+
None, // variant 0
11+
Some(T), // variant 1
12+
}
13+
use Option::*;
14+
15+
#[custom_mir(dialect = "runtime", phase = "optimized")]
16+
fn set_discriminant(ptr: &mut Option<NonZeroI32>) {
17+
mir! {
18+
{
19+
// We set the discriminant to `Some`, which is a NOP since this is the niched variant.
20+
// However, the enum is actually encoding `None` currently! That's not good...
21+
SetDiscriminant(*ptr, 1);
22+
//~^ ERROR: trying to set discriminant of a Option<std::num::NonZero<i32>> to the niched variant, but the value does not match
23+
Return()
24+
}
25+
}
26+
}
27+
28+
pub fn main() {
29+
let mut v = None;
30+
set_discriminant(&mut v);
31+
}
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
error: Undefined Behavior: trying to set discriminant of a Option<std::num::NonZero<i32>> to the niched variant, but the value does not match
2+
--> $DIR/enum-set-discriminant-niche-variant-wrong.rs:LL:CC
3+
|
4+
LL | SetDiscriminant(*ptr, 1);
5+
| ^^^^^^^^^^^^^^^^^^^^^^^^ trying to set discriminant of a Option<std::num::NonZero<i32>> to the niched variant, but the value does not match
6+
|
7+
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
8+
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
9+
= note: BACKTRACE:
10+
= note: inside `set_discriminant` at $DIR/enum-set-discriminant-niche-variant-wrong.rs:LL:CC
11+
note: inside `main`
12+
--> $DIR/enum-set-discriminant-niche-variant-wrong.rs:LL:CC
13+
|
14+
LL | set_discriminant(&mut v);
15+
| ^^^^^^^^^^^^^^^^^^^^^^^^
16+
17+
note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace
18+
19+
error: aborting due to 1 previous error
20+

tests/pass-dep/concurrency/libc_pthread_cond.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ fn test_timed_wait_timeout(clock_id: i32) {
2222
let mut now_mu: MaybeUninit<libc::timespec> = MaybeUninit::uninit();
2323
assert_eq!(libc::clock_gettime(clock_id, now_mu.as_mut_ptr()), 0);
2424
let now = now_mu.assume_init();
25-
// Waiting for a second... mostly because waiting less requires mich more tricky arithmetic.
25+
// Waiting for a second... mostly because waiting less requires much more tricky arithmetic.
2626
// FIXME: wait less.
2727
let timeout = libc::timespec { tv_sec: now.tv_sec + 1, tv_nsec: now.tv_nsec };
2828

tests/pass-dep/concurrency/libc_pthread_cond_isolated.rs

+1-1
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ fn test_timed_wait_timeout(clock_id: i32) {
2121
let mut now_mu: MaybeUninit<libc::timespec> = MaybeUninit::uninit();
2222
assert_eq!(libc::clock_gettime(clock_id, now_mu.as_mut_ptr()), 0);
2323
let now = now_mu.assume_init();
24-
// Waiting for a second... mostly because waiting less requires mich more tricky arithmetic.
24+
// Waiting for a second... mostly because waiting less requires much more tricky arithmetic.
2525
// FIXME: wait less.
2626
let timeout = libc::timespec { tv_sec: now.tv_sec + 1, tv_nsec: now.tv_nsec };
2727

tests/pass-dep/shims/pthread-sync.rs

+124-10
Original file line numberDiff line numberDiff line change
@@ -1,24 +1,34 @@
11
//@ignore-target-windows: No libc on Windows
2+
// We use `yield` to test specific interleavings, so disable automatic preemption.
3+
//@compile-flags: -Zmiri-preemption-rate=0
4+
#![feature(sync_unsafe_cell)]
5+
6+
use std::cell::SyncUnsafeCell;
7+
use std::thread;
8+
use std::{mem, ptr};
29

310
fn main() {
411
test_mutex_libc_init_recursive();
512
test_mutex_libc_init_normal();
613
test_mutex_libc_init_errorcheck();
714
test_rwlock_libc_static_initializer();
8-
915
#[cfg(target_os = "linux")]
1016
test_mutex_libc_static_initializer_recursive();
17+
18+
test_mutex();
19+
check_rwlock_write();
20+
check_rwlock_read_no_deadlock();
1121
}
1222

1323
fn test_mutex_libc_init_recursive() {
1424
unsafe {
15-
let mut attr: libc::pthread_mutexattr_t = std::mem::zeroed();
25+
let mut attr: libc::pthread_mutexattr_t = mem::zeroed();
1626
assert_eq!(libc::pthread_mutexattr_init(&mut attr as *mut _), 0);
1727
assert_eq!(
1828
libc::pthread_mutexattr_settype(&mut attr as *mut _, libc::PTHREAD_MUTEX_RECURSIVE),
1929
0,
2030
);
21-
let mut mutex: libc::pthread_mutex_t = std::mem::zeroed();
31+
let mut mutex: libc::pthread_mutex_t = mem::zeroed();
2232
assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mut attr as *mut _), 0);
2333
assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0);
2434
assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), 0);
@@ -36,7 +46,7 @@ fn test_mutex_libc_init_recursive() {
3646

3747
fn test_mutex_libc_init_normal() {
3848
unsafe {
39-
let mut mutexattr: libc::pthread_mutexattr_t = std::mem::zeroed();
49+
let mut mutexattr: libc::pthread_mutexattr_t = mem::zeroed();
4050
assert_eq!(
4151
libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, 0x12345678),
4252
libc::EINVAL,
@@ -45,7 +55,7 @@ fn test_mutex_libc_init_normal() {
4555
libc::pthread_mutexattr_settype(&mut mutexattr as *mut _, libc::PTHREAD_MUTEX_NORMAL),
4656
0,
4757
);
48-
let mut mutex: libc::pthread_mutex_t = std::mem::zeroed();
58+
let mut mutex: libc::pthread_mutex_t = mem::zeroed();
4959
assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0);
5060
assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0);
5161
assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), libc::EBUSY);
@@ -58,15 +68,15 @@ fn test_mutex_libc_init_normal() {
5868

5969
fn test_mutex_libc_init_errorcheck() {
6070
unsafe {
61-
let mut mutexattr: libc::pthread_mutexattr_t = std::mem::zeroed();
71+
let mut mutexattr: libc::pthread_mutexattr_t = mem::zeroed();
6272
assert_eq!(
6373
libc::pthread_mutexattr_settype(
6474
&mut mutexattr as *mut _,
6575
libc::PTHREAD_MUTEX_ERRORCHECK,
6676
),
6777
0,
6878
);
69-
let mut mutex: libc::pthread_mutex_t = std::mem::zeroed();
79+
let mut mutex: libc::pthread_mutex_t = mem::zeroed();
7080
assert_eq!(libc::pthread_mutex_init(&mut mutex as *mut _, &mutexattr as *const _), 0);
7181
assert_eq!(libc::pthread_mutex_lock(&mut mutex as *mut _), 0);
7282
assert_eq!(libc::pthread_mutex_trylock(&mut mutex as *mut _), libc::EBUSY);
@@ -98,9 +108,113 @@ fn test_mutex_libc_static_initializer_recursive() {
98108
}
99109
}
100110

101-
// Testing the behavior of std::sync::RwLock does not fully exercise the pthread rwlock shims, we
102-
// need to go a layer deeper and test the behavior of the libc functions, because
103-
// std::sys::unix::rwlock::RWLock itself keeps track of write_locked and num_readers.
111+
struct SendPtr<T> {
112+
ptr: *mut T,
113+
}
114+
unsafe impl<T> Send for SendPtr<T> {}
115+
impl<T> Copy for SendPtr<T> {}
116+
impl<T> Clone for SendPtr<T> {
117+
fn clone(&self) -> Self {
118+
*self
119+
}
120+
}
121+
122+
fn test_mutex() {
123+
// Specifically *not* using `Arc` to make sure there is no synchronization apart from the mutex.
124+
unsafe {
125+
let data = SyncUnsafeCell::new((libc::PTHREAD_MUTEX_INITIALIZER, 0));
126+
let ptr = SendPtr { ptr: data.get() };
127+
let mut threads = Vec::new();
128+
129+
for _ in 0..3 {
130+
let thread = thread::spawn(move || {
131+
let ptr = ptr; // circumvent per-field closure capture
132+
let mutexptr = ptr::addr_of_mut!((*ptr.ptr).0);
133+
assert_eq!(libc::pthread_mutex_lock(mutexptr), 0);
134+
thread::yield_now();
135+
(*ptr.ptr).1 += 1;
136+
assert_eq!(libc::pthread_mutex_unlock(mutexptr), 0);
137+
});
138+
threads.push(thread);
139+
}
140+
141+
for thread in threads {
142+
thread.join().unwrap();
143+
}
144+
145+
let mutexptr = ptr::addr_of_mut!((*ptr.ptr).0);
146+
assert_eq!(libc::pthread_mutex_trylock(mutexptr), 0);
147+
assert_eq!((*ptr.ptr).1, 3);
148+
}
149+
}
150+
151+
fn check_rwlock_write() {
152+
unsafe {
153+
let data = SyncUnsafeCell::new((libc::PTHREAD_RWLOCK_INITIALIZER, 0));
154+
let ptr = SendPtr { ptr: data.get() };
155+
let mut threads = Vec::new();
156+
157+
for _ in 0..3 {
158+
let thread = thread::spawn(move || {
159+
let ptr = ptr; // circumvent per-field closure capture
160+
let rwlockptr = ptr::addr_of_mut!((*ptr.ptr).0);
161+
assert_eq!(libc::pthread_rwlock_wrlock(rwlockptr), 0);
162+
thread::yield_now();
163+
(*ptr.ptr).1 += 1;
164+
assert_eq!(libc::pthread_rwlock_unlock(rwlockptr), 0);
165+
});
166+
threads.push(thread);
167+
168+
let readthread = thread::spawn(move || {
169+
let ptr = ptr; // circumvent per-field closure capture
170+
let rwlockptr = ptr::addr_of_mut!((*ptr.ptr).0);
171+
assert_eq!(libc::pthread_rwlock_rdlock(rwlockptr), 0);
172+
thread::yield_now();
173+
let val = (*ptr.ptr).1;
174+
assert!(val >= 0 && val <= 3);
175+
assert_eq!(libc::pthread_rwlock_unlock(rwlockptr), 0);
176+
});
177+
threads.push(readthread);
178+
}
179+
180+
for thread in threads {
181+
thread.join().unwrap();
182+
}
183+
184+
let rwlockptr = ptr::addr_of_mut!((*ptr.ptr).0);
185+
assert_eq!(libc::pthread_rwlock_tryrdlock(rwlockptr), 0);
186+
assert_eq!((*ptr.ptr).1, 3);
187+
}
188+
}
189+
190+
fn check_rwlock_read_no_deadlock() {
191+
unsafe {
192+
let l1 = SyncUnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
193+
let l1 = SendPtr { ptr: l1.get() };
194+
let l2 = SyncUnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
195+
let l2 = SendPtr { ptr: l2.get() };
196+
197+
// acquire l1 and hold it until after the other thread is done
198+
assert_eq!(libc::pthread_rwlock_rdlock(l1.ptr), 0);
199+
let handle = thread::spawn(move || {
200+
let l1 = l1; // circumvent per-field closure capture
201+
let l2 = l2; // circumvent per-field closure capture
202+
// acquire l2 before the other thread
203+
assert_eq!(libc::pthread_rwlock_rdlock(l2.ptr), 0);
204+
thread::yield_now();
205+
assert_eq!(libc::pthread_rwlock_rdlock(l1.ptr), 0);
206+
thread::yield_now();
207+
assert_eq!(libc::pthread_rwlock_unlock(l1.ptr), 0);
208+
assert_eq!(libc::pthread_rwlock_unlock(l2.ptr), 0);
209+
});
210+
thread::yield_now();
211+
assert_eq!(libc::pthread_rwlock_rdlock(l2.ptr), 0);
212+
handle.join().unwrap();
213+
}
214+
}
215+
216+
// std::sync::RwLock does not even use pthread_rwlock any more.
217+
// Do some smoke testing of the API surface.
104218
fn test_rwlock_libc_static_initializer() {
105219
let rw = std::cell::UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER);
106220
unsafe {

tests/pass/concurrency/sync.rs

+21-6
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
//@revisions: stack tree
22
//@[tree]compile-flags: -Zmiri-tree-borrows
3-
//@compile-flags: -Zmiri-disable-isolation -Zmiri-strict-provenance
3+
// We use `yield` to test specific interleavings, so disable automatic preemption.
4+
//@compile-flags: -Zmiri-disable-isolation -Zmiri-strict-provenance -Zmiri-preemption-rate=0
45

56
use std::sync::{Arc, Barrier, Condvar, Mutex, Once, RwLock};
67
use std::thread;
@@ -119,13 +120,25 @@ fn check_rwlock_write() {
119120
let mut threads = Vec::new();
120121

121122
for _ in 0..3 {
122-
let data = Arc::clone(&data);
123-
let thread = thread::spawn(move || {
124-
let mut data = data.write().unwrap();
125-
thread::yield_now();
126-
*data += 1;
123+
let thread = thread::spawn({
124+
let data = Arc::clone(&data);
125+
move || {
126+
let mut data = data.write().unwrap();
127+
thread::yield_now();
128+
*data += 1;
129+
}
127130
});
128131
threads.push(thread);
132+
133+
let readthread = thread::spawn({
134+
let data = Arc::clone(&data);
135+
move || {
136+
let data = data.read().unwrap();
137+
thread::yield_now();
138+
assert!(*data >= 0 && *data <= 3);
139+
}
140+
});
141+
threads.push(readthread);
129142
}
130143

131144
for thread in threads {
@@ -144,8 +157,10 @@ fn check_rwlock_read_no_deadlock() {
144157

145158
let l1_copy = Arc::clone(&l1);
146159
let l2_copy = Arc::clone(&l2);
160+
// acquire l1 and hold it until after the other thread is done
147161
let _guard1 = l1.read().unwrap();
148162
let handle = thread::spawn(move || {
163+
// acquire l2 before the other thread
149164
let _guard2 = l2_copy.read().unwrap();
150165
thread::yield_now();
151166
let _guard1 = l1_copy.read().unwrap();

0 commit comments

Comments
 (0)