Skip to content

Commit 960d1b7

Browse files
committed
Don't allow pthread_rwlock_t to recursively lock itself
This is allowed by POSIX and can happen on glibc with processors that support hardware lock elision.
1 parent d73f5e6 commit 960d1b7

File tree

1 file changed

+52
-9
lines changed

1 file changed

+52
-9
lines changed

src/libstd/sys/unix/rwlock.rs

Lines changed: 52 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -11,14 +11,20 @@
1111
use libc;
1212
use cell::UnsafeCell;
1313

14-
pub struct RWLock { inner: UnsafeCell<libc::pthread_rwlock_t> }
14+
pub struct RWLock {
15+
inner: UnsafeCell<libc::pthread_rwlock_t>,
16+
write_locked: UnsafeCell<bool>,
17+
}
1518

1619
unsafe impl Send for RWLock {}
1720
unsafe impl Sync for RWLock {}
1821

1922
impl RWLock {
2023
pub const fn new() -> RWLock {
21-
RWLock { inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER) }
24+
RWLock {
25+
inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
26+
write_locked: UnsafeCell::new(false),
27+
}
2228
}
2329
#[inline]
2430
pub unsafe fn read(&self) {
@@ -35,37 +41,74 @@ impl RWLock {
3541
//
3642
// We roughly maintain the deadlocking behavior by panicking to ensure
3743
// that this lock acquisition does not succeed.
38-
if r == libc::EDEADLK {
44+
//
45+
// We also check whether this lock is already write locked. This
46+
// is only possible if it was write locked by the current thread and
47+
// the implementation allows recursive locking. The POSIX standard
48+
// doesn't require recursively locking a rwlock to deadlock, but we can't
49+
// allow that because it could lead to aliasing issues.
50+
if r == libc::EDEADLK || *self.write_locked.get() {
51+
if r == 0 {
52+
self.raw_unlock();
53+
}
3954
panic!("rwlock read lock would result in deadlock");
4055
} else {
4156
debug_assert_eq!(r, 0);
4257
}
4358
}
4459
#[inline]
4560
pub unsafe fn try_read(&self) -> bool {
46-
libc::pthread_rwlock_tryrdlock(self.inner.get()) == 0
61+
let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
62+
if r == 0 && *self.write_locked.get() {
63+
self.raw_unlock();
64+
false
65+
} else {
66+
r == 0
67+
}
4768
}
4869
#[inline]
4970
pub unsafe fn write(&self) {
5071
let r = libc::pthread_rwlock_wrlock(self.inner.get());
51-
// see comments above for why we check for EDEADLK
52-
if r == libc::EDEADLK {
72+
// see comments above for why we check for EDEADLK and write_locked
73+
if r == libc::EDEADLK || *self.write_locked.get() {
74+
if r == 0 {
75+
self.raw_unlock();
76+
}
5377
panic!("rwlock write lock would result in deadlock");
5478
} else {
5579
debug_assert_eq!(r, 0);
5680
}
81+
*self.write_locked.get() = true;
5782
}
5883
#[inline]
5984
pub unsafe fn try_write(&self) -> bool {
60-
libc::pthread_rwlock_trywrlock(self.inner.get()) == 0
85+
let r = libc::pthread_rwlock_trywrlock(self.inner.get());
86+
if r == 0 && *self.write_locked.get() {
87+
self.raw_unlock();
88+
false
89+
} else if r == 0 {
90+
*self.write_locked.get() = true;
91+
true
92+
} else {
93+
false
94+
}
6195
}
6296
#[inline]
63-
pub unsafe fn read_unlock(&self) {
97+
unsafe fn raw_unlock(&self) {
6498
let r = libc::pthread_rwlock_unlock(self.inner.get());
6599
debug_assert_eq!(r, 0);
66100
}
67101
#[inline]
68-
pub unsafe fn write_unlock(&self) { self.read_unlock() }
102+
pub unsafe fn read_unlock(&self) {
103+
debug_assert!(!*self.write_locked.get());
104+
self.raw_unlock();
105+
}
106+
#[inline]
107+
pub unsafe fn write_unlock(&self) {
108+
debug_assert!(*self.write_locked.get());
109+
*self.write_locked.get() = false;
110+
self.raw_unlock();
111+
}
69112
#[inline]
70113
pub unsafe fn destroy(&self) {
71114
let r = libc::pthread_rwlock_destroy(self.inner.get());

0 commit comments

Comments
 (0)