Commit 522b15b

Auto merge of rust-lang#128219 - connortsui20:rwlock-downgrade, r=tgross35
Rwlock downgrade

Tracking Issue: rust-lang#128203

This PR adds a `downgrade` method for `RwLock` / `RwLockWriteGuard` on all currently supported platforms.

Outstanding questions:

- [x] Does the `futex.rs` change affect performance at all? It doesn't seem like it will, but we can't be certain until we bench it...
- [x] Should the SOLID platform implementation [be ported over](rust-lang#128219 (comment)) to the `queue.rs` implementation to allow it to support downgrades?
2 parents 16422db + 060f994 commit 522b15b
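
In short, the new API lets a thread that holds the write lock switch to a shared read lock without ever releasing the `RwLock`. A minimal usage sketch (not taken from the PR itself), assuming a nightly toolchain with the `rwlock_downgrade` feature enabled:

```rust
#![feature(rwlock_downgrade)]
use std::sync::{RwLock, RwLockWriteGuard};

fn main() {
    let lock = RwLock::new(0);

    // Take the lock in write mode and mutate the data.
    let mut write_guard = lock.write().unwrap();
    *write_guard = 42;

    // Atomically switch to read mode: no other writer can run in between,
    // so we are guaranteed to still see the value we just wrote.
    let read_guard = RwLockWriteGuard::downgrade(write_guard);
    assert_eq!(*read_guard, 42);
}
```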

File tree

7 files changed: +679 −263 lines changed

library/std/src/sync/rwlock.rs

+70-4
```diff
@@ -4,10 +4,10 @@ mod tests;
 use crate::cell::UnsafeCell;
 use crate::fmt;
 use crate::marker::PhantomData;
-use crate::mem::ManuallyDrop;
+use crate::mem::{ManuallyDrop, forget};
 use crate::ops::{Deref, DerefMut};
 use crate::ptr::NonNull;
-use crate::sync::{LockResult, TryLockError, TryLockResult, poison};
+use crate::sync::{LockResult, PoisonError, TryLockError, TryLockResult, poison};
 use crate::sys::sync as sys;
 
 /// A reader-writer lock
@@ -574,8 +574,12 @@ impl<T> From<T> for RwLock<T> {
 
 impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
     /// Creates a new instance of `RwLockReadGuard<T>` from a `RwLock<T>`.
-    // SAFETY: if and only if `lock.inner.read()` (or `lock.inner.try_read()`) has been
-    // successfully called from the same thread before instantiating this object.
+    ///
+    /// # Safety
+    ///
+    /// This function is safe if and only if the same thread has successfully and safely called
+    /// `lock.inner.read()`, `lock.inner.try_read()`, or `lock.inner.downgrade()` before
+    /// instantiating this object.
     unsafe fn new(lock: &'rwlock RwLock<T>) -> LockResult<RwLockReadGuard<'rwlock, T>> {
         poison::map_result(lock.poison.borrow(), |()| RwLockReadGuard {
             data: unsafe { NonNull::new_unchecked(lock.data.get()) },
@@ -957,6 +961,68 @@ impl<'a, T: ?Sized> RwLockWriteGuard<'a, T> {
             None => Err(orig),
         }
     }
+
+    /// Downgrades a write-locked `RwLockWriteGuard` into a read-locked [`RwLockReadGuard`].
+    ///
+    /// This method will atomically change the state of the [`RwLock`] from exclusive mode into
+    /// shared mode. This means that it is impossible for a writing thread to get in between a
+    /// thread calling `downgrade` and the same thread reading whatever it wrote while it had the
+    /// [`RwLock`] in write mode.
+    ///
+    /// Note that since we have the `RwLockWriteGuard`, we know that the [`RwLock`] is already
+    /// locked for writing, so this method cannot fail.
+    ///
+    /// # Example
+    ///
+    /// ```
+    /// #![feature(rwlock_downgrade)]
+    /// use std::sync::{Arc, RwLock, RwLockWriteGuard};
+    ///
+    /// // The inner value starts as 0.
+    /// let rw = Arc::new(RwLock::new(0));
+    ///
+    /// // Put the lock in write mode.
+    /// let mut main_write_guard = rw.write().unwrap();
+    ///
+    /// let evil = rw.clone();
+    /// let handle = std::thread::spawn(move || {
+    ///     // This will not return until the main thread drops the `main_read_guard`.
+    ///     let mut evil_guard = evil.write().unwrap();
+    ///
+    ///     assert_eq!(*evil_guard, 1);
+    ///     *evil_guard = 2;
+    /// });
+    ///
+    /// // After spawning the writer thread, set the inner value to 1.
+    /// *main_write_guard = 1;
+    ///
+    /// // Atomically downgrade the write guard into a read guard.
+    /// let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard);
+    ///
+    /// // Since `downgrade` is atomic, the writer thread cannot have set the inner value to 2.
+    /// assert_eq!(*main_read_guard, 1, "`downgrade` was not atomic");
+    ///
+    /// // Clean up everything now
+    /// drop(main_read_guard);
+    /// handle.join().unwrap();
+    ///
+    /// let final_check = rw.read().unwrap();
+    /// assert_eq!(*final_check, 2);
+    /// ```
+    #[unstable(feature = "rwlock_downgrade", issue = "128203")]
+    pub fn downgrade(s: Self) -> RwLockReadGuard<'a, T> {
+        let lock = s.lock;
+
+        // We don't want to call the destructor since that calls `write_unlock`.
+        forget(s);
+
+        // SAFETY: We take ownership of a write guard, so we must already have the `RwLock` in write
+        // mode, satisfying the `downgrade` contract.
+        unsafe { lock.inner.downgrade() };
+
+        // SAFETY: We have just successfully called `downgrade`, so we fulfill the safety contract.
+        unsafe { RwLockReadGuard::new(lock).unwrap_or_else(PoisonError::into_inner) }
+    }
 }
 
 impl<'a, T: ?Sized> MappedRwLockWriteGuard<'a, T> {
```
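
The atomicity guarantee spelled out in the new documentation is the whole point of the API. For contrast, here is a sketch (the helper names `downgrade_by_hand` and `downgrade_atomically` are made up for illustration, and this needs a nightly toolchain) of the manual drop-then-relock pattern that `downgrade` replaces; the manual version leaves a window in which another writer can acquire the lock, which is exactly what the doc example's assertion rules out:

```rust
#![feature(rwlock_downgrade)]
use std::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};

// Non-atomic: between `drop` and `read`, another writer may acquire the lock
// and modify the data before we get to observe what we just wrote.
fn downgrade_by_hand<'a, T>(
    lock: &'a RwLock<T>,
    guard: RwLockWriteGuard<'a, T>,
) -> RwLockReadGuard<'a, T> {
    drop(guard);
    lock.read().unwrap()
}

// Atomic: the lock goes straight from write mode to read mode, so no other
// writer can slip in between the write access and the read access.
fn downgrade_atomically<T>(guard: RwLockWriteGuard<'_, T>) -> RwLockReadGuard<'_, T> {
    RwLockWriteGuard::downgrade(guard)
}

fn main() {
    let lock = RwLock::new(String::from("hello"));

    let guard = lock.write().unwrap();
    let read = downgrade_atomically(guard);
    assert_eq!(*read, "hello");
    drop(read);

    let guard = lock.write().unwrap();
    let read = downgrade_by_hand(&lock, guard);
    assert_eq!(*read, "hello");
}
```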

library/std/src/sync/rwlock/tests.rs

+122-2
```diff
@@ -3,8 +3,8 @@ use rand::Rng;
 use crate::sync::atomic::{AtomicUsize, Ordering};
 use crate::sync::mpsc::channel;
 use crate::sync::{
-    Arc, MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard, RwLockWriteGuard,
-    TryLockError,
+    Arc, Barrier, MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard,
+    RwLockWriteGuard, TryLockError,
 };
 use crate::thread;
 
@@ -501,3 +501,123 @@ fn panic_while_mapping_write_unlocked_poison() {
 
     drop(lock);
 }
+
+#[test]
+fn test_downgrade_basic() {
+    let r = RwLock::new(());
+
+    let write_guard = r.write().unwrap();
+    let _read_guard = RwLockWriteGuard::downgrade(write_guard);
+}
+
+#[test]
+fn test_downgrade_readers() {
+    // This test creates 1 writing thread and `R` reader threads doing `N` iterations.
+    const R: usize = 10;
+    const N: usize = if cfg!(miri) { 100 } else { 1000 };
+
+    // The writer thread will constantly update the value inside the `RwLock`, and this test will
+    // only pass if every reader observes all values between 0 and `N`.
+    let rwlock = Arc::new(RwLock::new(0));
+    let barrier = Arc::new(Barrier::new(R + 1));
+
+    // Create the writing thread.
+    let r_writer = rwlock.clone();
+    let b_writer = barrier.clone();
+    thread::spawn(move || {
+        for i in 0..N {
+            let mut write_guard = r_writer.write().unwrap();
+            *write_guard = i;
+
+            let read_guard = RwLockWriteGuard::downgrade(write_guard);
+            assert_eq!(*read_guard, i);
+
+            // Wait for all readers to observe the new value.
+            b_writer.wait();
+        }
+    });
+
+    for _ in 0..R {
+        let rwlock = rwlock.clone();
+        let barrier = barrier.clone();
+        thread::spawn(move || {
+            // Every reader thread needs to observe every value up to `N`.
+            for i in 0..N {
+                let read_guard = rwlock.read().unwrap();
+                assert_eq!(*read_guard, i);
+                drop(read_guard);
+
+                // Wait for everyone to read and for the writer to change the value again.
+                barrier.wait();
+
+                // Spin until the writer has changed the value.
+                loop {
+                    let read_guard = rwlock.read().unwrap();
+                    assert!(*read_guard >= i);
+
+                    if *read_guard > i {
+                        break;
+                    }
+                }
+            }
+        });
+    }
+}
+
+#[test]
+fn test_downgrade_atomic() {
+    const NEW_VALUE: i32 = -1;
+
+    // This test checks that `downgrade` is atomic, meaning as soon as a write lock has been
+    // downgraded, the lock must be in read mode and no other threads can take the write lock to
+    // modify the protected value.
+
+    // `W` is the number of evil writer threads.
+    const W: usize = if cfg!(miri) || !cfg!(target_pointer_width = "64") { 100 } else { 1000 };
+    let rwlock = Arc::new(RwLock::new(0));
+
+    // Spawns many evil writer threads that will try and write to the locked value before the
+    // initial writer (who has the exclusive lock) can read after it downgrades.
+    // If the `RwLock` behaves correctly, then the initial writer should read the value it wrote
+    // itself as no other thread should be able to mutate the protected value.
+
+    // Put the lock in write mode, causing all future threads trying to access this to go to sleep.
+    let mut main_write_guard = rwlock.write().unwrap();
+
+    // Spawn all of the evil writer threads. They will each increment the protected value by 1.
+    let handles: Vec<_> = (0..W)
+        .map(|_| {
+            let rwlock = rwlock.clone();
+            thread::spawn(move || {
+                // Will go to sleep since the main thread initially has the write lock.
+                let mut evil_guard = rwlock.write().unwrap();
+                *evil_guard += 1;
+            })
+        })
+        .collect();
+
+    // Wait for a good amount of time so that evil threads go to sleep.
+    // Note: this is not strictly necessary...
+    let eternity = crate::time::Duration::from_millis(42);
+    thread::sleep(eternity);
+
+    // Once everyone is asleep, set the value to `NEW_VALUE`.
+    *main_write_guard = NEW_VALUE;
+
+    // Atomically downgrade the write guard into a read guard.
+    let main_read_guard = RwLockWriteGuard::downgrade(main_write_guard);
+
+    // If the above is not atomic, then it would be possible for an evil thread to get in front of
+    // this read and change the value to be non-negative.
+    assert_eq!(*main_read_guard, NEW_VALUE, "`downgrade` was not atomic");
+
+    // Drop the main read guard and allow the evil writer threads to start incrementing.
+    drop(main_read_guard);
+
+    for handle in handles {
+        handle.join().unwrap();
+    }
+
+    let final_check = rwlock.read().unwrap();
+    assert_eq!(*final_check, W as i32 + NEW_VALUE);
+}
```

library/std/src/sys/sync/rwlock/futex.rs

+47-5
```diff
@@ -18,6 +18,7 @@ pub struct RwLock {
 const READ_LOCKED: Primitive = 1;
 const MASK: Primitive = (1 << 30) - 1;
 const WRITE_LOCKED: Primitive = MASK;
+const DOWNGRADE: Primitive = READ_LOCKED.wrapping_sub(WRITE_LOCKED); // READ_LOCKED - WRITE_LOCKED
 const MAX_READERS: Primitive = MASK - 1;
 const READERS_WAITING: Primitive = 1 << 30;
 const WRITERS_WAITING: Primitive = 1 << 31;
@@ -53,6 +54,24 @@ fn is_read_lockable(state: Primitive) -> bool {
     state & MASK < MAX_READERS && !has_readers_waiting(state) && !has_writers_waiting(state)
 }
 
+#[inline]
+fn is_read_lockable_after_wakeup(state: Primitive) -> bool {
+    // We make a special case for checking if we can read-lock _after_ a reader thread that went to
+    // sleep has been woken up by a call to `downgrade`.
+    //
+    // `downgrade` will wake up all readers and place the lock in read mode. Thus, there should be
+    // no readers waiting and the lock should be read-locked (not write-locked or unlocked).
+    //
+    // Note that we do not check if any writers are waiting. This is because a call to `downgrade`
+    // implies that the caller wants other readers to read the value protected by the lock. If we
+    // did not allow readers to acquire the lock before writers after a `downgrade`, then only the
+    // original writer would be able to read the value, thus defeating the purpose of `downgrade`.
+    state & MASK < MAX_READERS
+        && !has_readers_waiting(state)
+        && !is_write_locked(state)
+        && !is_unlocked(state)
+}
+
 #[inline]
 fn has_reached_max_readers(state: Primitive) -> bool {
     state & MASK == MAX_READERS
@@ -84,6 +103,9 @@ impl RwLock {
         }
     }
 
+    /// # Safety
+    ///
+    /// The `RwLock` must be read-locked (N readers) in order to call this.
     #[inline]
     pub unsafe fn read_unlock(&self) {
         let state = self.state.fetch_sub(READ_LOCKED, Release) - READ_LOCKED;
@@ -100,11 +122,13 @@ impl RwLock {
 
     #[cold]
     fn read_contended(&self) {
+        let mut has_slept = false;
        let mut state = self.spin_read();
 
         loop {
-            // If we can lock it, lock it.
-            if is_read_lockable(state) {
+            // If we have just been woken up, first check for a `downgrade` call.
+            // Otherwise, if we can read-lock it, lock it.
+            if (has_slept && is_read_lockable_after_wakeup(state)) || is_read_lockable(state) {
                 match self.state.compare_exchange_weak(state, state + READ_LOCKED, Acquire, Relaxed)
                 {
                     Ok(_) => return, // Locked!
@@ -116,9 +140,7 @@ impl RwLock {
             }
 
             // Check for overflow.
-            if has_reached_max_readers(state) {
-                panic!("too many active read locks on RwLock");
-            }
+            assert!(!has_reached_max_readers(state), "too many active read locks on RwLock");
 
             // Make sure the readers waiting bit is set before we go to sleep.
             if !has_readers_waiting(state) {
@@ -132,6 +154,7 @@ impl RwLock {
 
             // Wait for the state to change.
             futex_wait(&self.state, state | READERS_WAITING, None);
+            has_slept = true;
 
             // Spin again after waking up.
             state = self.spin_read();
@@ -152,6 +175,9 @@ impl RwLock {
         }
     }
 
+    /// # Safety
+    ///
+    /// The `RwLock` must be write-locked (single writer) in order to call this.
     #[inline]
     pub unsafe fn write_unlock(&self) {
         let state = self.state.fetch_sub(WRITE_LOCKED, Release) - WRITE_LOCKED;
@@ -163,6 +189,22 @@ impl RwLock {
         }
     }
 
+    /// # Safety
+    ///
+    /// The `RwLock` must be write-locked (single writer) in order to call this.
+    #[inline]
+    pub unsafe fn downgrade(&self) {
+        // Removes all write bits and adds a single read bit.
+        let state = self.state.fetch_add(DOWNGRADE, Relaxed);
+        debug_assert!(is_write_locked(state), "RwLock must be write locked to call `downgrade`");
+
+        if has_readers_waiting(state) {
+            // Since we had the exclusive lock, nobody else can unset this bit.
+            self.state.fetch_sub(READERS_WAITING, Relaxed);
+            futex_wake_all(&self.state);
+        }
+    }
+
     #[cold]
     fn write_contended(&self) {
         let mut state = self.spin_write();
```
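
The `DOWNGRADE` delta relies on the state-word layout: the low 30 bits count readers (the field being saturated at `MASK` means write-locked), and bits 30 and 31 are the waiting flags. Adding the wrapped difference `READ_LOCKED - WRITE_LOCKED` therefore collapses a write-locked count field down to exactly one reader without disturbing the waiting bits. A standalone sketch of that arithmetic (not the std code itself; it assumes the futex word `Primitive` is a `u32`, as on Linux):

```rust
// Mirror the constants from futex.rs and check the state transition by hand.
type Primitive = u32;

const READ_LOCKED: Primitive = 1;
const MASK: Primitive = (1 << 30) - 1;
const WRITE_LOCKED: Primitive = MASK;
const READERS_WAITING: Primitive = 1 << 30;
const WRITERS_WAITING: Primitive = 1 << 31;
const DOWNGRADE: Primitive = READ_LOCKED.wrapping_sub(WRITE_LOCKED);

fn main() {
    // Write-locked, with both waiting bits set.
    let before = WRITE_LOCKED | READERS_WAITING | WRITERS_WAITING;
    let after = before.wrapping_add(DOWNGRADE);

    // The reader-count field now holds exactly one reader ...
    assert_eq!(after & MASK, READ_LOCKED);
    // ... and neither waiting bit was disturbed by the wrap-around.
    assert_eq!(after & READERS_WAITING, READERS_WAITING);
    assert_eq!(after & WRITERS_WAITING, WRITERS_WAITING);

    println!("before = {before:#034b}");
    println!("after  = {after:#034b}");
}
```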

library/std/src/sys/sync/rwlock/no_threads.rs

+5
```diff
@@ -62,4 +62,9 @@ impl RwLock {
     pub unsafe fn write_unlock(&self) {
         assert_eq!(self.mode.replace(0), -1);
     }
+
+    #[inline]
+    pub unsafe fn downgrade(&self) {
+        assert_eq!(self.mode.replace(1), -1);
+    }
 }
```
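
For the single-threaded `no_threads` backend, the whole lock state is one counter cell: judging from the asserts above, `-1` marks the writer, `0` is unlocked, and a positive value counts readers, so `downgrade` is just swapping `-1` for `1`. A toy model of that transition (the type and names are illustrative, not std's):

```rust
use std::cell::Cell;

// Toy model of the no_threads state: 0 = unlocked, -1 = write-locked, n > 0 = n readers.
struct ToyRwLockState {
    mode: Cell<isize>,
}

impl ToyRwLockState {
    fn downgrade(&self) {
        // Swap the single writer (-1) for a single reader (1); anything else is a caller bug.
        assert_eq!(self.mode.replace(1), -1);
    }
}

fn main() {
    let state = ToyRwLockState { mode: Cell::new(-1) }; // pretend we hold the write lock
    state.downgrade();
    assert_eq!(state.mode.get(), 1); // now exactly one reader: the downgraded guard
}
```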
