spin/rwlock.rs
//! A lock that provides data access to either one writer or many readers.

use crate::{
    atomic::{AtomicUsize, Ordering},
    RelaxStrategy, Spin,
};
use core::{
    cell::UnsafeCell,
    fmt,
    marker::PhantomData,
    mem,
    mem::ManuallyDrop,
    ops::{Deref, DerefMut},
};

/// A lock that provides data access to either one writer or many readers.
///
/// This lock behaves in a similar manner to its namesake `std::sync::RwLock` but uses
/// spinning for synchronisation instead. Unlike its namesake, this lock does not
/// track lock poisoning.
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the contents of the lock.
///
/// An [`RwLockUpgradableGuard`] can be upgraded to a writable guard through
/// the [`RwLockUpgradableGuard::upgrade`] and [`RwLockUpgradableGuard::try_upgrade`]
/// functions. Writable or upgradeable guards can be downgraded through their
/// respective `downgrade` functions.
///
/// Based on Facebook's
/// [`folly/RWSpinLock.h`](https://github.com/facebook/folly/blob/a0394d84f2d5c3e50ebfd0566f9d3acb52cfab5a/folly/synchronization/RWSpinLock.h).
/// This implementation is unfair to writers: if the lock always has readers, then no writers will
/// ever get a chance. Using an upgradeable lock guard can *somewhat* alleviate this issue, as no
/// new readers are allowed when an upgradeable guard is held, but upgradeable guards can be taken
/// when there are existing readers. However, if the lock is highly contended and writes are
/// crucial, this implementation may be a poor choice.
///
/// # Examples
///
/// ```
/// use spin;
///
/// let lock = spin::RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
///     let mut w = lock.write();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // write lock is dropped here
/// ```
pub struct RwLock<T: ?Sized, R = Spin> {
    phantom: PhantomData<R>,
    lock: AtomicUsize,
    data: UnsafeCell<T>,
}
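
// Layout of the lock word: bit 0 is the writer flag, bit 1 is the upgradeable-reader
// flag, and the remaining bits count plain readers (each reader adds `READER` to the word).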

const READER: usize = 1 << 2;
const UPGRADED: usize = 1 << 1;
const WRITER: usize = 1;

/// A guard that provides immutable data access.
///
/// When the guard falls out of scope it will decrement the read count,
/// potentially releasing the lock.
pub struct RwLockReadGuard<'a, T: 'a + ?Sized> {
    lock: &'a AtomicUsize,
    data: *const T,
}

/// A guard that provides mutable data access.
///
/// When the guard falls out of scope it will release the lock.
pub struct RwLockWriteGuard<'a, T: 'a + ?Sized, R = Spin> {
    phantom: PhantomData<R>,
    inner: &'a RwLock<T, R>,
    data: *mut T,
}

/// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`].
///
/// No writers or other upgradeable guards can exist while this is in scope. New reader
/// creation is prevented (to alleviate writer starvation) but there may be existing readers
/// when the lock is acquired.
///
/// When the guard falls out of scope it will release the lock.
pub struct RwLockUpgradableGuard<'a, T: 'a + ?Sized, R = Spin> {
    phantom: PhantomData<R>,
    inner: &'a RwLock<T, R>,
    data: *const T,
}

// Same unsafe impls as `std::sync::RwLock`
unsafe impl<T: ?Sized + Send, R> Send for RwLock<T, R> {}
unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLock<T, R> {}

unsafe impl<T: ?Sized + Send + Sync, R> Send for RwLockWriteGuard<'_, T, R> {}
unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLockWriteGuard<'_, T, R> {}

unsafe impl<T: ?Sized + Sync> Send for RwLockReadGuard<'_, T> {}
unsafe impl<T: ?Sized + Sync> Sync for RwLockReadGuard<'_, T> {}

unsafe impl<T: ?Sized + Send + Sync, R> Send for RwLockUpgradableGuard<'_, T, R> {}
unsafe impl<T: ?Sized + Send + Sync, R> Sync for RwLockUpgradableGuard<'_, T, R> {}

impl<T, R> RwLock<T, R> {
    /// Creates a new spinlock wrapping the supplied data.
    ///
    /// May be used statically:
    ///
    /// ```
    /// use spin;
    ///
    /// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(());
    ///
    /// fn demo() {
    ///     let lock = RW_LOCK.read();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    #[inline]
    pub const fn new(data: T) -> Self {
        RwLock {
            phantom: PhantomData,
            lock: AtomicUsize::new(0),
            data: UnsafeCell::new(data),
        }
    }

    /// Consumes this `RwLock`, returning the underlying data.
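    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// let lock = spin::RwLock::new(42);
    /// assert_eq!(lock.into_inner(), 42);
    /// ```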
    #[inline]
    pub fn into_inner(self) -> T {
        // We know statically that there are no outstanding references to
        // `self` so there's no need to lock.
        let RwLock { data, .. } = self;
        data.into_inner()
    }

    /// Returns a mutable pointer to the underlying data.
    ///
    /// This is mostly meant to be used for applications which require manual unlocking, but where
    /// storing both the lock and the pointer to the inner data gets inefficient.
    ///
    /// While this is safe, writing to the data is undefined behavior unless the current thread has
    /// acquired a write lock, and reading requires either a read or write lock.
    ///
    /// # Example
    /// ```
    /// let lock = spin::RwLock::new(42);
    ///
    /// unsafe {
    ///     core::mem::forget(lock.write());
    ///
    ///     assert_eq!(lock.as_mut_ptr().read(), 42);
    ///     lock.as_mut_ptr().write(58);
    ///
    ///     lock.force_write_unlock();
    /// }
    ///
    /// assert_eq!(*lock.read(), 58);
    /// ```
    #[inline(always)]
    pub fn as_mut_ptr(&self) -> *mut T {
        self.data.get()
    }
}

impl<T: ?Sized, R: RelaxStrategy> RwLock<T, R> {
    /// Locks this rwlock with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns. This method does not provide any guarantees with
    /// respect to the ordering of whether contending readers or writers will
    /// acquire the lock first.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     let data = mylock.read();
    ///     // The lock is now locked and the data can be read
    ///     println!("{}", *data);
    ///     // The lock is dropped
    /// }
    /// ```
    #[inline]
    pub fn read(&self) -> RwLockReadGuard<'_, T> {
        loop {
            match self.try_read() {
                Some(guard) => return guard,
                None => R::relax(),
            }
        }
    }

    /// Lock this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this rwlock
    /// when dropped.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     let mut data = mylock.write();
    ///     // The lock is now locked and the data can be written
    ///     *data += 1;
    ///     // The lock is dropped
    /// }
    /// ```
    #[inline]
    pub fn write(&self) -> RwLockWriteGuard<'_, T, R> {
        loop {
            match self.try_write_internal(false) {
                Some(guard) => return guard,
                None => R::relax(),
            }
        }
    }

    /// Obtain a readable lock guard that can later be upgraded to a writable lock guard.
    /// Upgrades can be done through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) method.
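    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
    /// assert_eq!(*upgradeable, 0);
    ///
    /// let mut writable = upgradeable.upgrade();
    /// *writable += 1;
    /// assert_eq!(*writable, 1);
    /// ```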
    #[inline]
    pub fn upgradeable_read(&self) -> RwLockUpgradableGuard<'_, T, R> {
        loop {
            match self.try_upgradeable_read() {
                Some(guard) => return guard,
                None => R::relax(),
            }
        }
    }
}

impl<T: ?Sized, R> RwLock<T, R> {
    // Acquire a read lock, returning the new lock value.
    fn acquire_reader(&self) -> usize {
        // An arbitrary cap that allows us to catch overflows long before they happen
        const MAX_READERS: usize = core::usize::MAX / READER / 2;

        let value = self.lock.fetch_add(READER, Ordering::Acquire);

        if value > MAX_READERS * READER {
            self.lock.fetch_sub(READER, Ordering::Relaxed);
            panic!("Too many lock readers, cannot safely proceed");
        } else {
            value
        }
    }

    /// Attempt to acquire this lock with shared read access.
    ///
    /// This function will never block and will return immediately if `read`
    /// would otherwise succeed. Returns `Some` of an RAII guard which will
    /// release the shared access of this thread when dropped, or `None` if the
    /// access could not be granted. This method does not provide any
    /// guarantees with respect to the ordering of whether contending readers
    /// or writers will acquire the lock first.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     match mylock.try_read() {
    ///         Some(data) => {
    ///             // The lock is now locked and the data can be read
    ///             println!("{}", *data);
    ///             // The lock is dropped
    ///         },
    ///         None => (), // no cigar
    ///     };
    /// }
    /// ```
    #[inline]
    pub fn try_read(&self) -> Option<RwLockReadGuard<'_, T>> {
        let value = self.acquire_reader();

        // We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held.
        // This helps reduce writer starvation.
        if value & (WRITER | UPGRADED) != 0 {
            // Lock is taken, undo.
            self.lock.fetch_sub(READER, Ordering::Release);
            None
        } else {
            Some(RwLockReadGuard {
                lock: &self.lock,
                data: unsafe { &*self.data.get() },
            })
        }
    }

    /// Return the number of readers that currently hold the lock (including upgradable readers).
    ///
    /// # Safety
    ///
    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
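    ///
    /// A minimal sketch of its use as a heuristic:
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let _r1 = mylock.read();
    /// let _r2 = mylock.read();
    /// // The count may already be stale by the time it is returned.
    /// assert_eq!(mylock.reader_count(), 2);
    /// ```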
    pub fn reader_count(&self) -> usize {
        let state = self.lock.load(Ordering::Relaxed);
        state / READER + (state & UPGRADED) / UPGRADED
    }

    /// Return the number of writers that currently hold the lock.
    ///
    /// Because [`RwLock`] guarantees exclusive mutable access, this function may only return either `0` or `1`.
    ///
    /// # Safety
    ///
    /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
    /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
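    ///
    /// A minimal sketch of its use as a heuristic:
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// assert_eq!(mylock.writer_count(), 0);
    ///
    /// let _w = mylock.write();
    /// // The count may already be stale by the time it is returned.
    /// assert_eq!(mylock.writer_count(), 1);
    /// ```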
    pub fn writer_count(&self) -> usize {
        (self.lock.load(Ordering::Relaxed) & WRITER) / WRITER
    }

    /// Force decrement the reader count.
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if there are outstanding `RwLockReadGuard`s
    /// live, or if called more times than `read` has been called, but can be
    /// useful in FFI contexts where the caller doesn't know how to deal with
    /// RAII. The underlying atomic operation uses `Ordering::Release`.
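    ///
    /// A minimal sketch, mirroring this module's own tests:
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// core::mem::forget(mylock.read());
    /// assert!(mylock.try_write().is_none());
    ///
    /// // Manually release the leaked read lock.
    /// unsafe { mylock.force_read_decrement(); }
    /// assert!(mylock.try_write().is_some());
    /// ```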
    #[inline]
    pub unsafe fn force_read_decrement(&self) {
        debug_assert!(self.lock.load(Ordering::Relaxed) & !WRITER > 0);
        self.lock.fetch_sub(READER, Ordering::Release);
    }

    /// Force unlock exclusive write access.
    ///
    /// # Safety
    ///
    /// This is *extremely* unsafe if there are outstanding `RwLockWriteGuard`s
    /// live, or if called when there are current readers, but can be useful in
    /// FFI contexts where the caller doesn't know how to deal with RAII. The
    /// underlying atomic operation uses `Ordering::Release`.
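    ///
    /// A minimal sketch, mirroring this module's own tests:
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// core::mem::forget(mylock.write());
    /// assert!(mylock.try_read().is_none());
    ///
    /// // Manually release the leaked write lock.
    /// unsafe { mylock.force_write_unlock(); }
    /// assert!(mylock.try_read().is_some());
    /// ```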
    #[inline]
    pub unsafe fn force_write_unlock(&self) {
        debug_assert_eq!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED), 0);
        self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
    }

    #[inline(always)]
    fn try_write_internal(&self, strong: bool) -> Option<RwLockWriteGuard<'_, T, R>> {
        if compare_exchange(
            &self.lock,
            0,
            WRITER,
            Ordering::Acquire,
            Ordering::Relaxed,
            strong,
        )
        .is_ok()
        {
            Some(RwLockWriteGuard {
                phantom: PhantomData,
                inner: self,
                data: unsafe { &mut *self.data.get() },
            })
        } else {
            None
        }
    }

    /// Attempt to lock this rwlock with exclusive write access.
    ///
    /// This function does not ever block, and it will return `None` if a call
    /// to `write` would otherwise block. If successful, an RAII guard is
    /// returned.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     match mylock.try_write() {
    ///         Some(mut data) => {
    ///             // The lock is now locked and the data can be written
    ///             *data += 1;
    ///             // The lock is implicitly dropped
    ///         },
    ///         None => (), // no cigar
    ///     };
    /// }
    /// ```
    #[inline]
    pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, T, R>> {
        self.try_write_internal(true)
    }

    /// Attempt to lock this rwlock with exclusive write access.
    ///
    /// Unlike [`RwLock::try_write`], this function is allowed to spuriously fail even when acquiring exclusive write access
    /// would otherwise succeed, which can result in more efficient code on some platforms.
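    ///
    /// A minimal sketch; since a weak attempt may fail spuriously, it is retried in a loop:
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// loop {
    ///     if let Some(mut guard) = mylock.try_write_weak() {
    ///         *guard += 1;
    ///         break;
    ///     }
    /// }
    /// assert_eq!(*mylock.read(), 1);
    /// ```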
    #[inline]
    pub fn try_write_weak(&self) -> Option<RwLockWriteGuard<'_, T, R>> {
        self.try_write_internal(false)
    }

    /// Tries to obtain an upgradeable lock guard.
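    ///
    /// A minimal usage sketch; at most one upgradeable guard can exist at a time:
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let upgradeable = mylock.try_upgradeable_read().unwrap();
    /// assert!(mylock.try_upgradeable_read().is_none());
    ///
    /// drop(upgradeable);
    /// assert!(mylock.try_upgradeable_read().is_some());
    /// ```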
    #[inline]
    pub fn try_upgradeable_read(&self) -> Option<RwLockUpgradableGuard<'_, T, R>> {
        if self.lock.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0 {
            Some(RwLockUpgradableGuard {
                phantom: PhantomData,
                inner: self,
                data: unsafe { &*self.data.get() },
            })
        } else {
            // We can't unflip the UPGRADED bit back just yet as there is another upgradeable or write lock.
            // When they unlock, they will clear the bit.
            None
        }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `RwLock` mutably, no actual locking needs to
    /// take place -- the mutable borrow statically guarantees no locks exist.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut lock = spin::RwLock::new(0);
    /// *lock.get_mut() = 10;
    /// assert_eq!(*lock.read(), 10);
    /// ```
    pub fn get_mut(&mut self) -> &mut T {
        // We know statically that there are no other references to `self`, so
        // there's no need to lock the inner lock.
        unsafe { &mut *self.data.get() }
    }
}

impl<T: ?Sized + fmt::Debug, R> fmt::Debug for RwLock<T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.try_read() {
            Some(guard) => write!(f, "RwLock {{ data: ")
                .and_then(|()| (&*guard).fmt(f))
                .and_then(|()| write!(f, " }}")),
            None => write!(f, "RwLock {{ <locked> }}"),
        }
    }
}

impl<T: ?Sized + Default, R> Default for RwLock<T, R> {
    fn default() -> Self {
        Self::new(Default::default())
    }
}

impl<T, R> From<T> for RwLock<T, R> {
    fn from(data: T) -> Self {
        Self::new(data)
    }
}

impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
    /// Leak the lock guard, yielding a reference to the underlying data.
    ///
    /// Note that this function leaves the lock permanently read-locked: writers will never be
    /// able to acquire it, though other readers still can.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let data: &i32 = spin::RwLockReadGuard::leak(mylock.read());
    ///
    /// assert_eq!(*data, 0);
    /// ```
    #[inline]
    pub fn leak(this: Self) -> &'rwlock T {
        let this = ManuallyDrop::new(this);
        // Safety: We know statically that only we are referencing data
        unsafe { &*this.data }
    }
}

impl<'rwlock, T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'rwlock, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'rwlock, T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'rwlock, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<'rwlock, T: ?Sized, R: RelaxStrategy> RwLockUpgradableGuard<'rwlock, T, R> {
    /// Upgrades an upgradeable lock guard to a writable lock guard.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
    /// let writable = upgradeable.upgrade();
    /// ```
    #[inline]
    pub fn upgrade(mut self) -> RwLockWriteGuard<'rwlock, T, R> {
        loop {
            self = match self.try_upgrade_internal(false) {
                Ok(guard) => return guard,
                Err(e) => e,
            };

            R::relax();
        }
    }
}

impl<'rwlock, T: ?Sized, R> RwLockUpgradableGuard<'rwlock, T, R> {
    #[inline(always)]
    fn try_upgrade_internal(self, strong: bool) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> {
        if compare_exchange(
            &self.inner.lock,
            UPGRADED,
            WRITER,
            Ordering::Acquire,
            Ordering::Relaxed,
            strong,
        )
        .is_ok()
        {
            let inner = self.inner;

            // Forget the old guard so its destructor doesn't run (before mutably aliasing data below)
            mem::forget(self);

            // Upgrade successful
            Ok(RwLockWriteGuard {
                phantom: PhantomData,
                inner,
                data: unsafe { &mut *inner.data.get() },
            })
        } else {
            Err(self)
        }
    }

    /// Tries to upgrade an upgradeable lock guard to a writable lock guard.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
    ///
    /// match upgradeable.try_upgrade() {
    ///     Ok(writable) => /* upgrade successful - use writable lock guard */ (),
    ///     Err(upgradeable) => /* upgrade unsuccessful */ (),
    /// };
    /// ```
    #[inline]
    pub fn try_upgrade(self) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> {
        self.try_upgrade_internal(true)
    }

    /// Tries to upgrade an upgradeable lock guard to a writable lock guard.
    ///
    /// Unlike [`RwLockUpgradableGuard::try_upgrade`], this function is allowed to spuriously fail even when upgrading
    /// would otherwise succeed, which can result in more efficient code on some platforms.
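    ///
    /// A minimal sketch; since a weak upgrade may fail spuriously, it is retried in a loop:
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// let mut upgradeable = mylock.upgradeable_read();
    ///
    /// let writable = loop {
    ///     match upgradeable.try_upgrade_weak() {
    ///         Ok(writable) => break writable,
    ///         Err(guard) => upgradeable = guard,
    ///     }
    /// };
    /// assert_eq!(*writable, 0);
    /// ```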
    #[inline]
    pub fn try_upgrade_weak(self) -> Result<RwLockWriteGuard<'rwlock, T, R>, Self> {
        self.try_upgrade_internal(false)
    }

    /// Downgrades the upgradeable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(1);
    ///
    /// let upgradeable = mylock.upgradeable_read();
    /// assert!(mylock.try_read().is_none());
    /// assert_eq!(*upgradeable, 1);
    ///
    /// let readable = upgradeable.downgrade(); // This is guaranteed not to spin
    /// assert!(mylock.try_read().is_some());
    /// assert_eq!(*readable, 1);
    /// ```
    #[inline]
    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
        // Reserve the read guard for ourselves
        self.inner.acquire_reader();

        let inner = self.inner;

        // Dropping self removes the UPGRADED bit
        mem::drop(self);

        RwLockReadGuard {
            lock: &inner.lock,
            data: unsafe { &*inner.data.get() },
        }
    }

    /// Leak the lock guard, yielding a reference to the underlying data.
    ///
    /// Note that this function will permanently lock the original lock.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let data: &i32 = spin::RwLockUpgradableGuard::leak(mylock.upgradeable_read());
    ///
    /// assert_eq!(*data, 0);
    /// ```
    #[inline]
    pub fn leak(this: Self) -> &'rwlock T {
        let this = ManuallyDrop::new(this);
        // Safety: We know statically that only we are referencing data
        unsafe { &*this.data }
    }
}

impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockUpgradableGuard<'rwlock, T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockUpgradableGuard<'rwlock, T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<'rwlock, T: ?Sized, R> RwLockWriteGuard<'rwlock, T, R> {
    /// Downgrades the writable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let mut writable = mylock.write();
    /// *writable = 1;
    ///
    /// let readable = writable.downgrade(); // This is guaranteed not to spin
    /// # let readable_2 = mylock.try_read().unwrap();
    /// assert_eq!(*readable, 1);
    /// ```
    #[inline]
    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
        // Reserve the read guard for ourselves
        self.inner.acquire_reader();

        let inner = self.inner;

        // Dropping self clears the WRITER (and any pending UPGRADED) bit
        mem::drop(self);

        RwLockReadGuard {
            lock: &inner.lock,
            data: unsafe { &*inner.data.get() },
        }
    }

    /// Downgrades the writable lock guard to an upgradable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let mut writable = mylock.write();
    /// *writable = 1;
    ///
    /// let readable = writable.downgrade_to_upgradeable(); // This is guaranteed not to spin
    /// assert_eq!(*readable, 1);
    /// ```
    #[inline]
    pub fn downgrade_to_upgradeable(self) -> RwLockUpgradableGuard<'rwlock, T, R> {
        debug_assert_eq!(
            self.inner.lock.load(Ordering::Acquire) & (WRITER | UPGRADED),
            WRITER
        );

        // Swap the WRITER bit for the UPGRADED bit, reserving the upgradeable guard for ourselves
        self.inner.lock.store(UPGRADED, Ordering::Release);

        let inner = self.inner;

        // Forget self so its destructor doesn't clear the UPGRADED bit we just set
        mem::forget(self);

        RwLockUpgradableGuard {
            phantom: PhantomData,
            inner,
            data: unsafe { &*inner.data.get() },
        }
    }

    /// Leak the lock guard, yielding a mutable reference to the underlying data.
    ///
    /// Note that this function will permanently lock the original lock.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let data: &mut i32 = spin::RwLockWriteGuard::leak(mylock.write());
    ///
    /// *data = 1;
    /// assert_eq!(*data, 1);
    /// ```
    #[inline]
    pub fn leak(this: Self) -> &'rwlock mut T {
        let mut this = ManuallyDrop::new(this);
        // Safety: We know statically that only we are referencing data
        unsafe { &mut *this.data }
    }
}

impl<'rwlock, T: ?Sized + fmt::Debug, R> fmt::Debug for RwLockWriteGuard<'rwlock, T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Debug::fmt(&**self, f)
    }
}

impl<'rwlock, T: ?Sized + fmt::Display, R> fmt::Display for RwLockWriteGuard<'rwlock, T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(&**self, f)
    }
}

impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
    type Target = T;

    fn deref(&self) -> &T {
        // Safety: We know statically that only we are referencing data
        unsafe { &*self.data }
    }
}

impl<'rwlock, T: ?Sized, R> Deref for RwLockUpgradableGuard<'rwlock, T, R> {
    type Target = T;

    fn deref(&self) -> &T {
        // Safety: We know statically that only we are referencing data
        unsafe { &*self.data }
    }
}

impl<'rwlock, T: ?Sized, R> Deref for RwLockWriteGuard<'rwlock, T, R> {
    type Target = T;

    fn deref(&self) -> &T {
        // Safety: We know statically that only we are referencing data
        unsafe { &*self.data }
    }
}

impl<'rwlock, T: ?Sized, R> DerefMut for RwLockWriteGuard<'rwlock, T, R> {
    fn deref_mut(&mut self) -> &mut T {
        // Safety: We know statically that only we are referencing data
        unsafe { &mut *self.data }
    }
}

impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
    fn drop(&mut self) {
        debug_assert!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED) > 0);
        self.lock.fetch_sub(READER, Ordering::Release);
    }
}

impl<'rwlock, T: ?Sized, R> Drop for RwLockUpgradableGuard<'rwlock, T, R> {
    fn drop(&mut self) {
        debug_assert_eq!(
            self.inner.lock.load(Ordering::Relaxed) & (WRITER | UPGRADED),
            UPGRADED
        );
        self.inner.lock.fetch_sub(UPGRADED, Ordering::AcqRel);
    }
}

impl<'rwlock, T: ?Sized, R> Drop for RwLockWriteGuard<'rwlock, T, R> {
    fn drop(&mut self) {
        debug_assert_eq!(self.inner.lock.load(Ordering::Relaxed) & WRITER, WRITER);

        // Writer is responsible for clearing both WRITER and UPGRADED bits.
        // The UPGRADED bit may be set if an upgradeable lock attempts an upgrade while this lock is held.
        self.inner
            .lock
            .fetch_and(!(WRITER | UPGRADED), Ordering::Release);
    }
}

/// Dispatches to either the strong or weak compare-exchange on the given atomic,
/// depending on `strong`.
#[inline(always)]
fn compare_exchange(
    atomic: &AtomicUsize,
    current: usize,
    new: usize,
    success: Ordering,
    failure: Ordering,
    strong: bool,
) -> Result<usize, usize> {
    if strong {
        atomic.compare_exchange(current, new, success, failure)
    } else {
        atomic.compare_exchange_weak(current, new, success, failure)
    }
}

#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLock for RwLock<(), R> {
    type GuardMarker = lock_api_crate::GuardSend;

    const INIT: Self = Self::new(());

    #[inline(always)]
    fn lock_exclusive(&self) {
        // Prevent guard destructor running
        core::mem::forget(self.write());
    }

    #[inline(always)]
    fn try_lock_exclusive(&self) -> bool {
        // Prevent guard destructor running
        self.try_write().map(|g| core::mem::forget(g)).is_some()
    }

    #[inline(always)]
    unsafe fn unlock_exclusive(&self) {
        drop(RwLockWriteGuard {
            inner: self,
            data: &mut (),
            phantom: PhantomData,
        });
    }

    #[inline(always)]
    fn lock_shared(&self) {
        // Prevent guard destructor running
        core::mem::forget(self.read());
    }

    #[inline(always)]
    fn try_lock_shared(&self) -> bool {
        // Prevent guard destructor running
        self.try_read().map(|g| core::mem::forget(g)).is_some()
    }

    #[inline(always)]
    unsafe fn unlock_shared(&self) {
        drop(RwLockReadGuard {
            lock: &self.lock,
            data: &(),
        });
    }

    #[inline(always)]
    fn is_locked(&self) -> bool {
        self.lock.load(Ordering::Relaxed) != 0
    }
}

#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockUpgrade for RwLock<(), R> {
    #[inline(always)]
    fn lock_upgradable(&self) {
        // Prevent guard destructor running
        core::mem::forget(self.upgradeable_read());
    }

    #[inline(always)]
    fn try_lock_upgradable(&self) -> bool {
        // Prevent guard destructor running
        self.try_upgradeable_read()
            .map(|g| core::mem::forget(g))
            .is_some()
    }

    #[inline(always)]
    unsafe fn unlock_upgradable(&self) {
        drop(RwLockUpgradableGuard {
            inner: self,
            data: &(),
            phantom: PhantomData,
        });
    }

    #[inline(always)]
    unsafe fn upgrade(&self) {
        let tmp_guard = RwLockUpgradableGuard {
            inner: self,
            data: &(),
            phantom: PhantomData,
        };
        core::mem::forget(tmp_guard.upgrade());
    }

    #[inline(always)]
    unsafe fn try_upgrade(&self) -> bool {
        let tmp_guard = RwLockUpgradableGuard {
            inner: self,
            data: &(),
            phantom: PhantomData,
        };
        tmp_guard
            .try_upgrade()
            .map(|g| core::mem::forget(g))
            .is_ok()
    }
}

#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockDowngrade for RwLock<(), R> {
    unsafe fn downgrade(&self) {
        let tmp_guard = RwLockWriteGuard {
            inner: self,
            data: &mut (),
            phantom: PhantomData,
        };
        core::mem::forget(tmp_guard.downgrade());
    }
}

#[cfg(feature = "lock_api")]
unsafe impl<R: RelaxStrategy> lock_api_crate::RawRwLockUpgradeDowngrade for RwLock<(), R> {
    unsafe fn downgrade_upgradable(&self) {
        let tmp_guard = RwLockUpgradableGuard {
            inner: self,
            data: &(),
            phantom: PhantomData,
        };
        core::mem::forget(tmp_guard.downgrade());
    }

    unsafe fn downgrade_to_upgradable(&self) {
        let tmp_guard = RwLockWriteGuard {
            inner: self,
            data: &mut (),
            phantom: PhantomData,
        };
        core::mem::forget(tmp_guard.downgrade_to_upgradeable());
    }
}

#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    type RwLock<T> = super::RwLock<T>;

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read());
        drop(l.write());
        drop((l.read(), l.read()));
        drop(l.write());
    }

    // TODO: needs RNG
    //#[test]
    //fn frob() {
    //    static R: RwLock = RwLock::new();
    //    const N: usize = 10;
    //    const M: usize = 1000;
    //
    //    let (tx, rx) = channel::<()>();
    //    for _ in 0..N {
    //        let tx = tx.clone();
    //        thread::spawn(move|| {
    //            let mut rng = rand::thread_rng();
    //            for _ in 0..M {
    //                if rng.gen_weighted_bool(N) {
    //                    drop(R.write());
    //                } else {
    //                    drop(R.read());
    //                }
    //            }
    //            drop(tx);
    //        });
    //    }
    //    drop(tx);
    //    let _ = rx.recv();
    //    unsafe { R.destroy(); }
    //}

    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        let t = thread::spawn(move || {
            let mut lock = arc2.write();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 10);

        assert!(t.join().is_ok());
    }

    #[test]
    fn test_rw_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read(), comp);
    }

    #[test]
    fn test_rwlock_try_write() {
        use std::mem::drop;

        let lock = RwLock::new(0isize);
        let read_guard = lock.read();

        let write_result = lock.try_write();
        match write_result {
            None => (),
            Some(_) => assert!(
                false,
                "try_write should not succeed while read_guard is in scope"
            ),
        }

        drop(read_guard);
    }

    #[test]
    fn test_rw_try_read() {
        let m = RwLock::new(0);
        ::std::mem::forget(m.write());
        assert!(m.try_read().is_none());
    }

    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_force_read_decrement() {
        let m = RwLock::new(());
        ::std::mem::forget(m.read());
        ::std::mem::forget(m.read());
        ::std::mem::forget(m.read());
        assert!(m.try_write().is_none());
        unsafe {
            m.force_read_decrement();
            m.force_read_decrement();
        }
        assert!(m.try_write().is_none());
        unsafe {
            m.force_read_decrement();
        }
        assert!(m.try_write().is_some());
    }

    #[test]
    fn test_force_write_unlock() {
        let m = RwLock::new(());
        ::std::mem::forget(m.write());
        assert!(m.try_read().is_none());
        unsafe {
            m.force_write_unlock();
        }
        assert!(m.try_read().is_some());
    }

    #[test]
    fn test_upgrade_downgrade() {
        let m = RwLock::new(());
        {
            let _r = m.read();
            let upg = m.try_upgradeable_read().unwrap();
            assert!(m.try_read().is_none());
            assert!(m.try_write().is_none());
            assert!(upg.try_upgrade().is_err());
        }
        {
            let w = m.write();
            assert!(m.try_upgradeable_read().is_none());
            let _r = w.downgrade();
            assert!(m.try_upgradeable_read().is_some());
            assert!(m.try_read().is_some());
            assert!(m.try_write().is_none());
        }
        {
            let _u = m.upgradeable_read();
            assert!(m.try_upgradeable_read().is_none());
        }

        assert!(m.try_upgradeable_read().unwrap().try_upgrade().is_ok());
    }
}