// tokio/sync/rwlock.rs
1use crate::sync::batch_semaphore::{Semaphore, TryAcquireError};
2use crate::sync::mutex::TryLockError;
3#[cfg(all(tokio_unstable, feature = "tracing"))]
4use crate::util::trace;
5use std::cell::UnsafeCell;
6use std::marker;
7use std::marker::PhantomData;
8use std::sync::Arc;
9
10pub(crate) mod owned_read_guard;
11pub(crate) mod owned_write_guard;
12pub(crate) mod owned_write_guard_mapped;
13pub(crate) mod read_guard;
14pub(crate) mod write_guard;
15pub(crate) mod write_guard_mapped;
16pub(crate) use owned_read_guard::OwnedRwLockReadGuard;
17pub(crate) use owned_write_guard::OwnedRwLockWriteGuard;
18pub(crate) use owned_write_guard_mapped::OwnedRwLockMappedWriteGuard;
19pub(crate) use read_guard::RwLockReadGuard;
20pub(crate) use write_guard::RwLockWriteGuard;
21pub(crate) use write_guard_mapped::RwLockMappedWriteGuard;
22
// Maximum number of read permits held by the semaphore: each reader takes one
// permit and a writer must take all of them (see `read` / `write` below).
#[cfg(not(loom))]
const MAX_READS: u32 = u32::MAX >> 3;

// Keep the permit count small when model-checking under loom so the explored
// state space stays tractable.
#[cfg(loom)]
const MAX_READS: u32 = 10;
28
29/// An asynchronous reader-writer lock.
30///
31/// This type of lock allows a number of readers or at most one writer at any
32/// point in time. The write portion of this lock typically allows modification
33/// of the underlying data (exclusive access) and the read portion of this lock
34/// typically allows for read-only access (shared access).
35///
36/// In comparison, a [`Mutex`] does not distinguish between readers or writers
37/// that acquire the lock, therefore causing any tasks waiting for the lock to
38/// become available to yield. An `RwLock` will allow any number of readers to
39/// acquire the lock as long as a writer is not holding the lock.
40///
41/// The priority policy of Tokio's read-write lock is _fair_ (or
42/// [_write-preferring_]), in order to ensure that readers cannot starve
43/// writers. Fairness is ensured using a first-in, first-out queue for the tasks
44/// awaiting the lock; a read lock will not be given out until all write lock
45/// requests that were queued before it have been acquired and released. This is
46/// in contrast to the Rust standard library's `std::sync::RwLock`, where the
47/// priority policy is dependent on the operating system's implementation.
48///
49/// The type parameter `T` represents the data that this lock protects. It is
50/// required that `T` satisfies [`Send`] to be shared across threads. The RAII guards
51/// returned from the locking methods implement [`Deref`](trait@std::ops::Deref)
52/// (and [`DerefMut`](trait@std::ops::DerefMut)
53/// for the `write` methods) to allow access to the content of the lock.
54///
55/// # Examples
56///
57/// ```
58/// use tokio::sync::RwLock;
59///
60/// # #[tokio::main(flavor = "current_thread")]
61/// # async fn main() {
62/// let lock = RwLock::new(5);
63///
64/// // many reader locks can be held at once
65/// {
66/// let r1 = lock.read().await;
67/// let r2 = lock.read().await;
68/// assert_eq!(*r1, 5);
69/// assert_eq!(*r2, 5);
70/// } // read locks are dropped at this point
71///
72/// // only one write lock may be held, however
73/// {
74/// let mut w = lock.write().await;
75/// *w += 1;
76/// assert_eq!(*w, 6);
77/// } // write lock is dropped here
78/// # }
79/// ```
80///
81/// [`Mutex`]: struct@super::Mutex
82/// [`RwLock`]: struct@RwLock
83/// [`RwLockReadGuard`]: struct@RwLockReadGuard
84/// [`RwLockWriteGuard`]: struct@RwLockWriteGuard
85/// [`Send`]: trait@std::marker::Send
86/// [_write-preferring_]: https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock#Priority_policies
pub struct RwLock<T: ?Sized> {
    // `tracing` span identifying this lock as a runtime resource
    // (tokio-console instrumentation).
    #[cfg(all(tokio_unstable, feature = "tracing"))]
    resource_span: tracing::Span,

    // Maximum number of concurrent readers; a writer must acquire this many
    // permits from `s` to gain exclusive access.
    mr: u32,

    // Semaphore to coordinate read and write access to `T`: readers take one
    // permit each, writers take all `mr` permits at once.
    s: Semaphore,

    // The inner data `T`; only accessed while holding the appropriate permits.
    c: UnsafeCell<T>,
}
100
// Compile-time assertions that the lock, all guard types, and the lock
// futures implement the auto traits we promise (`Send`, `Sync`, `Unpin`).
#[test]
#[cfg(not(loom))]
fn bounds() {
    fn check_send<T: Send>() {}
    fn check_sync<T: Sync>() {}
    fn check_unpin<T: Unpin>() {}
    // This has to take a value, since the async fn's return type is unnameable.
    fn check_send_sync_val<T: Send + Sync>(_t: T) {}

    check_send::<RwLock<u32>>();
    check_sync::<RwLock<u32>>();
    check_unpin::<RwLock<u32>>();

    check_send::<RwLockReadGuard<'_, u32>>();
    check_sync::<RwLockReadGuard<'_, u32>>();
    check_unpin::<RwLockReadGuard<'_, u32>>();

    check_send::<OwnedRwLockReadGuard<u32, i32>>();
    check_sync::<OwnedRwLockReadGuard<u32, i32>>();
    check_unpin::<OwnedRwLockReadGuard<u32, i32>>();

    check_send::<RwLockWriteGuard<'_, u32>>();
    check_sync::<RwLockWriteGuard<'_, u32>>();
    check_unpin::<RwLockWriteGuard<'_, u32>>();

    check_send::<RwLockMappedWriteGuard<'_, u32>>();
    check_sync::<RwLockMappedWriteGuard<'_, u32>>();
    check_unpin::<RwLockMappedWriteGuard<'_, u32>>();

    check_send::<OwnedRwLockWriteGuard<u32>>();
    check_sync::<OwnedRwLockWriteGuard<u32>>();
    check_unpin::<OwnedRwLockWriteGuard<u32>>();

    check_send::<OwnedRwLockMappedWriteGuard<u32, i32>>();
    check_sync::<OwnedRwLockMappedWriteGuard<u32, i32>>();
    check_unpin::<OwnedRwLockMappedWriteGuard<u32, i32>>();

    // The futures returned by the locking methods must be Send + Sync so
    // they can be awaited from spawned tasks.
    let rwlock = Arc::new(RwLock::new(0));
    check_send_sync_val(rwlock.read());
    check_send_sync_val(Arc::clone(&rwlock).read_owned());
    check_send_sync_val(rwlock.write());
    check_send_sync_val(Arc::clone(&rwlock).write_owned());
}
144
// As long as T: Send + Sync, it's fine to send and share RwLock<T> between threads.
// If T were not Send, sending and sharing a RwLock<T> would be bad, since you can access T through
// RwLock<T>.
unsafe impl<T> Send for RwLock<T> where T: ?Sized + Send {}
unsafe impl<T> Sync for RwLock<T> where T: ?Sized + Send + Sync {}
// NB: These impls need to be explicit since we're storing a raw pointer.
// Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over
// `T` is `Send`.
unsafe impl<T> Send for RwLockReadGuard<'_, T> where T: ?Sized + Sync {}
unsafe impl<T> Sync for RwLockReadGuard<'_, T> where T: ?Sized + Send + Sync {}
// T is required to be `Send` because an OwnedRwLockReadGuard can be used to drop the value held in
// the RwLock, unlike RwLockReadGuard.
unsafe impl<T, U> Send for OwnedRwLockReadGuard<T, U>
where
    T: ?Sized + Send + Sync,
    U: ?Sized + Sync,
{
}
unsafe impl<T, U> Sync for OwnedRwLockReadGuard<T, U>
where
    T: ?Sized + Send + Sync,
    U: ?Sized + Send + Sync,
{
}
unsafe impl<T> Sync for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Sync for OwnedRwLockWriteGuard<T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Sync for RwLockMappedWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T, U> Sync for OwnedRwLockMappedWriteGuard<T, U>
where
    T: ?Sized + Send + Sync,
    U: ?Sized + Send + Sync,
{
}
// Safety: Stores a raw pointer to `T`, so if `T` is `Sync`, the lock guard over
// `T` is `Send` - but since this also provides mutable access, we need to
// make sure that `T` is `Send` since its value can be sent across thread
// boundaries.
unsafe impl<T> Send for RwLockWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Send for OwnedRwLockWriteGuard<T> where T: ?Sized + Send + Sync {}
unsafe impl<T> Send for RwLockMappedWriteGuard<'_, T> where T: ?Sized + Send + Sync {}
unsafe impl<T, U> Send for OwnedRwLockMappedWriteGuard<T, U>
where
    T: ?Sized + Send + Sync,
    U: ?Sized + Send + Sync,
{
}
191
192impl<T: ?Sized> RwLock<T> {
193 /// Creates a new instance of an `RwLock<T>` which is unlocked.
194 ///
195 /// # Examples
196 ///
197 /// ```
198 /// use tokio::sync::RwLock;
199 ///
200 /// let lock = RwLock::new(5);
201 /// ```
    #[track_caller]
    pub fn new(value: T) -> RwLock<T>
    where
        T: Sized,
    {
        // When instrumented, create a span identifying this lock as a runtime
        // resource and record its initial state for tokio-console.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let resource_span = {
            let location = std::panic::Location::caller();
            let resource_span = tracing::trace_span!(
                parent: None,
                "runtime.resource",
                concrete_type = "RwLock",
                kind = "Sync",
                loc.file = location.file(),
                loc.line = location.line(),
                loc.col = location.column(),
            );

            resource_span.in_scope(|| {
                tracing::trace!(
                    target: "runtime::resource::state_update",
                    max_readers = MAX_READS,
                );

                tracing::trace!(
                    target: "runtime::resource::state_update",
                    write_locked = false,
                );

                tracing::trace!(
                    target: "runtime::resource::state_update",
                    current_readers = 0,
                );
            });

            resource_span
        };

        // The semaphore starts with one permit per allowed reader; a writer
        // must acquire all of them.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let s = resource_span.in_scope(|| Semaphore::new(MAX_READS as usize));

        #[cfg(any(not(tokio_unstable), not(feature = "tracing")))]
        let s = Semaphore::new(MAX_READS as usize);

        RwLock {
            mr: MAX_READS,
            c: UnsafeCell::new(value),
            s,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span,
        }
    }
254
255 /// Creates a new instance of an `RwLock<T>` which is unlocked
256 /// and allows a maximum of `max_reads` concurrent readers.
257 ///
258 /// # Examples
259 ///
260 /// ```
261 /// use tokio::sync::RwLock;
262 ///
263 /// let lock = RwLock::with_max_readers(5, 1024);
264 /// ```
265 ///
266 /// # Panics
267 ///
268 /// Panics if `max_reads` is `0` or is bigger than `u32::MAX >> 3`.
    #[track_caller]
    pub fn with_max_readers(value: T, max_reads: u32) -> RwLock<T>
    where
        T: Sized,
    {
        // Zero permits would make the lock unacquirable, and more than
        // `MAX_READS` would exceed the semaphore's permit budget.
        assert_ne!(max_reads, 0, "a RwLock may not be created with 0 readers");
        assert!(
            max_reads <= MAX_READS,
            "a RwLock may not be created with more than {MAX_READS} readers"
        );

        // When instrumented, create a span identifying this lock as a runtime
        // resource and record its initial state for tokio-console.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let resource_span = {
            let location = std::panic::Location::caller();

            let resource_span = tracing::trace_span!(
                parent: None,
                "runtime.resource",
                concrete_type = "RwLock",
                kind = "Sync",
                loc.file = location.file(),
                loc.line = location.line(),
                loc.col = location.column(),
            );

            resource_span.in_scope(|| {
                tracing::trace!(
                    target: "runtime::resource::state_update",
                    max_readers = max_reads,
                );

                tracing::trace!(
                    target: "runtime::resource::state_update",
                    write_locked = false,
                );

                tracing::trace!(
                    target: "runtime::resource::state_update",
                    current_readers = 0,
                );
            });

            resource_span
        };

        // One permit per allowed reader; a writer must acquire all of them.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let s = resource_span.in_scope(|| Semaphore::new(max_reads as usize));

        #[cfg(any(not(tokio_unstable), not(feature = "tracing")))]
        let s = Semaphore::new(max_reads as usize);

        RwLock {
            mr: max_reads,
            c: UnsafeCell::new(value),
            s,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span,
        }
    }
328
329 /// Creates a new instance of an `RwLock<T>` which is unlocked.
330 ///
331 /// When using the `tracing` [unstable feature], a `RwLock` created with
332 /// `const_new` will not be instrumented. As such, it will not be visible
333 /// in [`tokio-console`]. Instead, [`RwLock::new`] should be used to create
334 /// an instrumented object if that is needed.
335 ///
336 /// # Examples
337 ///
338 /// ```
339 /// use tokio::sync::RwLock;
340 ///
341 /// static LOCK: RwLock<i32> = RwLock::const_new(5);
342 /// ```
343 ///
344 /// [`tokio-console`]: https://github.com/tokio-rs/console
345 /// [unstable feature]: crate#unstable-features
    #[cfg(not(all(loom, test)))]
    pub const fn const_new(value: T) -> RwLock<T>
    where
        T: Sized,
    {
        // No tracing span is created here: `const` construction cannot be
        // instrumented, so the span is explicitly disabled (see docs above).
        RwLock {
            mr: MAX_READS,
            c: UnsafeCell::new(value),
            s: Semaphore::const_new(MAX_READS as usize),
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: tracing::Span::none(),
        }
    }
359
360 /// Creates a new instance of an `RwLock<T>` which is unlocked
361 /// and allows a maximum of `max_reads` concurrent readers.
362 ///
363 /// # Examples
364 ///
365 /// ```
366 /// use tokio::sync::RwLock;
367 ///
368 /// static LOCK: RwLock<i32> = RwLock::const_with_max_readers(5, 1024);
369 /// ```
370 ///
371 /// # Panics
372 ///
373 /// Panics if `max_reads` is `0` or is bigger than `u32::MAX >> 3`.
374 #[cfg(not(all(loom, test)))]
375 pub const fn const_with_max_readers(value: T, max_reads: u32) -> RwLock<T>
376 where
377 T: Sized,
378 {
379 assert!(max_reads != 0, "a RwLock may not be created with 0 readers");
380 assert!(max_reads <= MAX_READS);
381
382 RwLock {
383 mr: max_reads,
384 c: UnsafeCell::new(value),
385 s: Semaphore::const_new(max_reads as usize),
386 #[cfg(all(tokio_unstable, feature = "tracing"))]
387 resource_span: tracing::Span::none(),
388 }
389 }
390
391 /// Locks this `RwLock` with shared read access, causing the current task
392 /// to yield until the lock has been acquired.
393 ///
394 /// The calling task will yield until there are no writers which hold the
395 /// lock. There may be other readers inside the lock when the task resumes.
396 ///
397 /// Note that under the priority policy of [`RwLock`], read locks are not
398 /// granted until prior write locks, to prevent starvation. Therefore
399 /// deadlock may occur if a read lock is held by the current task, a write
400 /// lock attempt is made, and then a subsequent read lock attempt is made
401 /// by the current task.
402 ///
403 /// Returns an RAII guard which will drop this read access of the `RwLock`
404 /// when dropped.
405 ///
406 /// # Cancel safety
407 ///
408 /// This method uses a queue to fairly distribute locks in the order they
409 /// were requested. Cancelling a call to `read` makes you lose your place in
410 /// the queue.
411 ///
412 /// # Examples
413 ///
414 /// ```
415 /// use std::sync::Arc;
416 /// use tokio::sync::RwLock;
417 ///
418 /// # #[tokio::main(flavor = "current_thread")]
419 /// # async fn main() {
420 /// let lock = Arc::new(RwLock::new(1));
421 /// let c_lock = lock.clone();
422 ///
423 /// let n = lock.read().await;
424 /// assert_eq!(*n, 1);
425 ///
426 /// tokio::spawn(async move {
427 /// // While main has an active read lock, we acquire one too.
428 /// let r = c_lock.read().await;
429 /// assert_eq!(*r, 1);
430 /// }).await.expect("The spawned task has panicked");
431 ///
432 /// // Drop the guard after the spawned task finishes.
433 /// drop(n);
434 /// # }
435 /// ```
    pub async fn read(&self) -> RwLockReadGuard<'_, T> {
        let acquire_fut = async {
            // A reader needs exactly one of the `mr` permits.
            self.s.acquire(1).await.unwrap_or_else(|_| {
                // The semaphore was closed. but, we never explicitly close it, and we have a
                // handle to it through the Arc, which means that this can never happen.
                unreachable!()
            });

            // The guard releases the permit back to `s` when dropped.
            RwLockReadGuard {
                s: &self.s,
                data: self.c.get(),
                marker: PhantomData,
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: self.resource_span.clone(),
            }
        };

        // When instrumented, wrap the acquire future so tokio-console can
        // observe the async operation being polled.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            self.resource_span.clone(),
            "RwLock::read",
            "poll",
            false,
        );

        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
        let guard = acquire_fut.await;

        // Record the newly-acquired reader for instrumentation.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        guard
    }
476
477 /// Blockingly locks this `RwLock` with shared read access.
478 ///
479 /// This method is intended for use cases where you
480 /// need to use this rwlock in asynchronous code as well as in synchronous code.
481 ///
482 /// Returns an RAII guard which will drop the read access of this `RwLock` when dropped.
483 ///
484 /// # Panics
485 ///
486 /// This function panics if called within an asynchronous execution context.
487 ///
488 /// - If you find yourself in an asynchronous execution context and needing
489 /// to call some (synchronous) function which performs one of these
490 /// `blocking_` operations, then consider wrapping that call inside
491 /// [`spawn_blocking()`][crate::runtime::Handle::spawn_blocking]
492 /// (or [`block_in_place()`][crate::task::block_in_place]).
493 ///
494 /// # Examples
495 ///
496 /// ```
497 /// # #[cfg(not(target_family = "wasm"))]
498 /// # {
499 /// use std::sync::Arc;
500 /// use tokio::sync::RwLock;
501 ///
502 /// #[tokio::main]
503 /// async fn main() {
504 /// let rwlock = Arc::new(RwLock::new(1));
505 /// let mut write_lock = rwlock.write().await;
506 ///
507 /// let blocking_task = tokio::task::spawn_blocking({
508 /// let rwlock = Arc::clone(&rwlock);
509 /// move || {
510 /// // This shall block until the `write_lock` is released.
511 /// let read_lock = rwlock.blocking_read();
512 /// assert_eq!(*read_lock, 0);
513 /// }
514 /// });
515 ///
516 /// *write_lock -= 1;
517 /// drop(write_lock); // release the lock.
518 ///
519 /// // Await the completion of the blocking task.
520 /// blocking_task.await.unwrap();
521 ///
522 /// // Assert uncontended.
523 /// assert!(rwlock.try_write().is_ok());
524 /// }
525 /// # }
526 /// ```
    #[track_caller]
    #[cfg(feature = "sync")]
    pub fn blocking_read(&self) -> RwLockReadGuard<'_, T> {
        // Drive the async `read` to completion on the current thread; panics
        // if called from within an async execution context.
        crate::future::block_on(self.read())
    }
532
533 /// Locks this `RwLock` with shared read access, causing the current task
534 /// to yield until the lock has been acquired.
535 ///
536 /// The calling task will yield until there are no writers which hold the
537 /// lock. There may be other readers inside the lock when the task resumes.
538 ///
539 /// This method is identical to [`RwLock::read`], except that the returned
540 /// guard references the `RwLock` with an [`Arc`] rather than by borrowing
541 /// it. Therefore, the `RwLock` must be wrapped in an `Arc` to call this
542 /// method, and the guard will live for the `'static` lifetime, as it keeps
543 /// the `RwLock` alive by holding an `Arc`.
544 ///
545 /// Note that under the priority policy of [`RwLock`], read locks are not
546 /// granted until prior write locks, to prevent starvation. Therefore
547 /// deadlock may occur if a read lock is held by the current task, a write
548 /// lock attempt is made, and then a subsequent read lock attempt is made
549 /// by the current task.
550 ///
551 /// Returns an RAII guard which will drop this read access of the `RwLock`
552 /// when dropped.
553 ///
554 /// # Cancel safety
555 ///
556 /// This method uses a queue to fairly distribute locks in the order they
557 /// were requested. Cancelling a call to `read_owned` makes you lose your
558 /// place in the queue.
559 ///
560 /// # Examples
561 ///
562 /// ```
563 /// use std::sync::Arc;
564 /// use tokio::sync::RwLock;
565 ///
566 /// # #[tokio::main(flavor = "current_thread")]
567 /// # async fn main() {
568 /// let lock = Arc::new(RwLock::new(1));
569 /// let c_lock = lock.clone();
570 ///
571 /// let n = lock.read_owned().await;
572 /// assert_eq!(*n, 1);
573 ///
574 /// tokio::spawn(async move {
575 /// // While main has an active read lock, we acquire one too.
576 /// let r = c_lock.read_owned().await;
577 /// assert_eq!(*r, 1);
578 /// }).await.expect("The spawned task has panicked");
579 ///
580 /// // Drop the guard after the spawned task finishes.
581 /// drop(n);
582 ///}
583 /// ```
    pub async fn read_owned(self: Arc<Self>) -> OwnedRwLockReadGuard<T> {
        // Clone the span up front because `self` is moved into the future.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let resource_span = self.resource_span.clone();

        let acquire_fut = async {
            // A reader needs exactly one of the `mr` permits.
            self.s.acquire(1).await.unwrap_or_else(|_| {
                // The semaphore was closed. but, we never explicitly close it, and we have a
                // handle to it through the Arc, which means that this can never happen.
                unreachable!()
            });

            // The guard keeps the lock alive by holding the `Arc` and
            // releases the permit when dropped.
            OwnedRwLockReadGuard {
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: self.resource_span.clone(),
                data: self.c.get(),
                lock: self,
                _p: PhantomData,
            }
        };

        // When instrumented, wrap the acquire future so tokio-console can
        // observe the async operation being polled.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            resource_span,
            "RwLock::read_owned",
            "poll",
            false,
        );

        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
        let guard = acquire_fut.await;

        // Record the newly-acquired reader for instrumentation.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        guard
    }
627
628 /// Attempts to acquire this `RwLock` with shared read access.
629 ///
630 /// If the access couldn't be acquired immediately, returns [`TryLockError`].
631 /// Otherwise, an RAII guard is returned which will release read access
632 /// when dropped.
633 ///
634 /// [`TryLockError`]: TryLockError
635 ///
636 /// # Examples
637 ///
638 /// ```
639 /// use std::sync::Arc;
640 /// use tokio::sync::RwLock;
641 ///
642 /// # #[tokio::main(flavor = "current_thread")]
643 /// # async fn main() {
644 /// let lock = Arc::new(RwLock::new(1));
645 /// let c_lock = lock.clone();
646 ///
647 /// let v = lock.try_read().unwrap();
648 /// assert_eq!(*v, 1);
649 ///
650 /// tokio::spawn(async move {
651 /// // While main has an active read lock, we acquire one too.
652 /// let n = c_lock.read().await;
653 /// assert_eq!(*n, 1);
654 /// }).await.expect("The spawned task has panicked");
655 ///
656 /// // Drop the guard when spawned task finishes.
657 /// drop(v);
658 /// # }
659 /// ```
    pub fn try_read(&self) -> Result<RwLockReadGuard<'_, T>, TryLockError> {
        // Try to take a single read permit without waiting. `NoPermits`
        // means every permit is currently held by readers or a writer.
        match self.s.try_acquire(1) {
            Ok(permit) => permit,
            Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
            Err(TryAcquireError::Closed) => unreachable!(),
        }

        // The guard releases the permit back to `s` when dropped.
        let guard = RwLockReadGuard {
            s: &self.s,
            data: self.c.get(),
            marker: marker::PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: self.resource_span.clone(),
        };

        // Record the newly-acquired reader for instrumentation.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        Ok(guard)
    }
686
687 /// Attempts to acquire this `RwLock` with shared read access.
688 ///
689 /// If the access couldn't be acquired immediately, returns [`TryLockError`].
690 /// Otherwise, an RAII guard is returned which will release read access
691 /// when dropped.
692 ///
693 /// This method is identical to [`RwLock::try_read`], except that the
694 /// returned guard references the `RwLock` with an [`Arc`] rather than by
695 /// borrowing it. Therefore, the `RwLock` must be wrapped in an `Arc` to
696 /// call this method, and the guard will live for the `'static` lifetime,
697 /// as it keeps the `RwLock` alive by holding an `Arc`.
698 ///
699 /// [`TryLockError`]: TryLockError
700 ///
701 /// # Examples
702 ///
703 /// ```
704 /// use std::sync::Arc;
705 /// use tokio::sync::RwLock;
706 ///
707 /// # #[tokio::main(flavor = "current_thread")]
708 /// # async fn main() {
709 /// let lock = Arc::new(RwLock::new(1));
710 /// let c_lock = lock.clone();
711 ///
712 /// let v = lock.try_read_owned().unwrap();
713 /// assert_eq!(*v, 1);
714 ///
715 /// tokio::spawn(async move {
716 /// // While main has an active read lock, we acquire one too.
717 /// let n = c_lock.read_owned().await;
718 /// assert_eq!(*n, 1);
719 /// }).await.expect("The spawned task has panicked");
720 ///
721 /// // Drop the guard when spawned task finishes.
722 /// drop(v);
723 /// # }
724 /// ```
    pub fn try_read_owned(self: Arc<Self>) -> Result<OwnedRwLockReadGuard<T>, TryLockError> {
        // Try to take a single read permit without waiting. `NoPermits`
        // means every permit is currently held by readers or a writer.
        match self.s.try_acquire(1) {
            Ok(permit) => permit,
            Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
            Err(TryAcquireError::Closed) => unreachable!(),
        }

        // The guard keeps the lock alive by holding the `Arc` and releases
        // the permit when dropped.
        let guard = OwnedRwLockReadGuard {
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: self.resource_span.clone(),
            data: self.c.get(),
            lock: self,
            _p: PhantomData,
        };

        // Record the newly-acquired reader for instrumentation.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                current_readers = 1,
                current_readers.op = "add",
            )
        });

        Ok(guard)
    }
751
752 /// Locks this `RwLock` with exclusive write access, causing the current
753 /// task to yield until the lock has been acquired.
754 ///
755 /// The calling task will yield while other writers or readers currently
756 /// have access to the lock.
757 ///
758 /// Returns an RAII guard which will drop the write access of this `RwLock`
759 /// when dropped.
760 ///
761 /// # Cancel safety
762 ///
763 /// This method uses a queue to fairly distribute locks in the order they
764 /// were requested. Cancelling a call to `write` makes you lose your place
765 /// in the queue.
766 ///
767 /// # Examples
768 ///
769 /// ```
770 /// use tokio::sync::RwLock;
771 ///
772 /// # #[tokio::main(flavor = "current_thread")]
773 /// # async fn main() {
774 /// let lock = RwLock::new(1);
775 ///
776 /// let mut n = lock.write().await;
777 /// *n = 2;
778 /// # }
779 /// ```
    pub async fn write(&self) -> RwLockWriteGuard<'_, T> {
        let acquire_fut = async {
            // A writer takes every permit at once, excluding all readers and
            // other writers. `mr` is never 0 (enforced at construction).
            debug_assert_ne!(self.mr, 0);
            self.s.acquire(self.mr as usize).await.unwrap_or_else(|_| {
                // The semaphore was closed. but, we never explicitly close it, and we have a
                // handle to it through the Arc, which means that this can never happen.
                unreachable!()
            });

            // The guard returns all `permits_acquired` permits when dropped.
            RwLockWriteGuard {
                permits_acquired: self.mr,
                s: &self.s,
                data: self.c.get(),
                marker: marker::PhantomData,
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: self.resource_span.clone(),
            }
        };

        // When instrumented, wrap the acquire future so tokio-console can
        // observe the async operation being polled.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            self.resource_span.clone(),
            "RwLock::write",
            "poll",
            false,
        );

        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
        let guard = acquire_fut.await;

        // Record the write lock for instrumentation.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = true,
                write_locked.op = "override",
            )
        });

        guard
    }
822
823 /// Blockingly locks this `RwLock` with exclusive write access.
824 ///
825 /// This method is intended for use cases where you
826 /// need to use this rwlock in asynchronous code as well as in synchronous code.
827 ///
828 /// Returns an RAII guard which will drop the write access of this `RwLock` when dropped.
829 ///
830 /// # Panics
831 ///
832 /// This function panics if called within an asynchronous execution context.
833 ///
834 /// - If you find yourself in an asynchronous execution context and needing
835 /// to call some (synchronous) function which performs one of these
836 /// `blocking_` operations, then consider wrapping that call inside
837 /// [`spawn_blocking()`][crate::runtime::Handle::spawn_blocking]
838 /// (or [`block_in_place()`][crate::task::block_in_place]).
839 ///
840 /// # Examples
841 ///
842 /// ```
843 /// # #[cfg(not(target_family = "wasm"))]
844 /// # {
845 /// use std::sync::Arc;
846 /// use tokio::{sync::RwLock};
847 ///
848 /// #[tokio::main]
849 /// async fn main() {
850 /// let rwlock = Arc::new(RwLock::new(1));
851 /// let read_lock = rwlock.read().await;
852 ///
853 /// let blocking_task = tokio::task::spawn_blocking({
854 /// let rwlock = Arc::clone(&rwlock);
855 /// move || {
856 /// // This shall block until the `read_lock` is released.
857 /// let mut write_lock = rwlock.blocking_write();
858 /// *write_lock = 2;
859 /// }
860 /// });
861 ///
862 /// assert_eq!(*read_lock, 1);
863 /// // Release the last outstanding read lock.
864 /// drop(read_lock);
865 ///
866 /// // Await the completion of the blocking task.
867 /// blocking_task.await.unwrap();
868 ///
869 /// // Assert uncontended.
870 /// let read_lock = rwlock.try_read().unwrap();
871 /// assert_eq!(*read_lock, 2);
872 /// }
873 /// # }
874 /// ```
    #[track_caller]
    #[cfg(feature = "sync")]
    pub fn blocking_write(&self) -> RwLockWriteGuard<'_, T> {
        // Drive the async `write` to completion on the current thread; panics
        // if called from within an async execution context.
        crate::future::block_on(self.write())
    }
880
881 /// Locks this `RwLock` with exclusive write access, causing the current
882 /// task to yield until the lock has been acquired.
883 ///
884 /// The calling task will yield while other writers or readers currently
885 /// have access to the lock.
886 ///
887 /// This method is identical to [`RwLock::write`], except that the returned
888 /// guard references the `RwLock` with an [`Arc`] rather than by borrowing
889 /// it. Therefore, the `RwLock` must be wrapped in an `Arc` to call this
890 /// method, and the guard will live for the `'static` lifetime, as it keeps
891 /// the `RwLock` alive by holding an `Arc`.
892 ///
893 /// Returns an RAII guard which will drop the write access of this `RwLock`
894 /// when dropped.
895 ///
896 /// # Cancel safety
897 ///
898 /// This method uses a queue to fairly distribute locks in the order they
899 /// were requested. Cancelling a call to `write_owned` makes you lose your
900 /// place in the queue.
901 ///
902 /// # Examples
903 ///
904 /// ```
905 /// use std::sync::Arc;
906 /// use tokio::sync::RwLock;
907 ///
908 /// # #[tokio::main(flavor = "current_thread")]
909 /// # async fn main() {
910 /// let lock = Arc::new(RwLock::new(1));
911 ///
912 /// let mut n = lock.write_owned().await;
913 /// *n = 2;
914 ///}
915 /// ```
    pub async fn write_owned(self: Arc<Self>) -> OwnedRwLockWriteGuard<T> {
        // Clone the span up front because `self` is moved into the future.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let resource_span = self.resource_span.clone();

        let acquire_fut = async {
            // A writer takes every permit at once, excluding all readers and
            // other writers. `mr` is never 0 (enforced at construction).
            debug_assert_ne!(self.mr, 0);
            self.s.acquire(self.mr as usize).await.unwrap_or_else(|_| {
                // The semaphore was closed. but, we never explicitly close it, and we have a
                // handle to it through the Arc, which means that this can never happen.
                unreachable!()
            });

            // The guard keeps the lock alive by holding the `Arc` and
            // returns all `permits_acquired` permits when dropped.
            OwnedRwLockWriteGuard {
                #[cfg(all(tokio_unstable, feature = "tracing"))]
                resource_span: self.resource_span.clone(),
                permits_acquired: self.mr,
                data: self.c.get(),
                lock: self,
                _p: PhantomData,
            }
        };

        // When instrumented, wrap the acquire future so tokio-console can
        // observe the async operation being polled.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        let acquire_fut = trace::async_op(
            move || acquire_fut,
            resource_span,
            "RwLock::write_owned",
            "poll",
            false,
        );

        #[allow(clippy::let_and_return)] // this lint triggers when disabling tracing
        let guard = acquire_fut.await;

        // Record the write lock for instrumentation.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = true,
                write_locked.op = "override",
            )
        });

        guard
    }
961
962 /// Attempts to acquire this `RwLock` with exclusive write access.
963 ///
964 /// If the access couldn't be acquired immediately, returns [`TryLockError`].
965 /// Otherwise, an RAII guard is returned which will release write access
966 /// when dropped.
967 ///
968 /// [`TryLockError`]: TryLockError
969 ///
970 /// # Examples
971 ///
972 /// ```
973 /// use tokio::sync::RwLock;
974 ///
975 /// # #[tokio::main(flavor = "current_thread")]
976 /// # async fn main() {
977 /// let rw = RwLock::new(1);
978 ///
979 /// let v = rw.read().await;
980 /// assert_eq!(*v, 1);
981 ///
982 /// assert!(rw.try_write().is_err());
983 /// # }
984 /// ```
    pub fn try_write(&self) -> Result<RwLockWriteGuard<'_, T>, TryLockError> {
        // A writer must take every permit at once; if any reader or writer
        // holds a permit, fail immediately. `mr` is never 0 (enforced at
        // construction).
        debug_assert_ne!(self.mr, 0);
        match self.s.try_acquire(self.mr as usize) {
            Ok(permit) => permit,
            Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
            Err(TryAcquireError::Closed) => unreachable!(),
        }

        // The guard returns all `permits_acquired` permits when dropped.
        let guard = RwLockWriteGuard {
            permits_acquired: self.mr,
            s: &self.s,
            data: self.c.get(),
            marker: marker::PhantomData,
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: self.resource_span.clone(),
        };

        // Record the write lock for instrumentation.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        self.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = true,
                write_locked.op = "override",
            )
        });

        Ok(guard)
    }
1013
1014 /// Attempts to acquire this `RwLock` with exclusive write access.
1015 ///
1016 /// If the access couldn't be acquired immediately, returns [`TryLockError`].
1017 /// Otherwise, an RAII guard is returned which will release write access
1018 /// when dropped.
1019 ///
1020 /// This method is identical to [`RwLock::try_write`], except that the
1021 /// returned guard references the `RwLock` with an [`Arc`] rather than by
1022 /// borrowing it. Therefore, the `RwLock` must be wrapped in an `Arc` to
1023 /// call this method, and the guard will live for the `'static` lifetime,
1024 /// as it keeps the `RwLock` alive by holding an `Arc`.
1025 ///
1026 /// [`TryLockError`]: TryLockError
1027 ///
1028 /// # Examples
1029 ///
1030 /// ```
1031 /// use std::sync::Arc;
1032 /// use tokio::sync::RwLock;
1033 ///
1034 /// # #[tokio::main(flavor = "current_thread")]
1035 /// # async fn main() {
1036 /// let rw = Arc::new(RwLock::new(1));
1037 ///
1038 /// let v = Arc::clone(&rw).read_owned().await;
1039 /// assert_eq!(*v, 1);
1040 ///
1041 /// assert!(rw.try_write_owned().is_err());
1042 /// # }
1043 /// ```
    pub fn try_write_owned(self: Arc<Self>) -> Result<OwnedRwLockWriteGuard<T>, TryLockError> {
        // A writer must take every permit at once; if any reader or writer
        // holds a permit, fail immediately. `mr` is never 0 (enforced at
        // construction).
        debug_assert_ne!(self.mr, 0);
        match self.s.try_acquire(self.mr as usize) {
            Ok(permit) => permit,
            Err(TryAcquireError::NoPermits) => return Err(TryLockError(())),
            Err(TryAcquireError::Closed) => unreachable!(),
        }

        // The guard keeps the lock alive by holding the `Arc` and returns
        // all `permits_acquired` permits when dropped.
        let guard = OwnedRwLockWriteGuard {
            #[cfg(all(tokio_unstable, feature = "tracing"))]
            resource_span: self.resource_span.clone(),
            permits_acquired: self.mr,
            data: self.c.get(),
            lock: self,
            _p: PhantomData,
        };

        // Record the write lock for instrumentation.
        #[cfg(all(tokio_unstable, feature = "tracing"))]
        guard.resource_span.in_scope(|| {
            tracing::trace!(
                target: "runtime::resource::state_update",
                write_locked = true,
                write_locked.op = "override",
            )
        });

        Ok(guard)
    }
1072
1073 /// Returns a mutable reference to the underlying data.
1074 ///
1075 /// Since this call borrows the `RwLock` mutably, no actual locking needs to
1076 /// take place -- the mutable borrow statically guarantees no locks exist.
1077 ///
1078 /// # Examples
1079 ///
1080 /// ```
1081 /// use tokio::sync::RwLock;
1082 ///
1083 /// fn main() {
1084 /// let mut lock = RwLock::new(1);
1085 ///
1086 /// let n = lock.get_mut();
1087 /// *n = 2;
1088 /// }
1089 /// ```
    pub fn get_mut(&mut self) -> &mut T {
        // `&mut self` statically guarantees no guards exist, so no permits
        // need to be acquired.
        self.c.get_mut()
    }
1093
    /// Consumes the lock, returning the underlying data.
    pub fn into_inner(self) -> T
    where
        T: Sized,
    {
        // Taking `self` by value statically guarantees no guards exist.
        self.c.into_inner()
    }
1101}
1102
1103impl<T> From<T> for RwLock<T> {
1104 fn from(s: T) -> Self {
1105 Self::new(s)
1106 }
1107}
1108
1109impl<T> Default for RwLock<T>
1110where
1111 T: Default,
1112{
1113 fn default() -> Self {
1114 Self::new(T::default())
1115 }
1116}
1117
1118impl<T: ?Sized> std::fmt::Debug for RwLock<T>
1119where
1120 T: std::fmt::Debug,
1121{
1122 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1123 let mut d = f.debug_struct("RwLock");
1124 match self.try_read() {
1125 Ok(inner) => d.field("data", &&*inner),
1126 Err(_) => d.field("data", &format_args!("<locked>")),
1127 };
1128 d.finish()
1129 }
1130}