// sc_consensus/shared_data.rs

// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//! Provides a generic wrapper around shared data. See [`SharedData`] for more information.

use parking_lot::{Condvar, MappedMutexGuard, Mutex, MutexGuard};
use std::sync::Arc;

/// Created by [`SharedDataLocked::release_mutex`].
///
/// As long as the object isn't dropped, the shared data is locked. It is advised to drop this
/// object when the shared data doesn't need to be locked anymore. To get access to the shared data
/// [`Self::upgrade`] is provided.
#[must_use = "Shared data will be unlocked on drop!"]
pub struct SharedDataLockedUpgradable<T> {
	// Handle back to the owning [`SharedData`]; used by `upgrade` to re-lock the
	// mutex and by `Drop` to clear the `locked` flag and wake waiting threads.
	shared_data: SharedData<T>,
}
33
impl<T> SharedDataLockedUpgradable<T> {
	/// Upgrade to a *real* mutex guard that will give access to the inner data.
	///
	/// Every call to this function will reacquire the mutex again. The returned guard
	/// only provides access to the wrapped data, not to the internal `locked` flag.
	pub fn upgrade(&mut self) -> MappedMutexGuard<T> {
		MutexGuard::map(self.shared_data.inner.lock(), |i| &mut i.shared_data)
	}
}
42
43impl<T> Drop for SharedDataLockedUpgradable<T> {
44	fn drop(&mut self) {
45		let mut inner = self.shared_data.inner.lock();
46		// It should not be locked anymore
47		inner.locked = false;
48
49		// Notify all waiting threads.
50		self.shared_data.cond_var.notify_all();
51	}
52}
53
/// Created by [`SharedData::shared_data_locked`].
///
/// As long as this object isn't dropped, the shared data is held in a mutex guard and the shared
/// data is tagged as locked. Access to the shared data is provided through
/// [`Deref`](std::ops::Deref) and [`DerefMut`](std::ops::DerefMut). The trick is to use
/// [`Self::release_mutex`] to release the mutex, but still keep the shared data locked. This means
/// every other thread trying to access the shared data in this time will need to wait until this
/// lock is freed.
///
/// If this object is dropped without calling [`Self::release_mutex`], the lock will be dropped
/// immediately.
#[must_use = "Shared data will be unlocked on drop!"]
pub struct SharedDataLocked<'a, T> {
	/// The current active mutex guard holding the inner data.
	inner: MutexGuard<'a, SharedDataInner<T>>,
	/// The [`SharedData`] instance that created this instance.
	///
	/// This instance is only taken on drop or when calling [`Self::release_mutex`].
	shared_data: Option<SharedData<T>>,
}
74
75impl<'a, T> SharedDataLocked<'a, T> {
76	/// Release the mutex, but keep the shared data locked.
77	pub fn release_mutex(mut self) -> SharedDataLockedUpgradable<T> {
78		SharedDataLockedUpgradable {
79			shared_data: self.shared_data.take().expect("`shared_data` is only taken on drop; qed"),
80		}
81	}
82}
83
84impl<'a, T> Drop for SharedDataLocked<'a, T> {
85	fn drop(&mut self) {
86		if let Some(shared_data) = self.shared_data.take() {
87			// If the `shared_data` is still set, it means [`Self::release_mutex`] wasn't
88			// called and the lock should be released.
89			self.inner.locked = false;
90
91			// Notify all waiting threads about the released lock.
92			shared_data.cond_var.notify_all();
93		}
94	}
95}
96
impl<'a, T> std::ops::Deref for SharedDataLocked<'a, T> {
	type Target = T;

	// Shared access to the wrapped data while the mutex guard is held.
	fn deref(&self) -> &Self::Target {
		&self.inner.shared_data
	}
}
104
impl<'a, T> std::ops::DerefMut for SharedDataLocked<'a, T> {
	// Exclusive access to the wrapped data while the mutex guard is held.
	fn deref_mut(&mut self) -> &mut Self::Target {
		&mut self.inner.shared_data
	}
}
110
/// Holds the shared data and if the shared data is currently locked.
///
/// For more information see [`SharedData`].
struct SharedDataInner<T> {
	/// The actual shared data that is protected here against concurrent access.
	shared_data: T,
	/// Is `shared_data` currently locked and can not be accessed?
	///
	/// This is the "logical" lock that can outlive the mutex guard; threads wait
	/// on the associated condition variable while it is `true`.
	locked: bool,
}
120
/// Some shared data that provides support for locking this shared data for some time.
///
/// When working with consensus engines there is often data that needs to be shared between multiple
/// parts of the system, like block production and block import. This struct provides an abstraction
/// for this shared data in a generic way.
///
/// The pain point when sharing this data is often the usage of mutex guards in an async context as
/// this doesn't work for most of them as these guards don't implement `Send`. This abstraction
/// provides a way to lock the shared data, while not having the mutex locked. So, the data stays
/// locked and we are still able to hold this lock over an `await` call.
///
/// # Example
///
/// ```
/// # use sc_consensus::shared_data::SharedData;
///
/// let shared_data = SharedData::new(String::from("hello world"));
///
/// let lock = shared_data.shared_data_locked();
///
/// let shared_data2 = shared_data.clone();
/// let join_handle1 = std::thread::spawn(move || {
///     // This will need to wait for the outer lock to be released before it can access the data.
///     shared_data2.shared_data().push_str("1");
/// });
///
/// assert_eq!(*lock, "hello world");
///
/// // Let us release the mutex, but we still keep it locked.
/// // Now we could call `await` for example.
/// let mut lock = lock.release_mutex();
///
/// let shared_data2 = shared_data.clone();
/// let join_handle2 = std::thread::spawn(move || {
///     shared_data2.shared_data().push_str("2");
/// });
///
/// // We still have the lock and can upgrade it to access the data.
/// assert_eq!(*lock.upgrade(), "hello world");
/// lock.upgrade().push_str("3");
///
/// drop(lock);
/// join_handle1.join().unwrap();
/// join_handle2.join().unwrap();
///
/// let data = shared_data.shared_data();
/// // As we don't know the order of the threads, we need to check for both combinations
/// assert!(*data == "hello world321" || *data == "hello world312");
/// ```
///
/// # Deadlock
///
/// Be aware that this data structure doesn't give you any guarantees that you can not create a
/// deadlock. Using [`release_mutex`](SharedDataLocked::release_mutex) followed by a call
/// to [`shared_data`](Self::shared_data) in the same thread will make your program deadlock.
/// The same applies when you are using a single threaded executor.
pub struct SharedData<T> {
	// The data together with the `locked` flag, protected by a mutex.
	inner: Arc<Mutex<SharedDataInner<T>>>,
	// Threads wait here while `locked` is `true`; notified on every unlock.
	cond_var: Arc<Condvar>,
}
181
182impl<T> Clone for SharedData<T> {
183	fn clone(&self) -> Self {
184		Self { inner: self.inner.clone(), cond_var: self.cond_var.clone() }
185	}
186}
187
188impl<T> SharedData<T> {
189	/// Create a new instance of [`SharedData`] to share the given `shared_data`.
190	pub fn new(shared_data: T) -> Self {
191		Self {
192			inner: Arc::new(Mutex::new(SharedDataInner { shared_data, locked: false })),
193			cond_var: Default::default(),
194		}
195	}
196
197	/// Acquire access to the shared data.
198	///
199	/// This will give mutable access to the shared data. After the returned mutex guard is dropped,
200	/// the shared data is accessible by other threads. So, this function should be used when
201	/// reading/writing of the shared data in a local context is required.
202	///
203	/// When requiring to lock shared data for some longer time, even with temporarily releasing the
204	/// lock, [`Self::shared_data_locked`] should be used.
205	pub fn shared_data(&self) -> MappedMutexGuard<T> {
206		let mut guard = self.inner.lock();
207
208		while guard.locked {
209			self.cond_var.wait(&mut guard);
210		}
211
212		debug_assert!(!guard.locked);
213
214		MutexGuard::map(guard, |i| &mut i.shared_data)
215	}
216
217	/// Acquire access to the shared data and lock it.
218	///
219	/// This will give mutable access to the shared data. The returned [`SharedDataLocked`]
220	/// provides the function [`SharedDataLocked::release_mutex`] to release the mutex, but
221	/// keeping the data locked. This is useful in async contexts for example where the data needs
222	/// to be locked, but a mutex guard can not be held.
223	///
224	/// For an example see [`SharedData`].
225	pub fn shared_data_locked(&self) -> SharedDataLocked<T> {
226		let mut guard = self.inner.lock();
227
228		while guard.locked {
229			self.cond_var.wait(&mut guard);
230		}
231
232		debug_assert!(!guard.locked);
233		guard.locked = true;
234
235		SharedDataLocked { inner: guard, shared_data: Some(self.clone()) }
236	}
237}
238
#[cfg(test)]
mod tests {
	use super::*;

	#[test]
	fn shared_data_locking_works() {
		const THREADS: u32 = 100;
		let data = SharedData::new(0u32);

		// Hold the lock before any worker is spawned, so all of them start out blocked.
		let main_lock = data.shared_data_locked();

		for worker in 0..THREADS {
			let data = data.clone();
			std::thread::spawn(move || {
				if worker % 2 == 1 {
					// Odd workers take the plain mutex path.
					*data.shared_data() += 1;
				} else {
					// Even workers exercise the release/upgrade path.
					let mut lock = data.shared_data_locked().release_mutex();
					// Give the other threads some time to wake up
					std::thread::sleep(std::time::Duration::from_millis(10));
					*lock.upgrade() += 1;
				}
			});
		}

		let main_lock = main_lock.release_mutex();
		std::thread::sleep(std::time::Duration::from_millis(100));
		drop(main_lock);

		// Every worker adds exactly one; spin until all increments have landed.
		while *data.shared_data() < THREADS {
			std::thread::sleep(std::time::Duration::from_millis(100));
		}
	}
}