
staging_tracking_allocator/lib.rs

// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot.  If not, see <http://www.gnu.org/licenses/>.

//! Tracking/limiting global allocator. Calculates the peak allocation between two checkpoints
//! for the whole process. Accepts an optional limit and a failure handler which is called if
//! the limit is exceeded.
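//!
//! # Example
//!
//! A minimal usage sketch (not part of the original documentation), assuming the allocator is
//! installed as the global allocator on top of `std::alloc::System` and that the crate is named
//! `staging_tracking_allocator` (inferred from the file path):
//!
//! ```ignore
//! use staging_tracking_allocator::TrackingAllocator;
//!
//! #[global_allocator]
//! static ALLOC: TrackingAllocator<std::alloc::System> = TrackingAllocator(std::alloc::System);
//!
//! fn main() {
//! 	// SAFETY: no failure handler is supplied, so nothing runs under the allocator lock.
//! 	unsafe { ALLOC.start_tracking(None, None) };
//!
//! 	let buffer = vec![0u8; 1024 * 1024];
//! 	drop(buffer);
//!
//! 	// Peak allocation (in bytes) observed process-wide since `start_tracking` was called.
//! 	let peak = ALLOC.end_tracking();
//! 	println!("peak allocation: {peak} bytes");
//! }
//! ```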

use core::{
	alloc::{GlobalAlloc, Layout},
	ops::{Deref, DerefMut},
};
use std::{
	cell::UnsafeCell,
	ptr::null_mut,
	sync::atomic::{AtomicBool, Ordering},
};

struct Spinlock<T> {
	lock: AtomicBool,
	data: UnsafeCell<T>,
}

struct SpinlockGuard<'a, T: 'a> {
	lock: &'a Spinlock<T>,
}

// SAFETY: We require that the data inside the `Spinlock` is `Send`, so it can be sent to and
// accessed by any thread as long as only one thread accesses it at a time. The `Spinlock`
// provides an exclusive lock over the data, guaranteeing that multiple threads cannot access
// it at the same time; hence it implements `Sync` (that is, it can be accessed concurrently
// from multiple threads, even though `T` itself might not necessarily be `Sync`).
unsafe impl<T: Send> Sync for Spinlock<T> {}

impl<T> Spinlock<T> {
	pub const fn new(t: T) -> Spinlock<T> {
		Spinlock { lock: AtomicBool::new(false), data: UnsafeCell::new(t) }
	}

	#[inline]
	pub fn lock(&self) -> SpinlockGuard<T> {
		loop {
			// Try to acquire the lock.
			if self
				.lock
				.compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
				.is_ok()
			{
				return SpinlockGuard { lock: self }
			}
			// We failed to acquire the lock; wait until it's unlocked.
			//
			// In theory this should result in less coherency traffic as unlike `compare_exchange`
			// it is a read-only operation, so multiple cores can execute it simultaneously
			// without taking an exclusive lock over the cache line.
			while self.lock.load(Ordering::Relaxed) {
				std::hint::spin_loop();
			}
		}
	}

	// SAFETY: This must only be called from the guard's destructor. Calling it explicitly while
	// the guard is alive is undefined behavior, as it breaks the safety contract of `Deref` and
	// `DerefMut`, which imply that the lock is held at the moment of dereferencing.
	#[inline]
	unsafe fn unlock(&self) {
		self.lock.store(false, Ordering::Release);
	}
}

impl<T> Deref for SpinlockGuard<'_, T> {
	type Target = T;

	fn deref(&self) -> &T {
		// SAFETY: It is safe to dereference the `UnsafeCell`'s underlying data through the
		// guard, as the presence of the guard means the data is already locked.
		unsafe { &*self.lock.data.get() }
	}
}

impl<T> DerefMut for SpinlockGuard<'_, T> {
	fn deref_mut(&mut self) -> &mut T {
		// SAFETY: Same as for `Deref::deref`.
		unsafe { &mut *self.lock.data.get() }
	}
}

impl<T> Drop for SpinlockGuard<'_, T> {
	fn drop(&mut self) {
		// SAFETY: Calling `unlock` is only safe when it's guaranteed no guard outlives the
		// unlocking point; here, the guard is dropped, so it is safe.
		unsafe { self.lock.unlock() }
	}
}

struct TrackingAllocatorData {
	/// Net allocation since tracking started, in bytes.
	current: isize,
	/// Peak value of `current` since tracking started, in bytes.
	peak: isize,
	/// Allocation limit in bytes; `0` means no limit is enforced.
	limit: isize,
	/// Called (under the allocator lock) when the limit is exceeded.
	failure_handler: Option<Box<dyn Fn() + Send>>,
}

impl TrackingAllocatorData {
	fn start_tracking(
		mut guard: SpinlockGuard<Self>,
		limit: isize,
		failure_handler: Option<Box<dyn Fn() + Send>>,
	) {
		guard.current = 0;
		guard.peak = 0;
		guard.limit = limit;
		// Cannot drop the old handler yet, as that would trigger a deallocation while the lock
		// is still held.
		let old_handler = guard.failure_handler.take();
		guard.failure_handler = failure_handler;
		drop(guard);
		drop(old_handler);
	}

	fn end_tracking(mut guard: SpinlockGuard<Self>) -> isize {
		let peak = guard.peak;
		guard.limit = 0;
		// Cannot drop the old handler yet, as that would trigger a deallocation while the lock
		// is still held.
		let old_handler = guard.failure_handler.take();
		drop(guard);
		drop(old_handler);
		peak
	}

	// Returns `Some(guard)` if the limit is exceeded, handing the still-held lock back to the
	// caller so the failure path runs under it; returns `None` if the allocation may proceed.
	#[inline]
	fn track_and_check_limits(
		mut guard: SpinlockGuard<Self>,
		alloc: isize,
	) -> Option<SpinlockGuard<Self>> {
		guard.current += alloc;
		if guard.current > guard.peak {
			guard.peak = guard.current;
		}
		if guard.limit == 0 || guard.peak <= guard.limit {
			None
		} else {
			Some(guard)
		}
	}
}

static ALLOCATOR_DATA: Spinlock<TrackingAllocatorData> =
	Spinlock::new(TrackingAllocatorData { current: 0, peak: 0, limit: 0, failure_handler: None });

pub struct TrackingAllocator<A: GlobalAlloc>(pub A);

impl<A: GlobalAlloc> TrackingAllocator<A> {
	/// Start tracking memory allocations and deallocations.
	///
	/// # Safety
	///
	/// The failure handler is called while the allocator is in a locked state. Thus, no
	/// allocations or deallocations are allowed inside the failure handler; otherwise, a
	/// deadlock will occur.
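	///
	/// # Example
	///
	/// An illustrative sketch (not part of the original docs): the handler below only flips an
	/// atomic flag, so it performs no allocation while the allocator lock is held. `ALLOC` is
	/// assumed to be this allocator installed as the global allocator; the boxed closure itself
	/// is allocated here, at the call site, not inside the handler.
	///
	/// ```ignore
	/// use std::sync::atomic::{AtomicBool, Ordering};
	///
	/// static LIMIT_EXCEEDED: AtomicBool = AtomicBool::new(false);
	///
	/// let handler: Box<dyn Fn() + Send> =
	/// 	Box::new(|| LIMIT_EXCEEDED.store(true, Ordering::SeqCst));
	/// unsafe {
	/// 	// Fail allocations once the process-wide peak exceeds 64 MiB.
	/// 	ALLOC.start_tracking(Some(64 * 1024 * 1024), Some(handler));
	/// }
	/// ```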
	pub unsafe fn start_tracking(
		&self,
		limit: Option<isize>,
		failure_handler: Option<Box<dyn Fn() + Send>>,
	) {
		TrackingAllocatorData::start_tracking(
			ALLOCATOR_DATA.lock(),
			limit.unwrap_or(0),
			failure_handler,
		);
	}

	/// End tracking and return the peak allocation value in bytes (as `isize`). The peak
	/// allocation value is not guaranteed to be non-zero or positive.
	pub fn end_tracking(&self) -> isize {
		TrackingAllocatorData::end_tracking(ALLOCATOR_DATA.lock())
	}
}

#[cold]
#[inline(never)]
unsafe fn fail_allocation(guard: SpinlockGuard<TrackingAllocatorData>) -> *mut u8 {
	if let Some(failure_handler) = &guard.failure_handler {
		failure_handler()
	}
	null_mut()
}

unsafe impl<A: GlobalAlloc> GlobalAlloc for TrackingAllocator<A> {
	// SAFETY:
	// * The wrapped methods are as safe as the underlying allocator implementation is

	#[inline]
	unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
		let guard = ALLOCATOR_DATA.lock();
		if let Some(guard) =
			TrackingAllocatorData::track_and_check_limits(guard, layout.size() as isize)
		{
			fail_allocation(guard)
		} else {
			self.0.alloc(layout)
		}
	}

	#[inline]
	unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
		let guard = ALLOCATOR_DATA.lock();
		if let Some(guard) =
			TrackingAllocatorData::track_and_check_limits(guard, layout.size() as isize)
		{
			fail_allocation(guard)
		} else {
			self.0.alloc_zeroed(layout)
		}
	}

	#[inline]
	unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
		let guard = ALLOCATOR_DATA.lock();
		TrackingAllocatorData::track_and_check_limits(guard, -(layout.size() as isize));
		self.0.dealloc(ptr, layout)
	}

	#[inline]
	unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
		let guard = ALLOCATOR_DATA.lock();
		if let Some(guard) = TrackingAllocatorData::track_and_check_limits(
			guard,
			(new_size as isize) - (layout.size() as isize),
		) {
			fail_allocation(guard)
		} else {
			self.0.realloc(ptr, layout, new_size)
		}
	}
}
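
// A minimal test sketch (not from the original file): it installs the tracking allocator over the
// system allocator for the test binary and checks that a known allocation shows up in the
// reported peak. The lower bound is a soft assumption, since other threads of the test harness
// may also allocate and deallocate concurrently.
#[cfg(test)]
mod tests {
	use super::*;

	#[global_allocator]
	static ALLOC: TrackingAllocator<std::alloc::System> = TrackingAllocator(std::alloc::System);

	#[test]
	fn tracks_peak_allocation() {
		// SAFETY: no failure handler is installed, so nothing runs under the allocator lock.
		unsafe { ALLOC.start_tracking(None, None) };
		let buffer = vec![0u8; 64 * 1024];
		let peak = ALLOC.end_tracking();
		drop(buffer);
		// The peak should cover at least our buffer, assuming no comparably large deallocations
		// happened on other threads between the two checkpoints.
		assert!(peak >= 64 * 1024);
	}
}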