// Copyright (C) Parity Technologies (UK) Ltd.
// This file is part of Polkadot.

// Polkadot is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Polkadot is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

//! Tracking/limiting global allocator. Calculates the peak allocation between two checkpoints for
//! the whole process. Accepts an optional limit and a failure handler which is called if the limit
//! is exceeded.
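//!
//! # Example
//!
//! A minimal usage sketch, assuming `std::alloc::System` as the underlying allocator
//! (`do_some_work` is a hypothetical workload, and the limit value is arbitrary):
//!
//! ```rust,ignore
//! #[global_allocator]
//! static ALLOC: TrackingAllocator<std::alloc::System> =
//!     TrackingAllocator(std::alloc::System);
//!
//! // SAFETY: no failure handler is installed, so nothing can allocate under the lock.
//! unsafe { ALLOC.start_tracking(Some(128 * 1024 * 1024), None) };
//! do_some_work();
//! let peak = ALLOC.end_tracking(); // Peak allocation in bytes between the two checkpoints.
//! ```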

use core::{
    alloc::{GlobalAlloc, Layout},
    ops::{Deref, DerefMut},
};
use std::{
    cell::UnsafeCell,
    ptr::null_mut,
    sync::atomic::{AtomicBool, Ordering},
};

struct Spinlock<T> {
    lock: AtomicBool,
    data: UnsafeCell<T>,
}

struct SpinlockGuard<'a, T: 'a> {
    lock: &'a Spinlock<T>,
}

// SAFETY: We require that the data inside the `Spinlock` is `Send`, so it can be sent
// and accessed by any thread as long as it's accessed by only one thread at a time.
// The `Spinlock` provides an exclusive lock over it, so it guarantees that multiple
// threads cannot access it at the same time, hence it implements `Sync` (that is, it can be
// accessed concurrently from multiple threads, even though `T` itself might not
// necessarily be `Sync` too).
unsafe impl<T: Send> Sync for Spinlock<T> {}

impl<T> Spinlock<T> {
    pub const fn new(t: T) -> Spinlock<T> {
        Spinlock { lock: AtomicBool::new(false), data: UnsafeCell::new(t) }
    }

    #[inline]
    pub fn lock(&self) -> SpinlockGuard<T> {
        loop {
            // Try to acquire the lock.
            if self
                .lock
                .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
            {
                return SpinlockGuard { lock: self }
            }
            // We failed to acquire the lock; wait until it's unlocked.
            //
            // In theory this should result in less coherency traffic as unlike `compare_exchange`
            // it is a read-only operation, so multiple cores can execute it simultaneously
            // without taking an exclusive lock over the cache line.
            while self.lock.load(Ordering::Relaxed) {
                std::hint::spin_loop();
            }
        }
    }

    // SAFETY: This must only be called from the guard's destructor. Calling it explicitly while
    // the guard is alive is undefined behavior, as it breaks the safety contract of `Deref` and
    // `DerefMut`, which implies that the lock is held at the moment of dereferencing.
    #[inline]
    unsafe fn unlock(&self) {
        self.lock.store(false, Ordering::Release);
    }
}

impl<T> Deref for SpinlockGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &T {
        // SAFETY: It is safe to dereference a guard to the `UnsafeCell`'s underlying data as the
        // presence of the guard means the data is already locked.
        unsafe { &*self.lock.data.get() }
    }
}

impl<T> DerefMut for SpinlockGuard<'_, T> {
    fn deref_mut(&mut self) -> &mut T {
        // SAFETY: Same as for `Deref::deref`.
        unsafe { &mut *self.lock.data.get() }
    }
}

impl<T> Drop for SpinlockGuard<'_, T> {
    fn drop(&mut self) {
        // SAFETY: Calling `unlock` is only safe when it's guaranteed no guard outlives the
        // unlocking point; here, the guard is dropped, so it is safe.
        unsafe { self.lock.unlock() }
    }
}

struct TrackingAllocatorData {
    current: isize,
    peak: isize,
    limit: isize,
    failure_handler: Option<Box<dyn Fn() + Send>>,
}

impl TrackingAllocatorData {
    fn start_tracking(
        mut guard: SpinlockGuard<Self>,
        limit: isize,
        failure_handler: Option<Box<dyn Fn() + Send>>,
    ) {
        guard.current = 0;
        guard.peak = 0;
        guard.limit = limit;
        // Cannot drop the old handler yet, as dropping it would trigger a deallocation
        // while the lock is still held.
        let old_handler = guard.failure_handler.take();
        guard.failure_handler = failure_handler;
        drop(guard);
        drop(old_handler);
    }

    fn end_tracking(mut guard: SpinlockGuard<Self>) -> isize {
        let peak = guard.peak;
        guard.limit = 0;
        // Cannot drop the old handler yet, as dropping it would trigger a deallocation
        // while the lock is still held.
        let old_handler = guard.failure_handler.take();
        drop(guard);
        drop(old_handler);
        peak
    }

    #[inline]
    fn track_and_check_limits(
        mut guard: SpinlockGuard<Self>,
        alloc: isize,
    ) -> Option<SpinlockGuard<Self>> {
        guard.current += alloc;
        if guard.current > guard.peak {
            guard.peak = guard.current;
        }
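        // A limit of 0 means no limit is enforced; only the peak is tracked.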
        if guard.limit == 0 || guard.peak <= guard.limit {
            None
        } else {
            Some(guard)
        }
    }
}

static ALLOCATOR_DATA: Spinlock<TrackingAllocatorData> =
    Spinlock::new(TrackingAllocatorData { current: 0, peak: 0, limit: 0, failure_handler: None });
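
/// A tracking/limiting wrapper around an arbitrary underlying global allocator `A`.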
pub struct TrackingAllocator<A: GlobalAlloc>(pub A);

impl<A: GlobalAlloc> TrackingAllocator<A> {
    /// Start tracking memory allocations and deallocations.
    ///
    /// # Safety
    ///
    /// The failure handler is called with the allocator in the locked state. Thus, no
    /// allocations or deallocations are allowed inside the failure handler; otherwise, a
    /// deadlock will occur.
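    ///
    /// A handler that obeys this contract might, for example, only flip a pre-existing atomic
    /// flag (a sketch; `ALLOC` and `LIMIT_HIT` are hypothetical statics):
    ///
    /// ```rust,ignore
    /// static LIMIT_HIT: AtomicBool = AtomicBool::new(false);
    ///
    /// // SAFETY: storing to an atomic performs no allocation. The `Box` itself is
    /// // allocated by the caller, before the allocator lock is taken.
    /// unsafe {
    ///     ALLOC.start_tracking(
    ///         Some(128 * 1024 * 1024),
    ///         Some(Box::new(|| LIMIT_HIT.store(true, Ordering::Relaxed))),
    ///     )
    /// };
    /// ```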
    pub unsafe fn start_tracking(
        &self,
        limit: Option<isize>,
        failure_handler: Option<Box<dyn Fn() + Send>>,
    ) {
        TrackingAllocatorData::start_tracking(
            ALLOCATOR_DATA.lock(),
            limit.unwrap_or(0),
            failure_handler,
        );
    }

    /// End tracking and return the peak allocation value in bytes (as `isize`). The peak
    /// allocation value is not guaranteed to be non-zero; e.g., it is zero if nothing was
    /// allocated between the two checkpoints.
    pub fn end_tracking(&self) -> isize {
        TrackingAllocatorData::end_tracking(ALLOCATOR_DATA.lock())
    }
}

#[cold]
#[inline(never)]
unsafe fn fail_allocation(guard: SpinlockGuard<TrackingAllocatorData>) -> *mut u8 {
    if let Some(failure_handler) = &guard.failure_handler {
        failure_handler()
    }
    null_mut()
}

unsafe impl<A: GlobalAlloc> GlobalAlloc for TrackingAllocator<A> {
    // SAFETY:
    // * The wrapped methods are as safe as the underlying allocator implementation is

    #[inline]
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let guard = ALLOCATOR_DATA.lock();
        if let Some(guard) =
            TrackingAllocatorData::track_and_check_limits(guard, layout.size() as isize)
        {
            fail_allocation(guard)
        } else {
            self.0.alloc(layout)
        }
    }

    #[inline]
    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
        let guard = ALLOCATOR_DATA.lock();
        if let Some(guard) =
            TrackingAllocatorData::track_and_check_limits(guard, layout.size() as isize)
        {
            fail_allocation(guard)
        } else {
            self.0.alloc_zeroed(layout)
        }
    }

    #[inline]
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let guard = ALLOCATOR_DATA.lock();
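        // The result is ignored: a deallocation must always be allowed to proceed,
        // even if the limit has already been exceeded.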
        TrackingAllocatorData::track_and_check_limits(guard, -(layout.size() as isize));
        self.0.dealloc(ptr, layout)
    }

    #[inline]
    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
        let guard = ALLOCATOR_DATA.lock();
        if let Some(guard) = TrackingAllocatorData::track_and_check_limits(
            guard,
            (new_size as isize) - (layout.size() as isize),
        ) {
            fail_allocation(guard)
        } else {
            self.0.realloc(ptr, layout, new_size)
        }
    }
}
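
// A minimal sanity-check sketch: it exercises peak tracking and the limit path by calling the
// allocator methods directly, without installing the tracker as `#[global_allocator]`; the
// sizes chosen are arbitrary.
#[cfg(test)]
mod tests {
    use super::*;
    use std::alloc::System;

    #[test]
    fn tracks_peak_and_enforces_limit() {
        let tracker = TrackingAllocator(System);
        let layout = Layout::from_size_align(4096, 8).unwrap();

        // SAFETY: no failure handler is installed, so nothing allocates under the lock.
        unsafe { tracker.start_tracking(Some(1024), None) };

        // SAFETY: `layout` is valid and non-zero-sized.
        let ptr = unsafe { tracker.alloc(layout) };
        // 4096 bytes exceed the 1024-byte limit, so the allocation is refused...
        assert!(ptr.is_null());
        // ...but the attempted size still counts towards the recorded peak.
        assert_eq!(tracker.end_tracking(), 4096);
    }
}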