// staging_tracking_allocator/lib.rs
use core::{
22 alloc::{GlobalAlloc, Layout},
23 ops::{Deref, DerefMut},
24};
25use std::{
26 cell::UnsafeCell,
27 ptr::null_mut,
28 sync::atomic::{AtomicBool, Ordering},
29};
30
/// A minimal test-and-set spinlock protecting `data`.
///
/// Taken on every allocation/deallocation by the `GlobalAlloc` impl below.
/// NOTE(review): a hand-rolled lock is presumably used here because a std
/// lock could allocate and re-enter the allocator — confirm.
struct Spinlock<T> {
    // `true` while some thread holds the lock.
    lock: AtomicBool,
    // The protected value; only accessed through a held `SpinlockGuard`.
    data: UnsafeCell<T>,
}
35
/// RAII guard returned by `Spinlock::lock`; releases the lock when dropped.
///
/// NOTE(review): this guard is auto-`Sync` whenever `T: Send` (through the
/// manual `Sync` impl on `Spinlock`), which would allow `&T` to be observed
/// from several threads even when `T` is not `Sync` — std's `MutexGuard`
/// requires `T: Sync` for this. Harmless for the private use in this file,
/// but worth tightening if these types are ever exposed.
struct SpinlockGuard<'a, T: 'a> {
    lock: &'a Spinlock<T>,
}
39
// SAFETY: the spinlock serializes all access to `data`, so sharing a
// `Spinlock` between threads is sound as long as the contained value may be
// moved between threads (`T: Send`) — the same bound `std::sync::Mutex` uses.
unsafe impl<T: Send> Sync for Spinlock<T> {}
47
48impl<T> Spinlock<T> {
49 pub const fn new(t: T) -> Spinlock<T> {
50 Spinlock { lock: AtomicBool::new(false), data: UnsafeCell::new(t) }
51 }
52
53 #[inline]
54 pub fn lock(&self) -> SpinlockGuard<T> {
55 loop {
56 if self
58 .lock
59 .compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed)
60 .is_ok()
61 {
62 return SpinlockGuard { lock: self }
63 }
64 while self.lock.load(Ordering::Relaxed) {
70 std::hint::spin_loop();
71 }
72 }
73 }
74
75 #[inline]
79 unsafe fn unlock(&self) {
80 self.lock.store(false, Ordering::Release);
81 }
82}
83
84impl<T> Deref for SpinlockGuard<'_, T> {
85 type Target = T;
86
87 fn deref(&self) -> &T {
88 unsafe { &*self.lock.data.get() }
91 }
92}
93
94impl<T> DerefMut for SpinlockGuard<'_, T> {
95 fn deref_mut(&mut self) -> &mut T {
96 unsafe { &mut *self.lock.data.get() }
98 }
99}
100
101impl<T> Drop for SpinlockGuard<'_, T> {
102 fn drop(&mut self) {
103 unsafe { self.lock.unlock() }
106 }
107}
108
/// Shared bookkeeping for the tracking allocator; lives in a global spinlock.
struct TrackingAllocatorData {
    // Net bytes allocated since tracking started. May go negative when memory
    // allocated before `start_tracking` is freed during the session.
    current: isize,
    // Highest value `current` has reached during the session.
    peak: isize,
    // Allocation limit in bytes; `0` disables limit enforcement.
    limit: isize,
    // Invoked when an allocation would push `peak` past a non-zero `limit`.
    failure_handler: Option<Box<dyn Fn() + Send>>,
}
115
impl TrackingAllocatorData {
    /// Resets the counters and installs a new tracking session.
    fn start_tracking(
        mut guard: SpinlockGuard<Self>,
        limit: isize,
        failure_handler: Option<Box<dyn Fn() + Send>>,
    ) {
        guard.current = 0;
        guard.peak = 0;
        guard.limit = limit;
        // Swap the old handler out while holding the lock, but drop it only
        // AFTER releasing the lock: dropping the `Box` frees memory, which
        // re-enters `TrackingAllocator::dealloc` and would deadlock on this
        // very spinlock if the guard were still alive.
        let old_handler = guard.failure_handler.take();
        guard.failure_handler = failure_handler;
        drop(guard);
        drop(old_handler);
    }

    /// Ends the session, clears the limit and handler, and returns the peak
    /// allocation (in bytes) observed during the session.
    fn end_tracking(mut guard: SpinlockGuard<Self>) -> isize {
        let peak = guard.peak;
        guard.limit = 0;
        // Same ordering requirement as in `start_tracking`: release the lock
        // before dropping the handler, since the drop re-enters the allocator.
        let old_handler = guard.failure_handler.take();
        drop(guard);
        drop(old_handler);
        peak
    }

    /// Applies a signed size delta to `current`, updating `peak`.
    ///
    /// Returns `None` when the (possibly disabled) limit is respected, or
    /// `Some(guard)` — with the lock still held — when it was exceeded, so the
    /// caller can run the failure path under the lock.
    ///
    /// NOTE(review): a refused allocation is not rolled back, so `current`/
    /// `peak` stay inflated afterwards — presumably acceptable because the
    /// failure handler is expected to abort execution; confirm.
    #[inline]
    fn track_and_check_limits(
        mut guard: SpinlockGuard<Self>,
        alloc: isize,
    ) -> Option<SpinlockGuard<Self>> {
        guard.current += alloc;
        if guard.current > guard.peak {
            guard.peak = guard.current;
        }
        // `limit == 0` means "no limit".
        if guard.limit == 0 || guard.peak <= guard.limit {
            None
        } else {
            Some(guard)
        }
    }
}
158
// Global bookkeeping shared by every `TrackingAllocator` instance; starts out
// with tracking disabled (`limit == 0`, no handler).
static ALLOCATOR_DATA: Spinlock<TrackingAllocatorData> =
    Spinlock::new(TrackingAllocatorData { current: 0, peak: 0, limit: 0, failure_handler: None });
161
/// A `GlobalAlloc` wrapper that forwards to the inner allocator `A` while
/// tracking net and peak usage and optionally enforcing an allocation limit.
pub struct TrackingAllocator<A: GlobalAlloc>(pub A);
163
164impl<A: GlobalAlloc> TrackingAllocator<A> {
165 pub unsafe fn start_tracking(
173 &self,
174 limit: Option<isize>,
175 failure_handler: Option<Box<dyn Fn() + Send>>,
176 ) {
177 TrackingAllocatorData::start_tracking(
178 ALLOCATOR_DATA.lock(),
179 limit.unwrap_or(0),
180 failure_handler,
181 );
182 }
183
184 pub fn end_tracking(&self) -> isize {
187 TrackingAllocatorData::end_tracking(ALLOCATOR_DATA.lock())
188 }
189}
190
/// Slow path taken when an allocation would exceed the configured limit:
/// runs the failure handler (if any) and signals failure by returning null,
/// which is the `GlobalAlloc` convention for a failed allocation.
///
/// `#[cold]`/`#[inline(never)]` keep this off the hot allocation path.
/// The handler runs while the spinlock guard is still held, so it must not
/// allocate or deallocate or it would deadlock on the same lock.
#[cold]
#[inline(never)]
unsafe fn fail_allocation(guard: SpinlockGuard<TrackingAllocatorData>) -> *mut u8 {
    if let Some(failure_handler) = &guard.failure_handler {
        failure_handler()
    }
    null_mut()
}
199
200unsafe impl<A: GlobalAlloc> GlobalAlloc for TrackingAllocator<A> {
201 #[inline]
205 unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
206 let guard = ALLOCATOR_DATA.lock();
207 if let Some(guard) =
208 TrackingAllocatorData::track_and_check_limits(guard, layout.size() as isize)
209 {
210 fail_allocation(guard)
211 } else {
212 self.0.alloc(layout)
213 }
214 }
215
216 #[inline]
217 unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
218 let guard = ALLOCATOR_DATA.lock();
219 if let Some(guard) =
220 TrackingAllocatorData::track_and_check_limits(guard, layout.size() as isize)
221 {
222 fail_allocation(guard)
223 } else {
224 self.0.alloc_zeroed(layout)
225 }
226 }
227
228 #[inline]
229 unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
230 let guard = ALLOCATOR_DATA.lock();
231 TrackingAllocatorData::track_and_check_limits(guard, -(layout.size() as isize));
232 self.0.dealloc(ptr, layout)
233 }
234
235 #[inline]
236 unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
237 let guard = ALLOCATOR_DATA.lock();
238 if let Some(guard) = TrackingAllocatorData::track_and_check_limits(
239 guard,
240 (new_size as isize) - (layout.size() as isize),
241 ) {
242 fail_allocation(guard)
243 } else {
244 self.0.realloc(ptr, layout, new_size)
245 }
246 }
247}