use core::{cell::UnsafeCell, marker::PhantomData, sync::atomic::Ordering};

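// Marker type that is `Sync` but not `RefUnwindSafe` (it contains an `UnsafeCell`).
// Held via `PhantomData` in the wrappers below so that they do not implement
// `RefUnwindSafe`, presumably to keep auto traits consistent with the crate's other
// (non-`core::sync::atomic`) backends.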
struct NotRefUnwindSafe(UnsafeCell<()>);
// SAFETY: this type is only ever used via `PhantomData`, so no value is actually shared.
unsafe impl Sync for NotRefUnwindSafe {}

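// Thin `#[repr(transparent)]` wrapper around `core::sync::atomic::AtomicPtr`. It
// routes ordering checks through `crate::utils` (presumably so panics and messages
// stay consistent across backends) and opts out of `RefUnwindSafe` via the marker above.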
#[repr(transparent)]
pub(crate) struct AtomicPtr<T> {
    inner: core::sync::atomic::AtomicPtr<T>,
    _not_ref_unwind_safe: PhantomData<NotRefUnwindSafe>,
}
impl<T> AtomicPtr<T> {
    #[inline]
    pub(crate) const fn new(v: *mut T) -> Self {
        Self { inner: core::sync::atomic::AtomicPtr::new(v), _not_ref_unwind_safe: PhantomData }
    }
    #[inline]
    pub(crate) fn is_lock_free() -> bool {
        Self::IS_ALWAYS_LOCK_FREE
    }
    pub(crate) const IS_ALWAYS_LOCK_FREE: bool = true;
    #[inline]
    pub(crate) fn get_mut(&mut self) -> &mut *mut T {
        self.inner.get_mut()
    }
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn load(&self, order: Ordering) -> *mut T {
        crate::utils::assert_load_ordering(order);
        self.inner.load(order)
    }
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn store(&self, ptr: *mut T, order: Ordering) {
        crate::utils::assert_store_ordering(order);
        self.inner.store(ptr, order);
    }
    const_fn! {
        const_if: #[cfg(not(portable_atomic_no_const_raw_ptr_deref))];
        #[inline]
        pub(crate) const fn as_ptr(&self) -> *mut *mut T {
            // SAFETY: Self is `#[repr(transparent)]` over the standard library's
            // `AtomicPtr<T>`, which has the same in-memory representation as `*mut T`
            // (and hence as `UnsafeCell<*mut T>`).
            unsafe { (*(self as *const Self as *const UnsafeCell<*mut T>)).get() }
        }
    }
}
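// Methods that require compare-and-swap support. The first `cfg_attr` applies on
// toolchains without `cfg(target_has_atomic)` (where `portable_atomic_no_atomic_cas`
// is presumably set by the build script); the second applies where it is available.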
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_cas)))]
#[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
impl<T> AtomicPtr<T> {
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn compare_exchange(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        crate::utils::assert_compare_exchange_ordering(success, failure);
        #[cfg(portable_atomic_no_stronger_failure_ordering)]
        let success = crate::utils::upgrade_success_ordering(success, failure);
        self.inner.compare_exchange(current, new, success, failure)
    }
    #[inline]
    #[cfg_attr(
        any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
        track_caller
    )]
    pub(crate) fn compare_exchange_weak(
        &self,
        current: *mut T,
        new: *mut T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<*mut T, *mut T> {
        crate::utils::assert_compare_exchange_ordering(success, failure);
        #[cfg(portable_atomic_no_stronger_failure_ordering)]
        let success = crate::utils::upgrade_success_ordering(success, failure);
        self.inner.compare_exchange_weak(current, new, success, failure)
    }
}
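// All other methods (e.g. `swap`) are taken directly from the standard library type
// through this `Deref` impl.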
impl<T> core::ops::Deref for AtomicPtr<T> {
    type Target = core::sync::atomic::AtomicPtr<T>;
    #[inline]
    #[cfg_attr(miri, track_caller)]
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

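// Generates the same kind of wrapper as `AtomicPtr` above for each of the standard
// library's atomic integer types.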
macro_rules! atomic_int {
    ($atomic_type:ident, $int_type:ident) => {
        #[repr(transparent)]
        pub(crate) struct $atomic_type {
            inner: core::sync::atomic::$atomic_type,
            _not_ref_unwind_safe: PhantomData<NotRefUnwindSafe>,
        }
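        // These helper macros (defined elsewhere in the crate) presumably generate the
        // plain `add`/`sub`/`and`/`or`/`xor` convenience methods and the bit operations
        // in terms of the `fetch_*` methods. On x86/x86_64 with inline asm (and outside
        // Miri / thread sanitizer), the bit operations are expected to come from a
        // dedicated asm-based implementation, hence the exclusion below.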
        #[cfg_attr(
            portable_atomic_no_cfg_target_has_atomic,
            cfg(not(portable_atomic_no_atomic_cas))
        )]
        #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
        impl_default_no_fetch_ops!($atomic_type, $int_type);
        #[cfg(not(all(
            any(target_arch = "x86", target_arch = "x86_64"),
            not(any(miri, portable_atomic_sanitize_thread)),
            any(not(portable_atomic_no_asm), portable_atomic_unstable_asm),
        )))]
        #[cfg_attr(
            portable_atomic_no_cfg_target_has_atomic,
            cfg(not(portable_atomic_no_atomic_cas))
        )]
        #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
        impl_default_bit_opts!($atomic_type, $int_type);
        impl $atomic_type {
            #[inline]
            pub(crate) const fn new(v: $int_type) -> Self {
                Self {
                    inner: core::sync::atomic::$atomic_type::new(v),
                    _not_ref_unwind_safe: PhantomData,
                }
            }
            #[inline]
            pub(crate) fn is_lock_free() -> bool {
                Self::IS_ALWAYS_LOCK_FREE
            }
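            // The standard library's integer atomics are lock-free wherever they exist,
            // with one known exception: 64-bit atomics on ESP-IDF (riscv32/xtensa) are
            // reportedly not lock-free, hence the carve-out below.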
            pub(crate) const IS_ALWAYS_LOCK_FREE: bool = cfg!(not(all(
                any(target_arch = "riscv32", target_arch = "xtensa"),
                target_os = "espidf",
            ))) | (core::mem::size_of::<$int_type>() < 8);
            #[inline]
            pub(crate) fn get_mut(&mut self) -> &mut $int_type {
                self.inner.get_mut()
            }
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn load(&self, order: Ordering) -> $int_type {
                crate::utils::assert_load_ordering(order);
                self.inner.load(order)
            }
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn store(&self, val: $int_type, order: Ordering) {
                crate::utils::assert_store_ordering(order);
                self.inner.store(val, order);
            }
            const_fn! {
                const_if: #[cfg(not(portable_atomic_no_const_raw_ptr_deref))];
                #[inline]
                pub(crate) const fn as_ptr(&self) -> *mut $int_type {
                    // SAFETY: Self is `#[repr(transparent)]` over the standard library's
                    // atomic type, which has the same in-memory representation as the
                    // underlying integer (and hence as `UnsafeCell<$int_type>`).
                    unsafe {
                        (*(self as *const Self as *const UnsafeCell<$int_type>)).get()
                    }
                }
            }
        }
        #[cfg_attr(
            portable_atomic_no_cfg_target_has_atomic,
            cfg(not(portable_atomic_no_atomic_cas))
        )]
        #[cfg_attr(not(portable_atomic_no_cfg_target_has_atomic), cfg(target_has_atomic = "ptr"))]
        impl $atomic_type {
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn compare_exchange(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                crate::utils::assert_compare_exchange_ordering(success, failure);
                #[cfg(portable_atomic_no_stronger_failure_ordering)]
                let success = crate::utils::upgrade_success_ordering(success, failure);
                self.inner.compare_exchange(current, new, success, failure)
            }
            #[inline]
            #[cfg_attr(
                any(all(debug_assertions, not(portable_atomic_no_track_caller)), miri),
                track_caller
            )]
            pub(crate) fn compare_exchange_weak(
                &self,
                current: $int_type,
                new: $int_type,
                success: Ordering,
                failure: Ordering,
            ) -> Result<$int_type, $int_type> {
                crate::utils::assert_compare_exchange_ordering(success, failure);
                #[cfg(portable_atomic_no_stronger_failure_ordering)]
                let success = crate::utils::upgrade_success_ordering(success, failure);
                self.inner.compare_exchange_weak(current, new, success, failure)
            }
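            // Generic read-modify-write helper: retries a weak CAS until it succeeds and
            // returns the previous value. Used below to emulate operations that lack a
            // (good) native atomic instruction on some targets.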
            #[allow(dead_code)]
            #[inline]
            #[cfg_attr(miri, track_caller)]
            fn fetch_update_<F>(&self, order: Ordering, mut f: F) -> $int_type
            where
                F: FnMut($int_type) -> $int_type,
            {
                let mut prev = self.load(Ordering::Relaxed);
                loop {
                    let next = f(prev);
                    match self.compare_exchange_weak(prev, next, order, Ordering::Relaxed) {
                        Ok(x) => return x,
                        Err(next_prev) => prev = next_prev,
                    }
                }
            }
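            // fetch_max/fetch_min: on compilers without native atomic min/max
            // (`portable_atomic_no_atomic_min_max`) these always fall back to the CAS loop
            // above. On the architectures listed inside, codegen for sub-word (8-/16-bit)
            // atomic min/max appears to be poor, so the CAS loop is preferred there too.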
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
                #[cfg(not(portable_atomic_no_atomic_min_max))]
                {
                    #[cfg(any(
                        all(
                            any(target_arch = "aarch64", target_arch = "arm64ec"),
                            any(target_feature = "lse", portable_atomic_target_feature = "lse"),
                        ),
                        all(
                            target_arch = "arm",
                            not(any(
                                target_feature = "v6",
                                portable_atomic_target_feature = "v6",
                            )),
                        ),
                        target_arch = "mips",
                        target_arch = "mips32r6",
                        target_arch = "mips64",
                        target_arch = "mips64r6",
                        target_arch = "powerpc",
                        target_arch = "powerpc64",
                    ))]
                    {
                        if core::mem::size_of::<$int_type>() <= 2 {
                            return self.fetch_update_(order, |x| core::cmp::max(x, val));
                        }
                    }
                    self.inner.fetch_max(val, order)
                }
                #[cfg(portable_atomic_no_atomic_min_max)]
                {
                    self.fetch_update_(order, |x| core::cmp::max(x, val))
                }
            }
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
                #[cfg(not(portable_atomic_no_atomic_min_max))]
                {
                    #[cfg(any(
                        all(
                            any(target_arch = "aarch64", target_arch = "arm64ec"),
                            any(target_feature = "lse", portable_atomic_target_feature = "lse"),
                        ),
                        all(
                            target_arch = "arm",
                            not(any(
                                target_feature = "v6",
                                portable_atomic_target_feature = "v6",
                            )),
                        ),
                        target_arch = "mips",
                        target_arch = "mips32r6",
                        target_arch = "mips64",
                        target_arch = "mips64r6",
                        target_arch = "powerpc",
                        target_arch = "powerpc64",
                    ))]
                    {
                        if core::mem::size_of::<$int_type>() <= 2 {
                            return self.fetch_update_(order, |x| core::cmp::min(x, val));
                        }
                    }
                    self.inner.fetch_min(val, order)
                }
                #[cfg(portable_atomic_no_atomic_min_max)]
                {
                    self.fetch_update_(order, |x| core::cmp::min(x, val))
                }
            }
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn fetch_not(&self, order: Ordering) -> $int_type {
                self.fetch_xor(!0, order)
            }
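            // `not`/`neg` (which discard the return value) are presumably provided by a
            // dedicated asm-based implementation on x86/x86_64 (e.g. `lock not`/`lock neg`),
            // hence the exclusions below; elsewhere they simply call the `fetch_*` forms.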
            #[cfg(not(all(
                any(target_arch = "x86", target_arch = "x86_64"),
                not(any(miri, portable_atomic_sanitize_thread)),
                any(not(portable_atomic_no_asm), portable_atomic_unstable_asm),
            )))]
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn not(&self, order: Ordering) {
                self.fetch_not(order);
            }
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn fetch_neg(&self, order: Ordering) -> $int_type {
                self.fetch_update_(order, $int_type::wrapping_neg)
            }
            #[cfg(not(all(
                any(target_arch = "x86", target_arch = "x86_64"),
                not(any(miri, portable_atomic_sanitize_thread)),
                any(not(portable_atomic_no_asm), portable_atomic_unstable_asm),
            )))]
            #[inline]
            #[cfg_attr(miri, track_caller)]
            pub(crate) fn neg(&self, order: Ordering) {
                self.fetch_neg(order);
            }
        }
        impl core::ops::Deref for $atomic_type {
            type Target = core::sync::atomic::$atomic_type;
            #[inline]
            #[cfg_attr(miri, track_caller)]
            fn deref(&self) -> &Self::Target {
                &self.inner
            }
        }
    };
}

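// Instantiate the wrappers. Pointer-sized atomics are always generated; the 8-/16-bit
// types require atomic load/store support; the 32-bit types additionally require a
// pointer width above 16 bits; the 64-bit types require 64-bit atomics
// (`target_has_atomic = "64"` or a 64-bit-or-wider pointer on newer toolchains, the
// `portable_atomic_no_atomic_64` probe on older ones).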
atomic_int!(AtomicIsize, isize);
atomic_int!(AtomicUsize, usize);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicI8, i8);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicU8, u8);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicI16, i16);
#[cfg(not(portable_atomic_no_atomic_load_store))]
atomic_int!(AtomicU16, u16);
#[cfg(not(portable_atomic_no_atomic_load_store))]
#[cfg(not(target_pointer_width = "16"))]
atomic_int!(AtomicI32, i32);
#[cfg(not(portable_atomic_no_atomic_load_store))]
#[cfg(not(target_pointer_width = "16"))]
atomic_int!(AtomicU32, u32);
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_64)))]
#[cfg_attr(
    not(portable_atomic_no_cfg_target_has_atomic),
    cfg(any(
        target_has_atomic = "64",
        not(any(target_pointer_width = "16", target_pointer_width = "32")),
    ))
)]
atomic_int!(AtomicI64, i64);
#[cfg_attr(portable_atomic_no_cfg_target_has_atomic, cfg(not(portable_atomic_no_atomic_64)))]
#[cfg_attr(
    not(portable_atomic_no_cfg_target_has_atomic),
    cfg(any(
        target_has_atomic = "64",
        not(any(target_pointer_width = "16", target_pointer_width = "32")),
    ))
)]
atomic_int!(AtomicU64, u64);