static_init/phase_locker/sync.rs

use super::futex::Futex;
use super::spin_wait::SpinWait;
use super::{LockNature, LockResult, Mappable, MutPhaseLocker, PhaseGuard, PhaseLocker};
use crate::phase::*;
use crate::{Phase, Phased};
use core::cell::UnsafeCell;
use core::mem::forget;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{fence, Ordering};

#[cfg(feature = "lock_statistics")]
use core::sync::atomic::AtomicUsize;

#[cfg(feature = "lock_statistics")]
use core::fmt::{self, Display, Formatter};

/// A synchronised phase locker.
pub(crate) struct SyncPhaseLocker(Futex);

pub(crate) struct Lock<'a> {
    futex: &'a Futex,
    init_phase: Phase,
    on_unlock: Phase,
}

/// A phase guard that allows atomic phase transitions and
/// can quickly be turned into a [SyncReadPhaseGuard].
pub(crate) struct SyncPhaseGuard<'a, T: ?Sized>(&'a T, Lock<'a>);

pub(crate) struct ReadLock<'a> {
    futex: &'a Futex,
    init_phase: Phase,
}

/// A shared (read) phase guard: the phase cannot change while it is held.
pub(crate) struct SyncReadPhaseGuard<'a, T: ?Sized>(&'a T, ReadLock<'a>);

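/// A small mutex built on top of [SyncPhaseLocker]: the value lives in an
/// [UnsafeCell] and exclusive access is obtained through a write lock on the phase.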
pub(crate) struct Mutex<T>(UnsafeCell<T>, SyncPhaseLocker);

pub(crate) struct MutexGuard<'a, T>(&'a mut T, Lock<'a>);

#[cfg(feature = "lock_statistics")]
static OPTIMISTIC_FAILURES: AtomicUsize = AtomicUsize::new(0);

#[cfg(feature = "lock_statistics")]
static SECOND_ATTEMPT_FAILURES: AtomicUsize = AtomicUsize::new(0);

#[cfg(feature = "lock_statistics")]
static WRITE_LOCK_WHILE_READER_FAILURES: AtomicUsize = AtomicUsize::new(0);

#[cfg(feature = "lock_statistics")]
static WRITE_WAIT_FAILURES: AtomicUsize = AtomicUsize::new(0);

#[cfg(feature = "lock_statistics")]
static WRITE_WAIT_SUCCESSES: AtomicUsize = AtomicUsize::new(0);

#[cfg(feature = "lock_statistics")]
static READ_WAIT_FAILURES: AtomicUsize = AtomicUsize::new(0);

#[cfg(feature = "lock_statistics")]
static READ_WAIT_SUCCESSES: AtomicUsize = AtomicUsize::new(0);

#[cfg(feature = "lock_statistics")]
static ADDAPTATIVE_WAIT_SUCCESSES: AtomicUsize = AtomicUsize::new(0);

#[cfg(feature = "lock_statistics")]
static LATE_ADDAPTATIONS: AtomicUsize = AtomicUsize::new(0);

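/// Counters describing which locking paths were taken; only available with the
/// `lock_statistics` feature. A typical use is to print `LockStatistics::get_and_reset()`.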
#[cfg(feature = "lock_statistics")]
#[derive(Debug)]
pub struct LockStatistics {
    pub optimistic_failures: usize,
    pub second_attempt_failures: usize,
    pub write_lock_while_reader_failures: usize,
    pub write_wait_failures: usize,
    pub write_wait_successes: usize,
    pub read_wait_failures: usize,
    pub read_wait_successes: usize,
    pub addaptative_wait_successes: usize,
    pub late_addaptations: usize,
}

#[cfg(feature = "lock_statistics")]
impl LockStatistics {
    pub fn get_and_reset() -> Self {
        Self {
            optimistic_failures: OPTIMISTIC_FAILURES.swap(0, Ordering::Relaxed),
            second_attempt_failures: SECOND_ATTEMPT_FAILURES.swap(0, Ordering::Relaxed),

            write_lock_while_reader_failures: WRITE_LOCK_WHILE_READER_FAILURES
                .swap(0, Ordering::Relaxed),

            write_wait_failures: WRITE_WAIT_FAILURES.swap(0, Ordering::Relaxed),

            write_wait_successes: WRITE_WAIT_SUCCESSES.swap(0, Ordering::Relaxed),

            read_wait_failures: READ_WAIT_FAILURES.swap(0, Ordering::Relaxed),

            read_wait_successes: READ_WAIT_SUCCESSES.swap(0, Ordering::Relaxed),

            addaptative_wait_successes: ADDAPTATIVE_WAIT_SUCCESSES.swap(0, Ordering::Relaxed),

            late_addaptations: LATE_ADDAPTATIONS.swap(0, Ordering::Relaxed),
        }
    }
}
#[cfg(feature = "lock_statistics")]
impl Display for LockStatistics {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        write!(f, "{:#?}", self)
    }
}

// SyncPhaseGuard
//-------------------
//
impl<'a, T> Deref for SyncPhaseGuard<'a, T> {
    type Target = T;
    #[inline(always)]
    fn deref(&self) -> &T {
        self.0
    }
}

impl<'a, T: ?Sized> SyncPhaseGuard<'a, T> {
    #[inline(always)]
    fn new(r: &'a T, lock: Lock<'a>) -> Self {
        Self(r, lock)
    }

    #[inline(always)]
    pub fn map<S: ?Sized>(self, f: impl FnOnce(&'a T) -> &'a S) -> SyncPhaseGuard<'a, S> {
        SyncPhaseGuard(f(self.0), self.1)
    }
}
impl<'a, T: 'a, U: 'a> Mappable<T, U, SyncPhaseGuard<'a, U>> for SyncPhaseGuard<'a, T> {
    #[inline(always)]
    fn map<F: FnOnce(&'a T) -> &'a U>(self, f: F) -> SyncPhaseGuard<'a, U> {
        Self::map(self, f)
    }
}
unsafe impl<'a, T: ?Sized> PhaseGuard<'a, T> for SyncPhaseGuard<'a, T> {
    #[inline(always)]
    fn set_phase(&mut self, p: Phase) {
        self.1.on_unlock = p;
    }
    #[inline(always)]
    fn commit_phase(&mut self) {
        // Butterfly trick: xoring the stored phase with (on_unlock ^ current)
        // atomically sets it to `on_unlock` without touching the other state bits.
        let cur = self.1.phase();
        let to_xor = self.1.on_unlock ^ cur;
        self.1.xor_phase(to_xor);
    }
    #[inline(always)]
    fn phase(&self) -> Phase {
        self.1.on_unlock
    }
    #[inline(always)]
    fn transition<R>(
        &mut self,
        f: impl FnOnce(&'a T) -> R,
        on_success: Phase,
        on_panic: Phase,
    ) -> R {
        self.1.on_unlock = on_panic;
        let res = f(self.0);
        self.1.on_unlock = on_success;
        res
    }
}

impl<'a, T> Phased for SyncPhaseGuard<'a, T> {
    #[inline(always)]
    fn phase(this: &Self) -> Phase {
        this.1.on_unlock
    }
}

// SyncReadPhaseGuard
//-------------------
//
impl<'a, T> Deref for SyncReadPhaseGuard<'a, T> {
    type Target = T;
    #[inline(always)]
    fn deref(&self) -> &T {
        self.0
    }
}

impl<'a, T: ?Sized> SyncReadPhaseGuard<'a, T> {
    #[inline(always)]
    fn new(r: &'a T, lock: ReadLock<'a>) -> Self {
        Self(r, lock)
    }

    #[inline(always)]
    pub fn map<S: ?Sized>(self, f: impl FnOnce(&'a T) -> &'a S) -> SyncReadPhaseGuard<'a, S> {
        SyncReadPhaseGuard(f(self.0), self.1)
    }
}
impl<'a, T: 'a, U: 'a> Mappable<T, U, SyncReadPhaseGuard<'a, U>> for SyncReadPhaseGuard<'a, T> {
    #[inline(always)]
    fn map<F: FnOnce(&'a T) -> &'a U>(self, f: F) -> SyncReadPhaseGuard<'a, U> {
        Self::map(self, f)
    }
}
impl<'a, T> From<SyncPhaseGuard<'a, T>> for SyncReadPhaseGuard<'a, T> {
    #[inline(always)]
    fn from(this: SyncPhaseGuard<'a, T>) -> SyncReadPhaseGuard<'a, T> {
        SyncReadPhaseGuard(this.0, this.1.into())
    }
}

impl<'a, T> Phased for SyncReadPhaseGuard<'a, T> {
    #[inline(always)]
    fn phase(this: &Self) -> Phase {
        this.1.init_phase
    }
}

impl<'a, T> Clone for SyncReadPhaseGuard<'a, T> {
    fn clone(&self) -> Self {
        SyncReadPhaseGuard(self.0, self.1.clone())
    }
}

// Mutex
//-------------------
//
unsafe impl<T: Send> Sync for Mutex<T> {}

unsafe impl<T: Send> Send for Mutex<T> {}

impl<T> Mutex<T> {
    #[inline(always)]
    pub(crate) const fn new(value: T) -> Self {
        Self(UnsafeCell::new(value), SyncPhaseLocker::new(Phase::empty()))
    }
    #[inline(always)]
    pub(crate) fn lock(&self) -> MutexGuard<'_, T> {
        let lk = if let LockResult::Write(l) = {
            self.1.raw_lock(
                |_p| LockNature::Write,
                |_p| LockNature::Write,
                Phase::empty(),
            )
        } {
            l
        } else {
            unreachable!()
        };
        MutexGuard(unsafe { &mut *self.0.get() }, lk)
    }
}

impl<'a, T> Deref for MutexGuard<'a, T> {
    type Target = T;
    #[inline(always)]
    fn deref(&self) -> &T {
        self.0
    }
}
impl<'a, T> DerefMut for MutexGuard<'a, T> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut T {
        self.0
    }
}

// Lock
// ----

// STATES:
// LOCKED_BIT | <READ_WAITER_BIT|WRITE_WAITER_BIT> => Write lock held
// any READER_BIT | <READ_WAITER_BIT|WRITE_WAITER_BIT> => Read lock held
// LOCKED_BIT | any READER_BIT | <READ_WAITER_BIT|WRITE_WAITER_BIT>
//       => a wlock or rlock is being transferred to an rlock
//       => rlocks are being taken right now
// any READ_WAITER_BIT,WRITE_WAITER_BIT => a lock is being released
// During a transfer to a write lock, the WRITE_WAITER_BIT is 0,
// but if the transfer succeeds it means that there were one or
// more waiters for the write lock, so WRITE_WAITER_BIT must be reset to 1
// when a waiter is woken.

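// Illustrative example: a word equal to `phase_bits | LOCKED_BIT | READ_WAITER_BIT`
// means a writer currently holds the lock while at least one reader is parked;
// when that writer releases the lock, `transfer_lock` below hands it over to the
// parked waiters.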
impl<'a> Lock<'a> {
    #[inline(always)]
    fn new(futex: &'a Futex, current: u32) -> Self {
        let p = Phase::from_bits_truncate(current);
        Self {
            futex,
            init_phase: p,
            on_unlock: p,
        }
    }
    #[inline(always)]
    pub fn phase(&self) -> Phase {
        let v = self.futex.load(Ordering::Relaxed);
        Phase::from_bits_truncate(v)
    }
    #[inline(always)]
    pub fn xor_phase(&self, xor: Phase) -> Phase {
        let v = self.futex.fetch_xor(xor.bits(), Ordering::Release);
        Phase::from_bits_truncate(v) ^ xor
    }
}

impl<'a> Lock<'a> {
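    // Atomically turns this exclusive lock into a read lock, waking parked readers
    // so they can share it, and skips the normal unlock path (`forget(self)`).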
    #[inline(always)]
    fn into_read_lock(self, cur: Phase) -> ReadLock<'a> {
        //state: old_phase | LOCKED_BIT | <0:READ_WAITER_BIT|0:WRITE_WAITER_BIT>
        let xor = (cur ^ self.on_unlock).bits() | LOCKED_BIT | READER_UNITY;
        //state: phase | READER_UNITY | <0:READ_WAITER_BIT|0:WRITE_WAITER_BIT>
        let prev = self.futex.fetch_xor(xor, Ordering::Release);

        let r = if prev & READ_WAITER_BIT != 0 {
            wake_readers(&self.futex, 0, true)
        } else {
            ReadLock::new(self.futex, self.on_unlock.bits())
        };

        forget(self);

        r
    }
}

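// Called when a lock is released while other threads are parked: hands the lock
// over to a waiting writer, or converts it into read locks for waiting readers,
// retrying if new waiters register in the meantime.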
#[cold]
#[inline]
fn transfer_lock(futex: &Futex, mut cur: u32) {
    // try to reacquire the lock
    //state: phase | 0:READ_WAITER_BIT<|>0:WRITE_WAITER_BIT
    assert_eq!(cur & (LOCKED_BIT | READER_BITS | READER_OVERF), 0);
    assert_ne!(cur & (READ_WAITER_BIT | WRITE_WAITER_BIT), 0);
    if futex.prefer_wake_one_writer() {
        loop {
            let mut un_activate_lock = 0;
            if cur & WRITE_WAITER_BIT != 0 {
                //state: phase | <READ_WAITER_BIT> | WRITE_WAITER_BIT
                let prev = futex.fetch_xor(WRITE_WAITER_BIT | LOCKED_BIT, Ordering::Relaxed);
                assert_ne!(prev & WRITE_WAITER_BIT, 0);
                assert_eq!(prev & (LOCKED_BIT | READER_BITS | READER_OVERF), 0);
                if futex.wake_one_writer() {
                    return;
                };
                cur ^= WRITE_WAITER_BIT | LOCKED_BIT;
                // turn the write lock into a read lock if
                // there are readers waiting
                un_activate_lock = LOCKED_BIT;
                //phase: phase | LOCKED_BIT | <READ_WAITER_BIT>
                //
                //so here we own a write lock
            }

            if cur & READ_WAITER_BIT != 0 {
                //phase: phase | <LOCKED_BIT> | READ_WAITER_BIT
                wake_readers(futex, un_activate_lock, false);
                //drop the acquired read lock
                return;
            }

            //cur: phase | LOCKED_BIT
            cur = futex.fetch_and(!LOCKED_BIT, Ordering::Relaxed);
            assert_ne!(cur & LOCKED_BIT, 0);
            if has_no_waiters(cur) {
                break;
            } //else new threads are waiting
            cur &= !LOCKED_BIT; //unused
            core::hint::spin_loop();
        }
    } else {
        loop {
            if cur & READ_WAITER_BIT != 0 {
                //phase: phase | <WRITE_WAITER_BIT> | READ_WAITER_BIT
                wake_readers(futex, 0, false);
                return;
            }

            assert_ne!(cur & WRITE_WAITER_BIT, 0);

            //state: phase | <READ_WAITER_BIT> | WRITE_WAITER_BIT
            let prev = futex.fetch_xor(WRITE_WAITER_BIT | LOCKED_BIT, Ordering::Relaxed);
            assert_ne!(prev & WRITE_WAITER_BIT, 0);
            assert_eq!(prev & (LOCKED_BIT | READER_BITS | READER_OVERF), 0);
            if futex.wake_one_writer() {
                return;
            };
            //phase: phase | LOCKED_BIT | <READ_WAITER_BIT>

            //cur: phase | LOCKED_BIT
            cur = futex.fetch_and(!LOCKED_BIT, Ordering::Relaxed);

            assert_ne!(cur & LOCKED_BIT, 0);

            if has_no_waiters(cur) {
                break;
            } //else new threads are waiting
            cur &= !LOCKED_BIT; //unused
            core::hint::spin_loop();
        }
    }
}

impl<'a> Drop for Lock<'a> {
    #[inline(always)]
    fn drop(&mut self) {
        //state: old_phase | LOCKED_BIT | <0:READ_WAITER_BIT|0:WRITE_WAITER_BIT>
        let p = self.init_phase.bits();

        match self.futex.compare_exchange(
            p | LOCKED_BIT,
            self.on_unlock.bits(),
            Ordering::Release,
            Ordering::Relaxed,
        ) {
            Ok(_) => return,
            Err(x) => x,
        };

        //while let Err(x) = self.futex.compare_exchange_weak(
        //    cur, cur & (READ_WAITER_BIT|WRITE_WAITER_BIT|READER_BITS|READER_OVERF) | p,Ordering::Release,Ordering::Relaxed) {
        //    cur = x;
        //}
        let xor = (self.init_phase ^ self.on_unlock).bits() | LOCKED_BIT;
        let prev = self.futex.fetch_xor(xor, Ordering::Release);
        //state: phase | <1:READ_WAITER_BIT|1:WRITE_WAITER_BIT>
        if has_waiters(prev) {
            //let cur = cur & (READ_WAITER_BIT|WRITE_WAITER_BIT|READER_BITS|READER_OVERF) | p;
            //state: phase | 1:READ_WAITER_BIT<|>1:WRITE_WAITER_BIT
            transfer_lock(&self.futex, prev ^ xor);
        }
    }
}

impl<'a> From<Lock<'a>> for ReadLock<'a> {
    #[inline(always)]
    fn from(this: Lock<'a>) -> ReadLock<'a> {
        let p = this.init_phase;
        this.into_read_lock(p)
    }
}

// ReadLock
// --------
impl<'a> ReadLock<'a> {
    #[inline(always)]
    fn new(futex: &'a Futex, current: u32) -> Self {
        let p = Phase::from_bits_truncate(current);
        Self {
            futex,
            init_phase: p,
        }
    }

    //#[inline(always)]
    //pub fn fast_clone(&self) -> Option<Self> {
    //    let mut cur = self.futex.load(Ordering::Relaxed);

    //    if has_readers_max(cur) {
    //        return None;
    //    }

    //    match self.futex.compare_exchange_weak(cur, cur + READER_UNITY,Ordering::Acquire, Ordering::Relaxed) {
    //        Ok(_) => return Some(ReadLock{futex:&self.futex,init_phase: self.init_phase}),
    //        Err(c) => cur = c,
    //    }

    //    if has_readers_max(cur) {
    //        return None;
    //    }

    //    match self.futex.compare_exchange(cur, cur + READER_UNITY,Ordering::Acquire, Ordering::Relaxed) {
    //        Ok(_) => Some(ReadLock{futex:&self.futex,init_phase: self.init_phase}),
    //        Err(_) => None,
    //    }

    //}
}

impl<'a> Drop for ReadLock<'a> {
    #[inline(always)]
    fn drop(&mut self) {
        //state: phase | <LOCKED_BIT> | READER_UNITY*n | <0:READ_WAITER_BIT> |<0:WRITE_WAITER_BIT>
        let prev = self.futex.fetch_sub(READER_UNITY, Ordering::Release);
        //state: phase | <LOCKED_BIT> | READER_UNITY*(n-1) | <1:READ_WAITER_BIT> |<1:WRITE_WAITER_BIT>
        if has_one_reader(prev) && is_not_write_locked(prev) && has_waiters(prev) {
            //state: phase | READ_WAITER_BIT <|> WRITE_WAITER_BIT
            let cur = prev - READER_UNITY;
            transfer_lock(&self.futex, cur);
        }
    }
}

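// Cloning a ReadLock increments the reader count; if the count is saturated the
// cloning thread registers as a read waiter and parks until it can share the lock.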
impl<'a> Clone for ReadLock<'a> {
    fn clone(&self) -> Self {
        let mut spin_wait = SpinWait::new();
        let mut cur = self.futex.load(Ordering::Relaxed);
        loop {
            if !has_readers_max(cur) {
                cur = match read_lock(&self.futex, |cur| !has_readers_max(cur), cur) {
                    Ok(rl) => return rl,
                    Err(cur) => cur,
                }
            }

            if cur & READ_WAITER_BIT == 0 && spin_wait.spin() {
                cur = self.futex.load(Ordering::Relaxed);
                continue;
            }

            if cur & READ_WAITER_BIT == 0 {
                match self.futex.compare_exchange_weak(
                    cur,
                    cur | READ_WAITER_BIT,
                    Ordering::Relaxed,
                    Ordering::Relaxed,
                ) {
                    Err(x) => {
                        cur = x;
                        continue;
                    }
                    Ok(_) => cur |= READ_WAITER_BIT,
                }
            }

            if self.futex.compare_and_wait_as_reader(cur) {
                let cur = self.futex.load(Ordering::Relaxed);

                assert_ne!(cur & (READER_BITS | READER_OVERF), 0);

                return ReadLock::new(&self.futex, cur);
            }

            spin_wait.reset();
            cur = self.futex.load(Ordering::Relaxed);
        }
    }
}

#[inline(always)]
fn has_no_readers(v: u32) -> bool {
    v & (READER_OVERF | READER_BITS) == 0
}

#[inline(always)]
fn has_readers(v: u32) -> bool {
    v & (READER_OVERF | READER_BITS) != 0
}

#[inline(always)]
fn has_one_reader(v: u32) -> bool {
    v & (READER_OVERF | READER_BITS) == READER_UNITY
}

#[inline(always)]
fn has_readers_max(v: u32) -> bool {
    //can actually happen in two conditions:
    //  - READER_BITS
    //  - READER_BITS | READER_OVERF
    v & READER_BITS == READER_BITS
}

#[inline(always)]
fn is_not_write_locked(v: u32) -> bool {
    v & LOCKED_BIT == 0
}
//#[inline(always)]
//fn is_write_locked(v:u32) -> bool {
//    v & LOCKED_BIT != 0
//}
#[inline(always)]
fn has_waiters(v: u32) -> bool {
    v & (READ_WAITER_BIT | WRITE_WAITER_BIT) != 0
}
#[inline(always)]
fn has_no_waiters(v: u32) -> bool {
    v & (READ_WAITER_BIT | WRITE_WAITER_BIT) == 0
}

#[inline(always)]
fn is_write_lockable(v: u32) -> bool {
    is_not_write_locked(v) && (has_readers(v) || has_no_waiters(v))
}
#[inline(always)]
fn is_read_lockable(v: u32) -> bool {
    (has_readers(v) || (has_no_waiters(v) && is_not_write_locked(v))) && !has_readers_max(v)
}

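// Wakes all parked readers. The READER_OVERF marker temporarily keeps the state
// "read locked" while readers are being woken, then is replaced by the actual
// count of woken readers. Returns a ReadLock owned by the caller; when
// `converting` an already-counted lock the caller's reader unit is not added again.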
#[inline(always)]
fn wake_readers(futex: &Futex, to_unactivate: u32, converting: bool) -> ReadLock {
    // at least one reader must have been marked + READER_OVERF
    let rb = if converting { 0 } else { READER_UNITY };
    let v = futex.fetch_xor(
        READ_WAITER_BIT | to_unactivate | READER_OVERF | rb,
        Ordering::Relaxed,
    );
    assert_eq!(v & to_unactivate, to_unactivate);
    if !converting {
        // otherwise threads may already be taking the read lock
        assert_ne!(v & READER_UNITY, rb); //BUG: fired
    }
    assert_eq!((v ^ to_unactivate) & LOCKED_BIT, 0);

    let c = futex.wake_readers();

    let cur = futex.fetch_sub(READER_OVERF - READER_UNITY * (c as u32), Ordering::Relaxed);
    ReadLock::new(futex, cur)
}

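// Panic guard used by `MutPhaseLocker::transition`: if the closure panics, this
// guard's Drop writes the fallback phase back into the futex.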
struct MutGuard<'a>(&'a mut Futex, Phase);
impl<'a> Drop for MutGuard<'a> {
    fn drop(&mut self) {
        *self.0.get_mut() = self.1.bits();
    }
}

// SyncPhaseLocker
// ---------------
//
unsafe impl MutPhaseLocker for SyncPhaseLocker {
    #[inline(always)]
    fn get_phase_unique(&mut self) -> Phase {
        Phase::from_bits(*self.0.get_mut()).unwrap()
    }

    #[inline(always)]
    fn set_phase(&mut self, p: Phase) {
        *self.0.get_mut() = p.bits();
    }

    #[inline(always)]
    fn transition<R>(&mut self, f: impl FnOnce() -> R, on_success: Phase, on_panic: Phase) -> R {
        let m = MutGuard(&mut self.0, on_panic);
        let r = f();
        forget(m);
        Self::set_phase(self, on_success);
        r
    }
}
unsafe impl<'a, T: 'a> PhaseLocker<'a, T> for SyncPhaseLocker {
    type ReadGuard = SyncReadPhaseGuard<'a, T>;
    type WriteGuard = SyncPhaseGuard<'a, T>;

    #[inline(always)]
    fn lock<FL: Fn(Phase) -> LockNature, FW: Fn(Phase) -> LockNature>(
        &'a self,
        value: &'a T,
        lock_nature: FL,
        on_wake_nature: FW,
        hint: Phase,
    ) -> LockResult<Self::ReadGuard, Self::WriteGuard> {
        Self::lock(self, value, lock_nature, on_wake_nature, hint)
    }
    #[inline(always)]
    fn lock_mut(&'a mut self, value: &'a T) -> Self::WriteGuard {
        Self::lock_mut(self, value)
    }
    #[inline(always)]
    fn try_lock<F: Fn(Phase) -> LockNature>(
        &'a self,
        value: &'a T,
        lock_nature: F,
        hint: Phase,
    ) -> Option<LockResult<Self::ReadGuard, Self::WriteGuard>> {
        Self::try_lock(self, value, lock_nature, hint)
    }
    #[inline(always)]
    fn phase(&self) -> Phase {
        Self::phase(self)
    }
}
impl Phased for SyncPhaseLocker {
    #[inline(always)]
    fn phase(this: &Self) -> Phase {
        this.phase()
    }
}

impl SyncPhaseLocker {
    #[inline(always)]
    pub const fn new(p: Phase) -> Self {
        SyncPhaseLocker(Futex::new(p.bits()))
    }
    #[inline(always)]
    /// Return the current phase and synchronize with the end of the
    /// phase transition that leads to this phase.
    pub fn phase(&self) -> Phase {
        Phase::from_bits_truncate(self.0.load(Ordering::Acquire))
    }
    #[inline(always)]
    /// Lock the phase through an exclusive reference and return a write guard.
    pub fn lock_mut<'a, T: ?Sized>(&'a mut self, v: &'a T) -> SyncPhaseGuard<'_, T> {
        let cur = self.0.fetch_or(LOCKED_BIT, Ordering::Acquire);
        SyncPhaseGuard::new(v, Lock::new(&self.0, cur))
    }
    #[inline(always)]
    /// Lock the phase.
    ///
    /// If the returned value is a LockResult::Read, then other threads
    /// may also hold such a lock. This call synchronizes with the
    /// phase transition that led to the current phase, and the phase will
    /// not change while this lock is held.
    ///
    /// If the returned value is a LockResult::Write, then only this thread
    /// holds the lock and the phase can be atomically transitioned using the
    /// returned guard.
    ///
    /// If the returned value is LockResult::None, then the call synchronizes
    /// with the end of the phase transition that led to the current phase.
    pub fn lock<'a, T: ?Sized>(
        &'a self,
        v: &'a T,
        how: impl Fn(Phase) -> LockNature,
        on_waiting_how: impl Fn(Phase) -> LockNature,
        hint: Phase,
    ) -> LockResult<SyncReadPhaseGuard<'_, T>, SyncPhaseGuard<'_, T>> {
        match self.raw_lock(how, on_waiting_how, hint) {
            LockResult::Write(l) => LockResult::Write(SyncPhaseGuard::new(v, l)),
            LockResult::Read(l) => LockResult::Read(SyncReadPhaseGuard::new(v, l)),
            LockResult::None(p) => LockResult::None(p),
        }
    }
    #[inline(always)]
    /// Try to lock the phase.
    ///
    /// If the returned value is a Some(LockResult::Read), then other threads
    /// may also hold such a lock. This call synchronizes with the
    /// phase transition that led to the current phase, and the phase will
    /// not change while this lock is held.
    ///
    /// If the returned value is a Some(LockResult::Write), then only this thread
    /// holds the lock and the phase can be atomically transitioned using the
    /// returned guard.
    ///
    /// If the returned value is Some(LockResult::None), then the call synchronizes
    /// with the end of the phase transition that led to the current phase.
    ///
    /// If the returned value is None, the lock is held by other threads and could
    /// not be obtained.
    pub fn try_lock<'a, T: ?Sized>(
        &'a self,
        v: &'a T,
        how: impl Fn(Phase) -> LockNature,
        hint: Phase,
    ) -> Option<LockResult<SyncReadPhaseGuard<'_, T>, SyncPhaseGuard<'_, T>>> {
        self.try_raw_lock(how, hint).map(|l| match l {
            LockResult::Write(l) => LockResult::Write(SyncPhaseGuard::new(v, l)),
            LockResult::Read(l) => LockResult::Read(SyncReadPhaseGuard::new(v, l)),
            LockResult::None(p) => LockResult::None(p),
        })
    }
    #[inline(always)]
    fn try_raw_lock(
        &self,
        how: impl Fn(Phase) -> LockNature,
        hint: Phase,
    ) -> Option<LockResult<ReadLock<'_>, Lock<'_>>> {
        let mut cur = match self.optimistic_lock(&how, hint) {
            Ok(x) => return Some(x),
            Err(cur) => cur,
        };

        #[cfg(feature = "lock_statistics")]
        {
            OPTIMISTIC_FAILURES.fetch_add(1, Ordering::Relaxed);
        }

        let p = Phase::from_bits_truncate(cur);

        match how(p) {
            LockNature::Write => {
                if is_write_lockable(cur)
                    && has_no_readers(cur)
                    && self
                        .0
                        .compare_exchange(
                            cur,
                            cur | LOCKED_BIT,
                            Ordering::Acquire,
                            Ordering::Relaxed,
                        )
                        .is_ok()
                {
                    return Some(LockResult::Write(Lock::new(&self.0, cur)));
                }
            }
            LockNature::Read => loop {
                if !is_read_lockable(cur) {
                    break;
                }
                match self.0.compare_exchange_weak(
                    cur,
                    cur + READER_UNITY,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => return Some(LockResult::Read(ReadLock::new(&self.0, cur))),
                    Err(x) => {
                        cur = x;
                        if !(how(Phase::from_bits_truncate(cur)) == LockNature::Read) {
                            break;
                        }
                    }
                }
            },
            LockNature::None => {
                fence(Ordering::Acquire);
                return Some(LockResult::None(p));
            }
        }

        #[cfg(feature = "lock_statistics")]
        {
            SECOND_ATTEMPT_FAILURES.fetch_add(1, Ordering::Relaxed);
        }

        None
    }

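    // Locking entry point used by `lock`: first an optimistic attempt from the
    // hinted phase, then one attempt based on the freshly observed state, and
    // finally the slow path that may spin and park the thread.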
    #[inline(always)]
    fn raw_lock(
        &self,
        how: impl Fn(Phase) -> LockNature,
        on_waiting_how: impl Fn(Phase) -> LockNature,
        hint: Phase,
    ) -> LockResult<ReadLock<'_>, Lock<'_>> {
        let cur = match self.optimistic_lock(&how, hint) {
            Ok(x) => return x,
            Err(cur) => cur,
        };

        #[cfg(feature = "lock_statistics")]
        {
            OPTIMISTIC_FAILURES.fetch_add(1, Ordering::Relaxed);
        }

        let p = Phase::from_bits_truncate(cur);

        match how(p) {
            LockNature::Write => {
                if is_write_lockable(cur)
                    && has_no_readers(cur)
                    && self
                        .0
                        .compare_exchange_weak(
                            cur,
                            cur | LOCKED_BIT,
                            Ordering::Acquire,
                            Ordering::Relaxed,
                        )
                        .is_ok()
                {
                    return LockResult::Write(Lock::new(&self.0, cur));
                }
            }
            LockNature::Read => {
                if is_read_lockable(cur) {
                    if let Ok(r) = read_lock(
                        &self.0,
                        |cur| {
                            how(Phase::from_bits_truncate(cur)) == LockNature::Read
                                && is_read_lockable(cur)
                        },
                        cur,
                    ) {
                        return LockResult::Read(r);
                    }
                }
            }
            LockNature::None => {
                fence(Ordering::Acquire);
                return LockResult::None(p);
            }
        }
        #[cfg(feature = "lock_statistics")]
        {
            SECOND_ATTEMPT_FAILURES.fetch_add(1, Ordering::Relaxed);
        }

        self.raw_lock_slow(how, on_waiting_how)
    }
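    // Slow path: spin while the requested lock is unavailable, then register as a
    // write or read waiter (according to `on_waiting_how`) and park on the futex;
    // on wake-up the requested nature is re-evaluated against the current phase.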
    #[cold]
    fn raw_lock_slow(
        &self,
        how: impl Fn(Phase) -> LockNature,
        on_waiting_how: impl Fn(Phase) -> LockNature,
    ) -> LockResult<ReadLock<'_>, Lock<'_>> {
        let mut spin_wait = SpinWait::new();

        let mut cur = self.0.load(Ordering::Relaxed);

        loop {
            match how(Phase::from_bits_truncate(cur)) {
                LockNature::None => {
                    fence(Ordering::Acquire);
                    return LockResult::None(Phase::from_bits_truncate(cur));
                }
                LockNature::Write => {
                    if is_write_lockable(cur) {
                        if has_no_readers(cur) {
                            match self.0.compare_exchange_weak(
                                cur,
                                cur | LOCKED_BIT,
                                Ordering::Acquire,
                                Ordering::Relaxed,
                            ) {
                                Ok(_) => {
                                    return LockResult::Write(Lock::new(&self.0, cur));
                                }
                                Err(x) => {
                                    cur = x;
                                    continue;
                                }
                            }
                        } else {
                            // take the write lock while readers are still present,
                            // then wait for them to leave
                            match self.0.compare_exchange_weak(
                                cur,
                                cur | LOCKED_BIT,
                                Ordering::Acquire,
                                Ordering::Relaxed,
                            ) {
                                Ok(x) => cur = x | LOCKED_BIT,
                                Err(x) => {
                                    cur = x;
                                    continue;
                                }
                            }

                            cur = match wait_for_readers(&self.0, cur) {
                                Ok(l) => return LockResult::Write(l),
                                Err(cur) => cur,
                            };
                            #[cfg(feature = "lock_statistics")]
                            {
                                WRITE_LOCK_WHILE_READER_FAILURES.fetch_add(1, Ordering::Relaxed);
                            }
                        }
                    }
                    if cur & WRITE_WAITER_BIT == 0 && spin_wait.spin() {
                        cur = self.0.load(Ordering::Relaxed);
                        continue;
                    }
                }
                LockNature::Read => {
                    if is_read_lockable(cur) {
                        cur = match read_lock(
                            &self.0,
                            |cur| {
                                how(Phase::from_bits_truncate(cur)) == LockNature::Read
                                    && is_read_lockable(cur)
                            },
                            cur,
                        ) {
                            Ok(r) => return LockResult::Read(r),
                            Err(cur) => cur,
                        };
                    }

                    if has_no_waiters(cur) && spin_wait.spin() {
                        cur = self.0.load(Ordering::Relaxed);
                        continue;
                    }
                }
            }

            match on_waiting_how(Phase::from_bits_truncate(cur)) {
                LockNature::None => {
                    fence(Ordering::Acquire);
                    return LockResult::None(Phase::from_bits_truncate(cur));
                }

                LockNature::Write => {
                    if cur & WRITE_WAITER_BIT == 0 {
                        match self.0.compare_exchange_weak(
                            cur,
                            cur | WRITE_WAITER_BIT,
                            Ordering::Relaxed,
                            Ordering::Relaxed,
                        ) {
                            Err(x) => {
                                cur = x;
                                continue;
                            }
                            Ok(_) => cur |= WRITE_WAITER_BIT,
                        }
                    }

                    if let Some(lock) = wait_as_writer_then_wake_with_lock(&self.0, cur, &how) {
                        #[cfg(feature = "lock_statistics")]
                        {
                            WRITE_WAIT_SUCCESSES.fetch_add(1, Ordering::Relaxed);
                            if how(Phase::from_bits_truncate(cur)) != LockNature::Write {
                                ADDAPTATIVE_WAIT_SUCCESSES.fetch_add(1, Ordering::Relaxed);
                            }
                        }
                        return lock;
                    } else {
                        #[cfg(feature = "lock_statistics")]
                        {
                            WRITE_WAIT_FAILURES.fetch_add(1, Ordering::Relaxed);
                        }
                    }
                }
                LockNature::Read => {
                    if cur & READ_WAITER_BIT == 0 {
                        match self.0.compare_exchange_weak(
                            cur,
                            cur | READ_WAITER_BIT,
                            Ordering::Relaxed,
                            Ordering::Relaxed,
                        ) {
                            Err(x) => {
                                cur = x;
                                continue;
                            }
                            Ok(_) => cur |= READ_WAITER_BIT,
                        }
                    }

                    if let Some(lock) = wait_as_reader_then_wake_with_lock(&self.0, cur, &how) {
                        #[cfg(feature = "lock_statistics")]
                        {
                            READ_WAIT_SUCCESSES.fetch_add(1, Ordering::Relaxed);
                            if how(Phase::from_bits_truncate(cur)) != LockNature::Read {
                                ADDAPTATIVE_WAIT_SUCCESSES.fetch_add(1, Ordering::Relaxed);
                            }
                        }
                        return lock;
                    } else {
                        #[cfg(feature = "lock_statistics")]
                        {
                            READ_WAIT_FAILURES.fetch_add(1, Ordering::Relaxed);
                        }
                    }
                }
            }
            spin_wait.reset();
            cur = self.0.load(Ordering::Relaxed);
        }
    }

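    // Fast path: assume the phase equals `hint`; for a read or write request try a
    // single weak CAS from that value, and for LockNature::None just perform an
    // acquire load. On failure, return the freshly observed state to the caller.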
    #[inline(always)]
    fn optimistic_lock(
        &self,
        how: impl Fn(Phase) -> LockNature,
        hint: Phase,
    ) -> Result<LockResult<ReadLock<'_>, Lock<'_>>, u32> {
        let mut cur = hint.bits();
        match how(hint) {
            LockNature::None => {
                cur = self.0.load(Ordering::Acquire);
                let p = Phase::from_bits_truncate(cur);
                if let LockNature::None = how(p) {
                    return Ok(LockResult::None(p));
                }
            }
            LockNature::Write => {
                match self.0.compare_exchange_weak(
                    cur,
                    cur | LOCKED_BIT,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => return Ok(LockResult::Write(Lock::new(&self.0, cur))),
                    Err(x) => {
                        cur = x;
                    }
                }
            }
            LockNature::Read => {
                match self.0.compare_exchange_weak(
                    cur,
                    cur + READER_UNITY,
                    Ordering::Acquire,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => {
                        return Ok(LockResult::Read(ReadLock::new(&self.0, cur)));
                    }
                    Err(x) => {
                        cur = x;
                    }
                }
            }
        }
        Err(cur)
    }
}

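// Increments the reader count with a CAS loop, spinning without yielding between
// attempts; gives up and returns the observed state once `shall_continue` is false.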
#[inline(always)]
fn read_lock(
    futex: &Futex,
    shall_continue: impl Fn(u32) -> bool,
    mut cur: u32,
) -> Result<ReadLock<'_>, u32> {
    let mut inner_spin_wait = SpinWait::new();

    loop {
        match futex.compare_exchange_weak(
            cur,
            cur + READER_UNITY,
            Ordering::Acquire,
            Ordering::Relaxed,
        ) {
            Ok(_) => {
                return Ok(ReadLock::new(&futex, cur));
            }
            Err(_) => {
                inner_spin_wait.spin_no_yield();
                cur = futex.load(Ordering::Relaxed);
                if !shall_continue(cur) {
                    break;
                }
            }
        }
    }
    Err(cur)
}

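// Parks the thread as a write waiter; once woken it owns the write lock, which is
// converted to a read lock or released if the requested nature has changed meanwhile.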
#[cold]
fn wait_as_writer_then_wake_with_lock(
    futex: &Futex,
    cur: u32,
    how: impl Fn(Phase) -> LockNature,
) -> Option<LockResult<ReadLock<'_>, Lock<'_>>> {
    debug_assert_ne!(cur & WRITE_WAITER_BIT, 0);

    if futex.compare_and_wait_as_writer(cur) {
        let cur = futex.load(Ordering::Relaxed);

        assert_ne!(cur & LOCKED_BIT, 0);

        let lock = Lock::new(&futex, cur);

        match how(Phase::from_bits_truncate(cur)) {
            LockNature::Write => return Some(LockResult::Write(lock)),

            LockNature::Read => {
                #[cfg(feature = "lock_statistics")]
                {
                    LATE_ADDAPTATIONS.fetch_add(1, Ordering::Relaxed);
                }
                return Some(LockResult::Read(
                    lock.into_read_lock(Phase::from_bits_truncate(cur)),
                ));
            }
            LockNature::None => {
                #[cfg(feature = "lock_statistics")]
                {
                    LATE_ADDAPTATIONS.fetch_add(1, Ordering::Relaxed);
                }
                return Some(LockResult::None(Phase::from_bits_truncate(cur)));
            }
        }
    }
    None
}

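// Parks the thread as a read waiter; once woken it holds a read lock, which is
// dropped again (and None is returned) if a write lock is now required instead.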
#[cold]
fn wait_as_reader_then_wake_with_lock(
    futex: &Futex,
    cur: u32,
    how: impl Fn(Phase) -> LockNature,
) -> Option<LockResult<ReadLock<'_>, Lock<'_>>> {
    debug_assert_ne!(cur & READ_WAITER_BIT, 0);

    if futex.compare_and_wait_as_reader(cur) {
        let cur = futex.load(Ordering::Relaxed);

        assert_ne!(cur & (READER_BITS | READER_OVERF), 0);

        let lock = ReadLock::new(&futex, cur);

        match how(Phase::from_bits_truncate(cur)) {
            LockNature::Read => return Some(LockResult::Read(lock)),
            LockNature::None => {
                #[cfg(feature = "lock_statistics")]
                {
                    LATE_ADDAPTATIONS.fetch_add(1, Ordering::Relaxed);
                }
                return Some(LockResult::None(Phase::from_bits_truncate(cur)));
            }
            LockNature::Write => (),
        }
    }
    None
}

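// Called by a writer that set LOCKED_BIT while readers were still present: spins
// until the readers are gone (Ok), or gives the lock back, registers as a write
// waiter, and reports the new state (Err).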
#[inline(always)]
fn wait_for_readers(futex: &Futex, mut cur: u32) -> Result<Lock<'_>, u32> {
    // wait for readers to release the lock
    let mut spinwait = SpinWait::new();
    while spinwait.spin() {
        cur = futex.load(Ordering::Acquire);
        if has_no_readers(cur) {
            return Ok(Lock::new(&futex, cur));
        }
    }

    loop {
        match futex.compare_exchange_weak(
            cur,
            (cur | WRITE_WAITER_BIT) & !LOCKED_BIT,
            Ordering::Relaxed,
            Ordering::Relaxed,
        ) {
            Err(x) => {
                cur = x;
                if has_no_readers(cur) {
                    fence(Ordering::Acquire);
                    return Ok(Lock::new(&futex, cur));
                }
            }
            Ok(_) => {
                cur = (cur | WRITE_WAITER_BIT) & !LOCKED_BIT;
                break;
            }
        }
    }
    Err(cur)
}