use std::ops::{AddAssign, SubAssign};
use std::sync::atomic::{AtomicI64 as StdAtomicI64, AtomicU64 as StdAtomicU64, Ordering};
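/// A numeric type that can back an atomic value: constructible from an
/// `i64`, convertible into an `f64`, and cheap to copy between threads.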
pub trait Number:
    Sized + AddAssign + SubAssign + PartialOrd + PartialEq + Copy + Send + Sync
{
    fn from_i64(v: i64) -> Self;
    fn into_f64(self) -> f64;
}
impl Number for i64 {
    #[inline]
    fn from_i64(v: i64) -> Self {
        v
    }

    #[inline]
    fn into_f64(self) -> f64 {
        self as f64
    }
}
impl Number for u64 {
    #[inline]
    fn from_i64(v: i64) -> Self {
        // Note: a negative `v` wraps around under the `as` cast.
        v as u64
    }

    #[inline]
    fn into_f64(self) -> f64 {
        self as f64
    }
}
impl Number for f64 {
    #[inline]
    fn from_i64(v: i64) -> Self {
        v as f64
    }

    #[inline]
    fn into_f64(self) -> f64 {
        self
    }
}
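/// An atomic cell holding a `Number`. All operations are thread-safe; plain
/// reads and writes use relaxed ordering.
///
/// A minimal usage sketch (marked `ignore` so it is not run as a doctest,
/// since the crate path is an assumption here):
///
/// ```ignore
/// let total = AtomicU64::new(0);
/// total.inc_by(5);
/// total.dec_by(2);
/// assert_eq!(total.get(), 3);
/// ```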
pub trait Atomic: Send + Sync {
    type T: Number;
    fn new(val: Self::T) -> Self;
    fn set(&self, val: Self::T);
    fn get(&self) -> Self::T;
    fn inc_by(&self, delta: Self::T);
    fn dec_by(&self, delta: Self::T);
}
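/// An `f64` that can be updated atomically. Rust's standard library has no
/// native atomic float, so the value is stored as its IEEE 754 bit pattern
/// in a `StdAtomicU64`.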
#[derive(Debug)]
pub struct AtomicF64 {
    inner: StdAtomicU64,
}

// Lossless bit casts between an `f64` and its raw `u64` representation.
#[inline]
fn u64_to_f64(val: u64) -> f64 {
    f64::from_bits(val)
}

#[inline]
fn f64_to_u64(val: f64) -> u64 {
    f64::to_bits(val)
}
impl Atomic for AtomicF64 {
    type T = f64;
    fn new(val: Self::T) -> AtomicF64 {
        AtomicF64 {
            inner: StdAtomicU64::new(f64_to_u64(val)),
        }
    }

    #[inline]
    fn set(&self, val: Self::T) {
        self.inner.store(f64_to_u64(val), Ordering::Relaxed);
    }

    #[inline]
    fn get(&self) -> Self::T {
        u64_to_f64(self.inner.load(Ordering::Relaxed))
    }

    #[inline]
    fn inc_by(&self, delta: Self::T) {
        // There is no atomic add for floats, so emulate one with a CAS loop:
        // read the current bits, add `delta`, and retry if another thread
        // updated the value in between.
        loop {
            let current = self.inner.load(Ordering::Acquire);
            let new = u64_to_f64(current) + delta;
            let result = self.inner.compare_exchange_weak(
                current,
                f64_to_u64(new),
                Ordering::Release,
                Ordering::Relaxed,
            );
            if result.is_ok() {
                return;
            }
        }
    }

    #[inline]
    fn dec_by(&self, delta: Self::T) {
        self.inc_by(-delta);
    }
}
impl AtomicF64 {
    /// Atomically swaps in `val` with the given `ordering`, returning the
    /// previous value.
    pub fn swap(&self, val: f64, ordering: Ordering) -> f64 {
        u64_to_f64(self.inner.swap(f64_to_u64(val), ordering))
    }
}
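/// An `i64` that can be updated atomically; a thin wrapper over the standard
/// library's `AtomicI64` with relaxed ordering throughout.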
#[derive(Debug)]
pub struct AtomicI64 {
    inner: StdAtomicI64,
}

impl Atomic for AtomicI64 {
    type T = i64;
    fn new(val: Self::T) -> AtomicI64 {
        AtomicI64 {
            inner: StdAtomicI64::new(val),
        }
    }

    #[inline]
    fn set(&self, val: Self::T) {
        self.inner.store(val, Ordering::Relaxed);
    }

    #[inline]
    fn get(&self) -> Self::T {
        self.inner.load(Ordering::Relaxed)
    }

    #[inline]
    fn inc_by(&self, delta: Self::T) {
        self.inner.fetch_add(delta, Ordering::Relaxed);
    }

    #[inline]
    fn dec_by(&self, delta: Self::T) {
        self.inner.fetch_sub(delta, Ordering::Relaxed);
    }
}
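/// A `u64` that can be updated atomically; a thin wrapper over the standard
/// library's `AtomicU64`, with a few extra methods that take an explicit
/// `Ordering`.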
#[derive(Debug)]
pub struct AtomicU64 {
    inner: StdAtomicU64,
}

impl Atomic for AtomicU64 {
    type T = u64;
    fn new(val: Self::T) -> AtomicU64 {
        AtomicU64 {
            inner: StdAtomicU64::new(val),
        }
    }

    #[inline]
    fn set(&self, val: Self::T) {
        self.inner.store(val, Ordering::Relaxed);
    }

    #[inline]
    fn get(&self) -> Self::T {
        self.inner.load(Ordering::Relaxed)
    }

    #[inline]
    fn inc_by(&self, delta: Self::T) {
        self.inc_by_with_ordering(delta, Ordering::Relaxed);
    }

    #[inline]
    fn dec_by(&self, delta: Self::T) {
        self.inner.fetch_sub(delta, Ordering::Relaxed);
    }
}
impl AtomicU64 {
    /// Weak compare-and-exchange on the underlying integer; may fail
    /// spuriously, so callers should retry in a loop.
    pub(crate) fn compare_exchange_weak(
        &self,
        current: u64,
        new: u64,
        success: Ordering,
        failure: Ordering,
    ) -> Result<u64, u64> {
        self.inner
            .compare_exchange_weak(current, new, success, failure)
    }

    /// Adds `delta` with a caller-chosen memory ordering.
    pub fn inc_by_with_ordering(&self, delta: u64, ordering: Ordering) {
        self.inner.fetch_add(delta, ordering);
    }

    /// Atomically swaps in `val`, returning the previous value.
    pub fn swap(&self, val: u64, ordering: Ordering) -> u64 {
        self.inner.swap(val, ordering)
    }
}
#[cfg(test)]
mod test {
    use std::f64::consts::PI;

    use super::*;

    #[test]
    fn test_atomic_f64() {
        let table: Vec<f64> = vec![0.0, 1.0, PI, f64::MIN, f64::MAX];
        for f in table {
            // Bit-pattern storage round-trips exactly.
            assert!((f - AtomicF64::new(f).get()).abs() < f64::EPSILON);
        }
    }
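
    // Exercise the CAS-based add path of `AtomicF64` under single-threaded
    // use; a small sanity check, not a concurrency test.
    #[test]
    fn test_atomic_f64_inc_dec() {
        let af64 = AtomicF64::new(0.0);
        af64.inc_by(1.5);
        af64.dec_by(0.5);
        assert!((af64.get() - 1.0).abs() < f64::EPSILON);
    }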
    #[test]
    fn test_atomic_i64() {
        let ai64 = AtomicI64::new(0);
        assert_eq!(ai64.get(), 0);
        ai64.inc_by(1);
        assert_eq!(ai64.get(), 1);
        ai64.inc_by(-5);
        assert_eq!(ai64.get(), -4);
    }
    #[test]
    fn test_atomic_u64() {
        let au64 = AtomicU64::new(0);
        assert_eq!(au64.get(), 0);
        au64.inc_by(123);
        assert_eq!(au64.get(), 123);
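        au64.dec_by(23);
        assert_eq!(au64.get(), 100);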
    }
}