// sc_transaction_pool/graph/tracked_map.rs

use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
20use std::{
21 collections::HashMap,
22 sync::{
23 atomic::{AtomicIsize, Ordering as AtomicOrdering},
24 Arc,
25 },
26};
27
/// Something that can report its (approximate) size in bytes.
///
/// Used by [`TrackedMap`] write access to maintain a running total of the
/// sizes of the stored values.
pub trait Size {
	fn size(&self) -> usize;
}
32
/// A `HashMap` wrapper that tracks its total size and entry count.
///
/// Both counters are maintained with relaxed atomics by
/// [`TrackedMapWriteAccess`] and may transiently disagree with the map
/// contents when readers race with writers; the read-side accessors clamp
/// them to zero.
#[derive(Debug)]
pub struct TrackedMap<K, V> {
	index: Arc<RwLock<HashMap<K, V>>>,
	// Running total of `Size::size()` over stored values. `isize` so racy
	// interleavings may dip below zero without wrapping.
	bytes: AtomicIsize,
	// Running entry count; same clamping caveat as `bytes`.
	length: AtomicIsize,
}
42
43impl<K, V> Default for TrackedMap<K, V> {
44 fn default() -> Self {
45 Self { index: Arc::new(HashMap::default().into()), bytes: 0.into(), length: 0.into() }
46 }
47}
48
49impl<K, V> Clone for TrackedMap<K, V>
50where
51 K: Clone,
52 V: Clone,
53{
54 fn clone(&self) -> Self {
55 Self {
56 index: Arc::from(RwLock::from(self.index.read().clone())),
57 bytes: self.bytes.load(AtomicOrdering::Relaxed).into(),
58 length: self.length.load(AtomicOrdering::Relaxed).into(),
59 }
60 }
61}
62
63impl<K, V> TrackedMap<K, V> {
64 pub fn len(&self) -> usize {
66 std::cmp::max(self.length.load(AtomicOrdering::Relaxed), 0) as usize
67 }
68
69 pub fn bytes(&self) -> usize {
71 std::cmp::max(self.bytes.load(AtomicOrdering::Relaxed), 0) as usize
72 }
73
74 pub fn read(&self) -> TrackedMapReadAccess<K, V> {
76 TrackedMapReadAccess { inner_guard: self.index.read() }
77 }
78
79 pub fn write(&self) -> TrackedMapWriteAccess<K, V> {
81 TrackedMapWriteAccess {
82 inner_guard: self.index.write(),
83 bytes: &self.bytes,
84 length: &self.length,
85 }
86 }
87}
88
89impl<K: Clone, V: Clone> TrackedMap<K, V> {
90 pub fn clone_map(&self) -> HashMap<K, V> {
92 self.index.read().clone()
93 }
94}
95
/// Read-only access to the map, holding the read lock for the guard's
/// lifetime.
pub struct TrackedMapReadAccess<'a, K, V> {
	inner_guard: RwLockReadGuard<'a, HashMap<K, V>>,
}
99
100impl<'a, K, V> TrackedMapReadAccess<'a, K, V>
101where
102 K: Eq + std::hash::Hash,
103{
104 pub fn contains_key(&self, key: &K) -> bool {
106 self.inner_guard.contains_key(key)
107 }
108
109 pub fn get(&self, key: &K) -> Option<&V> {
111 self.inner_guard.get(key)
112 }
113
114 pub fn values(&self) -> std::collections::hash_map::Values<K, V> {
116 self.inner_guard.values()
117 }
118}
119
/// Exclusive access to the map, holding the write lock for the guard's
/// lifetime. Mutations update the owner's byte/length counters.
pub struct TrackedMapWriteAccess<'a, K, V> {
	// Counters borrowed from the owning `TrackedMap`.
	bytes: &'a AtomicIsize,
	length: &'a AtomicIsize,
	inner_guard: RwLockWriteGuard<'a, HashMap<K, V>>,
}
125
126impl<'a, K, V> TrackedMapWriteAccess<'a, K, V>
127where
128 K: Eq + std::hash::Hash,
129 V: Size,
130{
131 pub fn insert(&mut self, key: K, val: V) -> Option<V> {
133 let new_bytes = val.size();
134 self.bytes.fetch_add(new_bytes as isize, AtomicOrdering::Relaxed);
135 self.length.fetch_add(1, AtomicOrdering::Relaxed);
136 self.inner_guard.insert(key, val).inspect(|old_val| {
137 self.bytes.fetch_sub(old_val.size() as isize, AtomicOrdering::Relaxed);
138 self.length.fetch_sub(1, AtomicOrdering::Relaxed);
139 })
140 }
141
142 pub fn remove(&mut self, key: &K) -> Option<V> {
144 let val = self.inner_guard.remove(key);
145 if let Some(size) = val.as_ref().map(Size::size) {
146 self.bytes.fetch_sub(size as isize, AtomicOrdering::Relaxed);
147 self.length.fetch_sub(1, AtomicOrdering::Relaxed);
148 }
149 val
150 }
151
152 pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
154 self.inner_guard.get_mut(key)
155 }
156}
157
#[cfg(test)]
mod tests {
	use super::*;

	// Integer "payload": size is the value divided by 10, so the byte
	// accounting below is easy to compute by hand.
	impl Size for i32 {
		fn size(&self) -> usize {
			*self as usize / 10
		}
	}

	#[test]
	fn basic() {
		let map = TrackedMap::default();

		// 10 -> 1 byte, 20 -> 2 bytes.
		map.write().insert(5, 10);
		map.write().insert(6, 20);
		assert_eq!((map.bytes(), map.len()), (3, 2));

		// Replacing key 6 swaps its 2 bytes for 3: total 1 + 3 = 4,
		// length unchanged.
		map.write().insert(6, 30);
		assert_eq!((map.bytes(), map.len()), (4, 2));

		// Removing key 6 drops its 3 bytes and one entry.
		map.write().remove(&6);
		assert_eq!((map.bytes(), map.len()), (1, 1));
	}
}