
sc_transaction_pool/graph/tracked_map.rs

// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use std::{
	collections::HashMap,
	sync::{
		atomic::{AtomicIsize, Ordering as AtomicOrdering},
		Arc,
	},
};

/// Something that can report its size.
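///
/// # Example
///
/// An illustrative implementation (a sketch, not part of this module): a plain
/// byte blob reporting its length as its size.
///
/// ```ignore
/// impl Size for Vec<u8> {
/// 	fn size(&self) -> usize {
/// 		self.len()
/// 	}
/// }
/// ```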
pub trait Size {
	fn size(&self) -> usize;
}

/// Map with size tracking.
///
/// The reported size might be slightly off and is only an approximation.
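///
/// # Example
///
/// A minimal usage sketch mirroring the unit test below; the `i32` `Size`
/// implementation (size = value / 10) exists only in this module's tests.
///
/// ```ignore
/// let map: TrackedMap<u32, i32> = TrackedMap::default();
/// map.write().insert(5, 10);
/// assert_eq!(map.len(), 1);
/// assert_eq!(map.bytes(), 1);
/// assert!(map.read().contains_key(&5));
/// ```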
#[derive(Debug)]
pub struct TrackedMap<K, V> {
	index: Arc<RwLock<HashMap<K, V>>>,
	bytes: AtomicIsize,
	length: AtomicIsize,
}

impl<K, V> Default for TrackedMap<K, V> {
	fn default() -> Self {
		Self { index: Arc::new(HashMap::default().into()), bytes: 0.into(), length: 0.into() }
	}
}

impl<K, V> Clone for TrackedMap<K, V>
where
	K: Clone,
	V: Clone,
{
	fn clone(&self) -> Self {
		Self {
			index: Arc::from(RwLock::from(self.index.read().clone())),
			bytes: self.bytes.load(AtomicOrdering::Relaxed).into(),
			length: self.length.load(AtomicOrdering::Relaxed).into(),
		}
	}
}

impl<K, V> TrackedMap<K, V> {
	/// Current number of entries in the map.
	pub fn len(&self) -> usize {
		// The counters are only approximately maintained, so clamp at zero.
		std::cmp::max(self.length.load(AtomicOrdering::Relaxed), 0) as usize
	}

	/// Current total size (as reported by [`Size`]) of the content.
	pub fn bytes(&self) -> usize {
		std::cmp::max(self.bytes.load(AtomicOrdering::Relaxed), 0) as usize
	}

	/// Lock map for read.
	pub fn read(&self) -> TrackedMapReadAccess<K, V> {
		TrackedMapReadAccess { inner_guard: self.index.read() }
	}

	/// Lock map for write.
	pub fn write(&self) -> TrackedMapWriteAccess<K, V> {
		TrackedMapWriteAccess {
			inner_guard: self.index.write(),
			bytes: &self.bytes,
			length: &self.length,
		}
	}
}

impl<K: Clone, V: Clone> TrackedMap<K, V> {
	/// Clone the inner map.
	pub fn clone_map(&self) -> HashMap<K, V> {
		self.index.read().clone()
	}
}

pub struct TrackedMapReadAccess<'a, K, V> {
	inner_guard: RwLockReadGuard<'a, HashMap<K, V>>,
}

impl<'a, K, V> TrackedMapReadAccess<'a, K, V>
where
	K: Eq + std::hash::Hash,
{
	/// Returns true if the map contains the given key.
	pub fn contains_key(&self, key: &K) -> bool {
		self.inner_guard.contains_key(key)
	}

	/// Returns a reference to the value for the given key, if it exists.
	pub fn get(&self, key: &K) -> Option<&V> {
		self.inner_guard.get(key)
	}

	/// Returns an iterator over all values.
	pub fn values(&self) -> std::collections::hash_map::Values<K, V> {
		self.inner_guard.values()
	}
}

pub struct TrackedMapWriteAccess<'a, K, V> {
	bytes: &'a AtomicIsize,
	length: &'a AtomicIsize,
	inner_guard: RwLockWriteGuard<'a, HashMap<K, V>>,
}

impl<'a, K, V> TrackedMapWriteAccess<'a, K, V>
where
	K: Eq + std::hash::Hash,
	V: Size,
{
	/// Inserts a value and returns the previous one for that key (if any).
	pub fn insert(&mut self, key: K, val: V) -> Option<V> {
		// Account for the new value up front, then undo the part of the
		// accounting that belongs to a replaced value (if there was one).
		let new_bytes = val.size();
		self.bytes.fetch_add(new_bytes as isize, AtomicOrdering::Relaxed);
		self.length.fetch_add(1, AtomicOrdering::Relaxed);
		self.inner_guard.insert(key, val).inspect(|old_val| {
			self.bytes.fetch_sub(old_val.size() as isize, AtomicOrdering::Relaxed);
			self.length.fetch_sub(1, AtomicOrdering::Relaxed);
		})
	}

	/// Removes and returns the value for the given key (if any).
	pub fn remove(&mut self, key: &K) -> Option<V> {
		let val = self.inner_guard.remove(key);
		if let Some(size) = val.as_ref().map(Size::size) {
			self.bytes.fetch_sub(size as isize, AtomicOrdering::Relaxed);
			self.length.fetch_sub(1, AtomicOrdering::Relaxed);
		}
		val
	}

	/// Returns a mutable reference to the value for the given key, if it exists.
	pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
		self.inner_guard.get_mut(key)
	}
}

#[cfg(test)]
mod tests {

	use super::*;

	// Test-only size: an `i32` reports its value divided by ten as its size.
	impl Size for i32 {
		fn size(&self) -> usize {
			*self as usize / 10
		}
	}

	#[test]
	fn basic() {
		let map = TrackedMap::default();
		map.write().insert(5, 10);
		map.write().insert(6, 20);

		assert_eq!(map.bytes(), 3);
		assert_eq!(map.len(), 2);

		map.write().insert(6, 30);

		assert_eq!(map.bytes(), 4);
		assert_eq!(map.len(), 2);

		map.write().remove(&6);
		assert_eq!(map.bytes(), 1);
		assert_eq!(map.len(), 1);
	}
}