sp_trie/cache/
shared_cache.rs

// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Provides the [`SharedNodeCache`], the [`SharedValueCache`] and the [`SharedTrieCache`]
//! that combines both caches and is exported to the outside.

use super::{CacheSize, NodeCached};
use hash_db::Hasher;
use nohash_hasher::BuildNoHashHasher;
use parking_lot::{Mutex, RwLock, RwLockWriteGuard};
use schnellru::LruMap;
use std::{
	collections::{hash_map::Entry as SetEntry, HashMap},
	hash::{BuildHasher, Hasher as _},
	sync::{Arc, LazyLock},
};
use trie_db::{node::NodeOwned, CachedValue};

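/// A process-wide, lazily-seeded random state used to build every hasher in this module.
///
/// Sharing a single state keeps the hashes computed for a [`ValueCacheKey`] comparable
/// across threads, while the random seeding makes the hash function resistant to
/// collision attacks driven by untrusted storage keys.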
static RANDOM_STATE: LazyLock<ahash::RandomState> = LazyLock::new(|| {
	use rand::Rng;
	let mut rng = rand::thread_rng();
	ahash::RandomState::generate_with(rng.gen(), rng.gen(), rng.gen(), rng.gen())
});

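/// The limiter used by the [`LruMap`] inside [`SharedNodeCache`].
///
/// It tracks the heap size of the cached nodes and caps how many items a single
/// `update` round is allowed to evict.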
pub struct SharedNodeCacheLimiter {
	/// The maximum size (in bytes) the cache can hold inline.
	///
	/// This space is always consumed whether there are any items in the map or not.
	max_inline_size: usize,

	/// The maximum size (in bytes) the cache can hold on the heap.
	max_heap_size: usize,

	/// The current size (in bytes) of data allocated by this cache on the heap.
	///
	/// This doesn't include the size of the map itself.
	heap_size: usize,

	/// A counter with the number of elements that got evicted from the cache.
	///
	/// Reset to zero before every update.
	items_evicted: usize,

	/// The maximum number of elements that we allow to be evicted.
	///
	/// Reset on every update.
	max_items_evicted: usize,
}

impl<H> schnellru::Limiter<H, NodeOwned<H>> for SharedNodeCacheLimiter
where
	H: AsRef<[u8]>,
{
	type KeyToInsert<'a> = H;
	type LinkType = u32;

	#[inline]
	fn is_over_the_limit(&self, _length: usize) -> bool {
		// Once we hit the limit of max items evicted this will return `false` and prevent
		// any further evictions, but this is fine because the outer loop which inserts
		// items into this cache will just detect this and stop inserting new items.
		self.items_evicted <= self.max_items_evicted && self.heap_size > self.max_heap_size
	}

	#[inline]
	fn on_insert(
		&mut self,
		_length: usize,
		key: Self::KeyToInsert<'_>,
		node: NodeOwned<H>,
	) -> Option<(H, NodeOwned<H>)> {
		let new_item_heap_size = node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>();
		if new_item_heap_size > self.max_heap_size {
			// Item's too big to add even if the cache's empty; bail.
			return None
		}

		self.heap_size += new_item_heap_size;
		Some((key, node))
	}

	#[inline]
	fn on_replace(
		&mut self,
		_length: usize,
		_old_key: &mut H,
		_new_key: H,
		old_node: &mut NodeOwned<H>,
		new_node: &mut NodeOwned<H>,
	) -> bool {
		debug_assert_eq!(_old_key.as_ref(), _new_key.as_ref());

		let new_item_heap_size = new_node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>();
		if new_item_heap_size > self.max_heap_size {
			// Item's too big to add even if the cache's empty; bail.
			return false
		}

		let old_item_heap_size = old_node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>();
		self.heap_size = self.heap_size - old_item_heap_size + new_item_heap_size;
		true
	}

	#[inline]
	fn on_cleared(&mut self) {
		self.heap_size = 0;
	}

	#[inline]
	fn on_removed(&mut self, _: &mut H, node: &mut NodeOwned<H>) {
		self.heap_size -= node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>();
		self.items_evicted += 1;
	}

	#[inline]
	fn on_grow(&mut self, new_memory_usage: usize) -> bool {
		new_memory_usage <= self.max_inline_size
	}
}

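/// The limiter used by the [`LruMap`] inside [`SharedValueCache`].
///
/// Besides tracking heap usage and capping evictions per `update` round, it also
/// deduplicates storage keys: all cached entries for the same storage key share a
/// single `Arc<[u8]>` allocation, memoized in `known_storage_keys`.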
pub struct SharedValueCacheLimiter {
	/// The maximum size (in bytes) the cache can hold inline.
	///
	/// This space is always consumed whether there are any items in the map or not.
	max_inline_size: usize,

	/// The maximum size (in bytes) the cache can hold on the heap.
	max_heap_size: usize,

	/// The current size (in bytes) of data allocated by this cache on the heap.
	///
	/// This doesn't include the size of the map itself.
	heap_size: usize,

	/// A set with all of the keys deduplicated to save on memory.
	known_storage_keys: HashMap<Arc<[u8]>, (), ahash::RandomState>,

	/// A counter with the number of elements that got evicted from the cache.
	///
	/// Reset to zero before every update.
	items_evicted: usize,

	/// The maximum number of elements that we allow to be evicted.
	///
	/// Reset on every update.
	max_items_evicted: usize,
}

impl<H> schnellru::Limiter<ValueCacheKey<H>, CachedValue<H>> for SharedValueCacheLimiter
where
	H: AsRef<[u8]>,
{
	type KeyToInsert<'a> = ValueCacheKey<H>;
	type LinkType = u32;

	#[inline]
	fn is_over_the_limit(&self, _length: usize) -> bool {
		self.items_evicted <= self.max_items_evicted && self.heap_size > self.max_heap_size
	}

	#[inline]
	fn on_insert(
		&mut self,
		_length: usize,
		mut key: Self::KeyToInsert<'_>,
		value: CachedValue<H>,
	) -> Option<(ValueCacheKey<H>, CachedValue<H>)> {
		match self.known_storage_keys.entry(key.storage_key.clone()) {
			SetEntry::Vacant(entry) => {
				let new_item_heap_size = key.storage_key.len();
				if new_item_heap_size > self.max_heap_size {
					// Item's too big to add even if the cache's empty; bail.
					return None
				}

				self.heap_size += new_item_heap_size;
				entry.insert(());
			},
			SetEntry::Occupied(entry) => {
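				// The key is already known; reuse the memoized `Arc` so that all cached
				// entries for this storage key share a single allocation.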
				key.storage_key = entry.key().clone();
			},
		}

		Some((key, value))
	}

	#[inline]
	fn on_replace(
		&mut self,
		_length: usize,
		_old_key: &mut ValueCacheKey<H>,
		_new_key: ValueCacheKey<H>,
		_old_value: &mut CachedValue<H>,
		_new_value: &mut CachedValue<H>,
	) -> bool {
		debug_assert_eq!(_new_key.storage_key, _old_key.storage_key);
		true
	}

	#[inline]
	fn on_removed(&mut self, key: &mut ValueCacheKey<H>, _: &mut CachedValue<H>) {
		if Arc::strong_count(&key.storage_key) == 2 {
			// There are only two instances of this key:
			//   1) one memoized in `known_storage_keys`,
			//   2) one inside the map.
			//
			// This means that after this remove goes through the `Arc` will be deallocated.
			self.heap_size -= key.storage_key.len();
			self.known_storage_keys.remove(&key.storage_key);
		}
		self.items_evicted += 1;
	}

	#[inline]
	fn on_cleared(&mut self) {
		self.heap_size = 0;
		self.known_storage_keys.clear();
	}

	#[inline]
	fn on_grow(&mut self, new_memory_usage: usize) -> bool {
		new_memory_usage <= self.max_inline_size
	}
}

type SharedNodeCacheMap<H> =
	LruMap<H, NodeOwned<H>, SharedNodeCacheLimiter, schnellru::RandomState>;

/// The shared node cache.
///
/// Internally this stores all cached nodes in a [`LruMap`]. It ensures that, when the cache
/// is updated, it stays within its allowed bounds.
pub(super) struct SharedNodeCache<H>
where
	H: AsRef<[u8]>,
{
	/// The cached nodes, ordered by least recently used.
	pub(super) lru: SharedNodeCacheMap<H>,
}

impl<H: AsRef<[u8]> + Eq + std::hash::Hash> SharedNodeCache<H> {
	/// Create a new instance.
	fn new(max_inline_size: usize, max_heap_size: usize) -> Self {
		Self {
			lru: LruMap::new(SharedNodeCacheLimiter {
				max_inline_size,
				max_heap_size,
				heap_size: 0,
				items_evicted: 0,
				max_items_evicted: 0, // Will be set during `update`.
			}),
		}
	}

	/// Update the cache with the `list` of nodes which were either newly added or accessed.
	pub fn update(&mut self, list: impl IntoIterator<Item = (H, NodeCached<H>)>) {
		let mut access_count = 0;
		let mut add_count = 0;

		self.lru.limiter_mut().items_evicted = 0;
		self.lru.limiter_mut().max_items_evicted =
			self.lru.len() * 100 / super::SHARED_NODE_CACHE_MAX_REPLACE_PERCENT;

		for (key, cached_node) in list {
			if cached_node.is_from_shared_cache {
				if self.lru.get(&key).is_some() {
					access_count += 1;

					if access_count >= super::SHARED_NODE_CACHE_MAX_PROMOTED_KEYS {
						// Stop when we've promoted a large enough number of items.
						break
					}

					continue
				}
			}

			self.lru.insert(key, cached_node.node);
			add_count += 1;

			if self.lru.limiter().items_evicted > self.lru.limiter().max_items_evicted {
				// Stop when we've evicted a big enough chunk of the shared cache.
				break
			}
		}

		tracing::debug!(
			target: super::LOG_TARGET,
			"Updated the shared node cache: {} accesses, {} new values, {}/{} evicted (length = {}, inline size={}/{}, heap size={}/{})",
			access_count,
			add_count,
			self.lru.limiter().items_evicted,
			self.lru.limiter().max_items_evicted,
			self.lru.len(),
			self.lru.memory_usage(),
			self.lru.limiter().max_inline_size,
			self.lru.limiter().heap_size,
			self.lru.limiter().max_heap_size,
		);
	}

	/// Reset the cache.
	fn reset(&mut self) {
		self.lru.clear();
	}
}

/// The hash of [`ValueCacheKey`].
#[derive(PartialEq, Eq, Clone, Copy, Hash)]
#[repr(transparent)]
pub struct ValueCacheKeyHash(u64);

impl ValueCacheKeyHash {
	pub fn raw(self) -> u64 {
		self.0
	}

	pub fn from_hasher_and_storage_key(
		mut hasher: impl std::hash::Hasher,
		storage_key: &[u8],
	) -> Self {
		hasher.write(storage_key);

		Self(hasher.finish())
	}
}

impl nohash_hasher::IsEnabled for ValueCacheKeyHash {}

/// The key type that is being used to address a [`CachedValue`].
#[derive(Eq)]
pub(super) struct ValueCacheKey<H> {
	/// The storage root of the trie this key belongs to.
	pub storage_root: H,
	/// The key to access the value in the storage.
	pub storage_key: Arc<[u8]>,
	/// The hash that identifies this instance of `storage_root` and `storage_key`.
	pub hash: ValueCacheKeyHash,
}

/// A borrowed variant of [`ValueCacheKey`].
pub(super) struct ValueCacheRef<'a, H> {
	/// The storage root of the trie this key belongs to.
	pub storage_root: H,
	/// The key to access the value in the storage.
	pub storage_key: &'a [u8],
	/// The hash that identifies this instance of `storage_root` and `storage_key`.
	pub hash: ValueCacheKeyHash,
}

impl<'a, H> ValueCacheRef<'a, H> {
	pub fn new(storage_key: &'a [u8], storage_root: H) -> Self
	where
		H: AsRef<[u8]>,
	{
		let hash = ValueCacheKey::<H>::hash_data(&storage_key, &storage_root);
		Self { storage_root, storage_key, hash }
	}
}

impl<'a, H> From<ValueCacheRef<'a, H>> for ValueCacheKey<H> {
	fn from(value: ValueCacheRef<'a, H>) -> Self {
		ValueCacheKey {
			storage_root: value.storage_root,
			storage_key: value.storage_key.into(),
			hash: value.hash,
		}
	}
}

impl<'a, H: std::hash::Hash> std::hash::Hash for ValueCacheRef<'a, H> {
	fn hash<Hasher: std::hash::Hasher>(&self, state: &mut Hasher) {
		self.hash.hash(state)
	}
}

impl<'a, H> PartialEq<ValueCacheKey<H>> for ValueCacheRef<'a, H>
where
	H: AsRef<[u8]>,
{
	fn eq(&self, rhs: &ValueCacheKey<H>) -> bool {
		self.storage_root.as_ref() == rhs.storage_root.as_ref() &&
			self.storage_key == &*rhs.storage_key
	}
}

impl<H> ValueCacheKey<H> {
	/// Constructs [`Self`] from the given `storage_key` and `storage_root`.
	#[cfg(test)] // Only used in tests.
	pub fn new_value(storage_key: impl Into<Arc<[u8]>>, storage_root: H) -> Self
	where
		H: AsRef<[u8]>,
	{
		let storage_key = storage_key.into();
		let hash = Self::hash_data(&storage_key, &storage_root);
		Self { storage_root, storage_key, hash }
	}

	/// Returns a hasher prepared to build the final hash to identify [`Self`].
	///
	/// See [`Self::hash_data`] for building the hash directly.
	pub fn hash_partial_data(storage_root: &H) -> impl std::hash::Hasher + Clone
	where
		H: AsRef<[u8]>,
	{
		let mut hasher = RANDOM_STATE.build_hasher();
		hasher.write(storage_root.as_ref());
		hasher
	}

	/// Hash the `key` and `storage_root` that identify [`Self`].
	///
	/// Returns a [`ValueCacheKeyHash`] representing the unique hash for the given inputs.
	pub fn hash_data(key: &[u8], storage_root: &H) -> ValueCacheKeyHash
	where
		H: AsRef<[u8]>,
	{
		let hasher = Self::hash_partial_data(storage_root);

		ValueCacheKeyHash::from_hasher_and_storage_key(hasher, key)
	}

	/// Checks whether the key is equal to the given `storage_key` and `storage_root`.
	#[inline]
	pub fn is_eq(&self, storage_root: &H, storage_key: &[u8]) -> bool
	where
		H: PartialEq,
	{
		self.storage_root == *storage_root && *self.storage_key == *storage_key
	}
}

// Implement manually so that only `hash` is accessed.
impl<H: std::hash::Hash> std::hash::Hash for ValueCacheKey<H> {
	fn hash<Hasher: std::hash::Hasher>(&self, state: &mut Hasher) {
		self.hash.hash(state)
	}
}

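// The hash is precomputed and stored inside the key itself, so the value cache map can
// use a pass-through `nohash` hasher instead of rehashing the key on every lookup.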
impl<H> nohash_hasher::IsEnabled for ValueCacheKey<H> {}

// Implement manually to not have to compare `hash`.
impl<H: PartialEq> PartialEq for ValueCacheKey<H> {
	#[inline]
	fn eq(&self, other: &Self) -> bool {
		self.is_eq(&other.storage_root, &other.storage_key)
	}
}

type SharedValueCacheMap<H> = schnellru::LruMap<
	ValueCacheKey<H>,
	CachedValue<H>,
	SharedValueCacheLimiter,
	BuildNoHashHasher<ValueCacheKey<H>>,
>;

/// The shared value cache.
///
/// The cache ensures that it stays within the configured size bounds.
pub(super) struct SharedValueCache<H>
where
	H: AsRef<[u8]>,
{
	/// The cached values, ordered by least recently used.
	pub(super) lru: SharedValueCacheMap<H>,
}

impl<H: Eq + std::hash::Hash + Clone + Copy + AsRef<[u8]>> SharedValueCache<H> {
	/// Create a new instance.
	fn new(max_inline_size: usize, max_heap_size: usize) -> Self {
		Self {
			lru: schnellru::LruMap::with_hasher(
				SharedValueCacheLimiter {
					max_inline_size,
					max_heap_size,
					heap_size: 0,
					known_storage_keys: HashMap::with_hasher(RANDOM_STATE.clone()),
					items_evicted: 0,
					max_items_evicted: 0, // Will be set during `update`.
				},
				Default::default(),
			),
		}
	}

	/// Update the cache with the `added` values and the `accessed` values.
	///
	/// The `added` values are the ones that have been collected by doing operations on the trie and
	/// now should be stored in the shared cache. The `accessed` values are only referenced by their
	/// [`ValueCacheKeyHash`] and represent the values that were retrieved from this shared cache.
	/// These `accessed` values are moved to the front of the internal [`LruMap`], just like the
	/// `added` ones.
	pub fn update(
		&mut self,
		added: impl IntoIterator<Item = (ValueCacheKey<H>, CachedValue<H>)>,
		accessed: impl IntoIterator<Item = ValueCacheKeyHash>,
	) {
		let mut access_count = 0;
		let mut add_count = 0;

		for hash in accessed {
			// Access each entry in the map to move it to the front.
			//
			// Since we are only comparing the hashes here it may lead us to promote the wrong
			// values as the most recently accessed ones. However, this is harmless as the only
			// consequence is that we may accidentally prune a recently used value too early.
			self.lru.get_by_hash(hash.raw(), |existing_key, _| existing_key.hash == hash);
			access_count += 1;
		}

		// Insert all of the new items which were *not* found in the shared cache.
		//
		// Limit how many items we'll replace in the shared cache in one go so that
		// we don't evict the whole shared cache, nor keep spinning our wheels
		// evicting items which we've added ourselves in previous iterations of this loop.

		self.lru.limiter_mut().items_evicted = 0;
		self.lru.limiter_mut().max_items_evicted =
			self.lru.len() * 100 / super::SHARED_VALUE_CACHE_MAX_REPLACE_PERCENT;

		for (key, value) in added {
			self.lru.insert(key, value);
			add_count += 1;

			if self.lru.limiter().items_evicted > self.lru.limiter().max_items_evicted {
				// Stop when we've evicted a big enough chunk of the shared cache.
				break
			}
		}

		tracing::debug!(
			target: super::LOG_TARGET,
			"Updated the shared value cache: {} accesses, {} new values, {}/{} evicted (length = {}, known_storage_keys = {}, inline size={}/{}, heap size={}/{})",
			access_count,
			add_count,
			self.lru.limiter().items_evicted,
			self.lru.limiter().max_items_evicted,
			self.lru.len(),
			self.lru.limiter().known_storage_keys.len(),
			self.lru.memory_usage(),
			self.lru.limiter().max_inline_size,
			self.lru.limiter().heap_size,
			self.lru.limiter().max_heap_size
		);
	}

	/// Reset the cache.
	fn reset(&mut self) {
		self.lru.clear();
	}
}

/// The inner of [`SharedTrieCache`].
pub(super) struct SharedTrieCacheInner<H: Hasher> {
	node_cache: SharedNodeCache<H::Out>,
	value_cache: SharedValueCache<H::Out>,
}

impl<H: Hasher> SharedTrieCacheInner<H> {
	/// Returns a reference to the [`SharedValueCache`].
	#[cfg(test)]
	pub(super) fn value_cache(&self) -> &SharedValueCache<H::Out> {
		&self.value_cache
	}

	/// Returns a mutable reference to the [`SharedValueCache`].
	pub(super) fn value_cache_mut(&mut self) -> &mut SharedValueCache<H::Out> {
		&mut self.value_cache
	}

	/// Returns a reference to the [`SharedNodeCache`].
	#[cfg(test)]
	pub(super) fn node_cache(&self) -> &SharedNodeCache<H::Out> {
		&self.node_cache
	}

	/// Returns a mutable reference to the [`SharedNodeCache`].
	pub(super) fn node_cache_mut(&mut self) -> &mut SharedNodeCache<H::Out> {
		&mut self.node_cache
	}
}

/// The shared trie cache.
///
/// It should be instantiated once per node. It holds the trie nodes and values of all
/// operations on the state. To avoid using all available memory, it stays within the
/// bounds given via the [`CacheSize`] at startup.
///
/// The instance of this object can be shared between multiple threads.
pub struct SharedTrieCache<H: Hasher> {
	inner: Arc<RwLock<SharedTrieCacheInner<H>>>,
}

impl<H: Hasher> Clone for SharedTrieCache<H> {
	fn clone(&self) -> Self {
		Self { inner: self.inner.clone() }
	}
}

impl<H: Hasher> SharedTrieCache<H> {
	/// Create a new [`SharedTrieCache`].
	pub fn new(cache_size: CacheSize) -> Self {
		let total_budget = cache_size.0;

		// Split our memory budget between the two types of caches.
		let value_cache_budget = (total_budget as f32 * 0.20) as usize; // 20% for the value cache
		let node_cache_budget = total_budget - value_cache_budget; // 80% for the node cache
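		// For example, a 1 GB total budget yields roughly 200 MB for the value cache
		// and 800 MB for the node cache; each budget is then split again below, with
		// ~70% earmarked for the map itself and the remainder for heap allocations.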

		// Split our memory budget between what we'll be holding inline in the map,
		// and what we'll be holding on the heap.
		let value_cache_inline_budget = (value_cache_budget as f32 * 0.70) as usize;
		let node_cache_inline_budget = (node_cache_budget as f32 * 0.70) as usize;

		// Calculate how much memory the maps will be allowed to hold inline given our budget.
		let value_cache_max_inline_size =
			SharedValueCacheMap::<H::Out>::memory_usage_for_memory_budget(
				value_cache_inline_budget,
			);

		let node_cache_max_inline_size =
			SharedNodeCacheMap::<H::Out>::memory_usage_for_memory_budget(node_cache_inline_budget);

		// And this is how much data we'll at most keep on the heap for each cache.
		let value_cache_max_heap_size = value_cache_budget - value_cache_max_inline_size;
		let node_cache_max_heap_size = node_cache_budget - node_cache_max_inline_size;

		tracing::debug!(
			target: super::LOG_TARGET,
			"Configured a shared trie cache with a budget of ~{} bytes (node_cache_max_inline_size = {}, node_cache_max_heap_size = {}, value_cache_max_inline_size = {}, value_cache_max_heap_size = {})",
			total_budget,
			node_cache_max_inline_size,
			node_cache_max_heap_size,
			value_cache_max_inline_size,
			value_cache_max_heap_size,
		);

		Self {
			inner: Arc::new(RwLock::new(SharedTrieCacheInner {
				node_cache: SharedNodeCache::new(
					node_cache_max_inline_size,
					node_cache_max_heap_size,
				),
				value_cache: SharedValueCache::new(
					value_cache_max_inline_size,
					value_cache_max_heap_size,
				),
			})),
		}
	}

	/// Create a new [`LocalTrieCache`](super::LocalTrieCache) instance from this shared cache.
	pub fn local_cache(&self) -> super::LocalTrieCache<H> {
		super::LocalTrieCache {
			shared: self.clone(),
			node_cache: Default::default(),
			value_cache: Default::default(),
			shared_value_cache_access: Mutex::new(super::ValueAccessSet::with_hasher(
				schnellru::ByLength::new(super::SHARED_VALUE_CACHE_MAX_PROMOTED_KEYS),
				Default::default(),
			)),
			stats: Default::default(),
		}
	}

	/// Get a copy of the node for `key`.
	///
	/// This will temporarily lock the shared cache for reading.
	///
	/// This doesn't change the least recently used order in the internal [`LruMap`].
	#[inline]
	pub fn peek_node(&self, key: &H::Out) -> Option<NodeOwned<H::Out>> {
		self.inner.read().node_cache.lru.peek(key).cloned()
	}

	/// Get a copy of the [`CachedValue`] for `key`.
	///
	/// This will temporarily lock the shared cache for reading.
	///
	/// This doesn't reorder any of the elements in the internal [`LruMap`].
	pub fn peek_value_by_hash(
		&self,
		hash: ValueCacheKeyHash,
		storage_root: &H::Out,
		storage_key: &[u8],
	) -> Option<CachedValue<H::Out>> {
		self.inner
			.read()
			.value_cache
			.lru
			.peek_by_hash(hash.0, |existing_key, _| existing_key.is_eq(storage_root, storage_key))
			.cloned()
	}

	/// Returns the used memory size of this cache in bytes.
	pub fn used_memory_size(&self) -> usize {
		let inner = self.inner.read();
		let value_cache_size =
			inner.value_cache.lru.memory_usage() + inner.value_cache.lru.limiter().heap_size;
		let node_cache_size =
			inner.node_cache.lru.memory_usage() + inner.node_cache.lru.limiter().heap_size;

		node_cache_size + value_cache_size
	}

	/// Reset the node cache.
	pub fn reset_node_cache(&self) {
		self.inner.write().node_cache.reset();
	}

	/// Reset the value cache.
	pub fn reset_value_cache(&self) {
		self.inner.write().value_cache.reset();
	}

	/// Reset the entire cache.
	pub fn reset(&self) {
		self.reset_node_cache();
		self.reset_value_cache();
	}

	/// Returns the read locked inner.
	#[cfg(test)]
	pub(super) fn read_lock_inner(
		&self,
	) -> parking_lot::RwLockReadGuard<'_, SharedTrieCacheInner<H>> {
		self.inner.read()
	}

	/// Returns the write locked inner.
	pub(super) fn write_lock_inner(&self) -> Option<RwLockWriteGuard<'_, SharedTrieCacheInner<H>>> {
		// This should never happen, but we *really* don't want to deadlock. So let's have it
		// timeout, just in case. At worst it'll do nothing, and at best it'll avert a catastrophe
		// and notify us that there's a problem.
		self.inner.try_write_for(super::SHARED_CACHE_WRITE_LOCK_TIMEOUT)
	}
}

#[cfg(test)]
mod tests {
	use super::*;
	use sp_core::H256 as Hash;

	#[test]
	fn shared_value_cache_works() {
		let mut cache = SharedValueCache::<sp_core::H256>::new(usize::MAX, 10 * 10);

		let key = vec![0; 10];

		let root0 = Hash::repeat_byte(1);
		let root1 = Hash::repeat_byte(2);

		cache.update(
			vec![
				(ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting),
				(ValueCacheKey::new_value(&key[..], root1), CachedValue::NonExisting),
			],
			vec![],
		);

		// Ensure that the basics are working
		assert_eq!(1, cache.lru.limiter_mut().known_storage_keys.len());
		assert_eq!(
			3, // Two instances inside the cache + one extra in `known_storage_keys`.
			Arc::strong_count(
				cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).unwrap().0
			)
		);
		assert_eq!(key.len(), cache.lru.limiter().heap_size);
		assert_eq!(cache.lru.len(), 2);
		assert_eq!(cache.lru.peek_newest().unwrap().0.storage_root, root1);
		assert_eq!(cache.lru.peek_oldest().unwrap().0.storage_root, root0);
		assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size);
		assert_eq!(cache.lru.limiter().heap_size, 10);

		// Just accessing a key should not change the size or the number of entries.
		cache.update(vec![], vec![ValueCacheKey::hash_data(&key[..], &root0)]);
		assert_eq!(1, cache.lru.limiter_mut().known_storage_keys.len());
		assert_eq!(
			3,
			Arc::strong_count(
				cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).unwrap().0
			)
		);
		assert_eq!(key.len(), cache.lru.limiter().heap_size);
		assert_eq!(cache.lru.len(), 2);
		assert_eq!(cache.lru.peek_newest().unwrap().0.storage_root, root0);
		assert_eq!(cache.lru.peek_oldest().unwrap().0.storage_root, root1);
		assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size);
		assert_eq!(cache.lru.limiter().heap_size, 10);

		// Updating the cache again with exactly the same data should not change anything.
		cache.update(
			vec![
				(ValueCacheKey::new_value(&key[..], root1), CachedValue::NonExisting),
				(ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting),
			],
			vec![],
		);
		assert_eq!(1, cache.lru.limiter_mut().known_storage_keys.len());
		assert_eq!(
			3,
			Arc::strong_count(
				cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).unwrap().0
			)
		);
		assert_eq!(key.len(), cache.lru.limiter().heap_size);
		assert_eq!(cache.lru.len(), 2);
		assert_eq!(cache.lru.peek_newest().unwrap().0.storage_root, root0);
		assert_eq!(cache.lru.peek_oldest().unwrap().0.storage_root, root1);
		assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size);
		assert_eq!(cache.lru.limiter().items_evicted, 0);
		assert_eq!(cache.lru.limiter().heap_size, 10);

		// Add 10 other entries and this should move out two of the initial entries.
		cache.update(
			(1..11)
				.map(|i| vec![i; 10])
				.map(|key| (ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting)),
			vec![],
		);

		assert_eq!(cache.lru.limiter().items_evicted, 2);
		assert_eq!(10, cache.lru.len());
		assert_eq!(10, cache.lru.limiter_mut().known_storage_keys.len());
		assert!(cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).is_none());
		assert_eq!(key.len() * 10, cache.lru.limiter().heap_size);
		assert_eq!(cache.lru.len(), 10);
		assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size);
		assert_eq!(cache.lru.limiter().heap_size, 100);

		assert!(matches!(
			cache.lru.peek(&ValueCacheKey::new_value(&[1; 10][..], root0)).unwrap(),
			CachedValue::<Hash>::NonExisting
		));

		assert!(cache.lru.peek(&ValueCacheKey::new_value(&[1; 10][..], root1)).is_none());

		assert!(cache.lru.peek(&ValueCacheKey::new_value(&key[..], root0)).is_none());
		assert!(cache.lru.peek(&ValueCacheKey::new_value(&key[..], root1)).is_none());

		cache.update(
			vec![(ValueCacheKey::new_value(vec![10; 10], root0), CachedValue::NonExisting)],
			vec![],
		);

		assert!(cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).is_none());
	}
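
	#[test]
	fn value_cache_key_hashing_is_consistent() {
		// A minimal sketch of the hashing contract: the two-step scheme
		// (`hash_partial_data` over the storage root, then
		// `from_hasher_and_storage_key` over the storage key) must agree with the
		// one-shot `hash_data`, since the former only exists so that hashing the
		// storage root can be amortized across many storage keys.
		let root = Hash::repeat_byte(3);
		let key = &b"example storage key"[..];

		let partial_hasher = ValueCacheKey::<Hash>::hash_partial_data(&root);
		let two_step = ValueCacheKeyHash::from_hasher_and_storage_key(partial_hasher, key);
		let one_shot = ValueCacheKey::<Hash>::hash_data(key, &root);

		assert_eq!(two_step.raw(), one_shot.raw());
	}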
}