referrerpolicy=no-referrer-when-downgrade

sp_trie/cache/
mod.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: Apache-2.0
5
6// Licensed under the Apache License, Version 2.0 (the "License");
7// you may not use this file except in compliance with the License.
8// You may obtain a copy of the License at
9//
10// 	http://www.apache.org/licenses/LICENSE-2.0
11//
12// Unless required by applicable law or agreed to in writing, software
13// distributed under the License is distributed on an "AS IS" BASIS,
14// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15// See the License for the specific language governing permissions and
16// limitations under the License.
17
18//! Trie Cache
19//!
20//! Provides an implementation of the [`TrieCache`](trie_db::TrieCache) trait.
21//! The implementation is split into three types [`SharedTrieCache`], [`LocalTrieCache`] and
22//! [`TrieCache`]. The [`SharedTrieCache`] is the instance that should be kept around for the entire
23//! lifetime of the node. It will store all cached trie nodes and values on a global level. Then
24//! there is the [`LocalTrieCache`] that should be kept around per state instance requested from the
25//! backend. As there are very likely multiple accesses to the state per instance, this
26//! [`LocalTrieCache`] is used to cache the nodes and the values before they are merged back to the
27//! shared instance. Last but not least there is the [`TrieCache`] that is being used per access to
28//! the state. It will use the [`SharedTrieCache`] and the [`LocalTrieCache`] to fulfill cache
29//! requests. If both of them don't provide the requested data it will be inserted into the
30//! [`LocalTrieCache`] and then later into the [`SharedTrieCache`].
31//!
32//! The [`SharedTrieCache`] is bound to some maximum number of bytes. It is ensured that it never
33//! runs above this limit. However as long as data is cached inside a [`LocalTrieCache`] it isn't
34//! taken into account when limiting the [`SharedTrieCache`]. This means that for the lifetime of a
35//! [`LocalTrieCache`] the actual memory usage could be above the allowed maximum.
36
37use crate::{Error, NodeCodec};
38use hash_db::Hasher;
39use metrics::{HitStatsSnapshot, TrieHitStatsSnapshot};
40use nohash_hasher::BuildNoHashHasher;
41use parking_lot::{Mutex, MutexGuard, RwLockWriteGuard};
42use schnellru::LruMap;
43use shared_cache::{ValueCacheKey, ValueCacheRef};
44use std::{
45	collections::HashMap,
46	sync::{
47		atomic::{AtomicU64, Ordering},
48		Arc,
49	},
50	time::Duration,
51};
52use trie_db::{node::NodeOwned, CachedValue};
53
54mod metrics;
55mod shared_cache;
56
57pub use shared_cache::SharedTrieCache;
58
59use self::shared_cache::ValueCacheKeyHash;
60
/// The target for the tracing/log output of this module.
const LOG_TARGET: &str = "trie-cache";

/// The maximum amount of time we'll wait trying to acquire the shared cache lock
/// when the local cache is dropped and synchronized with the shared cache.
///
/// This is just a failsafe; normally this should never trigger.
const SHARED_CACHE_WRITE_LOCK_TIMEOUT: Duration = Duration::from_millis(100);

/// The maximum number of existing keys in the shared cache that a single local cache
/// can promote to the front of the LRU cache in one go.
///
/// If we have a big shared cache and the local cache hits all of those keys we don't
/// want to spend forever bumping all of them.
const SHARED_NODE_CACHE_MAX_PROMOTED_KEYS: u32 = 1792;
/// Same as [`SHARED_NODE_CACHE_MAX_PROMOTED_KEYS`], but for the value cache.
const SHARED_VALUE_CACHE_MAX_PROMOTED_KEYS: u32 = 1792;

/// The maximum portion of the shared cache (in percent) that a single local
/// cache can replace in one go.
///
/// We don't want a single local cache instance to have the ability to replace
/// everything in the shared cache.
const SHARED_NODE_CACHE_MAX_REPLACE_PERCENT: usize = 33;
/// Same as [`SHARED_NODE_CACHE_MAX_REPLACE_PERCENT`], but for the value cache.
const SHARED_VALUE_CACHE_MAX_REPLACE_PERCENT: usize = 33;

/// The maximum inline capacity of the local cache, in bytes.
///
/// This is just an upper limit; since the maps are resized in powers of two
/// their actual size will most likely not exactly match this.
const LOCAL_NODE_CACHE_MAX_INLINE_SIZE: usize = 512 * 1024;
/// Same as [`LOCAL_NODE_CACHE_MAX_INLINE_SIZE`], but for the value cache.
const LOCAL_VALUE_CACHE_MAX_INLINE_SIZE: usize = 512 * 1024;

/// The maximum size of the memory allocated on the heap by the local cache, in bytes.
///
/// The size of the node cache should always be bigger than the value cache. The value
/// cache is only holding weak references to the actual values found in the nodes and
/// we account for the size of the node as part of the node cache.
const LOCAL_NODE_CACHE_MAX_HEAP_SIZE: usize = 8 * 1024 * 1024;
/// Same as [`LOCAL_NODE_CACHE_MAX_HEAP_SIZE`], but for the value cache.
const LOCAL_VALUE_CACHE_MAX_HEAP_SIZE: usize = 2 * 1024 * 1024;
103
/// The size of the shared cache, expressed as a maximum number of bytes.
#[derive(Debug, Clone, Copy)]
pub struct CacheSize(usize);

impl CacheSize {
	/// An unlimited cache size.
	///
	/// Internally represented as the maximum possible `usize`.
	pub const fn unlimited() -> Self {
		Self(usize::MAX)
	}

	/// A cache size `bytes` big.
	pub const fn new(bytes: usize) -> Self {
		Self(bytes)
	}
}
119
/// A limiter for the local node cache. This makes sure the local cache doesn't grow too big.
pub struct LocalNodeCacheLimiter {
	/// The current size (in bytes) of data allocated by this cache on the heap.
	///
	/// This doesn't include the size of the map itself.
	current_heap_size: usize,
	/// The configuration providing the limits enforced by this limiter.
	config: LocalNodeCacheConfig,
}

impl LocalNodeCacheLimiter {
	/// Creates a new limiter with the given configuration.
	pub fn new(config: LocalNodeCacheConfig) -> Self {
		Self { config, current_heap_size: 0 }
	}
}
134
135impl<H> schnellru::Limiter<H, NodeCached<H>> for LocalNodeCacheLimiter
136where
137	H: AsRef<[u8]> + std::fmt::Debug,
138{
139	type KeyToInsert<'a> = H;
140	type LinkType = u32;
141
142	#[inline]
143	fn is_over_the_limit(&self, length: usize) -> bool {
144		// Only enforce the limit if there's more than one element to make sure
145		// we can always add a new element to the cache.
146		if length <= 1 {
147			return false
148		}
149
150		self.current_heap_size > self.config.local_node_cache_max_heap_size
151	}
152
153	#[inline]
154	fn on_insert<'a>(
155		&mut self,
156		_length: usize,
157		key: H,
158		cached_node: NodeCached<H>,
159	) -> Option<(H, NodeCached<H>)> {
160		self.current_heap_size += cached_node.heap_size();
161		Some((key, cached_node))
162	}
163
164	#[inline]
165	fn on_replace(
166		&mut self,
167		_length: usize,
168		_old_key: &mut H,
169		_new_key: H,
170		old_node: &mut NodeCached<H>,
171		new_node: &mut NodeCached<H>,
172	) -> bool {
173		debug_assert_eq!(_old_key.as_ref().len(), _new_key.as_ref().len());
174		self.current_heap_size =
175			self.current_heap_size + new_node.heap_size() - old_node.heap_size();
176		true
177	}
178
179	#[inline]
180	fn on_removed(&mut self, _key: &mut H, cached_node: &mut NodeCached<H>) {
181		self.current_heap_size -= cached_node.heap_size();
182	}
183
184	#[inline]
185	fn on_cleared(&mut self) {
186		self.current_heap_size = 0;
187	}
188
189	#[inline]
190	fn on_grow(&mut self, new_memory_usage: usize) -> bool {
191		new_memory_usage <= self.config.local_node_cache_max_inline_size
192	}
193}
194
/// A limiter for the local value cache. This makes sure the local cache doesn't grow too big.
pub struct LocalValueCacheLimiter {
	/// The current size (in bytes) of data allocated by this cache on the heap.
	///
	/// This doesn't include the size of the map itself.
	current_heap_size: usize,

	/// The configuration providing the limits enforced by this limiter.
	config: LocalValueCacheConfig,
}

impl LocalValueCacheLimiter {
	/// Creates a new limiter with the given configuration.
	pub fn new(config: LocalValueCacheConfig) -> Self {
		Self { config, current_heap_size: 0 }
	}
}
211
212impl<H> schnellru::Limiter<ValueCacheKey<H>, CachedValue<H>> for LocalValueCacheLimiter
213where
214	H: AsRef<[u8]>,
215{
216	type KeyToInsert<'a> = ValueCacheRef<'a, H>;
217	type LinkType = u32;
218
219	#[inline]
220	fn is_over_the_limit(&self, length: usize) -> bool {
221		// Only enforce the limit if there's more than one element to make sure
222		// we can always add a new element to the cache.
223		if length <= 1 {
224			return false
225		}
226
227		self.current_heap_size > self.config.local_value_cache_max_heap_size
228	}
229
230	#[inline]
231	fn on_insert(
232		&mut self,
233		_length: usize,
234		key: Self::KeyToInsert<'_>,
235		value: CachedValue<H>,
236	) -> Option<(ValueCacheKey<H>, CachedValue<H>)> {
237		self.current_heap_size += key.storage_key.len();
238		Some((key.into(), value))
239	}
240
241	#[inline]
242	fn on_replace(
243		&mut self,
244		_length: usize,
245		_old_key: &mut ValueCacheKey<H>,
246		_new_key: ValueCacheRef<H>,
247		_old_value: &mut CachedValue<H>,
248		_new_value: &mut CachedValue<H>,
249	) -> bool {
250		debug_assert_eq!(_old_key.storage_key.len(), _new_key.storage_key.len());
251		true
252	}
253
254	#[inline]
255	fn on_removed(&mut self, key: &mut ValueCacheKey<H>, _: &mut CachedValue<H>) {
256		self.current_heap_size -= key.storage_key.len();
257	}
258
259	#[inline]
260	fn on_cleared(&mut self) {
261		self.current_heap_size = 0;
262	}
263
264	#[inline]
265	fn on_grow(&mut self, new_memory_usage: usize) -> bool {
266		new_memory_usage <= self.config.local_value_cache_max_inline_size
267	}
268}
269
270/// A struct to gather hit/miss stats to aid in debugging the performance of the cache.
271#[derive(Default)]
272struct HitStats {
273	shared_hits: AtomicU64,
274	shared_fetch_attempts: AtomicU64,
275	local_hits: AtomicU64,
276	local_fetch_attempts: AtomicU64,
277}
278
279impl HitStats {
280	/// Returns a snapshot of the hit/miss stats.
281	fn snapshot(&self) -> HitStatsSnapshot {
282		HitStatsSnapshot {
283			shared_hits: self.shared_hits.load(Ordering::Relaxed),
284			shared_fetch_attempts: self.shared_fetch_attempts.load(Ordering::Relaxed),
285			local_hits: self.local_hits.load(Ordering::Relaxed),
286			local_fetch_attempts: self.local_fetch_attempts.load(Ordering::Relaxed),
287		}
288	}
289}
290
291impl std::fmt::Display for HitStats {
292	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
293		let snapshot = self.snapshot();
294		write!(f, "{}", snapshot)
295	}
296}
297
298/// A struct to gather hit/miss stats for the node cache and the value cache.
299#[derive(Default)]
300struct TrieHitStats {
301	node_cache: HitStats,
302	value_cache: HitStats,
303}
304
305impl TrieHitStats {
306	/// Returns a snapshot of the hit/miss stats.
307	fn snapshot(&self) -> TrieHitStatsSnapshot {
308		TrieHitStatsSnapshot {
309			node_cache: self.node_cache.snapshot(),
310			value_cache: self.value_cache.snapshot(),
311		}
312	}
313
314	/// Adds the stats from snapshot to this one.
315	fn add_snapshot(&self, other: &TrieHitStatsSnapshot) {
316		self.node_cache
317			.local_fetch_attempts
318			.fetch_add(other.node_cache.local_fetch_attempts, Ordering::Relaxed);
319
320		self.node_cache
321			.shared_fetch_attempts
322			.fetch_add(other.node_cache.shared_fetch_attempts, Ordering::Relaxed);
323
324		self.node_cache
325			.local_hits
326			.fetch_add(other.node_cache.local_hits, Ordering::Relaxed);
327
328		self.node_cache
329			.shared_hits
330			.fetch_add(other.node_cache.shared_hits, Ordering::Relaxed);
331
332		self.value_cache
333			.local_fetch_attempts
334			.fetch_add(other.value_cache.local_fetch_attempts, Ordering::Relaxed);
335
336		self.value_cache
337			.shared_fetch_attempts
338			.fetch_add(other.value_cache.shared_fetch_attempts, Ordering::Relaxed);
339
340		self.value_cache
341			.local_hits
342			.fetch_add(other.value_cache.local_hits, Ordering::Relaxed);
343
344		self.value_cache
345			.shared_hits
346			.fetch_add(other.value_cache.shared_hits, Ordering::Relaxed);
347	}
348}
349
/// An internal struct to store the cached trie nodes.
pub(crate) struct NodeCached<H> {
	/// The cached node.
	pub node: NodeOwned<H>,
	/// Whether this node was fetched from the shared cache or not.
	///
	/// Used when merging back into the shared cache to distinguish genuinely new
	/// nodes from ones that merely need their LRU position bumped.
	pub is_from_shared_cache: bool,
}

impl<H> NodeCached<H> {
	/// Returns the number of bytes allocated on the heap by this node.
	///
	/// The inline size of the `NodeOwned` struct itself is subtracted, since the
	/// map storing this value accounts for it separately.
	/// NOTE(review): assumes `size_in_bytes()` always includes at least
	/// `size_of::<NodeOwned<H>>()`; otherwise this subtraction underflows — confirm
	/// against the `trie_db` implementation.
	fn heap_size(&self) -> usize {
		self.node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>()
	}
}
364
/// The LRU map of cached trie nodes used by [`LocalTrieCache`], keyed by node hash.
type NodeCacheMap<H> = LruMap<H, NodeCached<H>, LocalNodeCacheLimiter, schnellru::RandomState>;

/// The LRU map of cached trie values used by [`LocalTrieCache`].
///
/// A pass-through hasher is used since [`ValueCacheKey`] carries a precomputed
/// hash (see [`ValueCacheKeyHash`]).
type ValueCacheMap<H> = LruMap<
	ValueCacheKey<H>,
	CachedValue<H>,
	LocalValueCacheLimiter,
	BuildNoHashHasher<ValueCacheKey<H>>,
>;

/// The set of value-cache key hashes that were accessed in the shared cache,
/// bounded by entry count.
type ValueAccessSet =
	LruMap<ValueCacheKeyHash, (), schnellru::ByLength, BuildNoHashHasher<ValueCacheKeyHash>>;
376
/// The configuration of a [`LocalTrieCache`]'s value cache.
#[derive(Clone, Copy)]
pub struct LocalValueCacheConfig {
	/// The maximum size of the value cache on the heap.
	local_value_cache_max_heap_size: usize,
	/// The maximum size of the value cache in the inline storage.
	local_value_cache_max_inline_size: usize,
	/// The maximum number of keys that can be promoted to the front of the LRU cache.
	shared_value_cache_max_promoted_keys: u32,
	/// The maximum percentage of the shared cache that can be replaced, before giving up.
	shared_value_cache_max_replace_percent: usize,
}
388
/// The configuration of a [`LocalTrieCache`]'s node cache.
#[derive(Clone, Copy)]
pub struct LocalNodeCacheConfig {
	/// The maximum size of the node cache on the heap.
	local_node_cache_max_heap_size: usize,
	/// The maximum size of the node cache in the inline storage.
	local_node_cache_max_inline_size: usize,
	/// The maximum number of keys that can be promoted to the front of the LRU cache, before
	/// giving up.
	shared_node_cache_max_promoted_keys: u32,
	/// The maximum percentage of the shared cache that can be replaced, before giving up.
	shared_node_cache_max_replace_percent: usize,
}
401
402impl LocalNodeCacheConfig {
403	/// Creates a configuration that can be called from a trusted path and allows the local_cache
404	/// to grow to fit the needs, also everything is promoted to the shared cache.
405	///
406	/// This configuration is safe only for trusted paths because it allows the local cache
407	/// to grow up to the shared cache limits and it promotes all items into the shared cache.
408	/// This could lead to excessive memory usage if used in untrusted or uncontrolled environments.
409	/// It is intended for scenarios like block authoring or importing, where the operations
410	/// are bounded already and there are no risks of unbounded memory usage.
411	fn trusted(
412		local_node_cache_max_heap_size: usize,
413		local_node_cache_max_inline_size: usize,
414	) -> Self {
415		LocalNodeCacheConfig {
416			local_node_cache_max_heap_size: std::cmp::max(
417				local_node_cache_max_heap_size,
418				LOCAL_NODE_CACHE_MAX_HEAP_SIZE,
419			),
420			local_node_cache_max_inline_size: std::cmp::max(
421				local_node_cache_max_inline_size,
422				LOCAL_NODE_CACHE_MAX_INLINE_SIZE,
423			),
424			shared_node_cache_max_promoted_keys: u32::MAX,
425			shared_node_cache_max_replace_percent: 100,
426		}
427	}
428
429	/// Creates a configuration that can be called from an untrusted path.
430	///
431	/// It limits the local size of the cache and the amount of keys that can be promoted to the
432	/// shared cache.
433	fn untrusted() -> Self {
434		LocalNodeCacheConfig {
435			local_node_cache_max_inline_size: LOCAL_NODE_CACHE_MAX_INLINE_SIZE,
436			local_node_cache_max_heap_size: LOCAL_NODE_CACHE_MAX_HEAP_SIZE,
437			shared_node_cache_max_promoted_keys: SHARED_NODE_CACHE_MAX_PROMOTED_KEYS,
438			shared_node_cache_max_replace_percent: SHARED_NODE_CACHE_MAX_REPLACE_PERCENT,
439		}
440	}
441}
442
443impl LocalValueCacheConfig {
444	/// Creates a configuration that can be called from a trusted path and allows the local_cache
445	/// to grow to fit the needs, also everything is promoted to the shared cache.
446	///
447	/// This configuration is safe only for trusted paths because it allows the local cache
448	/// to grow up to the shared cache limits and it promotes all items into the shared cache.
449	/// This could lead to excessive memory usage if used in untrusted or uncontrolled environments.
450	/// It is intended for scenarios like block authoring or importing, where the operations
451	/// are bounded already and there are no risks of unbounded memory usage.
452	fn trusted(
453		local_value_cache_max_heap_size: usize,
454		local_value_cache_max_inline_size: usize,
455	) -> Self {
456		LocalValueCacheConfig {
457			shared_value_cache_max_promoted_keys: u32::MAX,
458			shared_value_cache_max_replace_percent: 100,
459			local_value_cache_max_inline_size: std::cmp::max(
460				local_value_cache_max_inline_size,
461				LOCAL_VALUE_CACHE_MAX_INLINE_SIZE,
462			),
463			local_value_cache_max_heap_size: std::cmp::max(
464				local_value_cache_max_heap_size,
465				LOCAL_VALUE_CACHE_MAX_HEAP_SIZE,
466			),
467		}
468	}
469
470	/// Creates a configuration that can be called from an untrusted path.
471	///
472	/// It limits the local size of the cache and the amount of keys that can be promoted to the
473	/// shared cache.
474	fn untrusted() -> Self {
475		LocalValueCacheConfig {
476			local_value_cache_max_inline_size: LOCAL_VALUE_CACHE_MAX_INLINE_SIZE,
477			local_value_cache_max_heap_size: LOCAL_VALUE_CACHE_MAX_HEAP_SIZE,
478			shared_value_cache_max_promoted_keys: SHARED_VALUE_CACHE_MAX_PROMOTED_KEYS,
479			shared_value_cache_max_replace_percent: SHARED_VALUE_CACHE_MAX_REPLACE_PERCENT,
480		}
481	}
482}
483
/// The local trie cache.
///
/// This cache should be used per state instance created by the backend. One state instance is
/// referring to the state of one block. It will cache all the accesses that are done to the state
/// which could not be fulfilled by the [`SharedTrieCache`]. These locally cached items are merged
/// back to the shared trie cache when this instance is dropped.
///
/// When using [`Self::as_trie_db_cache`] or [`Self::as_trie_db_mut_cache`], it will lock Mutexes.
/// So, it is important that these methods are not called multiple times, because they otherwise
/// deadlock.
pub struct LocalTrieCache<H: Hasher> {
	/// The shared trie cache that created this instance.
	shared: SharedTrieCache<H>,

	/// The local cache for the trie nodes.
	///
	/// Behind a `Mutex` because it is locked by [`Self::as_trie_db_cache`] and
	/// [`Self::as_trie_db_mut_cache`] through `&self`.
	node_cache: Mutex<NodeCacheMap<H::Out>>,

	/// The local cache for the values.
	value_cache: Mutex<ValueCacheMap<H::Out>>,

	/// Keeps track of all values accessed in the shared cache.
	///
	/// This will be used to ensure that these nodes are brought to the front of the lru when this
	/// local instance is merged back to the shared cache. This can actually lead to collision when
	/// two [`ValueCacheKey`]s with different storage roots and keys map to the same hash. However,
	/// as we only use this set to update the lru position it is fine, even if we bring the wrong
	/// value to the top. The important part is that we always get the correct value from the value
	/// cache for a given key.
	shared_value_cache_access: Mutex<ValueAccessSet>,
	/// The configuration for the value cache.
	value_cache_config: LocalValueCacheConfig,
	/// The configuration for the node cache.
	node_cache_config: LocalNodeCacheConfig,
	/// The stats for the cache.
	stats: TrieHitStats,
	/// Specifies if we are in a trusted path like block authoring and importing or not.
	///
	/// On untrusted paths the shared-cache write lock is voluntarily yielded while
	/// merging back (see the `Drop` implementation).
	trusted: bool,
}
522
impl<H: Hasher> LocalTrieCache<H> {
	/// Return self as a [`TrieDB`](trie_db::TrieDB) compatible cache.
	///
	/// The given `storage_root` needs to be the storage root of the trie this cache is used for.
	///
	/// This locks the internal value cache, shared-value-access set and node cache
	/// mutexes for the lifetime of the returned [`TrieCache`] — see the type-level
	/// docs about calling this multiple times.
	pub fn as_trie_db_cache(&self, storage_root: H::Out) -> TrieCache<'_, H> {
		let value_cache = ValueCache::ForStorageRoot {
			storage_root,
			local_value_cache: self.value_cache.lock(),
			shared_value_cache_access: self.shared_value_cache_access.lock(),
			buffered_value: None,
		};

		TrieCache {
			shared_cache: self.shared.clone(),
			local_cache: self.node_cache.lock(),
			value_cache,
			stats: &self.stats,
		}
	}

	/// Return self as [`TrieDBMut`](trie_db::TrieDBMut) compatible cache.
	///
	/// After finishing all operations with [`TrieDBMut`](trie_db::TrieDBMut) and having obtained
	/// the new storage root, [`TrieCache::merge_into`] should be called to update this local
	/// cache instance. If the function is not called, cached data is just thrown away and not
	/// propagated to the shared cache. So, accessing these new items will be slower, but nothing
	/// would break because of this.
	///
	/// Only the node cache mutex is locked here; the value cache starts out fresh
	/// (not bound to any storage root, as the new root is not known yet).
	pub fn as_trie_db_mut_cache(&self) -> TrieCache<'_, H> {
		TrieCache {
			shared_cache: self.shared.clone(),
			local_cache: self.node_cache.lock(),
			value_cache: ValueCache::Fresh(Default::default()),
			stats: &self.stats,
		}
	}
}
559
560impl<H: Hasher> Drop for LocalTrieCache<H> {
561	fn drop(&mut self) {
562		tracing::debug!(
563			target: LOG_TARGET,
564			"Local node trie cache dropped: {}",
565			self.stats.node_cache
566		);
567
568		tracing::debug!(
569			target: LOG_TARGET,
570			"Local value trie cache dropped: {}",
571			self.stats.value_cache
572		);
573
574		let mut shared_inner = match self.shared.write_lock_inner() {
575			Some(inner) => inner,
576			None => {
577				tracing::warn!(
578					target: LOG_TARGET,
579					"Timeout while trying to acquire a write lock for the shared trie cache"
580				);
581				return
582			},
583		};
584		let stats_snapshot = self.stats.snapshot();
585		shared_inner.stats_add_snapshot(&stats_snapshot);
586		let metrics = shared_inner.metrics().cloned();
587		metrics.as_ref().map(|metrics| metrics.observe_hits_stats(&stats_snapshot));
588		{
589			let _node_update_duration =
590				metrics.as_ref().map(|metrics| metrics.start_shared_node_update_timer());
591			let node_cache = self.node_cache.get_mut();
592
593			metrics
594				.as_ref()
595				.map(|metrics| metrics.observe_local_node_cache_length(node_cache.len()));
596
597			shared_inner.node_cache_mut().update(
598				node_cache.drain(),
599				&self.node_cache_config,
600				&metrics,
601			);
602		}
603
604		// Since the trie cache is not called from a time sensitive context like block authoring or
605		// block import give the option to a more important task to acquire the lock and do its
606		// job.
607		if !self.trusted {
608			RwLockWriteGuard::bump(&mut shared_inner);
609		}
610
611		{
612			let _node_update_duration =
613				metrics.as_ref().map(|metrics| metrics.start_shared_value_update_timer());
614			let value_cache = self.shared_value_cache_access.get_mut();
615			metrics
616				.as_ref()
617				.map(|metrics| metrics.observe_local_value_cache_length(value_cache.len()));
618
619			shared_inner.value_cache_mut().update(
620				self.value_cache.get_mut().drain(),
621				value_cache.drain().map(|(key, ())| key),
622				&self.value_cache_config,
623				&metrics,
624			);
625		}
626	}
627}
628
/// The abstraction of the value cache for the [`TrieCache`].
enum ValueCache<'a, H: Hasher> {
	/// The value cache is fresh, aka not yet associated to any storage root.
	/// This is used for example when a new trie is being built, to cache new values.
	Fresh(HashMap<Arc<[u8]>, CachedValue<H::Out>>),
	/// The value cache is already bound to a specific storage root.
	ForStorageRoot {
		/// Hashes of shared-cache entries that were accessed; used to bump their
		/// LRU position when the local cache is merged back.
		shared_value_cache_access: MutexGuard<'a, ValueAccessSet>,
		/// Guard over the owning [`LocalTrieCache`]'s value cache.
		local_value_cache: MutexGuard<'a, ValueCacheMap<H::Out>>,
		/// The storage root all cached values belong to.
		storage_root: H::Out,
		// The shared value cache needs to be temporarily locked when reading from it
		// so we need to clone the value that is returned, but we need to be able to
		// return a reference to the value, so we just buffer it here.
		buffered_value: Option<CachedValue<H::Out>>,
	},
}
645
impl<H: Hasher> ValueCache<'_, H> {
	/// Get the value for the given `key`.
	///
	/// Looks up the local cache first and falls back to the shared cache; every
	/// fetch attempt and hit is recorded in `stats`.
	fn get(
		&mut self,
		key: &[u8],
		shared_cache: &SharedTrieCache<H>,
		stats: &HitStats,
	) -> Option<&CachedValue<H::Out>> {
		stats.local_fetch_attempts.fetch_add(1, Ordering::Relaxed);

		match self {
			Self::Fresh(map) =>
				if let Some(value) = map.get(key) {
					stats.local_hits.fetch_add(1, Ordering::Relaxed);
					Some(value)
				} else {
					None
				},
			Self::ForStorageRoot {
				local_value_cache,
				shared_value_cache_access,
				storage_root,
				buffered_value,
			} => {
				// We first need to look up in the local cache and then the shared cache.
				// It can happen that some value is cached in the shared cache, but the
				// weak reference of the data can not be upgraded anymore. This for example
				// happens when the node is dropped that contains the strong reference to the data.
				//
				// So, the logic of the trie would lookup the data and the node and store both
				// in our local caches.

				let hash = ValueCacheKey::hash_data(key, storage_root);

				// Look up by precomputed hash; the closure guards against hash
				// collisions by comparing the actual root and key.
				// NOTE(review): `peek_by_hash` presumably doesn't bump the local
				// LRU position — confirm against schnellru's `peek_*` semantics.
				if let Some(value) = local_value_cache
					.peek_by_hash(hash.raw(), |existing_key, _| {
						existing_key.is_eq(storage_root, key)
					}) {
					stats.local_hits.fetch_add(1, Ordering::Relaxed);

					return Some(value)
				}

				stats.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed);
				if let Some(value) = shared_cache.peek_value_by_hash(hash, storage_root, key) {
					stats.shared_hits.fetch_add(1, Ordering::Relaxed);
					// Record the access so the shared entry's LRU position can be
					// bumped when this local cache is merged back.
					shared_value_cache_access.insert(hash, ());
					// Clone into the buffer so a reference can be returned after the
					// shared cache lock is released.
					*buffered_value = Some(value.clone());
					return buffered_value.as_ref()
				}

				None
			},
		}
	}

	/// Insert some new `value` under the given `key`.
	fn insert(&mut self, key: &[u8], value: CachedValue<H::Out>) {
		match self {
			Self::Fresh(map) => {
				map.insert(key.into(), value);
			},
			Self::ForStorageRoot { local_value_cache, storage_root, .. } => {
				local_value_cache.insert(ValueCacheRef::new(key, *storage_root), value);
			},
		}
	}
}
714
/// The actual [`TrieCache`](trie_db::TrieCache) implementation.
///
/// If this instance was created for using it with a [`TrieDBMut`](trie_db::TrieDBMut), it needs to
/// be merged back into the [`LocalTrieCache`] with [`Self::merge_into`] after all operations are
/// done.
pub struct TrieCache<'a, H: Hasher> {
	/// The shared cache, consulted when the local caches miss.
	shared_cache: SharedTrieCache<H>,
	/// Guard over the owning [`LocalTrieCache`]'s node cache.
	local_cache: MutexGuard<'a, NodeCacheMap<H::Out>>,
	/// The value cache, either fresh or bound to a storage root.
	value_cache: ValueCache<'a, H>,
	/// Hit/miss statistics, shared with the owning [`LocalTrieCache`].
	stats: &'a TrieHitStats,
}
726
727impl<'a, H: Hasher> TrieCache<'a, H> {
728	/// Merge this cache into the given [`LocalTrieCache`].
729	///
730	/// This function is only required to be called when this instance was created through
731	/// [`LocalTrieCache::as_trie_db_mut_cache`], otherwise this method is a no-op. The given
732	/// `storage_root` is the new storage root that was obtained after finishing all operations
733	/// using the [`TrieDBMut`](trie_db::TrieDBMut).
734	pub fn merge_into(self, local: &LocalTrieCache<H>, storage_root: H::Out) {
735		let ValueCache::Fresh(cache) = self.value_cache else { return };
736
737		if !cache.is_empty() {
738			let mut value_cache = local.value_cache.lock();
739			let partial_hash = ValueCacheKey::hash_partial_data(&storage_root);
740			cache.into_iter().for_each(|(k, v)| {
741				let hash = ValueCacheKeyHash::from_hasher_and_storage_key(partial_hash.clone(), &k);
742				let k = ValueCacheRef { storage_root, storage_key: &k, hash };
743				value_cache.insert(k, v);
744			});
745		}
746	}
747}
748
impl<'a, H: Hasher> trie_db::TrieCache<NodeCodec<H>> for TrieCache<'a, H> {
	/// Returns the cached node for `hash`, falling back to the shared cache and
	/// finally to `fetch_node` (the database) on a miss.
	fn get_or_insert_node(
		&mut self,
		hash: H::Out,
		fetch_node: &mut dyn FnMut() -> trie_db::Result<NodeOwned<H::Out>, H::Out, Error<H::Out>>,
	) -> trie_db::Result<&NodeOwned<H::Out>, H::Out, Error<H::Out>> {
		// The closure below only runs on a local cache miss, so a hit is detected
		// by the flag staying `true`.
		let mut is_local_cache_hit = true;
		self.stats.node_cache.local_fetch_attempts.fetch_add(1, Ordering::Relaxed);

		// First try to grab the node from the local cache.
		let node = self.local_cache.get_or_insert_fallible(hash, || {
			is_local_cache_hit = false;

			// It was not in the local cache; try the shared cache.
			self.stats.node_cache.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed);
			if let Some(node) = self.shared_cache.peek_node(&hash) {
				self.stats.node_cache.shared_hits.fetch_add(1, Ordering::Relaxed);
				tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache");

				return Ok(NodeCached::<H::Out> { node: node.clone(), is_from_shared_cache: true })
			}

			// It was not in the shared cache; try fetching it from the database.
			match fetch_node() {
				Ok(node) => {
					tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from database");
					Ok(NodeCached::<H::Out> { node, is_from_shared_cache: false })
				},
				Err(error) => {
					tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from database failed");
					Err(error)
				},
			}
		});

		if is_local_cache_hit {
			tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from local cache");
			self.stats.node_cache.local_hits.fetch_add(1, Ordering::Relaxed);
		}

		// `None` is only returned when the limiter refuses the insertion, which
		// cannot happen for the first element (see `is_over_the_limit`).
		Ok(&node?
			.expect("you can always insert at least one element into the local cache; qed")
			.node)
	}

	/// Returns the cached node for `hash`, consulting only the local and shared
	/// caches — the database is never hit here.
	fn get_node(&mut self, hash: &H::Out) -> Option<&NodeOwned<H::Out>> {
		let mut is_local_cache_hit = true;
		self.stats.node_cache.local_fetch_attempts.fetch_add(1, Ordering::Relaxed);

		// First try to grab the node from the local cache.
		let cached_node = self.local_cache.get_or_insert_fallible(*hash, || {
			is_local_cache_hit = false;

			// It was not in the local cache; try the shared cache.
			self.stats.node_cache.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed);
			if let Some(node) = self.shared_cache.peek_node(&hash) {
				self.stats.node_cache.shared_hits.fetch_add(1, Ordering::Relaxed);
				tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache");

				Ok(NodeCached::<H::Out> { node: node.clone(), is_from_shared_cache: true })
			} else {
				tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from cache failed");

				// The `Err` aborts the insertion, so a miss is not cached locally.
				Err(())
			}
		});

		if is_local_cache_hit {
			tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from local cache");
			self.stats.node_cache.local_hits.fetch_add(1, Ordering::Relaxed);
		}

		match cached_node {
			Ok(Some(cached_node)) => Some(&cached_node.node),
			Ok(None) => {
				// See `get_or_insert_node`: the limiter always admits the first element.
				unreachable!(
					"you can always insert at least one element into the local cache; qed"
				);
			},
			Err(()) => None,
		}
	}

	/// Looks up the cached value for `key` via the [`ValueCache`] abstraction.
	fn lookup_value_for_key(&mut self, key: &[u8]) -> Option<&CachedValue<H::Out>> {
		let res = self.value_cache.get(key, &self.shared_cache, &self.stats.value_cache);

		tracing::trace!(
			target: LOG_TARGET,
			key = ?sp_core::hexdisplay::HexDisplay::from(&key),
			found = res.is_some(),
			"Looked up value for key",
		);

		res
	}

	/// Caches `data` for `key` in the value cache.
	fn cache_value_for_key(&mut self, key: &[u8], data: CachedValue<H::Out>) {
		tracing::trace!(
			target: LOG_TARGET,
			key = ?sp_core::hexdisplay::HexDisplay::from(&key),
			"Caching value for key",
		);

		self.value_cache.insert(key, data);
	}
}
855
#[cfg(test)]
mod tests {
	use super::*;
	use rand::{thread_rng, Rng};
	use sp_core::H256;
	use trie_db::{Bytes, Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut};

	type MemoryDB = crate::MemoryDB<sp_core::Blake2Hasher>;
	type Layout = crate::LayoutV1<sp_core::Blake2Hasher>;
	type Cache = super::SharedTrieCache<sp_core::Blake2Hasher>;
	type Recorder = crate::recorder::Recorder<sp_core::Blake2Hasher>;

	// Key/value pairs inserted by `create_trie`. Two of the values are 64 bytes,
	// long enough to not be stored inline in the trie node (cf. the comment in
	// `trie_db_mut_cache_works`).
	const TEST_DATA: &[(&[u8], &[u8])] =
		&[(b"key1", b"val1"), (b"key2", &[2; 64]), (b"key3", b"val3"), (b"key4", &[4; 64])];
	// Shared cache budget in bytes used by most of the tests below.
	const CACHE_SIZE_RAW: usize = 1024 * 10;
	const CACHE_SIZE: CacheSize = CacheSize::new(CACHE_SIZE_RAW);

	/// Build an in-memory trie containing `TEST_DATA` and return the backing
	/// database together with the trie root.
	fn create_trie() -> (MemoryDB, TrieHash<Layout>) {
		let mut db = MemoryDB::default();
		let mut root = Default::default();

		{
			let mut trie = TrieDBMutBuilder::<Layout>::new(&mut db, &mut root).build();
			for (k, v) in TEST_DATA {
				trie.insert(k, v).expect("Inserts data");
			}
		}

		(db, root)
	}

	/// Reads populate the local cache first and only appear in the shared
	/// cache once the local cache is dropped; manually injected shared-cache
	/// entries are served on subsequent reads.
	#[test]
	fn basic_cache_works() {
		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE, None);
		let local_cache = shared_cache.local_cache_untrusted();

		{
			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();
			assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap());
		}

		// Local cache wasn't dropped yet, so there should be nothing in the shared caches.
		assert!(shared_cache.read_lock_inner().value_cache().lru.is_empty());
		assert!(shared_cache.read_lock_inner().node_cache().lru.is_empty());

		drop(local_cache);

		// Now we should have the cached items in the shared cache.
		assert!(shared_cache.read_lock_inner().node_cache().lru.len() >= 1);
		let cached_data = shared_cache
			.read_lock_inner()
			.value_cache()
			.lru
			.peek(&ValueCacheKey::new_value(TEST_DATA[0].0, root))
			.unwrap()
			.clone();
		assert_eq!(Bytes::from(TEST_DATA[0].1.to_vec()), cached_data.data().flatten().unwrap());

		let fake_data = Bytes::from(&b"fake_data"[..]);

		let local_cache = shared_cache.local_cache_untrusted();
		// Inject a fake value directly into the shared cache for TEST_DATA[1]
		// so we can verify reads are actually served from the cache.
		shared_cache.write_lock_inner().unwrap().value_cache_mut().lru.insert(
			ValueCacheKey::new_value(TEST_DATA[1].0, root),
			(fake_data.clone(), Default::default()).into(),
		);

		{
			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

			// We should now get the "fake_data", because we inserted this manually to the cache.
			assert_eq!(b"fake_data".to_vec(), trie.get(TEST_DATA[1].0).unwrap().unwrap());
		}
	}

	/// Values inserted through a `TrieDBMut` cache end up in the shared value
	/// cache after `merge_into`.
	#[test]
	fn trie_db_mut_cache_works() {
		let (mut db, root) = create_trie();

		let new_key = b"new_key".to_vec();
		// Use some long value to not have it inlined
		let new_value = vec![23; 64];

		let shared_cache = Cache::new(CACHE_SIZE, None);
		let mut new_root = root;

		{
			let local_cache = shared_cache.local_cache_untrusted();

			let mut cache = local_cache.as_trie_db_mut_cache();

			{
				let mut trie = TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
					.with_cache(&mut cache)
					.build();

				trie.insert(&new_key, &new_value).unwrap();
			}

			cache.merge_into(&local_cache, new_root);
		}

		// After the local cache is dropped, all changes should have been merged back to the shared
		// cache.
		let cached_data = shared_cache
			.read_lock_inner()
			.value_cache()
			.lru
			.peek(&ValueCacheKey::new_value(new_key, new_root))
			.unwrap()
			.clone();
		assert_eq!(Bytes::from(new_value), cached_data.data().flatten().unwrap());
	}

	/// Reading through both a cache and a recorder at the same time must still
	/// produce a complete storage proof.
	#[test]
	fn trie_db_cache_and_recorder_work_together() {
		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE, None);

		// Run several iterations so later runs are served from caches populated
		// by earlier ones; some caches are reset along the way.
		for i in 0..5 {
			// Clear some of the caches.
			if i == 2 {
				shared_cache.reset_node_cache();
			} else if i == 3 {
				shared_cache.reset_value_cache();
			}

			let local_cache = shared_cache.local_cache_untrusted();
			let recorder = Recorder::default();

			{
				let mut cache = local_cache.as_trie_db_cache(root);
				let mut recorder = recorder.as_trie_recorder(root);
				let trie = TrieDBBuilder::<Layout>::new(&db, &root)
					.with_cache(&mut cache)
					.with_recorder(&mut recorder)
					.build();

				for (key, value) in TEST_DATA {
					assert_eq!(*value, trie.get(&key).unwrap().unwrap());
				}
			}

			let storage_proof = recorder.drain_storage_proof();
			let memory_db: MemoryDB = storage_proof.into_memory_db();

			// The recorded proof alone must be enough to read all the data back.
			{
				let trie = TrieDBBuilder::<Layout>::new(&memory_db, &root).build();

				for (key, value) in TEST_DATA {
					assert_eq!(*value, trie.get(&key).unwrap().unwrap());
				}
			}
		}
	}

	/// Mutating the trie through both a cache and a recorder must produce a
	/// proof that replays to the same root.
	#[test]
	fn trie_db_mut_cache_and_recorder_work_together() {
		const DATA_TO_ADD: &[(&[u8], &[u8])] = &[(b"key11", &[45; 78]), (b"key33", &[78; 89])];

		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE, None);

		// Run several iterations so that later runs use data cached by earlier
		// ones; some caches are reset in between.
		for i in 0..5 {
			// Clear some of the caches.
			if i == 2 {
				shared_cache.reset_node_cache();
			} else if i == 3 {
				shared_cache.reset_value_cache();
			}

			let recorder = Recorder::default();
			let local_cache = shared_cache.local_cache_untrusted();
			let mut new_root = root;

			{
				let mut db = db.clone();
				let mut cache = local_cache.as_trie_db_cache(root);
				let mut recorder = recorder.as_trie_recorder(root);
				let mut trie = TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
					.with_cache(&mut cache)
					.with_recorder(&mut recorder)
					.build();

				for (key, value) in DATA_TO_ADD {
					trie.insert(key, value).unwrap();
				}
			}

			let storage_proof = recorder.drain_storage_proof();
			let mut memory_db: MemoryDB = storage_proof.into_memory_db();
			let mut proof_root = root;

			// Replay the same inserts against the proof database; it must end
			// up with the same root as the real database.
			{
				let mut trie =
					TrieDBMutBuilder::<Layout>::from_existing(&mut memory_db, &mut proof_root)
						.build();

				for (key, value) in DATA_TO_ADD {
					trie.insert(key, value).unwrap();
				}
			}

			assert_eq!(new_root, proof_root)
		}
	}

	/// Accessed entries become the most recently used items of the shared
	/// LRU caches.
	#[test]
	fn cache_lru_works() {
		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE, None);

		{
			let local_cache = shared_cache.local_cache_untrusted();

			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

			for (k, _) in TEST_DATA {
				trie.get(k).unwrap().unwrap();
			}
		}

		// Check that all items are there.
		assert!(shared_cache
			.read_lock_inner()
			.value_cache()
			.lru
			.iter()
			.map(|d| d.0)
			.all(|l| TEST_DATA.iter().any(|d| &*l.storage_key == d.0)));

		// Run this in a loop. The first time we check that with the filled value cache,
		// the expected values are at the top of the LRU.
		// The second run is using an empty value cache to ensure that we access the nodes.
		for _ in 0..2 {
			{
				let local_cache = shared_cache.local_cache_untrusted();

				let mut cache = local_cache.as_trie_db_cache(root);
				let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

				for (k, _) in TEST_DATA.iter().take(2) {
					trie.get(k).unwrap().unwrap();
				}
			}

			// Ensure that the accessed items are most recently used items of the shared value
			// cache.
			assert!(shared_cache
				.read_lock_inner()
				.value_cache()
				.lru
				.iter()
				.take(2)
				.map(|d| d.0)
				.all(|l| { TEST_DATA.iter().take(2).any(|d| &*l.storage_key == d.0) }));

			// Delete the value cache, so that we access the nodes.
			shared_cache.reset_value_cache();
		}

		let most_recently_used_nodes = shared_cache
			.read_lock_inner()
			.node_cache()
			.lru
			.iter()
			.map(|d| *d.0)
			.collect::<Vec<_>>();

		// Access the remaining keys so different nodes are touched.
		{
			let local_cache = shared_cache.local_cache_untrusted();

			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

			for (k, _) in TEST_DATA.iter().skip(2) {
				trie.get(k).unwrap().unwrap();
			}
		}

		// Ensure that the most recently used nodes changed as well.
		assert_ne!(
			most_recently_used_nodes,
			shared_cache
				.read_lock_inner()
				.node_cache()
				.lru
				.iter()
				.map(|d| *d.0)
				.collect::<Vec<_>>()
		);
	}

	/// Writing far more data than the cache budget must never push the shared
	/// cache above its configured size.
	#[test]
	fn cache_respects_bounds() {
		let (mut db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE, None);
		{
			let local_cache = shared_cache.local_cache_untrusted();

			let mut new_root = root;

			{
				let mut cache = local_cache.as_trie_db_cache(root);
				{
					let mut trie =
						TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
							.with_cache(&mut cache)
							.build();

					let value = vec![10u8; 100];
					// Ensure we add enough data that would overflow the cache.
					for i in 0..CACHE_SIZE_RAW / 100 * 2 {
						trie.insert(format!("key{}", i).as_bytes(), &value).unwrap();
					}
				}

				cache.merge_into(&local_cache, new_root);
			}
		}

		assert!(shared_cache.used_memory_size() < CACHE_SIZE_RAW);
	}

	/// An untrusted local cache only promotes a bounded subset of entries to
	/// the shared cache, while a trusted local cache promotes everything.
	#[test]
	fn test_trusted_works() {
		let (mut db, root) = create_trie();
		// Configure cache size to make sure it is large enough to hold all the data.
		let cache_size = CacheSize::new(1024 * 1024 * 1024);
		let num_test_keys: usize = 40000;
		let shared_cache = Cache::new(cache_size, None);

		// Create random keys, each consisting of 100 random bytes.
		let mut rng = thread_rng();
		let random_keys: Vec<Vec<u8>> =
			(0..num_test_keys).map(|_| (0..100).map(|_| rng.gen()).collect()).collect();

		let value = vec![10u8; 100];

		// Populate the trie using a local *untrusted* cache and confirm that not
		// everything ends up in the shared trie cache.
		let root = {
			let local_cache = shared_cache.local_cache_untrusted();

			let mut new_root = root;

			{
				let mut cache = local_cache.as_trie_db_mut_cache();
				{
					let mut trie =
						TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
							.with_cache(&mut cache)
							.build();

					// Insert all the random keys into the trie.
					for key in random_keys.iter() {
						trie.insert(key.as_ref(), &value).unwrap();
					}
				}

				cache.merge_into(&local_cache, new_root);
			}
			new_root
		};
		// The untrusted cache should have promoted only a small fraction of the keys.
		let shared_value_cache_len = shared_cache.read_lock_inner().value_cache().lru.len();
		assert!(shared_value_cache_len < num_test_keys / 10);

		// Read keys back and check the shared cache hits; we should see a lot of misses.
		let stats = read_to_check_cache(&shared_cache, &mut db, root, &random_keys, value.clone());
		assert_eq!(stats.value_cache.shared_hits, shared_value_cache_len as u64);

		assert_ne!(stats.value_cache.shared_fetch_attempts, stats.value_cache.shared_hits);
		assert_ne!(stats.node_cache.shared_fetch_attempts, stats.node_cache.shared_hits);

		// Update the keys in the trie and check on subsequent reads all reads hit the shared cache.
		let shared_value_cache_len = shared_cache.read_lock_inner().value_cache().lru.len();
		let new_value = vec![9u8; 100];
		let root = {
			// This time use a *trusted* local cache, which promotes everything.
			let local_cache = shared_cache.local_cache_trusted();

			let mut new_root = root;

			{
				let mut cache = local_cache.as_trie_db_mut_cache();
				{
					let mut trie =
						TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
							.with_cache(&mut cache)
							.build();

					// Update every key with the new value.
					for key in random_keys.iter() {
						trie.insert(key.as_ref(), &new_value).unwrap();
					}
				}

				cache.merge_into(&local_cache, new_root);
			}
			new_root
		};

		// Check on subsequent reads all reads hit the shared cache.
		let stats =
			read_to_check_cache(&shared_cache, &mut db, root, &random_keys, new_value.clone());

		assert_eq!(stats.value_cache.shared_fetch_attempts, stats.value_cache.shared_hits);
		assert_eq!(stats.node_cache.shared_fetch_attempts, stats.node_cache.shared_hits);

		assert_eq!(stats.value_cache.shared_fetch_attempts, stats.value_cache.local_fetch_attempts);
		assert_eq!(stats.node_cache.shared_fetch_attempts, stats.node_cache.local_fetch_attempts);

		// The length of the shared value cache should contain everything that existed before + all
		// keys that got updated with a trusted cache.
		assert_eq!(
			shared_cache.read_lock_inner().value_cache().lru.len(),
			shared_value_cache_len + num_test_keys
		);
	}

	// Helper function to read from the trie.
	//
	// Asserts every key maps to `expected_value` and returns the cache stats
	// collected by the local cache during the reads.
	fn read_to_check_cache(
		shared_cache: &Cache,
		db: &mut MemoryDB,
		root: H256,
		keys: &Vec<Vec<u8>>,
		expected_value: Vec<u8>,
	) -> TrieHitStatsSnapshot {
		let local_cache = shared_cache.local_cache_untrusted();
		let mut cache = local_cache.as_trie_db_cache(root);
		let trie = TrieDBBuilder::<Layout>::new(db, &root).with_cache(&mut cache).build();

		for key in keys.iter() {
			assert_eq!(trie.get(key.as_ref()).unwrap().unwrap(), expected_value);
		}
		local_cache.stats.snapshot()
	}
}