//! Provides the shared, process-wide part of the trie cache: LRU maps for trie
//! nodes and trie values, together with the limiters that keep both maps within
//! their configured memory budgets.

use super::{CacheSize, NodeCached};
use hash_db::Hasher;
use nohash_hasher::BuildNoHashHasher;
use parking_lot::{Mutex, RwLock, RwLockWriteGuard};
use schnellru::LruMap;
use std::{
	collections::{hash_map::Entry as SetEntry, HashMap},
	hash::{BuildHasher, Hasher as _},
	sync::{Arc, LazyLock},
};
use trie_db::{node::NodeOwned, CachedValue};

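// A single lazily-initialized `ahash` random state, shared by every hash map in this
// module so that they all key their hashes the same way. Seeding it randomly at
// startup hardens the maps against hash-collision (HashDoS) attacks.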
static RANDOM_STATE: LazyLock<ahash::RandomState> = LazyLock::new(|| {
	use rand::Rng;
	let mut rng = rand::thread_rng();
	ahash::RandomState::generate_with(rng.gen(), rng.gen(), rng.gen(), rng.gen())
});

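/// The limiter driving the eviction policy of the shared node cache.
///
/// Implements [`schnellru::Limiter`] and keeps the map within its configured
/// inline and heap memory budgets, while capping how many items a single
/// `update` is allowed to evict.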
pub struct SharedNodeCacheLimiter {
	/// The maximum memory (in bytes) the map may hold inline.
	///
	/// This space is consumed by the map itself, whether or not it holds any items.
	max_inline_size: usize,

	/// The maximum memory (in bytes) the cached nodes may occupy on the heap.
	max_heap_size: usize,

	/// The current heap usage (in bytes) of the cached nodes.
	///
	/// This does not include the size of the map itself.
	heap_size: usize,

	/// The number of items evicted during the current `update`.
	///
	/// Reset to zero before every update.
	items_evicted: usize,

	/// The maximum number of items a single `update` is allowed to evict.
	///
	/// Recalculated before every update.
	max_items_evicted: usize,
}

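// `schnellru::LruMap` drives this limiter: `on_insert`/`on_replace` may veto or
// rewrite an insertion, `on_removed` accounts for evictions, `is_over_the_limit`
// tells the map whether it must keep evicting, and `on_grow` bounds the inline
// allocation of the map itself.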
impl<H> schnellru::Limiter<H, NodeOwned<H>> for SharedNodeCacheLimiter
where
	H: AsRef<[u8]>,
{
	type KeyToInsert<'a> = H;
	type LinkType = u32;

	#[inline]
	fn is_over_the_limit(&self, _length: usize) -> bool {
		// Once the maximum number of evictions is reached this returns `false` and
		// stops any further eviction; the loop inserting new items detects this
		// condition and stops inserting, so the cap cannot be exceeded.
		self.items_evicted <= self.max_items_evicted && self.heap_size > self.max_heap_size
	}

	#[inline]
	fn on_insert(
		&mut self,
		_length: usize,
		key: Self::KeyToInsert<'_>,
		node: NodeOwned<H>,
	) -> Option<(H, NodeOwned<H>)> {
		// Only the part of the node living on the heap counts against the heap
		// budget; the inline part is covered by `max_inline_size`.
		let new_item_heap_size = node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>();
		if new_item_heap_size > self.max_heap_size {
			// The item is too big to fit even into an empty cache; bail out.
			return None
		}

		self.heap_size += new_item_heap_size;
		Some((key, node))
	}

	#[inline]
	fn on_replace(
		&mut self,
		_length: usize,
		_old_key: &mut H,
		_new_key: H,
		old_node: &mut NodeOwned<H>,
		new_node: &mut NodeOwned<H>,
	) -> bool {
		debug_assert_eq!(_old_key.as_ref(), _new_key.as_ref());

		let new_item_heap_size = new_node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>();
		if new_item_heap_size > self.max_heap_size {
			// The replacement is too big to fit even into an empty cache; refuse it.
			return false
		}

		let old_item_heap_size = old_node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>();
		self.heap_size = self.heap_size - old_item_heap_size + new_item_heap_size;
		true
	}

	#[inline]
	fn on_cleared(&mut self) {
		self.heap_size = 0;
	}

	#[inline]
	fn on_removed(&mut self, _: &mut H, node: &mut NodeOwned<H>) {
		self.heap_size -= node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>();
		self.items_evicted += 1;
	}

	#[inline]
	fn on_grow(&mut self, new_memory_usage: usize) -> bool {
		new_memory_usage <= self.max_inline_size
	}
}

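/// The limiter driving the eviction policy of the shared value cache.
///
/// In addition to enforcing the memory budgets it deduplicates the storage keys:
/// every distinct key is stored once in `known_storage_keys` and shared between
/// entries through an `Arc`.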
pub struct SharedValueCacheLimiter {
	/// The maximum memory (in bytes) the map may hold inline.
	///
	/// This space is consumed by the map itself, whether or not it holds any items.
	max_inline_size: usize,

	/// The maximum memory (in bytes) the cached values may occupy on the heap.
	max_heap_size: usize,

	/// The current heap usage (in bytes) of the cached values.
	///
	/// This does not include the size of the map itself.
	heap_size: usize,

	/// All storage keys currently held in the cache, deduplicated.
	known_storage_keys: HashMap<Arc<[u8]>, (), ahash::RandomState>,

	/// The number of items evicted during the current `update`.
	///
	/// Reset to zero before every update.
	items_evicted: usize,

	/// The maximum number of items a single `update` is allowed to evict.
	///
	/// Recalculated before every update.
	max_items_evicted: usize,
}

impl<H> schnellru::Limiter<ValueCacheKey<H>, CachedValue<H>> for SharedValueCacheLimiter
where
	H: AsRef<[u8]>,
{
	type KeyToInsert<'a> = ValueCacheKey<H>;
	type LinkType = u32;

	#[inline]
	fn is_over_the_limit(&self, _length: usize) -> bool {
		self.items_evicted <= self.max_items_evicted && self.heap_size > self.max_heap_size
	}

	#[inline]
	fn on_insert(
		&mut self,
		_length: usize,
		mut key: Self::KeyToInsert<'_>,
		value: CachedValue<H>,
	) -> Option<(ValueCacheKey<H>, CachedValue<H>)> {
		match self.known_storage_keys.entry(key.storage_key.clone()) {
			SetEntry::Vacant(entry) => {
				let new_item_heap_size = key.storage_key.len();
				if new_item_heap_size > self.max_heap_size {
					// The key is too big to fit even into an empty cache; bail out.
					return None
				}

				self.heap_size += new_item_heap_size;
				entry.insert(());
			},
			SetEntry::Occupied(entry) => {
				// The key is already known; swap in the canonical `Arc` so the
				// allocation is shared instead of duplicated.
				key.storage_key = entry.key().clone();
			},
		}

		Some((key, value))
	}

	#[inline]
	fn on_replace(
		&mut self,
		_length: usize,
		_old_key: &mut ValueCacheKey<H>,
		_new_key: ValueCacheKey<H>,
		_old_value: &mut CachedValue<H>,
		_new_value: &mut CachedValue<H>,
	) -> bool {
		debug_assert_eq!(_new_key.storage_key, _old_key.storage_key);
		true
	}

	#[inline]
	fn on_removed(&mut self, key: &mut ValueCacheKey<H>, _: &mut CachedValue<H>) {
		if Arc::strong_count(&key.storage_key) == 2 {
			// There are exactly two instances of this key:
			//   1) the one being removed from the LRU,
			//   2) the one held in `known_storage_keys`,
			// so no other entry shares this storage key and it can be dropped.
			self.heap_size -= key.storage_key.len();
			self.known_storage_keys.remove(&key.storage_key);
		}
		self.items_evicted += 1;
	}

	#[inline]
	fn on_cleared(&mut self) {
		self.heap_size = 0;
		self.known_storage_keys.clear();
	}

	#[inline]
	fn on_grow(&mut self, new_memory_usage: usize) -> bool {
		new_memory_usage <= self.max_inline_size
	}
}

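/// The map type backing the shared node cache.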
type SharedNodeCacheMap<H> =
	LruMap<H, NodeOwned<H>, SharedNodeCacheLimiter, schnellru::RandomState>;

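/// The shared node cache.
///
/// Internally this stores all cached nodes in an [`LruMap`] and, via its limiter,
/// ensures that the cache stays within its allowed memory bounds when updated.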
pub(super) struct SharedNodeCache<H>
where
	H: AsRef<[u8]>,
{
	/// The cached nodes, ordered by least recently used.
	pub(super) lru: SharedNodeCacheMap<H>,
}

impl<H: AsRef<[u8]> + Eq + std::hash::Hash> SharedNodeCache<H> {
	/// Create a new instance.
	fn new(max_inline_size: usize, max_heap_size: usize) -> Self {
		Self {
			lru: LruMap::new(SharedNodeCacheLimiter {
				max_inline_size,
				max_heap_size,
				heap_size: 0,
				items_evicted: 0,
				max_items_evicted: 0, // Will be set during `update`.
			}),
		}
	}

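	/// Update the cache with the `list` of nodes which were either newly added or accessed.
	///
	/// Nodes that already came from this shared cache are only promoted to the front
	/// of the LRU; everything else is inserted, with eviction capped by
	/// `max_items_evicted`.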
	pub fn update(&mut self, list: impl IntoIterator<Item = (H, NodeCached<H>)>) {
		let mut access_count = 0;
		let mut add_count = 0;

		self.lru.limiter_mut().items_evicted = 0;
		self.lru.limiter_mut().max_items_evicted =
			self.lru.len() * 100 / super::SHARED_NODE_CACHE_MAX_REPLACE_PERCENT;

		for (key, cached_node) in list {
			if cached_node.is_from_shared_cache {
				if self.lru.get(&key).is_some() {
					access_count += 1;

					if access_count >= super::SHARED_NODE_CACHE_MAX_PROMOTED_KEYS {
						// We've hit the limit of how many keys we can promote; stop.
						break
					}

					continue
				}
			}

			self.lru.insert(key, cached_node.node);
			add_count += 1;

			if self.lru.limiter().items_evicted > self.lru.limiter().max_items_evicted {
				// We've evicted too many elements; stop here or we may loop forever.
				break
			}
		}

		tracing::debug!(
			target: super::LOG_TARGET,
			"Updated the shared node cache: {} accesses, {} new values, {}/{} evicted (length = {}, inline size={}/{}, heap size={}/{})",
			access_count,
			add_count,
			self.lru.limiter().items_evicted,
			self.lru.limiter().max_items_evicted,
			self.lru.len(),
			self.lru.memory_usage(),
			self.lru.limiter().max_inline_size,
			self.lru.limiter().heap_size,
			self.lru.limiter().max_heap_size,
		);
	}

	/// Reset the cache.
	fn reset(&mut self) {
		self.lru.clear();
	}
}

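/// The hash of a [`ValueCacheKey`], precomputed so that map operations don't need
/// to rehash the full storage key.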
#[derive(PartialEq, Eq, Clone, Copy, Hash)]
#[repr(transparent)]
pub struct ValueCacheKeyHash(u64);

impl ValueCacheKeyHash {
	/// Returns the raw `u64` hash value.
	pub fn raw(self) -> u64 {
		self.0
	}
}

impl ValueCacheKeyHash {
	/// Builds the final hash by writing the `storage_key` into a hasher that was
	/// already primed with the storage root (see [`ValueCacheKey::hash_partial_data`]).
	pub fn from_hasher_and_storage_key(
		mut hasher: impl std::hash::Hasher,
		storage_key: &[u8],
	) -> Self {
		hasher.write(storage_key);

		Self(hasher.finish())
	}
}

impl nohash_hasher::IsEnabled for ValueCacheKeyHash {}

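/// The key type that addresses a [`CachedValue`]: a storage key under a given
/// storage root, plus its precomputed hash.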
#[derive(Eq)]
pub(super) struct ValueCacheKey<H> {
	/// The storage root of the trie this key belongs to.
	pub storage_root: H,
	/// The key to access the value in the storage.
	pub storage_key: Arc<[u8]>,
	/// The hash identifying this `storage_root`/`storage_key` pair.
	pub hash: ValueCacheKeyHash,
}

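/// A borrowed variant of [`ValueCacheKey`], used for lookups without allocating.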
pub(super) struct ValueCacheRef<'a, H> {
	/// The storage root of the trie this key belongs to.
	pub storage_root: H,
	/// The key to access the value in the storage.
	pub storage_key: &'a [u8],
	/// The hash identifying this `storage_root`/`storage_key` pair.
	pub hash: ValueCacheKeyHash,
}

impl<'a, H> ValueCacheRef<'a, H> {
	pub fn new(storage_key: &'a [u8], storage_root: H) -> Self
	where
		H: AsRef<[u8]>,
	{
		let hash = ValueCacheKey::<H>::hash_data(storage_key, &storage_root);
		Self { storage_root, storage_key, hash }
	}
}

impl<'a, H> From<ValueCacheRef<'a, H>> for ValueCacheKey<H> {
	fn from(value: ValueCacheRef<'a, H>) -> Self {
		ValueCacheKey {
			storage_root: value.storage_root,
			storage_key: value.storage_key.into(),
			hash: value.hash,
		}
	}
}

impl<'a, H: std::hash::Hash> std::hash::Hash for ValueCacheRef<'a, H> {
	fn hash<Hasher: std::hash::Hasher>(&self, state: &mut Hasher) {
		self.hash.hash(state)
	}
}

impl<'a, H> PartialEq<ValueCacheKey<H>> for ValueCacheRef<'a, H>
where
	H: AsRef<[u8]>,
{
	fn eq(&self, rhs: &ValueCacheKey<H>) -> bool {
		self.storage_root.as_ref() == rhs.storage_root.as_ref() &&
			self.storage_key == &*rhs.storage_key
	}
}

impl<H> ValueCacheKey<H> {
	/// Constructs a [`ValueCacheKey`] from its parts, computing the hash.
	#[cfg(test)]
	pub fn new_value(storage_key: impl Into<Arc<[u8]>>, storage_root: H) -> Self
	where
		H: AsRef<[u8]>,
	{
		let storage_key = storage_key.into();
		let hash = Self::hash_data(&storage_key, &storage_root);
		Self { storage_root, storage_key, hash }
	}

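	/// Returns a hasher primed to build the final hash identifying [`Self`].
	///
	/// The storage root is written into the hasher first, so the result can be
	/// cloned and reused to cheaply hash many storage keys under the same root. See
	/// [`Self::hash_data`] for computing the hash directly.
	///
	/// A sketch of the intended pattern (`keys` is a stand-in for any iterator of
	/// storage keys, not an item from this module):
	///
	/// ```ignore
	/// let partial = ValueCacheKey::hash_partial_data(&storage_root);
	/// for key in keys {
	///     // Cloning the primed hasher avoids rehashing the root for every key.
	///     let hash = ValueCacheKeyHash::from_hasher_and_storage_key(partial.clone(), key);
	/// }
	/// ```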
	pub fn hash_partial_data(storage_root: &H) -> impl std::hash::Hasher + Clone
	where
		H: AsRef<[u8]>,
	{
		let mut hasher = RANDOM_STATE.build_hasher();
		hasher.write(storage_root.as_ref());
		hasher
	}

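	/// Hash the `key` and `storage_root` that identify this value cache entry.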
	pub fn hash_data(key: &[u8], storage_root: &H) -> ValueCacheKeyHash
	where
		H: AsRef<[u8]>,
	{
		let hasher = Self::hash_partial_data(storage_root);

		ValueCacheKeyHash::from_hasher_and_storage_key(hasher, key)
	}

	/// Checks whether the key is equal to the given `storage_root` and `storage_key`.
	#[inline]
	pub fn is_eq(&self, storage_root: &H, storage_key: &[u8]) -> bool
	where
		H: PartialEq,
	{
		self.storage_root == *storage_root && *self.storage_key == *storage_key
	}
}

// Only the precomputed `hash` field is hashed, a single `u64` write, which is why
// `nohash_hasher::IsEnabled` can be implemented for this type.
impl<H: std::hash::Hash> std::hash::Hash for ValueCacheKey<H> {
	fn hash<Hasher: std::hash::Hasher>(&self, state: &mut Hasher) {
		self.hash.hash(state)
	}
}

impl<H> nohash_hasher::IsEnabled for ValueCacheKey<H> {}

// The `hash` field is deliberately ignored here: equal `storage_root` and
// `storage_key` imply an equal hash.
impl<H: PartialEq> PartialEq for ValueCacheKey<H> {
	#[inline]
	fn eq(&self, other: &Self) -> bool {
		self.is_eq(&other.storage_root, &other.storage_key)
	}
}

/// The map type backing the shared value cache.
type SharedValueCacheMap<H> = schnellru::LruMap<
	ValueCacheKey<H>,
	CachedValue<H>,
	SharedValueCacheLimiter,
	BuildNoHashHasher<ValueCacheKey<H>>,
>;

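/// The shared value cache.
///
/// The cache deduplicates the storage keys of its entries and, via its limiter,
/// ensures that it stays within the configured memory bounds.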
pub(super) struct SharedValueCache<H>
where
	H: AsRef<[u8]>,
{
	/// The cached values, ordered by least recently used.
	pub(super) lru: SharedValueCacheMap<H>,
}

impl<H: Eq + std::hash::Hash + Clone + Copy + AsRef<[u8]>> SharedValueCache<H> {
	/// Create a new instance.
	fn new(max_inline_size: usize, max_heap_size: usize) -> Self {
		Self {
			lru: schnellru::LruMap::with_hasher(
				SharedValueCacheLimiter {
					max_inline_size,
					max_heap_size,
					heap_size: 0,
					known_storage_keys: HashMap::with_hasher(RANDOM_STATE.clone()),
					items_evicted: 0,
					max_items_evicted: 0, // Will be set during `update`.
				},
				Default::default(),
			),
		}
	}

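	/// Update the cache with the `added` values and the `accessed` hashes.
	///
	/// The `added` values were collected while operating on the trie and should now
	/// be stored in the shared cache. The `accessed` values are referenced only by
	/// their [`ValueCacheKeyHash`] and represent entries that were read from this
	/// shared cache; they are promoted to the front of the internal [`LruMap`] just
	/// like the `added` ones.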
	pub fn update(
		&mut self,
		added: impl IntoIterator<Item = (ValueCacheKey<H>, CachedValue<H>)>,
		accessed: impl IntoIterator<Item = ValueCacheKeyHash>,
	) {
		let mut access_count = 0;
		let mut add_count = 0;

		for hash in accessed {
			// Access every element to move it to the front of the LRU.
			//
			// Since only the hashes are compared here this may promote the wrong
			// value, but that is harmless: the only consequence is that a recently
			// used entry might be pruned a little too early.
			self.lru.get_by_hash(hash.raw(), |existing_key, _| existing_key.hash == hash);
			access_count += 1;
		}

		// Insert all of the new items which were *not* retrieved from the shared cache,
		// limiting how many entries one `update` may replace.
		self.lru.limiter_mut().items_evicted = 0;
		self.lru.limiter_mut().max_items_evicted =
			self.lru.len() * 100 / super::SHARED_VALUE_CACHE_MAX_REPLACE_PERCENT;

		for (key, value) in added {
			self.lru.insert(key, value);
			add_count += 1;

			if self.lru.limiter().items_evicted > self.lru.limiter().max_items_evicted {
				// We've evicted too many elements; stop here or we may loop forever.
				break
			}
		}

		tracing::debug!(
			target: super::LOG_TARGET,
			"Updated the shared value cache: {} accesses, {} new values, {}/{} evicted (length = {}, known_storage_keys = {}, inline size={}/{}, heap size={}/{})",
			access_count,
			add_count,
			self.lru.limiter().items_evicted,
			self.lru.limiter().max_items_evicted,
			self.lru.len(),
			self.lru.limiter().known_storage_keys.len(),
			self.lru.memory_usage(),
			self.lru.limiter().max_inline_size,
			self.lru.limiter().heap_size,
			self.lru.limiter().max_heap_size,
		);
	}

	/// Reset the cache.
	fn reset(&mut self) {
		self.lru.clear();
	}
}

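/// The inner of [`SharedTrieCache`], holding both the node and the value cache.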
pub(super) struct SharedTrieCacheInner<H: Hasher> {
	node_cache: SharedNodeCache<H::Out>,
	value_cache: SharedValueCache<H::Out>,
}

impl<H: Hasher> SharedTrieCacheInner<H> {
	/// Returns a reference to the [`SharedValueCache`].
	#[cfg(test)]
	pub(super) fn value_cache(&self) -> &SharedValueCache<H::Out> {
		&self.value_cache
	}

	/// Returns a mutable reference to the [`SharedValueCache`].
	pub(super) fn value_cache_mut(&mut self) -> &mut SharedValueCache<H::Out> {
		&mut self.value_cache
	}

	/// Returns a reference to the [`SharedNodeCache`].
	#[cfg(test)]
	pub(super) fn node_cache(&self) -> &SharedNodeCache<H::Out> {
		&self.node_cache
	}

	/// Returns a mutable reference to the [`SharedNodeCache`].
	pub(super) fn node_cache_mut(&mut self) -> &mut SharedNodeCache<H::Out> {
		&mut self.node_cache
	}
}

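/// The shared trie cache.
///
/// It should be instantiated once per node and can be shared across threads:
/// cloning it is cheap, since every clone points at the same [`RwLock`]-protected
/// inner state. To keep memory use bounded it is created with a [`CacheSize`]
/// budget, which is split between the node cache and the value cache.
///
/// A minimal usage sketch; `CacheSize::new` and `sp_core::Blake2Hasher` are
/// assumed here for illustration and may differ from the surrounding crate's
/// exact API:
///
/// ```ignore
/// // Create one shared cache per node with a ~64 MiB budget...
/// let shared = SharedTrieCache::<sp_core::Blake2Hasher>::new(CacheSize::new(64 * 1024 * 1024));
///
/// // ...then hand every thread its own local cache, which syncs back into the
/// // shared one when dropped.
/// let local = shared.local_cache();
/// ```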
pub struct SharedTrieCache<H: Hasher> {
	inner: Arc<RwLock<SharedTrieCacheInner<H>>>,
}

impl<H: Hasher> Clone for SharedTrieCache<H> {
	fn clone(&self) -> Self {
		Self { inner: self.inner.clone() }
	}
}

impl<H: Hasher> SharedTrieCache<H> {
	/// Create a new [`SharedTrieCache`].
	pub fn new(cache_size: CacheSize) -> Self {
		let total_budget = cache_size.0;

		// Split the memory budget between the two caches.
		let value_cache_budget = (total_budget as f32 * 0.20) as usize; // 20% for the value cache
		let node_cache_budget = total_budget - value_cache_budget; // 80% for the node cache

		// Split each cache's budget between what is held inline in the map
		// and what is held on the heap.
		let value_cache_inline_budget = (value_cache_budget as f32 * 0.70) as usize;
		let node_cache_inline_budget = (node_cache_budget as f32 * 0.70) as usize;

		// Calculate how much memory the maps will be allowed to hold inline given our budget.
		let value_cache_max_inline_size =
			SharedValueCacheMap::<H::Out>::memory_usage_for_memory_budget(
				value_cache_inline_budget,
			);

		let node_cache_max_inline_size =
			SharedNodeCacheMap::<H::Out>::memory_usage_for_memory_budget(node_cache_inline_budget);

		// And this is how much data will at most be kept on the heap for each cache.
		let value_cache_max_heap_size = value_cache_budget - value_cache_max_inline_size;
		let node_cache_max_heap_size = node_cache_budget - node_cache_max_inline_size;

		tracing::debug!(
			target: super::LOG_TARGET,
			"Configured a shared trie cache with a budget of ~{} bytes (node_cache_max_inline_size = {}, node_cache_max_heap_size = {}, value_cache_max_inline_size = {}, value_cache_max_heap_size = {})",
			total_budget,
			node_cache_max_inline_size,
			node_cache_max_heap_size,
			value_cache_max_inline_size,
			value_cache_max_heap_size,
		);

		Self {
			inner: Arc::new(RwLock::new(SharedTrieCacheInner {
				node_cache: SharedNodeCache::new(
					node_cache_max_inline_size,
					node_cache_max_heap_size,
				),
				value_cache: SharedValueCache::new(
					value_cache_max_inline_size,
					value_cache_max_heap_size,
				),
			})),
		}
	}

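	/// Create a new [`super::LocalTrieCache`] instance from this shared cache.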
	pub fn local_cache(&self) -> super::LocalTrieCache<H> {
		super::LocalTrieCache {
			shared: self.clone(),
			node_cache: Default::default(),
			value_cache: Default::default(),
			shared_value_cache_access: Mutex::new(super::ValueAccessSet::with_hasher(
				schnellru::ByLength::new(super::SHARED_VALUE_CACHE_MAX_PROMOTED_KEYS),
				Default::default(),
			)),
			stats: Default::default(),
		}
	}

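	/// Get a copy of the node for `key`.
	///
	/// This will temporarily lock the shared cache for reading.
	///
	/// This does not change the least-recently-used order of the internal [`LruMap`].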
	#[inline]
	pub fn peek_node(&self, key: &H::Out) -> Option<NodeOwned<H::Out>> {
		self.inner.read().node_cache.lru.peek(key).cloned()
	}

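	/// Get a copy of the [`CachedValue`] for the given key, addressed by its
	/// precomputed `hash` plus the (`storage_root`, `storage_key`) pair used for
	/// collision checking.
	///
	/// This will temporarily lock the shared cache for reading and does not
	/// reorder the internal [`LruMap`].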
	pub fn peek_value_by_hash(
		&self,
		hash: ValueCacheKeyHash,
		storage_root: &H::Out,
		storage_key: &[u8],
	) -> Option<CachedValue<H::Out>> {
		self.inner
			.read()
			.value_cache
			.lru
			.peek_by_hash(hash.0, |existing_key, _| existing_key.is_eq(storage_root, storage_key))
			.cloned()
	}

	/// Returns the total memory used by this cache, in bytes.
	pub fn used_memory_size(&self) -> usize {
		let inner = self.inner.read();
		let value_cache_size =
			inner.value_cache.lru.memory_usage() + inner.value_cache.lru.limiter().heap_size;
		let node_cache_size =
			inner.node_cache.lru.memory_usage() + inner.node_cache.lru.limiter().heap_size;

		node_cache_size + value_cache_size
	}

	/// Reset the node cache.
	pub fn reset_node_cache(&self) {
		self.inner.write().node_cache.reset();
	}

	/// Reset the value cache.
	pub fn reset_value_cache(&self) {
		self.inner.write().value_cache.reset();
	}

	/// Reset the entire cache.
	pub fn reset(&self) {
		self.reset_node_cache();
		self.reset_value_cache();
	}

	/// Returns the read-locked inner.
	#[cfg(test)]
	pub(super) fn read_lock_inner(
		&self,
	) -> parking_lot::RwLockReadGuard<'_, SharedTrieCacheInner<H>> {
		self.inner.read()
	}

	/// Returns the write-locked inner, or `None` if the lock couldn't be acquired in time.
	pub(super) fn write_lock_inner(&self) -> Option<RwLockWriteGuard<'_, SharedTrieCacheInner<H>>> {
		// We *really* don't want to deadlock, so the write lock uses a timeout: at
		// worst an update to the shared cache is skipped, at best a deadlock is averted.
		self.inner.try_write_for(super::SHARED_CACHE_WRITE_LOCK_TIMEOUT)
	}
}

#[cfg(test)]
mod tests {
	use super::*;
	use sp_core::H256 as Hash;

	#[test]
	fn shared_value_cache_works() {
		// Unlimited inline size; heap budget of exactly ten 10-byte keys.
		let mut cache = SharedValueCache::<sp_core::H256>::new(usize::MAX, 10 * 10);

		let key = vec![0; 10];

		let root0 = Hash::repeat_byte(1);
		let root1 = Hash::repeat_byte(2);

		cache.update(
			vec![
				(ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting),
				(ValueCacheKey::new_value(&key[..], root1), CachedValue::NonExisting),
			],
			vec![],
		);

		// Ensure that the basics are working:
		assert_eq!(1, cache.lru.limiter_mut().known_storage_keys.len());
		// Two instances of the key inside the LRU + one in `known_storage_keys`.
		assert_eq!(
			3,
			Arc::strong_count(
				cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).unwrap().0
			)
		);
		assert_eq!(key.len(), cache.lru.limiter().heap_size);
		assert_eq!(cache.lru.len(), 2);
		assert_eq!(cache.lru.peek_newest().unwrap().0.storage_root, root1);
		assert_eq!(cache.lru.peek_oldest().unwrap().0.storage_root, root0);
		assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size);
		assert_eq!(cache.lru.limiter().heap_size, 10);

		// Just accessing a key should not change the size or the number of entries.
		cache.update(vec![], vec![ValueCacheKey::hash_data(&key[..], &root0)]);
		assert_eq!(1, cache.lru.limiter_mut().known_storage_keys.len());
		assert_eq!(
			3,
			Arc::strong_count(
				cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).unwrap().0
			)
		);
		assert_eq!(key.len(), cache.lru.limiter().heap_size);
		assert_eq!(cache.lru.len(), 2);
		assert_eq!(cache.lru.peek_newest().unwrap().0.storage_root, root0);
		assert_eq!(cache.lru.peek_oldest().unwrap().0.storage_root, root1);
		assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size);
		assert_eq!(cache.lru.limiter().heap_size, 10);

		// Updating the cache with exactly the same data should not change anything
		// apart from the LRU order.
		cache.update(
			vec![
				(ValueCacheKey::new_value(&key[..], root1), CachedValue::NonExisting),
				(ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting),
			],
			vec![],
		);
		assert_eq!(1, cache.lru.limiter_mut().known_storage_keys.len());
		assert_eq!(
			3,
			Arc::strong_count(
				cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).unwrap().0
			)
		);
		assert_eq!(key.len(), cache.lru.limiter().heap_size);
		assert_eq!(cache.lru.len(), 2);
		assert_eq!(cache.lru.peek_newest().unwrap().0.storage_root, root0);
		assert_eq!(cache.lru.peek_oldest().unwrap().0.storage_root, root1);
		assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size);
		assert_eq!(cache.lru.limiter().items_evicted, 0);
		assert_eq!(cache.lru.limiter().heap_size, 10);

		// Add 10 other entries; this should evict the two initial ones.
		cache.update(
			(1..11)
				.map(|i| vec![i; 10])
				.map(|key| (ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting)),
			vec![],
		);

		assert_eq!(cache.lru.limiter().items_evicted, 2);
		assert_eq!(10, cache.lru.len());
		assert_eq!(10, cache.lru.limiter_mut().known_storage_keys.len());
		assert!(cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).is_none());
		assert_eq!(key.len() * 10, cache.lru.limiter().heap_size);
		assert_eq!(cache.lru.len(), 10);
		assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size);
		assert_eq!(cache.lru.limiter().heap_size, 100);

		assert!(matches!(
			cache.lru.peek(&ValueCacheKey::new_value(&[1; 10][..], root0)).unwrap(),
			CachedValue::<Hash>::NonExisting
		));

		assert!(cache.lru.peek(&ValueCacheKey::new_value(&[1; 10][..], root1)).is_none());

		assert!(cache.lru.peek(&ValueCacheKey::new_value(&key[..], root0)).is_none());
		assert!(cache.lru.peek(&ValueCacheKey::new_value(&key[..], root1)).is_none());

		// One more insertion evicts another entry; the original `key` must stay
		// fully removed from `known_storage_keys`.
		cache.update(
			vec![(ValueCacheKey::new_value(vec![10; 10], root0), CachedValue::NonExisting)],
			vec![],
		);

		assert!(cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).is_none());
	}
}