use super::{
    metrics::Metrics, CacheSize, LocalNodeCacheConfig, LocalNodeCacheLimiter,
    LocalValueCacheConfig, LocalValueCacheLimiter, NodeCached, TrieHitStats, TrieHitStatsSnapshot,
};
use crate::cache::LOG_TARGET;
use core::{hash::Hash, time::Duration};
use hash_db::Hasher;
use nohash_hasher::BuildNoHashHasher;
use parking_lot::{Mutex, RwLock, RwLockWriteGuard};
use prometheus_endpoint::Registry;
use schnellru::LruMap;
use std::{
    collections::{hash_map::Entry as SetEntry, HashMap},
    hash::{BuildHasher, Hasher as _},
    sync::{Arc, LazyLock},
    time::Instant,
};
use trie_db::{node::NodeOwned, CachedValue};
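
/// A lazily initialized, randomly seeded hasher state shared by this whole module, so that
/// value cache key hashes computed by different cache instances in the same process agree.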
static RANDOM_STATE: LazyLock<ahash::RandomState> = LazyLock::new(|| {
    use rand::Rng;
    let mut rng = rand::thread_rng();
    ahash::RandomState::generate_with(rng.gen(), rng.gen(), rng.gen(), rng.gen())
});

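/// The limiter used by the shared node cache.
///
/// Tracks the heap usage of all cached nodes and bounds both the inline size of the backing
/// [`LruMap`] and the number of evictions allowed per `update` round.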
pub struct SharedNodeCacheLimiter {
    /// The maximum size (in bytes) the map is allowed to hold inline.
    max_inline_size: usize,

    /// The maximum size (in bytes) of data allocated on the heap.
    max_heap_size: usize,

    /// The current size (in bytes) of data allocated on the heap, excluding the map itself.
    heap_size: usize,

    /// The number of items evicted during the current `update` round.
    items_evicted: usize,

    /// The maximum number of evictions allowed during the current `update` round.
    max_items_evicted: usize,
}

impl<H> schnellru::Limiter<H, NodeOwned<H>> for SharedNodeCacheLimiter
where
    H: AsRef<[u8]>,
{
    type KeyToInsert<'a> = H;
    type LinkType = u32;

    #[inline]
    fn is_over_the_limit(&self, _length: usize) -> bool {
        // Once the eviction budget for this `update` round is used up we report "not over the
        // limit" to stop further evictions; the calling code notices and breaks out of its loop.
        self.items_evicted <= self.max_items_evicted && self.heap_size > self.max_heap_size
    }

    #[inline]
    fn on_insert(
        &mut self,
        _length: usize,
        key: Self::KeyToInsert<'_>,
        node: NodeOwned<H>,
    ) -> Option<(H, NodeOwned<H>)> {
        let new_item_heap_size = node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>();
        if new_item_heap_size > self.max_heap_size {
            // Reject nodes whose heap payload alone exceeds the whole heap budget.
            return None
        }

        self.heap_size += new_item_heap_size;
        Some((key, node))
    }

    #[inline]
    fn on_replace(
        &mut self,
        _length: usize,
        _old_key: &mut H,
        _new_key: H,
        old_node: &mut NodeOwned<H>,
        new_node: &mut NodeOwned<H>,
    ) -> bool {
        debug_assert_eq!(_old_key.as_ref(), _new_key.as_ref());

        let new_item_heap_size = new_node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>();
        if new_item_heap_size > self.max_heap_size {
            // Reject replacements whose heap payload alone exceeds the whole heap budget.
            return false
        }

        let old_item_heap_size = old_node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>();
        self.heap_size = self.heap_size - old_item_heap_size + new_item_heap_size;
        true
    }

    #[inline]
    fn on_cleared(&mut self) {
        self.heap_size = 0;
    }

    #[inline]
    fn on_removed(&mut self, _: &mut H, node: &mut NodeOwned<H>) {
        self.heap_size -= node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>();
        self.items_evicted += 1;
    }

    #[inline]
    fn on_grow(&mut self, new_memory_usage: usize) -> bool {
        new_memory_usage <= self.max_inline_size
    }
}

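/// The limiter used by the shared value cache.
///
/// In addition to bounding the inline map size and the per-`update` eviction count, this
/// limiter deduplicates storage keys: all cached values referring to the same storage key
/// share one `Arc<[u8]>`, tracked in `known_storage_keys`.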
pub struct SharedValueCacheLimiter {
    /// The maximum size (in bytes) the map is allowed to hold inline.
    max_inline_size: usize,

    /// The maximum size (in bytes) of data allocated on the heap.
    max_heap_size: usize,

    /// The current size (in bytes) of data allocated on the heap.
    heap_size: usize,

    /// All known storage keys, used to deduplicate the `Arc<[u8]>`s of cached values.
    known_storage_keys: HashMap<Arc<[u8]>, (), ahash::RandomState>,

    /// The number of items evicted during the current `update` round.
    items_evicted: usize,

    /// The maximum number of evictions allowed during the current `update` round.
    max_items_evicted: usize,
}

impl<H> schnellru::Limiter<ValueCacheKey<H>, CachedValue<H>> for SharedValueCacheLimiter
where
    H: AsRef<[u8]>,
{
    type KeyToInsert<'a> = ValueCacheKey<H>;
    type LinkType = u32;

    #[inline]
    fn is_over_the_limit(&self, _length: usize) -> bool {
        self.items_evicted <= self.max_items_evicted && self.heap_size > self.max_heap_size
    }

    #[inline]
    fn on_insert(
        &mut self,
        _length: usize,
        mut key: Self::KeyToInsert<'_>,
        value: CachedValue<H>,
    ) -> Option<(ValueCacheKey<H>, CachedValue<H>)> {
        match self.known_storage_keys.entry(key.storage_key.clone()) {
            SetEntry::Vacant(entry) => {
                let new_item_heap_size = key.storage_key.len();
                if new_item_heap_size > self.max_heap_size {
                    // Reject storage keys that alone would exceed the whole heap budget.
                    return None
                }

                self.heap_size += new_item_heap_size;
                entry.insert(());
            },
            SetEntry::Occupied(entry) => {
                // The storage key is already known; reuse the existing `Arc` so the same
                // key is not held in memory twice.
                key.storage_key = entry.key().clone();
            },
        }

        Some((key, value))
    }

    #[inline]
    fn on_replace(
        &mut self,
        _length: usize,
        _old_key: &mut ValueCacheKey<H>,
        _new_key: ValueCacheKey<H>,
        _old_value: &mut CachedValue<H>,
        _new_value: &mut CachedValue<H>,
    ) -> bool {
        debug_assert_eq!(_new_key.storage_key, _old_key.storage_key);
        true
    }

    #[inline]
    fn on_removed(&mut self, key: &mut ValueCacheKey<H>, _: &mut CachedValue<H>) {
        if Arc::strong_count(&key.storage_key) == 2 {
            // `known_storage_keys` holds one strong reference and `key` itself holds the
            // other, so this is the last cached value referring to this storage key: drop
            // the deduplication entry and release its heap accounting.
            self.heap_size -= key.storage_key.len();
            self.known_storage_keys.remove(&key.storage_key);
        }
        self.items_evicted += 1;
    }

    #[inline]
    fn on_cleared(&mut self) {
        self.heap_size = 0;
        self.known_storage_keys.clear();
    }

    #[inline]
    fn on_grow(&mut self, new_memory_usage: usize) -> bool {
        new_memory_usage <= self.max_inline_size
    }
}

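/// The map type backing the shared node cache.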
type SharedNodeCacheMap<H> =
    LruMap<H, NodeOwned<H>, SharedNodeCacheLimiter, schnellru::RandomState>;

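/// The shared node cache.
///
/// Stores all cached trie nodes in an [`LruMap`] whose limiter keeps the cache within its
/// configured inline and heap bounds.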
pub(super) struct SharedNodeCache<H>
where
    H: AsRef<[u8]>,
{
    /// The internal [`LruMap`] holding all cached nodes.
    pub(super) lru: SharedNodeCacheMap<H>,
}

impl<H: AsRef<[u8]> + Eq + std::hash::Hash> SharedNodeCache<H> {
    /// Create a new instance with the given inline and heap size limits (in bytes).
    fn new(max_inline_size: usize, max_heap_size: usize) -> Self {
        Self {
            lru: LruMap::new(SharedNodeCacheLimiter {
                max_inline_size,
                max_heap_size,
                heap_size: 0,
                items_evicted: 0,
                // Set to the real limit on every call to `update`.
                max_items_evicted: 0,
            }),
        }
    }

    /// Update the cache with the `list` of nodes which were either newly added or accessed.
    pub fn update(
        &mut self,
        list: impl IntoIterator<Item = (H, NodeCached<H>)>,
        config: &LocalNodeCacheConfig,
        metrics: &Option<Metrics>,
    ) {
        let mut access_count = 0;
        let mut add_count = 0;

        self.lru.limiter_mut().items_evicted = 0;
        self.lru.limiter_mut().max_items_evicted =
            self.lru.len() * 100 / config.shared_node_cache_max_replace_percent;

        for (key, cached_node) in list {
            if cached_node.is_from_shared_cache {
                if self.lru.get(&key).is_some() {
                    access_count += 1;

                    if access_count >= config.shared_node_cache_max_promoted_keys {
                        // Promoting a key in the shared cache is relatively expensive, so
                        // only promote a bounded number of keys per update.
                        break
                    }

                    continue
                }
            }

            self.lru.insert(key, cached_node.node);
            add_count += 1;

            if self.lru.limiter().items_evicted > self.lru.limiter().max_items_evicted {
                // Stop when too many of the existing entries were evicted in this round.
                break
            }
        }

        if let Some(m) = metrics.as_ref() {
            m.observe_node_cache_inline_size(self.lru.memory_usage());
            m.observe_node_cache_heap_size(self.lru.limiter().heap_size);
        }

        tracing::debug!(
            target: super::LOG_TARGET,
            "Updated the shared node cache: {} accesses, {} new values, {}/{} evicted (length = {}, inline size={}/{}, heap size={}/{})",
            access_count,
            add_count,
            self.lru.limiter().items_evicted,
            self.lru.limiter().max_items_evicted,
            self.lru.len(),
            self.lru.memory_usage(),
            self.lru.limiter().max_inline_size,
            self.lru.limiter().heap_size,
            self.lru.limiter().max_heap_size,
        );
    }

    /// Reset the cache.
    fn reset(&mut self) {
        self.lru.clear();
    }
}

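/// The hash of a [`ValueCacheKey`].
///
/// Precomputed once, so that map lookups do not need to rehash the storage root and key.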
#[derive(PartialEq, Eq, Clone, Copy, Hash)]
#[repr(transparent)]
pub struct ValueCacheKeyHash(u64);

impl ValueCacheKeyHash {
    pub fn raw(self) -> u64 {
        self.0
    }
}

impl ValueCacheKeyHash {
    pub fn from_hasher_and_storage_key(
        mut hasher: impl std::hash::Hasher,
        storage_key: &[u8],
    ) -> Self {
        hasher.write(storage_key);

        Self(hasher.finish())
    }
}

impl nohash_hasher::IsEnabled for ValueCacheKeyHash {}

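/// The key type used by the shared value cache.
///
/// Combines the storage root and the storage key with a precomputed hash over both.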
#[derive(Eq)]
pub(super) struct ValueCacheKey<H> {
    /// The storage root of the trie this key belongs to.
    pub storage_root: H,
    /// The key to access the value in the storage.
    pub storage_key: Arc<[u8]>,
    /// The hash that identifies this instance of `storage_root` and `storage_key`.
    pub hash: ValueCacheKeyHash,
}

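/// A borrowed variant of [`ValueCacheKey`], used for lookups without allocating an `Arc`.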
pub(super) struct ValueCacheRef<'a, H> {
    /// The storage root of the trie this key belongs to.
    pub storage_root: H,
    /// The key to access the value in the storage.
    pub storage_key: &'a [u8],
    /// The hash that identifies this instance of `storage_root` and `storage_key`.
    pub hash: ValueCacheKeyHash,
}

impl<'a, H> ValueCacheRef<'a, H> {
    pub fn new(storage_key: &'a [u8], storage_root: H) -> Self
    where
        H: AsRef<[u8]>,
    {
        let hash = ValueCacheKey::<H>::hash_data(storage_key, &storage_root);
        Self { storage_root, storage_key, hash }
    }
}

impl<'a, H> From<ValueCacheRef<'a, H>> for ValueCacheKey<H> {
    fn from(value: ValueCacheRef<'a, H>) -> Self {
        ValueCacheKey {
            storage_root: value.storage_root,
            storage_key: value.storage_key.into(),
            hash: value.hash,
        }
    }
}

impl<'a, H: std::hash::Hash> std::hash::Hash for ValueCacheRef<'a, H> {
    fn hash<Hasher: std::hash::Hasher>(&self, state: &mut Hasher) {
        // Only feed the precomputed hash to the hasher.
        self.hash.hash(state)
    }
}

impl<'a, H> PartialEq<ValueCacheKey<H>> for ValueCacheRef<'a, H>
where
    H: AsRef<[u8]>,
{
    fn eq(&self, rhs: &ValueCacheKey<H>) -> bool {
        self.storage_root.as_ref() == rhs.storage_root.as_ref() &&
            self.storage_key == &*rhs.storage_key
    }
}

impl<H> ValueCacheKey<H> {
    /// Constructs a new [`ValueCacheKey`]. Only used in tests.
    #[cfg(test)]
    pub fn new_value(storage_key: impl Into<Arc<[u8]>>, storage_root: H) -> Self
    where
        H: AsRef<[u8]>,
    {
        let storage_key = storage_key.into();
        let hash = Self::hash_data(&storage_key, &storage_root);
        Self { storage_root, storage_key, hash }
    }

    /// Returns a hasher prepared to build the final [`ValueCacheKeyHash`].
    ///
    /// The storage root is written first, so the returned hasher can be cloned and reused
    /// for multiple storage keys under the same root.
    pub fn hash_partial_data(storage_root: &H) -> impl std::hash::Hasher + Clone
    where
        H: AsRef<[u8]>,
    {
        let mut hasher = RANDOM_STATE.build_hasher();
        hasher.write(storage_root.as_ref());
        hasher
    }

    /// Hash `key` and `storage_root` into a [`ValueCacheKeyHash`].
    pub fn hash_data(key: &[u8], storage_root: &H) -> ValueCacheKeyHash
    where
        H: AsRef<[u8]>,
    {
        let hasher = Self::hash_partial_data(storage_root);

        ValueCacheKeyHash::from_hasher_and_storage_key(hasher, key)
    }

    /// Checks whether the key is equal to the given `storage_root` and `storage_key`.
    #[inline]
    pub fn is_eq(&self, storage_root: &H, storage_key: &[u8]) -> bool
    where
        H: PartialEq,
    {
        self.storage_root == *storage_root && *self.storage_key == *storage_key
    }
}

// Implement manually so that only the precomputed hash is used.
impl<H: std::hash::Hash> std::hash::Hash for ValueCacheKey<H> {
    fn hash<Hasher: std::hash::Hasher>(&self, state: &mut Hasher) {
        self.hash.hash(state)
    }
}

impl<H> nohash_hasher::IsEnabled for ValueCacheKey<H> {}

// Implement manually to not have to compare `hash`.
impl<H: PartialEq> PartialEq for ValueCacheKey<H> {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.is_eq(&other.storage_root, &other.storage_key)
    }
}

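/// The map type backing the shared value cache.
///
/// Uses [`BuildNoHashHasher`] because every [`ValueCacheKey`] already carries its hash.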
type SharedValueCacheMap<H> = schnellru::LruMap<
    ValueCacheKey<H>,
    CachedValue<H>,
    SharedValueCacheLimiter,
    BuildNoHashHasher<ValueCacheKey<H>>,
>;

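/// The shared value cache.
///
/// Stays within its configured size bounds and deduplicates the storage keys of its entries.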
pub(super) struct SharedValueCache<H>
where
    H: AsRef<[u8]>,
{
    /// The internal [`LruMap`] holding all cached values.
    pub(super) lru: SharedValueCacheMap<H>,
}

impl<H: Eq + std::hash::Hash + Clone + Copy + AsRef<[u8]>> SharedValueCache<H> {
    /// Create a new instance with the given inline and heap size limits (in bytes).
    fn new(max_inline_size: usize, max_heap_size: usize) -> Self {
        Self {
            lru: schnellru::LruMap::with_hasher(
                SharedValueCacheLimiter {
                    max_inline_size,
                    max_heap_size,
                    heap_size: 0,
                    known_storage_keys: HashMap::with_hasher(RANDOM_STATE.clone()),
                    items_evicted: 0,
                    // Set to the real limit on every call to `update`.
                    max_items_evicted: 0,
                },
                Default::default(),
            ),
        }
    }

    /// Update the cache with the `added` values and the `accessed` hashes.
    ///
    /// The `added` values are inserted, while the `accessed` hashes only promote already
    /// cached values in the LRU order.
    pub fn update(
        &mut self,
        added: impl IntoIterator<Item = (ValueCacheKey<H>, CachedValue<H>)>,
        accessed: impl IntoIterator<Item = ValueCacheKeyHash>,
        config: &LocalValueCacheConfig,
        metrics: &Option<Metrics>,
    ) {
        let mut access_count = 0;
        let mut add_count = 0;

        for hash in accessed {
            // Access the cached value by its hash to move it to the front of the LRU.
            self.lru.get_by_hash(hash.raw(), |existing_key, _| existing_key.hash == hash);
            access_count += 1;
        }

        self.lru.limiter_mut().items_evicted = 0;
        self.lru.limiter_mut().max_items_evicted =
            self.lru.len() * 100 / config.shared_value_cache_max_replace_percent;

        for (key, value) in added {
            self.lru.insert(key, value);
            add_count += 1;

            if self.lru.limiter().items_evicted > self.lru.limiter().max_items_evicted {
                // Stop when too many of the existing entries were evicted in this round.
                break
            }
        }

        if let Some(m) = metrics.as_ref() {
            m.observe_value_cache_inline_size(self.lru.memory_usage());
            m.observe_value_cache_heap_size(self.lru.limiter().heap_size);
        }

        tracing::debug!(
            target: super::LOG_TARGET,
            "Updated the shared value cache: {} accesses, {} new values, {}/{} evicted (length = {}, known_storage_keys = {}, inline size={}/{}, heap size={}/{})",
            access_count,
            add_count,
            self.lru.limiter().items_evicted,
            self.lru.limiter().max_items_evicted,
            self.lru.len(),
            self.lru.limiter().known_storage_keys.len(),
            self.lru.memory_usage(),
            self.lru.limiter().max_inline_size,
            self.lru.limiter().heap_size,
            self.lru.limiter().max_heap_size
        );
    }

    /// Reset the cache.
    fn reset(&mut self) {
        self.lru.clear();
    }
}

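/// The inner state of [`SharedTrieCache`], protected by an [`RwLock`].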
pub(super) struct SharedTrieCacheInner<H: Hasher> {
    node_cache: SharedNodeCache<H::Out>,
    value_cache: SharedValueCache<H::Out>,
    stats: TrieHitStats,
    metrics: Option<Metrics>,
    previous_stats_dump: Instant,
}

impl<H: Hasher> SharedTrieCacheInner<H> {
    /// Returns a reference to the [`SharedValueCache`].
    #[cfg(test)]
    pub(super) fn value_cache(&self) -> &SharedValueCache<H::Out> {
        &self.value_cache
    }

    /// Returns a mutable reference to the [`SharedValueCache`].
    pub(super) fn value_cache_mut(&mut self) -> &mut SharedValueCache<H::Out> {
        &mut self.value_cache
    }

    /// Returns a reference to the [`SharedNodeCache`].
    #[cfg(test)]
    pub(super) fn node_cache(&self) -> &SharedNodeCache<H::Out> {
        &self.node_cache
    }

    /// Returns a mutable reference to the [`SharedNodeCache`].
    pub(super) fn node_cache_mut(&mut self) -> &mut SharedNodeCache<H::Out> {
        &mut self.node_cache
    }

    pub(super) fn metrics(&self) -> Option<&Metrics> {
        self.metrics.as_ref()
    }

    /// Merge a local stats snapshot into the shared stats, dumping them to the log at most
    /// once per minute.
    pub(super) fn stats_add_snapshot(&mut self, snapshot: &TrieHitStatsSnapshot) {
        self.stats.add_snapshot(snapshot);
        if self.previous_stats_dump.elapsed() > Duration::from_secs(60) {
            self.previous_stats_dump = Instant::now();
            let snapshot = self.stats.snapshot();
            tracing::trace!(target: LOG_TARGET, node_cache = %snapshot.node_cache, value_cache = %snapshot.value_cache, "Shared trie cache stats");
        }
    }
}

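/// The shared trie cache.
///
/// Intended to be instantiated once per node and shared across threads via cheap [`Clone`]s.
/// Local caches created through [`Self::local_cache_trusted`] or
/// [`Self::local_cache_untrusted`] collect node and value accesses and merge them back into
/// this shared instance.
///
/// A minimal usage sketch (not a doc-test; it assumes a way to construct the crate-internal
/// `CacheSize` and uses `sp_core::Blake2Hasher` as the hasher):
///
/// ```ignore
/// let shared = SharedTrieCache::<sp_core::Blake2Hasher>::new(cache_size, None);
/// let local = shared.local_cache_trusted();
/// // ... use `local` as the trie cache while executing a block ...
/// drop(local); // the collected accesses are merged back into `shared`
/// ```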
pub struct SharedTrieCache<H: Hasher> {
    inner: Arc<RwLock<SharedTrieCacheInner<H>>>,
    trusted_node_cache_config: LocalNodeCacheConfig,
    trusted_value_cache_config: LocalValueCacheConfig,
}

impl<H: Hasher> Clone for SharedTrieCache<H> {
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
            trusted_node_cache_config: self.trusted_node_cache_config,
            trusted_value_cache_config: self.trusted_value_cache_config,
        }
    }
}

impl<H: Hasher> SharedTrieCache<H> {
    /// Create a new [`SharedTrieCache`] that should be capable of holding `cache_size` bytes.
    pub fn new(cache_size: CacheSize, metrics_registry: Option<&Registry>) -> Self {
        let total_budget = cache_size.0;

        // Split the memory budget between the two caches.
        let value_cache_budget = (total_budget as f32 * 0.20) as usize; // 20% for the value cache
        let node_cache_budget = total_budget - value_cache_budget; // 80% for the node cache

        // Split each cache's budget between what is held inline in the map and what is
        // allocated on the heap.
        let value_cache_inline_budget = (value_cache_budget as f32 * 0.70) as usize;
        let node_cache_inline_budget = (node_cache_budget as f32 * 0.70) as usize;

        let value_cache_max_inline_size =
            SharedValueCacheMap::<H::Out>::memory_usage_for_memory_budget(
                value_cache_inline_budget,
            );

        let node_cache_max_inline_size =
            SharedNodeCacheMap::<H::Out>::memory_usage_for_memory_budget(node_cache_inline_budget);

        let value_cache_max_heap_size = value_cache_budget - value_cache_max_inline_size;
        let node_cache_max_heap_size = node_cache_budget - node_cache_max_inline_size;

        tracing::debug!(
            target: super::LOG_TARGET,
            "Configured a shared trie cache with a budget of ~{} bytes (node_cache_max_inline_size = {}, node_cache_max_heap_size = {}, value_cache_max_inline_size = {}, value_cache_max_heap_size = {})",
            total_budget,
            node_cache_max_inline_size,
            node_cache_max_heap_size,
            value_cache_max_inline_size,
            value_cache_max_heap_size,
        );

        Self {
            inner: Arc::new(RwLock::new(SharedTrieCacheInner {
                node_cache: SharedNodeCache::new(
                    node_cache_max_inline_size,
                    node_cache_max_heap_size,
                ),
                value_cache: SharedValueCache::new(
                    value_cache_max_inline_size,
                    value_cache_max_heap_size,
                ),
                stats: Default::default(),
                previous_stats_dump: Instant::now(),
                metrics: metrics_registry.and_then(|registry| Metrics::register(registry).ok()),
            })),
            trusted_node_cache_config: LocalNodeCacheConfig::trusted(
                node_cache_max_heap_size,
                node_cache_max_inline_size,
            ),
            trusted_value_cache_config: LocalValueCacheConfig::trusted(
                value_cache_max_heap_size,
                value_cache_max_inline_size,
            ),
        }
    }

    /// Create a new [`super::LocalTrieCache`] using the untrusted (more conservative) limits.
    pub fn local_cache_untrusted(&self) -> super::LocalTrieCache<H> {
        let local_value_cache_config = LocalValueCacheConfig::untrusted();
        let local_node_cache_config = LocalNodeCacheConfig::untrusted();
        tracing::debug!(
            target: super::LOG_TARGET,
            "Configuring a local un-trusted cache"
        );

        super::LocalTrieCache {
            shared: self.clone(),
            node_cache: Mutex::new(LruMap::new(LocalNodeCacheLimiter::new(
                local_node_cache_config,
            ))),
            value_cache: Mutex::new(LruMap::with_hasher(
                LocalValueCacheLimiter::new(local_value_cache_config),
                Default::default(),
            )),
            shared_value_cache_access: Mutex::new(super::ValueAccessSet::with_hasher(
                schnellru::ByLength::new(
                    local_value_cache_config.shared_value_cache_max_promoted_keys,
                ),
                Default::default(),
            )),
            value_cache_config: local_value_cache_config,
            node_cache_config: local_node_cache_config,
            stats: Default::default(),
            trusted: false,
        }
    }

    /// Create a new [`super::LocalTrieCache`] using the trusted limits derived from this
    /// shared cache's configuration.
    pub fn local_cache_trusted(&self) -> super::LocalTrieCache<H> {
        tracing::debug!(
            target: super::LOG_TARGET,
            "Configuring a local trusted cache"
        );
        super::LocalTrieCache {
            shared: self.clone(),
            node_cache: Mutex::new(LruMap::new(LocalNodeCacheLimiter::new(
                self.trusted_node_cache_config,
            ))),
            value_cache: Mutex::new(LruMap::with_hasher(
                LocalValueCacheLimiter::new(self.trusted_value_cache_config),
                Default::default(),
            )),
            shared_value_cache_access: Mutex::new(super::ValueAccessSet::with_hasher(
                schnellru::ByLength::new(
                    self.trusted_value_cache_config.shared_value_cache_max_promoted_keys,
                ),
                Default::default(),
            )),
            value_cache_config: self.trusted_value_cache_config,
            node_cache_config: self.trusted_node_cache_config,
            stats: Default::default(),
            trusted: true,
        }
    }

    /// Returns the cached node for `key`, if it exists.
    ///
    /// Does not change the least recently used order of the internal LRU.
    #[inline]
    pub fn peek_node(&self, key: &H::Out) -> Option<NodeOwned<H::Out>> {
        self.inner.read().node_cache.lru.peek(key).cloned()
    }

    /// Returns the cached value for the given `hash`, `storage_root` and `storage_key`,
    /// if it exists.
    ///
    /// Does not change the least recently used order of the internal LRU.
    pub fn peek_value_by_hash(
        &self,
        hash: ValueCacheKeyHash,
        storage_root: &H::Out,
        storage_key: &[u8],
    ) -> Option<CachedValue<H::Out>> {
        self.inner
            .read()
            .value_cache
            .lru
            .peek_by_hash(hash.0, |existing_key, _| existing_key.is_eq(storage_root, storage_key))
            .cloned()
    }

    /// Returns the used memory size of this cache in bytes.
    pub fn used_memory_size(&self) -> usize {
        let inner = self.inner.read();
        let value_cache_size =
            inner.value_cache.lru.memory_usage() + inner.value_cache.lru.limiter().heap_size;
        let node_cache_size =
            inner.node_cache.lru.memory_usage() + inner.node_cache.lru.limiter().heap_size;

        node_cache_size + value_cache_size
    }

    /// Reset the node cache.
    pub fn reset_node_cache(&self) {
        self.inner.write().node_cache.reset();
    }

    /// Reset the value cache.
    pub fn reset_value_cache(&self) {
        self.inner.write().value_cache.reset();
    }

    /// Reset the entire cache.
    pub fn reset(&self) {
        self.reset_node_cache();
        self.reset_value_cache();
    }

    #[cfg(test)]
    pub(super) fn read_lock_inner(
        &self,
    ) -> parking_lot::RwLockReadGuard<'_, SharedTrieCacheInner<H>> {
        self.inner.read()
    }

    /// Acquire a write lock on the inner state, giving up after
    /// [`super::SHARED_CACHE_WRITE_LOCK_TIMEOUT`] instead of blocking callers indefinitely.
    pub(super) fn write_lock_inner(&self) -> Option<RwLockWriteGuard<'_, SharedTrieCacheInner<H>>> {
        self.inner.try_write_for(super::SHARED_CACHE_WRITE_LOCK_TIMEOUT)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use sp_core::H256 as Hash;

    #[test]
    fn shared_value_cache_works() {
        let mut cache = SharedValueCache::<sp_core::H256>::new(usize::MAX, 10 * 10);

        let key = vec![0; 10];

        let root0 = Hash::repeat_byte(1);
        let root1 = Hash::repeat_byte(2);

        cache.update(
            vec![
                (ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting),
                (ValueCacheKey::new_value(&key[..], root1), CachedValue::NonExisting),
            ],
            vec![],
            &LocalValueCacheConfig::untrusted(),
            &None,
        );

        // Ensure that the basics are working: both entries share one `Arc`ed storage key.
        assert_eq!(1, cache.lru.limiter_mut().known_storage_keys.len());
        assert_eq!(
            3,
            Arc::strong_count(
                cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).unwrap().0
            )
        );
        assert_eq!(key.len(), cache.lru.limiter().heap_size);
        assert_eq!(cache.lru.len(), 2);
        assert_eq!(cache.lru.peek_newest().unwrap().0.storage_root, root1);
        assert_eq!(cache.lru.peek_oldest().unwrap().0.storage_root, root0);
        assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size);
        assert_eq!(cache.lru.limiter().heap_size, 10);

        // Accessing a key should only change the LRU order, not the cached data.
        cache.update(
            vec![],
            vec![ValueCacheKey::hash_data(&key[..], &root0)],
            &LocalValueCacheConfig::untrusted(),
            &None,
        );
        assert_eq!(1, cache.lru.limiter_mut().known_storage_keys.len());
        assert_eq!(
            3,
            Arc::strong_count(
                cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).unwrap().0
            )
        );
        assert_eq!(key.len(), cache.lru.limiter().heap_size);
        assert_eq!(cache.lru.len(), 2);
        assert_eq!(cache.lru.peek_newest().unwrap().0.storage_root, root0);
        assert_eq!(cache.lru.peek_oldest().unwrap().0.storage_root, root1);
        assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size);
        assert_eq!(cache.lru.limiter().heap_size, 10);

        // Updating the cache with exactly the same data should also only change the LRU order.
        cache.update(
            vec![
                (ValueCacheKey::new_value(&key[..], root1), CachedValue::NonExisting),
                (ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting),
            ],
            vec![],
            &LocalValueCacheConfig::untrusted(),
            &None,
        );
        assert_eq!(1, cache.lru.limiter_mut().known_storage_keys.len());
        assert_eq!(
            3,
            Arc::strong_count(
                cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).unwrap().0
            )
        );
        assert_eq!(key.len(), cache.lru.limiter().heap_size);
        assert_eq!(cache.lru.len(), 2);
        assert_eq!(cache.lru.peek_newest().unwrap().0.storage_root, root0);
        assert_eq!(cache.lru.peek_oldest().unwrap().0.storage_root, root1);
        assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size);
        assert_eq!(cache.lru.limiter().items_evicted, 0);
        assert_eq!(cache.lru.limiter().heap_size, 10);

        // Adding 10 other entries should evict all of the initial entries.
        cache.update(
            (1..11)
                .map(|i| vec![i; 10])
                .map(|key| (ValueCacheKey::new_value(&key[..], root0), CachedValue::NonExisting)),
            vec![],
            &LocalValueCacheConfig::untrusted(),
            &None,
        );

        assert_eq!(cache.lru.limiter().items_evicted, 2);
        assert_eq!(10, cache.lru.len());
        assert_eq!(10, cache.lru.limiter_mut().known_storage_keys.len());
        assert!(cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).is_none());
        assert_eq!(key.len() * 10, cache.lru.limiter().heap_size);
        assert_eq!(cache.lru.len(), 10);
        assert!(cache.lru.limiter().heap_size <= cache.lru.limiter().max_heap_size);
        assert_eq!(cache.lru.limiter().heap_size, 100);

        assert!(matches!(
            cache.lru.peek(&ValueCacheKey::new_value(&[1; 10][..], root0)).unwrap(),
            CachedValue::<Hash>::NonExisting
        ));

        assert!(cache.lru.peek(&ValueCacheKey::new_value(&[1; 10][..], root1)).is_none());

        assert!(cache.lru.peek(&ValueCacheKey::new_value(&key[..], root0)).is_none());
        assert!(cache.lru.peek(&ValueCacheKey::new_value(&key[..], root1)).is_none());

        cache.update(
            vec![(ValueCacheKey::new_value(vec![10; 10], root0), CachedValue::NonExisting)],
            vec![],
            &LocalValueCacheConfig::untrusted(),
            &None,
        );

        assert!(cache.lru.limiter_mut().known_storage_keys.get_key_value(&key[..]).is_none());
    }
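
    #[test]
    fn value_cache_key_hashing_is_consistent() {
        // Added as an illustrative sketch: `hash_data` is built on top of
        // `hash_partial_data`, so hashing the storage root first and the storage key
        // afterwards must yield the same `ValueCacheKeyHash` as the one-shot helper.
        let root = Hash::repeat_byte(1);
        let key = b"some storage key";

        let partial = ValueCacheKey::<Hash>::hash_partial_data(&root);
        let via_parts = ValueCacheKeyHash::from_hasher_and_storage_key(partial, key);
        let direct = ValueCacheKey::<Hash>::hash_data(key, &root);

        assert_eq!(via_parts.raw(), direct.raw());
    }

    #[test]
    fn peek_value_by_hash_finds_shared_entries() {
        // Added as an illustrative sketch of the shared cache API: a value merged into the
        // shared value cache is visible through `peek_value_by_hash` using the precomputed
        // key hash, without changing the LRU order.
        let cache = SharedTrieCache::<sp_core::Blake2Hasher>::new(CacheSize(1024 * 1024), None);
        let root = Hash::repeat_byte(1);
        let key = vec![1u8; 10];

        cache.write_lock_inner().unwrap().value_cache_mut().update(
            vec![(ValueCacheKey::new_value(&key[..], root), CachedValue::NonExisting)],
            vec![],
            &LocalValueCacheConfig::untrusted(),
            &None,
        );

        let hash = ValueCacheKey::hash_data(&key[..], &root);
        assert!(matches!(
            cache.peek_value_by_hash(hash, &root, &key).unwrap(),
            CachedValue::<Hash>::NonExisting
        ));
    }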
}