1#![doc = include_str!("../README.md")]
2#![no_std]
3#![deny(missing_docs)]
4#![deny(unused_must_use)]
5#![allow(clippy::while_let_on_iterator)]
6
7use core::hash::{BuildHasher, Hash, Hasher};
8use hashbrown::raw::{Bucket, RawTable};
9
10extern crate alloc;
11
12mod limiters;
13pub use limiters::*;
14
// Width of the hash table's SIMD control-byte group, used by
// `memory_usage_for_buckets` to reproduce the backing table's allocation
// layout. NOTE(review): this must match the group width hashbrown itself
// selects for the target (SSE2: 16, 64-bit/NEON/wasm: 8, otherwise 4) —
// keep in sync with the pinned hashbrown version.
cfg_if::cfg_if! {
    if #[cfg(all(
        target_feature = "sse2",
        any(target_arch = "x86", target_arch = "x86_64"),
        not(miri)
    ))] {
        const GROUP_WIDTH: usize = 16;
    } else if
        #[cfg(any(
            target_pointer_width = "64",
            target_arch = "aarch64",
            target_arch = "x86_64",
            target_arch = "wasm32",
        ))] {
        const GROUP_WIDTH: usize = 8;
    } else {
        const GROUP_WIDTH: usize = 4;
    }
}
35
/// Computes the number of hash-table buckets needed to hold `capacity` elements.
///
/// Tiny capacities snap to 4 or 8 buckets; larger ones round `capacity * 8 / 7`
/// up to the next power of two (i.e. the table is kept at most 7/8th full,
/// matching hashbrown's load factor). Returns `None` when the multiplication
/// overflows or the bucket count would reach `max` (the link type's sentinel,
/// which must stay unusable as a real index).
const fn buckets_for_capacity(capacity: usize, max: usize) -> Option<usize> {
    match capacity {
        0 => Some(0),
        1..=3 => Some(4),
        4..=7 => Some(8),
        _ => {
            let scaled = match capacity.checked_mul(8) {
                Some(value) => value / 7,
                None => return None,
            };

            let buckets = scaled.next_power_of_two();
            if buckets >= max {
                None
            } else {
                Some(buckets)
            }
        }
    }
}
61
/// Inverse of `buckets_for_capacity`: how many elements a table with `buckets`
/// buckets can hold before it must grow.
///
/// Small tables (up to 8 buckets) hold `buckets - 1` elements; larger ones are
/// kept at most 7/8th full, matching hashbrown's load factor.
const fn full_capacity_for_buckets(buckets: usize) -> usize {
    match buckets {
        0 => 0,
        1..=8 => buckets - 1,
        _ => buckets / 8 * 7,
    }
}
72
mod private {
    /// Sealed integer type used to link entries into the intrusive
    /// doubly-linked list; each link is a bucket index into the hash table.
    ///
    /// `MAX` doubles as the "null"/end-of-list sentinel, so a table using this
    /// link type may hold at most `MAX - 1` buckets.
    ///
    /// # Safety
    ///
    /// Implementors must guarantee that `from_usize`/`into_usize` round-trip
    /// losslessly for every bucket index below `MAX`; the map relies on this
    /// when it stores indices and later uses them for unchecked bucket
    /// accesses. NOTE(review): `from_usize` truncates with `as` for the
    /// narrow types — callers must never pass a value above `MAX`.
    #[doc(hidden)]
    pub unsafe trait LinkType: Copy + PartialEq + Eq + core::fmt::Debug {
        /// The sentinel value meaning "no link".
        const MAX: Self;

        /// Converts a bucket index into a link (truncating for narrow types).
        fn from_usize(value: usize) -> Self;
        /// Widens the link back into a bucket index.
        fn into_usize(self) -> usize;
    }

    // Identity conversion; always lossless.
    unsafe impl LinkType for usize {
        const MAX: Self = usize::MAX;

        #[inline]
        fn from_usize(value: usize) -> Self {
            value
        }

        #[inline]
        fn into_usize(self) -> usize {
            self
        }
    }

    unsafe impl LinkType for u32 {
        const MAX: Self = u32::MAX;

        #[inline]
        fn from_usize(value: usize) -> Self {
            value as u32
        }

        #[inline]
        fn into_usize(self) -> usize {
            self as usize
        }
    }

    unsafe impl LinkType for u16 {
        const MAX: Self = u16::MAX;

        #[inline]
        fn from_usize(value: usize) -> Self {
            value as u16
        }

        #[inline]
        fn into_usize(self) -> usize {
            self as usize
        }
    }

    unsafe impl LinkType for u8 {
        const MAX: Self = u8::MAX;

        #[inline]
        fn from_usize(value: usize) -> Self {
            value as u8
        }

        #[inline]
        fn into_usize(self) -> usize {
            self as usize
        }
    }
}
145
146use private::LinkType;
147
/// A trait used to customize the behavior and the limits of an [`LruMap`].
pub trait Limiter<K, V> {
    /// The type of key accepted by [`LruMap::insert`]; converted into `K` by
    /// [`Limiter::on_insert`].
    type KeyToInsert<'a>;

    /// The integer type used to link entries together; its `MAX` value is
    /// reserved as the end-of-list sentinel, which bounds the map's size.
    type LinkType: LinkType;

    /// Whether a map holding `length` elements is over its limit. The map
    /// evicts its oldest entries until this returns `false`.
    fn is_over_the_limit(&self, length: usize) -> bool;

    /// Called before a new element is inserted. Returning `None` rejects the
    /// insertion; returning `Some((key, value))` supplies the entry to store.
    fn on_insert(&mut self, length: usize, key: Self::KeyToInsert<'_>, value: V) -> Option<(K, V)>;

    /// Called before an existing element is replaced. Returning `false` makes
    /// the map remove the existing entry and reject the replacement.
    fn on_replace(
        &mut self,
        length: usize,
        old_key: &mut K,
        new_key: Self::KeyToInsert<'_>,
        old_value: &mut V,
        new_value: &mut V,
    ) -> bool;

    /// Called whenever an element is removed (explicitly, popped, or evicted).
    fn on_removed(&mut self, key: &mut K, value: &mut V);

    /// Called after the map is cleared (including when a `Drain` is dropped).
    fn on_cleared(&mut self);

    /// Called before the backing table grows to `new_memory_usage` bytes;
    /// returning `false` aborts the growth.
    fn on_grow(&mut self, new_memory_usage: usize) -> bool;
}
238
/// A single map slot: the key/value pair plus the intrusive linked-list links.
/// Links are bucket indices; `LinkType::MAX` marks the end of the list.
#[derive(Clone)]
struct Entry<K, V, E> {
    key: K,
    value: V,
    // Link towards the least-recently-used end of the list.
    older: E,
    // Link towards the most-recently-used end of the list.
    newer: E,
}
246
/// The default hasher used by the map; a thin wrapper around `ahash::AHasher`.
pub struct DefaultHasher(ahash::AHasher);

impl Hasher for DefaultHasher {
    // Every method forwards directly to the wrapped `ahash::AHasher`. The
    // fixed-width `write_*` methods are forwarded explicitly so ahash's
    // specialized implementations are used instead of the generic `write`.
    #[inline]
    fn finish(&self) -> u64 {
        self.0.finish()
    }

    #[inline]
    fn write(&mut self, bytes: &[u8]) {
        self.0.write(bytes)
    }

    #[inline]
    fn write_u8(&mut self, value: u8) {
        self.0.write_u8(value)
    }

    #[inline]
    fn write_u16(&mut self, value: u16) {
        self.0.write_u16(value)
    }

    #[inline]
    fn write_u32(&mut self, value: u32) {
        self.0.write_u32(value)
    }

    #[inline]
    fn write_u128(&mut self, value: u128) {
        self.0.write_u128(value)
    }

    #[inline]
    fn write_usize(&mut self, value: usize) {
        self.0.write_usize(value)
    }

    #[inline]
    fn write_u64(&mut self, value: u64) {
        self.0.write_u64(value)
    }
}
292
/// The default hash builder used by the map; produces [`DefaultHasher`]s.
///
/// `Default` (randomly seeded) is only available with the `runtime-rng`
/// feature; without it, construct maps via [`LruMap::with_seed`].
#[derive(Debug)]
#[cfg_attr(feature = "runtime-rng", derive(Default))]
pub struct RandomState(ahash::RandomState);

impl BuildHasher for RandomState {
    type Hasher = DefaultHasher;

    #[inline]
    fn build_hasher(&self) -> Self::Hasher {
        DefaultHasher(self.0.build_hasher())
    }
}
307
/// An LRU hash map.
///
/// Entries live in a hashbrown `RawTable` and are threaded onto an intrusive
/// doubly-linked list (via the `older`/`newer` links in [`Entry`]) ordered
/// from most- to least-recently used. The limiter `L` decides when and what
/// to evict; the hasher `S` supplies the hash function.
#[derive(Clone)]
pub struct LruMap<K, V, L = ByLength, S = RandomState>
where
    L: Limiter<K, V>,
{
    hasher: S,
    map: RawTable<Entry<K, V, L::LinkType>>,
    // Bucket index of the most recently used entry; `MAX` when empty.
    newest: <L as Limiter<K, V>>::LinkType,
    // Bucket index of the least recently used entry; `MAX` when empty.
    oldest: <L as Limiter<K, V>>::LinkType,
    limiter: L,
}
321
322impl<K, V, L, S> core::fmt::Debug for LruMap<K, V, L, S>
323where
324 L: Limiter<K, V>,
325{
326 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
327 f.debug_struct("LruMap")
328 .field("len", &self.map.len())
329 .field("key_type", &core::any::type_name::<K>())
330 .field("value_type", &core::any::type_name::<V>())
331 .finish_non_exhaustive()
332 }
333}
334
impl<K, V, L, S> Default for LruMap<K, V, L, S>
where
    K: Hash + PartialEq,
    S: BuildHasher + Default,
    L: Limiter<K, V> + Default,
{
    /// Creates an empty map with a default limiter and hasher.
    fn default() -> Self {
        Self::with_hasher(L::default(), S::default())
    }
}
345
impl<K, V, L> LruMap<K, V, L, RandomState>
where
    K: Hash + PartialEq,
    L: Limiter<K, V>,
{
    /// Creates a new map with the given `limiter` and a randomly seeded
    /// default hasher.
    #[cfg(feature = "runtime-rng")]
    pub fn new(limiter: L) -> Self {
        Self::with_hasher(limiter, RandomState::default())
    }

    /// Creates a new map with the given `limiter` and the default hasher
    /// deterministically seeded with `seed`.
    pub const fn with_seed(limiter: L, seed: [u64; 4]) -> Self {
        let hasher = ahash::RandomState::with_seeds(seed[0], seed[1], seed[2], seed[3]);
        Self::with_hasher(limiter, RandomState(hasher))
    }
}
363
impl<K, V, S> LruMap<K, V, ByMemoryUsage, S>
where
    K: Hash + PartialEq,
    S: BuildHasher,
{
    /// Creates a new map limited by `memory_budget` bytes, with a custom
    /// `hasher`.
    pub const fn with_memory_budget_and_hasher(memory_budget: usize, hasher: S) -> Self {
        let limiter = ByMemoryUsage::new(memory_budget);
        Self::with_hasher(limiter, hasher)
    }
}

impl<K, V> LruMap<K, V, ByMemoryUsage, RandomState>
where
    K: Hash + PartialEq,
{
    /// Creates a new map limited by `memory_budget` bytes, with a randomly
    /// seeded default hasher.
    #[cfg(feature = "runtime-rng")]
    pub fn with_memory_budget(memory_budget: usize) -> Self {
        Self::with_memory_budget_and_hasher(memory_budget, RandomState::default())
    }
}
396
397impl<K, V, L, S> LruMap<K, V, L, S>
398where
399 K: Hash + PartialEq,
400 S: BuildHasher,
401 L: Limiter<K, V>,
402{
    /// Debug-checks the invariant that holds whenever the map is non-empty:
    /// both ends of the linked list point at real buckets, not the sentinel.
    fn assert_not_empty(&self) {
        debug_assert_ne!(self.newest, L::LinkType::MAX);
        debug_assert_ne!(self.oldest, L::LinkType::MAX);
        debug_assert!(!self.map.is_empty());
    }

    /// Debug-checks the invariant that holds whenever the map is empty:
    /// both ends of the linked list are the `MAX` sentinel.
    fn assert_empty(&self) {
        debug_assert_eq!(self.newest, L::LinkType::MAX);
        debug_assert_eq!(self.oldest, L::LinkType::MAX);
        debug_assert!(self.map.is_empty());
    }
414
    /// Exhaustively validates the map's internal invariants; intended for
    /// tests and fuzzing. Panics on any inconsistency.
    ///
    /// Checks the sentinel invariants of `newest`/`oldest`, the terminal
    /// links of both list ends, that walking the list from newest to oldest
    /// visits every entry exactly once, and that the allocation size matches
    /// what `memory_usage_for_memory_budget` predicts.
    #[doc(hidden)]
    pub fn assert_check_internal_state(&self) {
        if self.map.is_empty() {
            assert_eq!(self.newest, L::LinkType::MAX);
            assert_eq!(self.oldest, L::LinkType::MAX);
            return;
        }

        assert_ne!(self.newest, L::LinkType::MAX);
        assert_ne!(self.oldest, L::LinkType::MAX);

        if self.map.len() == 1 {
            assert_eq!(self.newest, self.oldest);

            {
                // SAFETY: the map is non-empty, so `newest` is a live bucket.
                let node = unsafe { self.map.bucket(self.newest.into_usize()).as_ref() };
                assert_eq!(node.newer, L::LinkType::MAX);
                assert_eq!(node.older, L::LinkType::MAX);
            }
        } else {
            assert_ne!(self.newest, self.oldest);

            {
                // SAFETY: the map is non-empty, so `newest` is a live bucket.
                let newest = unsafe { self.map.bucket(self.newest.into_usize()).as_ref() };
                assert_eq!(newest.newer, L::LinkType::MAX);
                assert_ne!(newest.older, L::LinkType::MAX);
            }

            {
                // SAFETY: the map is non-empty, so `oldest` is a live bucket.
                let oldest = unsafe { self.map.bucket(self.oldest.into_usize()).as_ref() };
                assert_ne!(oldest.newer, L::LinkType::MAX);
                assert_eq!(oldest.older, L::LinkType::MAX);
            }
        }

        // Walk the whole list newest -> oldest, checking back-links and that
        // no bucket is visited twice (i.e. the list is acyclic).
        let mut seen = hashbrown::HashSet::with_capacity_and_hasher(
            self.len(),
            ahash::random_state::RandomState::with_seeds(12, 34, 56, 78),
        );
        let mut previous_index = L::LinkType::MAX;
        let mut index = self.newest;
        loop {
            assert!(seen.insert(index.into_usize()));
            // SAFETY: list links always point at live buckets.
            let node = unsafe { self.map.bucket(index.into_usize()).as_ref() };
            assert_eq!(node.newer, previous_index);

            previous_index = index;
            index = node.older;

            if index == L::LinkType::MAX {
                break;
            }
        }

        // The list must cover every entry in the table.
        assert_eq!(seen.len(), self.map.len());
        assert_eq!(
            Self::memory_usage_for_memory_budget(self.memory_usage()),
            self.memory_usage()
        );
    }
478
    /// Creates a new, empty map with the given `limiter` and `hasher`.
    ///
    /// Does not allocate; the backing table starts empty.
    pub const fn with_hasher(limiter: L, hasher: S) -> Self {
        LruMap {
            hasher,
            map: RawTable::new(),
            newest: L::LinkType::MAX,
            oldest: L::LinkType::MAX,
            limiter,
        }
    }
489
490 pub fn reserve_or_panic(&mut self, additional: usize) {
497 let capacity = self.len() + additional;
498 if self.guaranteed_capacity() >= capacity {
499 return;
500 }
501
502 if !self.try_grow(capacity) {
503 panic!("failed to reserve space for extra elements");
505 }
506 }
507
    /// Returns the number of elements the map can hold without reallocating.
    pub fn guaranteed_capacity(&self) -> usize {
        self.map.capacity()
    }

    /// Number of buckets needed for `capacity`, capped so the link type's
    /// `MAX` sentinel can never become a valid bucket index.
    fn buckets_for_capacity(capacity: usize) -> Option<usize> {
        buckets_for_capacity(capacity, L::LinkType::MAX.into_usize())
    }
526
    /// Calculates the allocation size in bytes of a table with `buckets`
    /// buckets, returning `None` on overflow.
    ///
    /// NOTE(review): this reproduces hashbrown's internal `RawTable` layout
    /// (entry array padded to the control-byte alignment, followed by
    /// `buckets + GROUP_WIDTH` control bytes); it must be kept in sync with
    /// the pinned hashbrown version — verified at runtime by
    /// `assert_check_internal_state`.
    fn memory_usage_for_buckets(buckets: usize) -> Option<usize> {
        if buckets == 0 {
            return Some(0);
        }

        let entry_layout = core::alloc::Layout::new::<Entry<K, V, L::LinkType>>();
        let size = entry_layout.size();
        // Control bytes are aligned to at least the SIMD group width.
        let ctrl_align = if entry_layout.align() > GROUP_WIDTH {
            entry_layout.align()
        } else {
            GROUP_WIDTH
        };

        // Offset of the control bytes: the entry array's size rounded up to
        // a multiple of `ctrl_align`.
        let ctrl_offset = size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);

        let len = ctrl_offset.checked_add(buckets + GROUP_WIDTH)?;
        // Mirrors `Layout`'s maximum-size constraint (size + padding must not
        // exceed `isize::MAX`).
        if len > isize::MAX as usize - (ctrl_align - 1) {
            return None;
        }

        Some(len)
    }
555
    /// Returns the largest allocation size (in bytes) the map will reach
    /// while staying within `memory_budget`.
    pub fn memory_usage_for_memory_budget(memory_budget: usize) -> usize {
        // Step through each possible table size and remember the last one
        // whose memory usage still fits within the budget.
        let mut memory_usage = 0;
        let mut capacity = 1;
        while let Some(buckets) = Self::buckets_for_capacity(capacity) {
            match Self::memory_usage_for_buckets(buckets) {
                Some(usage) if usage <= memory_budget => memory_usage = usage,
                _ => break,
            }
            // Requesting one more element than the current bucket count
            // forces the next-bigger table size on the following iteration.
            capacity = buckets + 1;
        }

        memory_usage
    }

    /// Returns the current size of the map's allocation, in bytes.
    pub fn memory_usage(&self) -> usize {
        self.map.allocation_info().1.size()
    }
575
    /// Returns the map's allocation pointer, or null when nothing is
    /// allocated. Exposed for tests only.
    #[doc(hidden)]
    pub fn allocation_pointer(&self) -> *const u8 {
        let (ptr, layout) = self.map.allocation_info();
        if layout.size() == 0 {
            core::ptr::null()
        } else {
            ptr.as_ptr() as *const u8
        }
    }

    /// Returns the number of elements in the map.
    pub fn len(&self) -> usize {
        self.map.len()
    }

    /// Returns `true` if the map is empty.
    pub fn is_empty(&self) -> bool {
        self.map.is_empty()
    }
596
597 #[inline]
599 fn hash_key<T>(&self, key: &T) -> u64
600 where
601 T: Hash + ?Sized,
602 {
603 let mut hasher = self.hasher.build_hasher();
604 key.hash(&mut hasher);
605 hasher.finish()
606 }
607
    /// Returns a mutable reference to the value for `key`, promoting the
    /// entry to most-recently-used. Returns `None` if absent.
    #[inline]
    pub fn get(&mut self, key: &(impl Hash + PartialEq<K> + ?Sized)) -> Option<&mut V> {
        let hash = self.hash_key(key);
        self.get_by_hash(hash, |existing_key, _| *key == *existing_key)
    }

    /// Like [`LruMap::get`], but with a caller-supplied `hash` and equality
    /// predicate. The matched entry is promoted to most-recently-used.
    #[inline]
    pub fn get_by_hash(&mut self, hash: u64, mut is_eq: impl FnMut(&K, &V) -> bool) -> Option<&mut V> {
        let bucket = self.map.find(hash, |entry| is_eq(&entry.key, &entry.value))?;

        // SAFETY: `bucket` was just returned by `find`, so it is live and the
        // map is non-empty.
        unsafe {
            self.promote_existing_bucket(&bucket);
        }

        // SAFETY: promotion only rewrites links; the bucket stays valid.
        let entry = unsafe { bucket.as_mut() };
        Some(&mut entry.value)
    }
631
632 #[inline]
637 pub fn get_or_insert<'a>(
638 &mut self,
639 key: (impl Into<L::KeyToInsert<'a>> + Hash + PartialEq<K> + ?Sized),
640 get: impl FnOnce() -> V,
641 ) -> Option<&mut V>
642 where
643 L::KeyToInsert<'a>: Hash + PartialEq<K>,
644 {
645 match self.get_or_insert_fallible(key, || Ok(get())) {
646 Ok(value) => value,
647 Err(()) => unreachable!(),
648 }
649 }
650
651 #[inline]
653 pub fn get_or_insert_fallible<'a, E>(
654 &mut self,
655 key: (impl Into<L::KeyToInsert<'a>> + Hash + PartialEq<K> + ?Sized),
656 get: impl FnOnce() -> Result<V, E>,
657 ) -> Result<Option<&mut V>, E>
658 where
659 L::KeyToInsert<'a>: Hash + PartialEq<K>,
660 {
661 #[allow(clippy::needless_lifetimes)]
662 #[inline(always)]
663 unsafe fn cast_lifetime<'a, 'b, T>(x: &'a mut T) -> &'b mut T {
664 core::mem::transmute(x)
665 }
666
667 let hash = self.hash_key(&key);
668 if let Some(value) = self.get_by_hash(hash, |existing_key, _| key == *existing_key) {
669 let value = unsafe { cast_lifetime(value) };
672 return Ok(Some(value));
673 }
674
675 let value = get()?;
676 if self.insert(key.into(), value) {
677 debug_assert_ne!(self.newest, L::LinkType::MAX);
678
679 unsafe {
681 let bucket = self.map.bucket(self.newest.into_usize());
682 Ok(Some(&mut bucket.as_mut().value))
683 }
684 } else {
685 Ok(None)
686 }
687 }
688
    /// Returns a reference to the value for `key` without changing the LRU
    /// order. Returns `None` if absent.
    #[inline]
    pub fn peek<'a>(&'a self, key: &(impl Hash + PartialEq<K> + ?Sized)) -> Option<&'a V> {
        let hash = self.hash_key(&key);
        self.peek_by_hash(hash, |existing_key, _| *key == *existing_key)
    }

    /// Returns a mutable reference to the value for `key` without changing
    /// the LRU order. Returns `None` if absent.
    #[inline]
    pub fn peek_mut<'a>(&'a mut self, key: &(impl Hash + PartialEq<K> + ?Sized)) -> Option<&'a mut V> {
        let hash = self.hash_key(&key);
        self.peek_by_hash_mut(hash, |existing_key, _| *key == *existing_key)
    }

    /// Like [`LruMap::peek`], but with a caller-supplied `hash` and equality
    /// predicate.
    #[inline]
    pub fn peek_by_hash(&self, hash: u64, mut is_eq: impl FnMut(&K, &V) -> bool) -> Option<&V> {
        let bucket = self.map.find(hash, |entry| is_eq(&entry.key, &entry.value))?;

        // SAFETY: `bucket` was just returned by `find`, so it is live.
        let entry = unsafe { bucket.as_ref() };
        Some(&entry.value)
    }

    /// Like [`LruMap::peek_mut`], but with a caller-supplied `hash` and
    /// equality predicate.
    #[inline]
    pub fn peek_by_hash_mut(&mut self, hash: u64, mut is_eq: impl FnMut(&K, &V) -> bool) -> Option<&mut V> {
        let bucket = self.map.find(hash, |entry| is_eq(&entry.key, &entry.value))?;

        // SAFETY: `bucket` was just returned by `find`, so it is live.
        let entry = unsafe { bucket.as_mut() };
        Some(&mut entry.value)
    }
724
    /// Removes the entry for `key` and returns its value, or `None` if absent.
    pub fn remove(&mut self, key: &(impl Hash + PartialEq<K> + ?Sized)) -> Option<V> {
        let hash = self.hash_key(&key);
        let bucket = self.map.find(hash, |entry| *key == entry.key)?;

        // SAFETY: `bucket` was just returned by `find`, so it is live.
        let entry = unsafe { self.remove_bucket(bucket) };

        Some(entry.value)
    }
735
    /// Inserts `value` under `key`, replacing any existing entry and
    /// promoting it to most-recently-used.
    ///
    /// Returns `true` when an element is present under `key` after the call.
    /// Returns `false` when the limiter rejected the insertion or replacement
    /// (a rejected replacement also removes the pre-existing entry), or when
    /// the new entry was immediately evicted again by the limit check.
    #[inline]
    pub fn insert<'a>(&mut self, key: L::KeyToInsert<'a>, mut value: V) -> bool
    where
        L::KeyToInsert<'a>: Hash + PartialEq<K>,
    {
        let hash = self.hash_key(&key);
        if let Some(bucket) = self.map.find(hash, |entry| key == entry.key) {
            let can_replace = {
                // SAFETY: `bucket` was just returned by `find`, so it is live.
                let entry = unsafe { bucket.as_mut() };
                self.limiter
                    .on_replace(self.map.len(), &mut entry.key, key, &mut entry.value, &mut value)
            };

            if !can_replace {
                // The limiter vetoed the replacement: the old entry is
                // removed as well, leaving nothing under this key.
                // SAFETY: the bucket is still live at this point.
                unsafe { self.remove_bucket_noinline(bucket) };
                return false;
            } else {
                // SAFETY: the bucket is live; promotion only rewrites links.
                unsafe {
                    let old_value = core::mem::replace(&mut bucket.as_mut().value, value);
                    self.promote_existing_bucket(&bucket);
                    core::mem::drop(old_value);
                }
            }
        } else {
            let (key, value) = match self.limiter.on_insert(self.map.len(), key, value) {
                Some(key_value) => key_value,
                None => {
                    // The limiter rejected the insertion outright.
                    return false;
                }
            };

            // Proactively evict one entry if adding this one would go over
            // the limit, so the table ideally doesn't need to grow.
            if !self.map.is_empty() && self.limiter.is_over_the_limit(self.map.len() + 1) {
                self.pop_oldest();
            }

            let was_inserted = if self.map.is_empty() {
                // SAFETY: the empty-map invariants hold on this branch.
                unsafe { self.insert_into_empty_map(hash, key, value) }
            } else {
                // SAFETY: the non-empty-map invariants hold on this branch.
                unsafe { self.insert_into_new_bucket(hash, key, value) }
            };

            if !was_inserted {
                return false;
            }
        }

        // The insertion/replacement itself may have pushed us over the limit.
        if self.limiter.is_over_the_limit(self.map.len()) {
            self.pop_until_under_the_limit();
        }

        // The new entry itself may have been evicted just above.
        !self.is_empty()
    }
807
    /// Removes every element and notifies the limiter via `on_cleared`.
    /// The allocation is kept (see `RawTable::clear`).
    pub fn clear(&mut self) {
        self.oldest = L::LinkType::MAX;
        self.newest = L::LinkType::MAX;
        self.map.clear();
        self.limiter.on_cleared();
    }
815
    /// Removes and returns the least recently used entry, or `None` if empty.
    /// The limiter's `on_removed` is invoked before returning.
    #[inline]
    pub fn pop_oldest(&mut self) -> Option<(K, V)> {
        if self.is_empty() {
            return None;
        }

        // SAFETY: the map is non-empty, so `oldest` points at a live bucket.
        let mut oldest_entry = unsafe {
            let oldest_bucket = self.map.bucket(self.oldest.into_usize());
            self.map.remove(oldest_bucket)
        };
        debug_assert_eq!(oldest_entry.older, L::LinkType::MAX);

        self.oldest = oldest_entry.newer;
        if self.map.is_empty() {
            debug_assert_eq!(self.oldest, L::LinkType::MAX);
            self.newest = L::LinkType::MAX;
        } else {
            // SAFETY: the map is still non-empty, so the removed entry had a
            // `newer` neighbour, which now becomes the list's oldest end.
            unsafe {
                self.map.bucket(oldest_entry.newer.into_usize()).as_mut().older = L::LinkType::MAX;
            }
        }

        self.limiter.on_removed(&mut oldest_entry.key, &mut oldest_entry.value);
        Some((oldest_entry.key, oldest_entry.value))
    }
845
846 #[inline(never)]
848 fn pop_until_under_the_limit(&mut self) {
849 loop {
850 if self.pop_oldest().is_none() {
851 break;
856 }
857
858 if !self.limiter.is_over_the_limit(self.map.len()) {
859 break;
860 }
861 }
862 }
863
    /// Removes and returns the most recently used entry, or `None` if empty.
    /// The limiter's `on_removed` is invoked before returning.
    pub fn pop_newest(&mut self) -> Option<(K, V)> {
        if self.is_empty() {
            return None;
        }

        // SAFETY: the map is non-empty, so `newest` points at a live bucket.
        let mut newest_entry = unsafe {
            let newest_bucket = self.map.bucket(self.newest.into_usize());
            self.map.remove(newest_bucket)
        };
        debug_assert_eq!(newest_entry.newer, L::LinkType::MAX);

        self.newest = newest_entry.older;
        if self.map.is_empty() {
            debug_assert_eq!(self.newest, L::LinkType::MAX);
            self.oldest = L::LinkType::MAX;
        } else {
            // SAFETY: the map is still non-empty, so the removed entry had an
            // `older` neighbour, which now becomes the list's newest end.
            unsafe {
                self.map.bucket(newest_entry.older.into_usize()).as_mut().newer = L::LinkType::MAX;
            }
        }

        self.limiter.on_removed(&mut newest_entry.key, &mut newest_entry.value);
        Some((newest_entry.key, newest_entry.value))
    }
892
    /// Returns the most recently used entry without changing the LRU order,
    /// or `None` if the map is empty.
    pub fn peek_newest(&self) -> Option<(&K, &V)> {
        if self.newest == L::LinkType::MAX {
            return None;
        }

        // SAFETY: `newest` is not the sentinel, so it points at a live bucket.
        unsafe {
            let entry = self.map.bucket(self.newest.into_usize()).as_ref();
            Some((&entry.key, &entry.value))
        }
    }

    /// Returns the least recently used entry without changing the LRU order,
    /// or `None` if the map is empty.
    pub fn peek_oldest(&self) -> Option<(&K, &V)> {
        if self.oldest == L::LinkType::MAX {
            return None;
        }

        // SAFETY: `oldest` is not the sentinel, so it points at a live bucket.
        unsafe {
            let entry = self.map.bucket(self.oldest.into_usize()).as_ref();
            Some((&entry.key, &entry.value))
        }
    }
920
    /// Unlinks the entry in `bucket` from its current list position and
    /// relinks it at the most-recently-used end.
    ///
    /// # Safety
    ///
    /// The map must be non-empty and `bucket` must point at a live entry of
    /// this map.
    #[inline]
    unsafe fn promote_existing_bucket(&mut self, bucket: &Bucket<Entry<K, V, L::LinkType>>) {
        self.assert_not_empty();

        let index = L::LinkType::from_usize(self.map.bucket_index(bucket));
        if self.newest == index {
            // Already the most recently used entry; nothing to relink.
            return;
        }

        if self.oldest != index {
            // Middle of the list: an `older` neighbour exists, so make it
            // skip over this entry in the `newer` direction.
            self.map.bucket(bucket.as_ref().older.into_usize()).as_mut().newer = bucket.as_ref().newer;
        } else {
            // This is the oldest entry: its successor becomes the new oldest.
            self.oldest = bucket.as_ref().newer;
        }

        // Not the newest entry (checked above), so a `newer` neighbour exists.
        self.map.bucket(bucket.as_ref().newer.into_usize()).as_mut().older = bucket.as_ref().older;
        bucket.as_mut().newer = L::LinkType::MAX;

        // Splice the entry in front of the current newest.
        self.map.bucket(self.newest.into_usize()).as_mut().newer = index;
        bucket.as_mut().older = self.newest;

        self.newest = index;
    }

    /// Links a freshly inserted `bucket` (whose `older` link was pre-set to
    /// the previous newest) as the new most-recently-used entry.
    ///
    /// # Safety
    ///
    /// `bucket` must be a live, just-inserted entry and the map must contain
    /// at least one other entry.
    #[inline]
    unsafe fn finish_inserting_bucket(&mut self, bucket: Bucket<Entry<K, V, L::LinkType>>) {
        debug_assert_ne!(bucket.as_ref().older, L::LinkType::MAX);
        debug_assert_eq!(bucket.as_ref().newer, L::LinkType::MAX);
        debug_assert!(self.map.len() >= 2);

        let index = L::LinkType::from_usize(self.map.bucket_index(&bucket));
        self.map.bucket(self.newest.into_usize()).as_mut().newer = index;
        bucket.as_mut().older = self.newest;
        self.newest = index;
    }
976
    /// Out-of-line wrapper over `remove_bucket` for cold call sites
    /// (e.g. rejected replacements in `insert`).
    ///
    /// # Safety
    ///
    /// Same contract as `remove_bucket`.
    #[inline(never)]
    unsafe fn remove_bucket_noinline(&mut self, bucket: Bucket<Entry<K, V, L::LinkType>>) {
        self.remove_bucket(bucket);
    }

    /// Removes the entry in `bucket` from the table, unlinks it from the
    /// list, and notifies the limiter via `on_removed`.
    ///
    /// # Safety
    ///
    /// The map must be non-empty and `bucket` must point at a live entry of
    /// this map.
    #[inline]
    unsafe fn remove_bucket(&mut self, bucket: Bucket<Entry<K, V, L::LinkType>>) -> Entry<K, V, L::LinkType> {
        self.assert_not_empty();
        let index = L::LinkType::from_usize(self.map.bucket_index(&bucket));
        let mut entry = self.map.remove(bucket);

        // Detach from the `newer` side: either it was the newest entry, or a
        // `newer` neighbour exists and inherits the `older` link.
        if self.newest == index {
            self.newest = entry.older;
        } else {
            self.map.bucket(entry.newer.into_usize()).as_mut().older = entry.older;
        }

        // Detach from the `older` side, symmetrically.
        if self.oldest == index {
            self.oldest = entry.newer;
        } else {
            self.map.bucket(entry.older.into_usize()).as_mut().newer = entry.newer;
        }

        self.limiter.on_removed(&mut entry.key, &mut entry.value);
        entry
    }
1009
    /// Inserts the first entry into an empty map, growing the table from zero
    /// capacity if needed. Returns `false` when growth fails.
    ///
    /// # Safety
    ///
    /// The map must be empty (see `assert_empty`).
    #[inline(never)]
    #[cold]
    #[must_use]
    unsafe fn insert_into_empty_map(&mut self, hash: u64, key: K, value: V) -> bool {
        self.assert_empty();

        if self.map.capacity() == 0 && !self.try_grow(1) {
            return false;
        }

        // `try_grow` guarantees the sentinel can never be a valid index.
        assert!(self.map.buckets() < L::LinkType::MAX.into_usize());

        // The sole entry is both the newest and the oldest; its links are
        // both the end-of-list sentinel.
        let entry = Entry {
            key,
            value,
            older: L::LinkType::MAX,
            newer: L::LinkType::MAX,
        };
        let bucket = self.map.insert_no_grow(hash, entry);
        let index = L::LinkType::from_usize(self.map.bucket_index(&bucket));

        self.oldest = index;
        self.newest = index;

        true
    }

    /// Inserts a new entry into a non-empty map, falling back to the slow
    /// grow-and-insert path when the table is full. Returns `false` when the
    /// entry could not be inserted.
    ///
    /// # Safety
    ///
    /// The map must be non-empty (see `assert_not_empty`).
    #[inline]
    #[must_use]
    unsafe fn insert_into_new_bucket(&mut self, hash: u64, key: K, value: V) -> bool {
        self.assert_not_empty();

        // Pre-link the entry behind the current newest; `finish_inserting_bucket`
        // completes the other direction of the link.
        let entry = Entry {
            key,
            value,
            older: self.newest,
            newer: L::LinkType::MAX,
        };
        match self.map.try_insert_no_grow(hash, entry) {
            Ok(bucket) => {
                // SAFETY: the bucket was just inserted and the map already
                // held at least one other entry.
                unsafe {
                    self.finish_inserting_bucket(bucket);
                }

                true
            }
            Err(entry) => {
                // Table full: take the cold path that grows or evicts.
                // SAFETY: the map is still non-empty here.
                unsafe {
                    self.try_grow_and_insert(hash, entry.key, entry.value)
                }
            }
        }
    }
1076
    /// Slow path of insertion when the table is full: grow it if possible,
    /// otherwise evict oldest entries (and possibly rehash in place) until
    /// the new entry fits. Returns `false` when it could not be inserted.
    ///
    /// # Safety
    ///
    /// The map must be non-empty (see `assert_not_empty`).
    #[inline(never)]
    #[cold]
    unsafe fn try_grow_and_insert(&mut self, hash: u64, mut key: K, mut value: V) -> bool {
        self.assert_not_empty();

        let new_capacity = self.len() + 1;
        if self.try_grow(new_capacity) {
            let entry = Entry {
                key,
                value,
                older: self.newest,
                newer: L::LinkType::MAX,
            };

            // SAFETY: `try_grow` succeeded, so there is guaranteed room and
            // the map is non-empty.
            unsafe {
                let bucket = self.map.insert_no_grow(hash, entry);
                self.finish_inserting_bucket(bucket);
            }

            true
        } else {
            // Growth failed (limiter veto or allocation failure): make room
            // by evicting the oldest entries one at a time.
            loop {
                self.pop_oldest();

                if self.is_empty() {
                    return self.insert_into_empty_map(hash, key, value);
                }

                let entry = Entry {
                    key,
                    value,
                    older: self.newest,
                    newer: L::LinkType::MAX,
                };

                // Once the table is mostly tombstones, rebuild it in place so
                // the insert below is guaranteed to find a free slot.
                let full_capacity = full_capacity_for_buckets(self.map.buckets());
                if self.len() < full_capacity / 2 {
                    if !self.rehash_in_place() {
                        return false;
                    }

                    assert!(self.map.capacity() > self.map.len());

                    // SAFETY: the assert above guarantees a free slot; the
                    // map is non-empty.
                    unsafe {
                        let bucket = self.map.insert_no_grow(hash, entry);
                        self.finish_inserting_bucket(bucket);
                    }

                    return true;
                }

                match self.map.try_insert_no_grow(hash, entry) {
                    Ok(bucket) => {
                        // SAFETY: the bucket was just inserted and the map
                        // held at least one other entry.
                        unsafe {
                            self.finish_inserting_bucket(bucket);
                        }

                        return true;
                    }
                    Err(entry) => {
                        // Still no room; recover the pair and evict again.
                        key = entry.key;
                        value = entry.value;

                        continue;
                    }
                }
            }
        }
    }
1162
    /// Rebuilds the table in place (same bucket count) to reclaim tombstones,
    /// then patches every linked-list link, since entries move to different
    /// buckets during the rehash. Returns `false` when the scratch buffer
    /// cannot be allocated.
    #[inline(never)]
    #[cold]
    fn rehash_in_place(&mut self) -> bool {
        if self.is_empty() {
            // With no entries the hasher callback can never be invoked.
            self.map.reserve(1, |_| unreachable!());
            return true;
        }

        let bucket_count = self.map.buckets();
        // Scratch space for two index maps (see below); fallible so an OOM
        // here degrades gracefully instead of aborting.
        let mut buffer = alloc::vec::Vec::<core::mem::MaybeUninit<L::LinkType>>::new();
        if buffer.try_reserve_exact(bucket_count * 2).is_err() {
            return false;
        }

        // SAFETY: the elements are `MaybeUninit`, which requires no
        // initialization; capacity was just reserved.
        unsafe {
            buffer.set_len(bucket_count * 2);
        }

        // `older_to_old_index_map[i]`: old index of the entry whose `older`
        // link is `i` (i.e. the inverse of the `older` relation).
        // `index_map[old]`: new index of the entry that lived at `old`.
        let (older_to_old_index_map, index_map) = buffer.split_at_mut(bucket_count);

        let old_oldest = core::mem::replace(&mut self.oldest, L::LinkType::MAX);
        let old_newest = core::mem::replace(&mut self.newest, L::LinkType::MAX);

        // Pass 1 (before the rehash): record, for every entry, who points at
        // it through an `older` link, keyed by that link's target index.
        // SAFETY: iterating live buckets of our own table.
        unsafe {
            let mut iter = self.map.iter();
            while let Some(old_bucket) = iter.next() {
                let old_index = self.map.bucket_index(&old_bucket);
                let entry = old_bucket.as_ref();
                if entry.older != L::LinkType::MAX {
                    older_to_old_index_map[entry.older.into_usize()] =
                        core::mem::MaybeUninit::new(L::LinkType::from_usize(old_index));
                }
            }
        }

        {
            // If the limiter's hasher panics mid-rehash the links are
            // inconsistent; this guard clears the map so a panic cannot leave
            // it in an unsound state.
            struct Guard<'a, K, V, L>
            where
                L: Limiter<K, V>,
            {
                map: &'a mut RawTable<Entry<K, V, L::LinkType>>,
                limiter: &'a mut L,
            }

            impl<'a, K, V, L> Drop for Guard<'a, K, V, L>
            where
                L: Limiter<K, V>,
            {
                fn drop(&mut self) {
                    self.map.clear();
                    self.limiter.on_cleared();
                }
            }

            let guard = Guard {
                map: &mut self.map,
                limiter: &mut self.limiter,
            };

            // Reserve just enough that hashbrown rehashes in place rather
            // than growing to a bigger table.
            let extra_capacity = full_capacity_for_buckets(bucket_count) / 2 - guard.map.len();
            let hasher = &self.hasher;
            guard.map.reserve(extra_capacity, |entry| {
                let mut hasher = hasher.build_hasher();
                entry.key.hash(&mut hasher);
                hasher.finish()
            });

            // Success: disarm the guard without running its destructor.
            core::mem::forget(guard);
        }

        // Pass 2 (after the rehash): reconstruct `index_map` by finding each
        // entry's OLD index — it is recoverable because `older` links still
        // hold old indices: the entry whose `older` is the sentinel was the
        // oldest, and every other old index was recorded in pass 1.
        // SAFETY: iterating live buckets; pass 1 initialized every slot read.
        debug_assert_eq!(self.map.buckets(), bucket_count); unsafe {
            let mut iter = self.map.iter();
            while let Some(new_bucket) = iter.next() {
                let new_index = self.map.bucket_index(&new_bucket);
                let entry = new_bucket.as_ref();
                let old_index = if entry.older == L::LinkType::MAX {
                    old_oldest
                } else {
                    older_to_old_index_map[entry.older.into_usize()].assume_init()
                }
                .into_usize();

                index_map[old_index] = core::mem::MaybeUninit::new(L::LinkType::from_usize(new_index));
            }
        }

        // Pass 3: rewrite every link (and the list ends) from old to new
        // indices using `index_map`.
        // SAFETY: iterating live buckets; pass 2 initialized every slot read.
        unsafe {
            let mut iter = self.map.iter();
            while let Some(new_bucket) = iter.next() {
                let entry = new_bucket.as_mut();

                if entry.older != L::LinkType::MAX {
                    entry.older = index_map[entry.older.into_usize()].assume_init();
                }

                if entry.newer != L::LinkType::MAX {
                    entry.newer = index_map[entry.newer.into_usize()].assume_init()
                }
            }

            debug_assert_ne!(old_oldest, L::LinkType::MAX);
            debug_assert_ne!(old_newest, L::LinkType::MAX);

            self.oldest = index_map[old_oldest.into_usize()].assume_init();
            self.newest = index_map[old_newest.into_usize()].assume_init();
        }

        true
    }
1305
    /// Tries to grow the table so it can hold `required_length` elements,
    /// moving every entry into a new table and patching all links.
    ///
    /// Returns `false` when growing is impossible: the size would overflow,
    /// the bucket count would reach the link type's sentinel, the limiter
    /// vetoes the new memory usage, or allocation fails.
    fn try_grow(&mut self, mut required_length: usize) -> bool {
        let mut new_bucket_count = match Self::buckets_for_capacity(required_length) {
            Some(new_bucket_count) => new_bucket_count,
            None => {
                return false;
            }
        };

        let old_bucket_count = self.map.buckets();
        debug_assert!(old_bucket_count < L::LinkType::MAX.into_usize());

        if new_bucket_count <= old_bucket_count {
            // The current table is already big enough bucket-wise; if it is
            // mostly tombstones, an in-place rehash suffices.
            let full_capacity = full_capacity_for_buckets(old_bucket_count);
            if required_length <= full_capacity / 2 {
                return self.rehash_in_place();
            }

            // Otherwise force the next-bigger table size.
            required_length = full_capacity_for_buckets(old_bucket_count * 2);
            new_bucket_count = match Self::buckets_for_capacity(required_length) {
                Some(new_bucket_count) => new_bucket_count,
                None => {
                    return false;
                }
            };
        }

        debug_assert!(new_bucket_count > old_bucket_count);

        let new_memory_usage = match Self::memory_usage_for_buckets(new_bucket_count) {
            Some(new_memory_usage) => new_memory_usage,
            None => {
                return false;
            }
        };

        // Give the limiter a chance to veto the growth.
        if !self.limiter.on_grow(new_memory_usage) {
            return false;
        }

        let new_map = match RawTable::try_with_capacity(required_length) {
            Ok(new_map) => new_map,
            Err(_) => return false,
        };

        debug_assert_eq!(new_map.buckets(), new_bucket_count);
        // `buckets_for_capacity` already rules this out; double-check so the
        // sentinel can never become a valid index even if that changes.
        if new_map.buckets() >= L::LinkType::MAX.into_usize() {
            debug_assert!(false, "unreachable");
            return false;
        }

        if self.is_empty() {
            // Nothing to move or relink.
            self.map = new_map;
            return true;
        }

        // Old-index -> new-index translation table; fallible allocation so
        // OOM degrades gracefully.
        let mut index_map = alloc::vec::Vec::<core::mem::MaybeUninit<L::LinkType>>::new();
        if index_map.try_reserve_exact(old_bucket_count).is_err() {
            return false;
        }

        let mut old_map = core::mem::replace(&mut self.map, new_map);
        let old_oldest = core::mem::replace(&mut self.oldest, L::LinkType::MAX);
        let old_newest = core::mem::replace(&mut self.newest, L::LinkType::MAX);

        // Pass 1: move every entry into the new table, recording where each
        // old bucket index ended up.
        // SAFETY: the elements are `MaybeUninit` (no initialization needed);
        // we only iterate live buckets, and the new table has guaranteed
        // capacity for every entry.
        unsafe {
            index_map.set_len(old_bucket_count);

            let mut iter = old_map.iter();
            while let Some(old_bucket) = iter.next() {
                let old_index = old_map.bucket_index(&old_bucket);
                let entry = old_map.remove(old_bucket);
                let hash = self.hash_key(&entry.key);

                let new_bucket = self.map.insert_no_grow(hash, entry);
                let new_index = self.map.bucket_index(&new_bucket);

                index_map[old_index] = core::mem::MaybeUninit::new(L::LinkType::from_usize(new_index));
            }
        }

        // Pass 2: rewrite every link (and the list ends) through `index_map`.
        // SAFETY: pass 1 initialized `index_map` for every old index that any
        // link can contain.
        unsafe {
            let mut iter = self.map.iter();
            while let Some(new_bucket) = iter.next() {
                let entry = new_bucket.as_mut();

                if entry.older != L::LinkType::MAX {
                    entry.older = index_map[entry.older.into_usize()].assume_init();
                }

                if entry.newer != L::LinkType::MAX {
                    entry.newer = index_map[entry.newer.into_usize()].assume_init()
                }
            }

            debug_assert_ne!(old_oldest, L::LinkType::MAX);
            debug_assert_ne!(old_newest, L::LinkType::MAX);

            self.oldest = index_map[old_oldest.into_usize()].assume_init();
            self.newest = index_map[old_newest.into_usize()].assume_init();
        }

        true
    }
1423
    /// Returns an iterator over the entries, from most to least recently
    /// used. Does not change the LRU order.
    pub fn iter(&self) -> Iter<K, V, L> {
        Iter {
            map: &self.map,
            newest: self.newest,
            oldest: self.oldest,
            remaining: self.len(),
        }
    }
1433
1434 pub fn iter_mut(&mut self) -> IterMut<K, V, L> {
1436 let newest = self.newest;
1437 let oldest = self.oldest;
1438 let remaining = self.len();
1439 IterMut {
1440 map: &mut self.map,
1441 newest,
1442 oldest,
1443 remaining,
1444 }
1445 }
1446
    /// Returns a draining iterator that yields owned `(K, V)` pairs from
    /// most to least recently used; dropping it clears the map.
    pub fn drain(&mut self) -> Drain<K, V, L, S> {
        Drain { map: self }
    }

    /// Returns a reference to the map's limiter.
    pub fn limiter(&self) -> &L {
        &self.limiter
    }

    /// Returns a mutable reference to the map's limiter.
    pub fn limiter_mut(&mut self) -> &mut L {
        &mut self.limiter
    }
1463}
1464
/// An iterator over an [`LruMap`]'s entries, from most to least recently used.
pub struct Iter<'a, K, V, L>
where
    L: Limiter<K, V>,
{
    map: &'a RawTable<Entry<K, V, L::LinkType>>,
    // Next entry to yield from the front (most-recently-used side).
    newest: L::LinkType,
    // Next entry to yield from the back (least-recently-used side).
    oldest: L::LinkType,
    // Entries not yet yielded from either end; the sole termination guard.
    remaining: usize,
}

impl<'a, K, V, L> Iterator for Iter<'a, K, V, L>
where
    L: Limiter<K, V>,
{
    type Item = (&'a K, &'a V);
    fn next(&mut self) -> Option<Self::Item> {
        if self.remaining == 0 {
            return None;
        }

        self.remaining -= 1;

        // SAFETY: `remaining > 0` guarantees `newest` still refers to a live
        // bucket on the list.
        unsafe {
            let bucket = self.map.bucket(self.newest.into_usize());
            let entry = bucket.as_ref();
            self.newest = entry.older;

            Some((&entry.key, &entry.value))
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.remaining, Some(self.remaining))
    }
}

impl<'a, K, V, L> DoubleEndedIterator for Iter<'a, K, V, L>
where
    L: Limiter<K, V>,
{
    fn next_back(&mut self) -> Option<Self::Item> {
        if self.remaining == 0 {
            return None;
        }

        self.remaining -= 1;

        // SAFETY: `remaining > 0` guarantees `oldest` still refers to a live
        // bucket on the list.
        unsafe {
            let bucket = self.map.bucket(self.oldest.into_usize());
            let entry = bucket.as_ref();
            self.oldest = entry.newer;

            Some((&entry.key, &entry.value))
        }
    }
}

impl<'a, K, V, L> ExactSizeIterator for Iter<'a, K, V, L> where L: Limiter<K, V> {}
1524
/// A mutable iterator over an [`LruMap`]'s entries, from most to least
/// recently used.
pub struct IterMut<'a, K, V, L>
where
    L: Limiter<K, V>,
{
    map: &'a mut RawTable<Entry<K, V, L::LinkType>>,
    // Next entry to yield from the front (most-recently-used side).
    newest: L::LinkType,
    // Next entry to yield from the back (least-recently-used side).
    oldest: L::LinkType,
    // Entries not yet yielded from either end; the sole termination guard.
    remaining: usize,
}

impl<'a, K, V, L> Iterator for IterMut<'a, K, V, L>
where
    L: Limiter<K, V>,
{
    type Item = (&'a K, &'a mut V);
    fn next(&mut self) -> Option<Self::Item> {
        if self.remaining == 0 {
            return None;
        }

        self.remaining -= 1;

        // SAFETY: `remaining > 0` guarantees `newest` still refers to a live
        // bucket; each bucket is visited at most once, so no two yielded
        // references alias.
        unsafe {
            let bucket = self.map.bucket(self.newest.into_usize());
            let entry = bucket.as_mut();
            self.newest = entry.older;

            Some((&entry.key, &mut entry.value))
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.remaining, Some(self.remaining))
    }
}

impl<'a, K, V, L> DoubleEndedIterator for IterMut<'a, K, V, L>
where
    L: Limiter<K, V>,
{
    fn next_back(&mut self) -> Option<Self::Item> {
        if self.remaining == 0 {
            return None;
        }

        self.remaining -= 1;

        // SAFETY: `remaining > 0` guarantees `oldest` still refers to a live
        // bucket; each bucket is visited at most once, so no two yielded
        // references alias.
        unsafe {
            let bucket = self.map.bucket(self.oldest.into_usize());
            let entry = bucket.as_mut();
            self.oldest = entry.newer;

            Some((&entry.key, &mut entry.value))
        }
    }
}

impl<'a, K, V, L> ExactSizeIterator for IterMut<'a, K, V, L> where L: Limiter<K, V> {}
1584
/// A draining iterator over the entries of an [`LruMap`].
///
/// Yields owned `(K, V)` pairs, newest first (or oldest first through
/// `rev()`); any entries not consumed are removed when the `Drain` is
/// dropped, leaving the map empty.
pub struct Drain<'a, K, V, L, S>
where
    K: PartialEq + Hash,
    L: Limiter<K, V>,
    S: BuildHasher,
{
    // The map being drained; entries are popped off it as iteration advances.
    map: &'a mut LruMap<K, V, L, S>,
}
1594
impl<'a, K, V, L, S> Iterator for Drain<'a, K, V, L, S>
where
    K: PartialEq + Hash,
    S: BuildHasher,
    L: Limiter<K, V>,
{
    type Item = (K, V);
    // Drains from the newest end, matching the forward direction of `Iter`.
    fn next(&mut self) -> Option<Self::Item> {
        self.map.pop_newest()
    }

    // Exact: everything still in the map remains to be drained.
    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = self.map.len();
        (remaining, Some(remaining))
    }
}
1611
impl<'a, K, V, L, S> DoubleEndedIterator for Drain<'a, K, V, L, S>
where
    K: PartialEq + Hash,
    S: BuildHasher,
    L: Limiter<K, V>,
{
    // Drains from the oldest end when iterated in reverse.
    fn next_back(&mut self) -> Option<Self::Item> {
        self.map.pop_oldest()
    }
}
1622
impl<'a, K, V, L, S> Drop for Drain<'a, K, V, L, S>
where
    K: PartialEq + Hash,
    S: BuildHasher,
    L: Limiter<K, V>,
{
    // Dropping the drain removes whatever was not consumed, so even a
    // partially consumed drain leaves the map empty.
    fn drop(&mut self) {
        self.map.clear();
    }
}
1633
// `Drain::size_hint` reports the map's exact remaining length from both ends.
impl<'a, K, V, L, S> ExactSizeIterator for Drain<'a, K, V, L, S>
where
    K: PartialEq + Hash,
    S: BuildHasher,
    L: Limiter<K, V>,
{
}
1641
1642#[cfg(test)]
1643mod tests {
1644 pub use super::*;
1645
1646 extern crate std;
1647 use std::string::String;
1648 use std::{vec, vec::Vec};
1649
1650 fn to_vec<K, V, L, S>(lru: &LruMap<K, V, L, S>) -> Vec<(K, V)>
1651 where
1652 K: Copy + PartialEq + Hash,
1653 V: Copy,
1654 L: Limiter<K, V>,
1655 S: BuildHasher,
1656 {
1657 lru.iter().map(|(key, value)| (*key, *value)).collect()
1658 }
1659
    // Test limiter that bounds the map by the *sum of its values* ("cost")
    // instead of by entry count, and cross-checks the length reported by the
    // map against its own bookkeeping.
    #[derive(Copy, Clone, Debug)]
    pub struct ByValue {
        // Maximum total cost the map may hold.
        limit: usize,
        // Current sum of all stored values.
        cost: usize,
        // Shadow copy of the map's length, used for assertions.
        length: usize,
    }
1666
1667 impl ByValue {
1668 fn new(limit: usize) -> Self {
1669 ByValue {
1670 limit,
1671 cost: 0,
1672 length: 0,
1673 }
1674 }
1675 }
1676
1677 impl Limiter<usize, usize> for ByValue {
1678 type KeyToInsert<'a> = usize;
1679 type LinkType = usize;
1680
1681 fn is_over_the_limit(&self, length: usize) -> bool {
1682 assert_eq!(length, self.length);
1683 self.cost > self.limit
1684 }
1685
1686 fn on_insert<'a>(&mut self, length: usize, key: usize, value: usize) -> Option<(usize, usize)> {
1687 assert_eq!(length, self.length);
1688 if value > self.limit {
1689 return None;
1690 }
1691
1692 self.cost += value;
1693 self.length += 1;
1694 Some((key, value))
1695 }
1696
1697 fn on_replace(
1698 &mut self,
1699 length: usize,
1700 _: &mut usize,
1701 _: usize,
1702 old_value: &mut usize,
1703 new_value: &mut usize,
1704 ) -> bool {
1705 assert_eq!(length, self.length);
1706 if *new_value > self.limit {
1707 return false;
1708 }
1709
1710 self.cost += *new_value - *old_value;
1711 true
1712 }
1713
1714 fn on_cleared(&mut self) {
1715 self.cost = 0;
1716 self.length = 0;
1717 }
1718
1719 fn on_removed(&mut self, _: &mut usize, value: &mut usize) {
1720 self.cost -= *value;
1721 self.length -= 1;
1722 }
1723
1724 fn on_grow(&mut self, _: usize) -> bool {
1725 true
1726 }
1727 }
1728
1729 impl Limiter<usize, (usize, usize)> for ByValue {
1730 type KeyToInsert<'a> = usize;
1731 type LinkType = usize;
1732
1733 fn is_over_the_limit(&self, _: usize) -> bool {
1734 self.cost > self.limit
1735 }
1736
1737 fn on_insert<'a>(&mut self, _: usize, key: usize, value: (usize, usize)) -> Option<(usize, (usize, usize))> {
1738 if value.0 > self.limit {
1739 return None;
1740 }
1741
1742 self.cost += value.0;
1743 Some((key, value))
1744 }
1745
1746 fn on_replace(
1747 &mut self,
1748 _: usize,
1749 _: &mut usize,
1750 _: usize,
1751 old_value: &mut (usize, usize),
1752 new_value: &mut (usize, usize),
1753 ) -> bool {
1754 if new_value.0 > self.limit {
1755 return false;
1756 }
1757
1758 self.cost += new_value.0 - old_value.0;
1759 true
1760 }
1761
1762 fn on_cleared(&mut self) {
1763 self.cost = 0;
1764 }
1765
1766 fn on_removed(&mut self, _: &mut usize, &mut (value, _): &mut (usize, usize)) {
1767 self.cost -= value;
1768 }
1769
1770 fn on_grow(&mut self, _: usize) -> bool {
1771 true
1772 }
1773 }
1774
    // Limiter that never evicts but uses `u8` links, so the map's capacity is
    // bound by the link type rather than by the limiter itself.
    #[derive(Copy, Clone, Debug, Default)]
    pub struct UnlimitedU8;

    impl<K, V> Limiter<K, V> for UnlimitedU8 {
        type KeyToInsert<'a> = K;
        type LinkType = u8;

        fn is_over_the_limit(&self, _: usize) -> bool {
            false
        }

        // Accepts every insertion unchanged.
        fn on_insert<'a>(&mut self, _: usize, key: K, value: V) -> Option<(K, V)> {
            Some((key, value))
        }

        fn on_replace(&mut self, _: usize, _: &mut K, _: K, _: &mut V, _: &mut V) -> bool {
            true
        }

        fn on_cleared(&mut self) {}
        fn on_removed(&mut self, _: &mut K, _: &mut V) {}
        fn on_grow(&mut self, _: usize) -> bool {
            true
        }
    }
1800
    // Limiter that never evicts but uses `u16` links, so the map's capacity
    // is bound by the link type rather than by the limiter itself.
    #[derive(Copy, Clone, Debug, Default)]
    pub struct UnlimitedU16;

    impl<K, V> Limiter<K, V> for UnlimitedU16 {
        type KeyToInsert<'a> = K;
        type LinkType = u16;

        fn is_over_the_limit(&self, _: usize) -> bool {
            false
        }

        // Accepts every insertion unchanged.
        fn on_insert<'a>(&mut self, _: usize, key: K, value: V) -> Option<(K, V)> {
            Some((key, value))
        }

        fn on_replace(&mut self, _: usize, _: &mut K, _: K, _: &mut V, _: &mut V) -> bool {
            true
        }

        fn on_cleared(&mut self) {}
        fn on_removed(&mut self, _: &mut K, _: &mut V) {}
        fn on_grow(&mut self, _: usize) -> bool {
            true
        }
    }
1826
    // Limiter whose "over the limit" answer is toggled by hand from the
    // tests (via `limiter_mut()`), used to force whole-map eviction on
    // demand.
    #[derive(Copy, Clone, Debug, Default)]
    pub struct Manual {
        // When `true`, the map is reported as over the limit regardless of
        // its length, triggering eviction until it is empty.
        overflow: bool,
    }

    impl<K, V> Limiter<K, V> for Manual {
        type KeyToInsert<'a> = K;
        type LinkType = usize;

        fn is_over_the_limit(&self, _: usize) -> bool {
            self.overflow
        }

        fn on_insert<'a>(&mut self, _: usize, key: K, value: V) -> Option<(K, V)> {
            Some((key, value))
        }

        fn on_replace(&mut self, _: usize, _: &mut K, _: K, _: &mut V, _: &mut V) -> bool {
            true
        }

        fn on_cleared(&mut self) {}
        fn on_removed(&mut self, _: &mut K, _: &mut V) {}
        fn on_grow(&mut self, _: usize) -> bool {
            true
        }
    }
1854
    // Limiter used to exercise `KeyToInsert` types that differ from the
    // stored key type: insertions take a borrowed `InsertKey<'_>` while the
    // map stores owned `String`s.
    #[derive(Copy, Clone, Debug, Default)]
    pub struct WithCustomInsertKey;
    // Borrowed insertion key; converted into an owned `String` on insert.
    pub struct InsertKey<'a>(&'a str);

    impl<'a> PartialEq<String> for InsertKey<'a> {
        fn eq(&self, rhs: &String) -> bool {
            self.0 == rhs
        }
    }

    impl<'a> core::hash::Hash for InsertKey<'a> {
        // Must hash identically to the stored `String` key so lookups during
        // insertion find existing entries (`String` delegates to `str`'s
        // `Hash`, so hashing the inner `&str` matches).
        fn hash<H>(&self, state: &mut H)
        where
            H: core::hash::Hasher,
        {
            self.0.hash(state)
        }
    }

    impl<V> Limiter<String, V> for WithCustomInsertKey {
        type KeyToInsert<'a> = InsertKey<'a>;
        type LinkType = usize;

        fn is_over_the_limit(&self, _: usize) -> bool {
            false
        }

        // Converts the borrowed key into an owned `String` for storage.
        fn on_insert<'a>(&mut self, _: usize, key: InsertKey<'a>, value: V) -> Option<(String, V)> {
            Some((String::from(key.0), value))
        }

        fn on_replace<'a>(&mut self, _: usize, _: &mut String, _: InsertKey<'a>, _: &mut V, _: &mut V) -> bool {
            true
        }

        fn on_cleared(&mut self) {}
        fn on_removed(&mut self, _: &mut String, _: &mut V) {}
        fn on_grow(&mut self, _: usize) -> bool {
            true
        }
    }
1896
    // Inserting 32 distinct keys keeps every entry, newest first.
    #[test]
    fn insert_works() {
        let mut lru = LruMap::new(UnlimitedCompact);
        assert!(lru.is_empty());
        assert_eq!(lru.len(), 0);
        assert_eq!(lru.iter().collect::<Vec<_>>(), vec![]);

        let mut vec = Vec::new();
        for n in 1..=32 {
            lru.insert(n, n * 10);
            // Mirror the expected order: the newest entry goes to the front.
            vec.insert(0, (n, n * 10));
            assert!(!lru.is_empty());
            assert_eq!(lru.len(), n);
            assert_eq!(to_vec(&lru), vec);
        }

        lru.assert_check_internal_state();
    }
1915
    // With a length limit of 3, every insert past capacity evicts the oldest
    // entry, and re-inserting an existing key promotes it instead.
    #[test]
    fn insert_with_limiter_works() {
        let mut lru = LruMap::new(ByLength::new(3));
        lru.insert(1, 10);
        lru.insert(2, 20);
        lru.insert(3, 30);
        assert_eq!(to_vec(&lru), vec![(3, 30), (2, 20), (1, 10)]);

        lru.insert(4, 40);
        assert_eq!(to_vec(&lru), vec![(4, 40), (3, 30), (2, 20)]);

        lru.insert(5, 50);
        assert_eq!(to_vec(&lru), vec![(5, 50), (4, 40), (3, 30)]);

        lru.insert(6, 60);
        assert_eq!(to_vec(&lru), vec![(6, 60), (5, 50), (4, 40)]);

        lru.insert(5, 500);
        assert_eq!(to_vec(&lru), vec![(5, 500), (6, 60), (4, 40)]);

        lru.insert(4, 400);
        assert_eq!(to_vec(&lru), vec![(4, 400), (5, 500), (6, 60)]);

        lru.assert_check_internal_state();
    }
1941
    // The guaranteed capacity stops growing once the limiter's length cap is
    // reached (capacity 7 stays at 7; capacity 8 rounds up to the next
    // bucket size's usable capacity, 14).
    #[test]
    fn limiter_does_not_allow_the_map_to_grow() {
        let mut lru = LruMap::with_seed(ByLength::new(7), [12, 34, 56, 78]);
        assert_eq!(lru.len(), 0);
        assert_eq!(lru.guaranteed_capacity(), 0);
        for n in 9..32 {
            lru.insert(n, n * 10);
        }
        assert_eq!(lru.len(), 7);
        assert_eq!(lru.guaranteed_capacity(), 7);
        lru.assert_check_internal_state();

        let mut lru = LruMap::new(ByLength::new(8));
        for n in 9..32 {
            lru.insert(n, n * 10);
        }
        assert_eq!(lru.len(), 8);
        assert_eq!(lru.guaranteed_capacity(), 14);
        lru.assert_check_internal_state();
    }
1962
    // `get_or_insert` evicts like `insert` on a miss and promotes (without
    // calling the closure) on a hit.
    #[test]
    fn get_or_insert_with_limiter_works() {
        let mut lru = LruMap::new(ByLength::new(3));
        assert_eq!(lru.get_or_insert(1, || 10), Some(&mut 10));
        assert_eq!(lru.get_or_insert(2, || 20), Some(&mut 20));
        assert_eq!(lru.get_or_insert(3, || 30), Some(&mut 30));
        assert_eq!(to_vec(&lru), vec![(3, 30), (2, 20), (1, 10)]);

        assert_eq!(lru.get_or_insert(4, || 40), Some(&mut 40));
        assert_eq!(to_vec(&lru), vec![(4, 40), (3, 30), (2, 20)]);

        assert_eq!(lru.get_or_insert(5, || 50), Some(&mut 50));
        assert_eq!(to_vec(&lru), vec![(5, 50), (4, 40), (3, 30)]);

        // On a hit the closure must not run — `unreachable!` proves it.
        assert_eq!(lru.get_or_insert(3, || unreachable!()), Some(&mut 30));
        assert_eq!(to_vec(&lru), vec![(3, 30), (5, 50), (4, 40)]);

        assert_eq!(lru.get_or_insert(5, || unreachable!()), Some(&mut 50));
        assert_eq!(to_vec(&lru), vec![(5, 50), (3, 30), (4, 40)]);

        lru.assert_check_internal_state();
    }
1985
    // The fallible variant propagates the closure's error and leaves the map
    // unchanged when the closure fails.
    #[test]
    fn get_or_insert_fallible_works() {
        let mut lru = LruMap::new(ByLength::new(2));
        assert_eq!(
            lru.get_or_insert_fallible(1, || Ok::<_, ()>(10)).unwrap(),
            Some(&mut 10)
        );
        assert_eq!(
            lru.get_or_insert_fallible(2, || Ok::<_, ()>(20)).unwrap(),
            Some(&mut 20)
        );
        assert_eq!(to_vec(&lru), vec![(2, 20), (1, 10)]);

        assert_eq!(
            lru.get_or_insert_fallible(3, || Ok::<_, ()>(30)).unwrap(),
            Some(&mut 30)
        );
        assert_eq!(to_vec(&lru), vec![(3, 30), (2, 20)]);

        assert!(lru.get_or_insert_fallible(4, || Err(())).is_err());
        assert_eq!(to_vec(&lru), vec![(3, 30), (2, 20)]);

        lru.assert_check_internal_state();
    }
2010
    // Measures the real memory usage of an unlimited map at two growth steps,
    // then verifies that `ByMemoryUsage` caps growth at exactly those
    // budgets: enough budget grows, budget+1 does not unlock the next step,
    // and a budget below the first step rejects every insertion.
    #[test]
    fn insert_with_memory_usage_limiter_works() {
        let mut lru = LruMap::new(UnlimitedCompact);
        assert_eq!(lru.guaranteed_capacity(), 0);
        assert_eq!(lru.memory_usage(), 0);
        lru.insert(1, 10);
        let memory_usage_step_1 = lru.memory_usage();
        lru.insert(2, 20);
        lru.insert(3, 30);
        lru.insert(4, 40);
        let memory_usage_step_2 = lru.memory_usage();
        assert_ne!(memory_usage_step_1, memory_usage_step_2);

        let mut lru = LruMap::new(ByMemoryUsage::new(memory_usage_step_2));
        assert_eq!(lru.guaranteed_capacity(), 0);
        assert_eq!(lru.memory_usage(), 0);
        lru.insert(1, 10);
        assert_eq!(lru.guaranteed_capacity(), 3);
        assert_eq!(lru.memory_usage(), memory_usage_step_1);
        lru.insert(2, 20);
        lru.insert(3, 30);
        assert_eq!(to_vec(&lru), vec![(3, 30), (2, 20), (1, 10)]);
        assert_eq!(lru.guaranteed_capacity(), 3);
        assert_eq!(lru.memory_usage(), memory_usage_step_1);
        lru.insert(4, 40);
        assert_eq!(to_vec(&lru), vec![(4, 40), (3, 30), (2, 20), (1, 10)]);
        assert_eq!(lru.guaranteed_capacity(), 7);
        assert_eq!(lru.memory_usage(), memory_usage_step_2);
        lru.insert(5, 50);
        lru.insert(6, 60);
        lru.insert(7, 70);
        assert_eq!(
            to_vec(&lru),
            vec![(7, 70), (6, 60), (5, 50), (4, 40), (3, 30), (2, 20), (1, 10)]
        );
        // At capacity with no memory budget left: the 8th insert must evict
        // the oldest entry instead of growing.
        assert!(lru.insert(8, 80));
        assert_eq!(
            to_vec(&lru),
            vec![(8, 80), (7, 70), (6, 60), (5, 50), (4, 40), (3, 30), (2, 20)]
        );

        assert_eq!(lru.guaranteed_capacity(), 7);
        assert_eq!(lru.memory_usage(), memory_usage_step_2);
        lru.assert_check_internal_state();

        let mut lru = LruMap::new(ByMemoryUsage::new(memory_usage_step_2 + 1));
        for n in 1..=8 {
            lru.insert(n, n * 10);
        }
        assert_eq!(lru.len(), 7);
        assert_eq!(lru.guaranteed_capacity(), 7);
        assert_eq!(lru.memory_usage(), memory_usage_step_2);

        let mut lru = LruMap::new(ByMemoryUsage::new(memory_usage_step_1 - 1));
        for n in 1..=8 {
            assert!(!lru.insert(n, n * 10));
        }
        assert_eq!(lru.len(), 0);
        assert_eq!(lru.guaranteed_capacity(), 0);
        assert_eq!(lru.memory_usage(), 0);
    }
2072
    // Removing a missing key is a no-op; removing existing keys (middle,
    // oldest, newest) returns their values and preserves the rest's order.
    #[test]
    fn remove_works() {
        let mut lru = LruMap::new(UnlimitedCompact);
        lru.insert(1, 10);
        lru.insert(2, 20);
        lru.insert(3, 30);
        lru.insert(4, 40);
        lru.insert(5, 50);
        lru.insert(6, 60);
        assert_eq!(to_vec(&lru), vec![(6, 60), (5, 50), (4, 40), (3, 30), (2, 20), (1, 10)]);

        assert_eq!(lru.remove(&123), None);
        assert_eq!(to_vec(&lru), vec![(6, 60), (5, 50), (4, 40), (3, 30), (2, 20), (1, 10)]);

        assert_eq!(lru.remove(&3), Some(30));
        assert_eq!(to_vec(&lru), vec![(6, 60), (5, 50), (4, 40), (2, 20), (1, 10)]);

        assert_eq!(lru.remove(&1), Some(10));
        assert_eq!(to_vec(&lru), vec![(6, 60), (5, 50), (4, 40), (2, 20)]);

        assert_eq!(lru.remove(&6), Some(60));
        assert_eq!(to_vec(&lru), vec![(5, 50), (4, 40), (2, 20)]);

        assert_eq!(lru.remove(&5), Some(50));
        assert_eq!(lru.remove(&4), Some(40));
        assert_eq!(lru.remove(&2), Some(20));
        assert_eq!(to_vec(&lru), vec![]);
    }
2101
    // Promoting the only entry must be a no-op for both list ends.
    #[test]
    fn get_works_and_promotes_item_single_item() {
        let mut lru = LruMap::new(UnlimitedCompact);
        lru.insert(1, 10);
        assert_eq!(to_vec(&lru), vec![(1, 10)]);

        assert!(lru.get(&1000).is_none());
        assert_eq!(to_vec(&lru), vec![(1, 10)]);

        assert_eq!(*lru.get(&1).unwrap(), 10);
        assert_eq!(to_vec(&lru), vec![(1, 10)]);
        assert_eq!(lru.peek_newest().unwrap(), (&1, &10));
        assert_eq!(lru.peek_oldest().unwrap(), (&1, &10));

        lru.assert_check_internal_state();
    }
2120
    // With two entries, promoting the newest is a no-op and promoting the
    // oldest swaps the ends.
    #[test]
    fn get_works_and_promotes_item_two_items() {
        let mut lru = LruMap::new(UnlimitedCompact);
        lru.insert(2, 20);
        lru.insert(1, 10);
        assert_eq!(to_vec(&lru), vec![(1, 10), (2, 20)]);

        assert!(lru.get(&1000).is_none());
        assert_eq!(to_vec(&lru), vec![(1, 10), (2, 20)]);

        assert_eq!(*lru.get(&1).unwrap(), 10);
        assert_eq!(to_vec(&lru), vec![(1, 10), (2, 20)]);
        assert_eq!(lru.peek_newest().unwrap(), (&1, &10));
        assert_eq!(lru.peek_oldest().unwrap(), (&2, &20));

        assert_eq!(*lru.get(&2).unwrap(), 20);
        assert_eq!(to_vec(&lru), vec![(2, 20), (1, 10)]);
        assert_eq!(lru.peek_newest().unwrap(), (&2, &20));
        assert_eq!(lru.peek_oldest().unwrap(), (&1, &10));

        lru.assert_check_internal_state();
    }
2146
    // With three entries, promoting the newest, the oldest, and a middle
    // entry all relink the list correctly.
    #[test]
    fn get_works_and_promotes_item_three_items() {
        let mut lru = LruMap::new(UnlimitedCompact);
        lru.insert(3, 30);
        lru.insert(2, 20);
        lru.insert(1, 10);
        assert_eq!(to_vec(&lru), vec![(1, 10), (2, 20), (3, 30)]);

        assert!(lru.get(&1000).is_none());
        assert_eq!(to_vec(&lru), vec![(1, 10), (2, 20), (3, 30)]);

        assert_eq!(*lru.get(&1).unwrap(), 10);
        assert_eq!(to_vec(&lru), vec![(1, 10), (2, 20), (3, 30)]);
        assert_eq!(lru.peek_newest().unwrap(), (&1, &10));
        assert_eq!(lru.peek_oldest().unwrap(), (&3, &30));

        assert_eq!(*lru.get(&3).unwrap(), 30);
        assert_eq!(to_vec(&lru), vec![(3, 30), (1, 10), (2, 20)]);
        assert_eq!(lru.peek_newest().unwrap(), (&3, &30));
        assert_eq!(lru.peek_oldest().unwrap(), (&2, &20));

        assert_eq!(*lru.get(&1).unwrap(), 10);
        assert_eq!(to_vec(&lru), vec![(1, 10), (3, 30), (2, 20)]);
        assert_eq!(lru.peek_newest().unwrap(), (&1, &10));
        assert_eq!(lru.peek_oldest().unwrap(), (&2, &20));

        lru.assert_check_internal_state();
    }
2179
    // Re-inserting an existing key replaces its value and promotes it.
    #[test]
    fn insert_existing_key() {
        let mut lru = LruMap::new(UnlimitedCompact);
        lru.insert(3, 30);
        lru.insert(2, 20);
        lru.insert(1, 10);
        assert_eq!(to_vec(&lru), vec![(1, 10), (2, 20), (3, 30)]);

        lru.insert(2, 200);
        assert_eq!(to_vec(&lru), vec![(2, 200), (1, 10), (3, 30)]);

        lru.assert_check_internal_state();
    }
2193
    // A value exactly at the cost budget is accepted.
    #[test]
    fn insert_succeeds_when_first_element_is_cheap() {
        let mut lru = LruMap::new(ByValue::new(1));
        lru.insert(1, 1);
        assert_eq!(lru.limiter().cost, 1);
        assert_eq!(to_vec(&lru), vec![(1, 1)]);

        lru.assert_check_internal_state();
    }
2203
    // A value exceeding the whole cost budget is rejected by `on_insert`.
    #[test]
    fn insert_fails_when_first_element_is_too_costly() {
        let mut lru = LruMap::new(ByValue::new(1));
        lru.insert(1, 2);
        assert_eq!(lru.limiter().cost, 0);
        assert_eq!(to_vec(&lru), vec![]);

        lru.assert_check_internal_state();
    }
2213
    // Replacing a `(cost, payload)` value with one of equal cost succeeds
    // without touching the limiter's budget.
    #[test]
    fn replacing_existing_value_with_another_with_the_same_cost_always_works() {
        let mut lru = LruMap::new(ByValue::new(1));
        lru.insert(1, (1, 100));
        assert_eq!(lru.limiter().cost, 1);
        assert_eq!(to_vec(&lru), vec![(1, (1, 100))]);

        lru.insert(1, (1, 200));
        assert_eq!(lru.limiter().cost, 1);
        assert_eq!(to_vec(&lru), vec![(1, (1, 200))]);

        lru.assert_check_internal_state();
    }
2227
    // A rejected replacement removes the existing entry too, emptying the
    // single-entry map.
    #[test]
    fn when_new_value_is_too_costly_an_existing_item_will_be_removed_map_is_cleared() {
        let mut lru = LruMap::new(ByValue::new(1));
        lru.insert(1, 1);
        assert_eq!(lru.limiter().cost, 1);
        assert_eq!(to_vec(&lru), vec![(1, 1)]);

        lru.insert(1, 2);
        assert_eq!(lru.limiter().cost, 0);
        assert_eq!(to_vec(&lru), vec![]);

        lru.assert_check_internal_state();
    }
2241
    // Going over budget evicts only enough of the oldest entries to fit.
    #[test]
    fn when_new_value_is_too_costly_an_existing_item_will_be_removed_map_is_not_cleared() {
        let mut lru = LruMap::new(ByValue::new(2));
        lru.insert(1, 1);
        lru.insert(2, 2);
        assert_eq!(lru.limiter().cost, 2);
        assert_eq!(to_vec(&lru), vec![(2, 2)]);

        lru.assert_check_internal_state();
    }
2252
    // A rejected *new* key leaves unrelated existing entries untouched.
    #[test]
    fn when_new_value_is_too_costly_an_existing_item_will_not_be_removed_if_the_key_is_different() {
        let mut lru = LruMap::new(ByValue::new(1));
        lru.insert(1, 1);
        assert_eq!(lru.limiter().cost, 1);
        assert_eq!(to_vec(&lru), vec![(1, 1)]);

        lru.insert(2, 2);
        assert_eq!(lru.limiter().cost, 1);
        assert_eq!(to_vec(&lru), vec![(1, 1)]);

        lru.assert_check_internal_state();
    }
2266
    // A maximal-cost insert evicts every older entry to make room.
    #[test]
    fn when_new_value_is_too_costly_all_items_will_be_removed() {
        let mut lru = LruMap::new(ByValue::new(3));
        lru.insert(1, 1);
        lru.insert(2, 2);
        assert_eq!(lru.limiter().cost, 3);
        assert_eq!(to_vec(&lru), vec![(2, 2), (1, 1)]);

        lru.insert(3, 3);
        assert_eq!(lru.limiter().cost, 3);
        assert_eq!(to_vec(&lru), vec![(3, 3)]);

        lru.assert_check_internal_state();
    }
2281
    // An over-budget insert evicts several of the oldest entries, but no
    // more than necessary.
    #[test]
    fn when_new_value_is_too_costly_multiple_items_will_be_removed() {
        let mut lru = LruMap::new(ByValue::new(3));
        lru.insert(1, 1);
        lru.insert(2, 1);
        lru.insert(3, 1);
        assert_eq!(lru.limiter().cost, 3);
        assert_eq!(to_vec(&lru), vec![(3, 1), (2, 1), (1, 1)]);

        lru.insert(4, 2);
        assert_eq!(lru.limiter().cost, 3);
        assert_eq!(to_vec(&lru), vec![(4, 2), (3, 1)]);

        lru.assert_check_internal_state();
    }
2297
    // `pop_oldest` removes entries in insertion order and keeps the
    // limiter's cost bookkeeping in sync.
    #[test]
    fn pop_oldest_works() {
        let mut lru = LruMap::new(ByValue::new(10));
        lru.insert(1, 1);
        lru.insert(2, 2);
        lru.insert(3, 3);
        assert_eq!(lru.limiter().cost, 6);
        assert_eq!(to_vec(&lru), vec![(3, 3), (2, 2), (1, 1)]);

        assert_eq!(lru.pop_oldest().unwrap(), (1, 1));
        assert_eq!(lru.limiter().cost, 5);

        assert_eq!(lru.pop_oldest().unwrap(), (2, 2));
        assert_eq!(lru.limiter().cost, 3);

        assert_eq!(lru.pop_oldest().unwrap(), (3, 3));
        assert_eq!(lru.limiter().cost, 0);

        assert!(lru.pop_oldest().is_none());

        lru.assert_check_internal_state();
    }
2320
    // `pop_newest` removes entries in reverse insertion order and keeps the
    // limiter's cost bookkeeping in sync.
    #[test]
    fn pop_newest_works() {
        let mut lru = LruMap::new(ByValue::new(10));
        lru.insert(1, 1);
        lru.insert(2, 2);
        lru.insert(3, 3);
        assert_eq!(lru.limiter().cost, 6);
        assert_eq!(to_vec(&lru), vec![(3, 3), (2, 2), (1, 1)]);

        assert_eq!(lru.pop_newest().unwrap(), (3, 3));
        assert_eq!(lru.limiter().cost, 3);

        assert_eq!(lru.pop_newest().unwrap(), (2, 2));
        assert_eq!(lru.limiter().cost, 1);

        assert_eq!(lru.pop_newest().unwrap(), (1, 1));
        assert_eq!(lru.limiter().cost, 0);

        assert!(lru.pop_newest().is_none());

        lru.assert_check_internal_state();
    }
2343
    // `clear` empties the map and resets the limiter via `on_cleared`.
    #[test]
    fn clear_works() {
        let mut lru = LruMap::new(ByValue::new(10));
        lru.insert(1, 1);
        lru.insert(2, 2);
        lru.insert(3, 3);
        assert_eq!(lru.limiter().cost, 6);
        assert_eq!(to_vec(&lru), vec![(3, 3), (2, 2), (1, 1)]);

        lru.clear();

        assert_eq!(lru.limiter().cost, 0);
        assert_eq!(to_vec(&lru), vec![]);

        lru.assert_check_internal_state();
    }
2360
    // Forward draining yields owned pairs newest-first and empties the map.
    #[test]
    fn drain_works() {
        let mut lru = LruMap::new(ByValue::new(10));
        lru.insert(1, 1);
        lru.insert(2, 2);
        lru.insert(3, 3);
        assert_eq!(lru.limiter().cost, 6);

        let vec: Vec<_> = lru.drain().collect();
        assert_eq!(vec, vec![(3, 3), (2, 2), (1, 1)]);
        assert_eq!(to_vec(&lru), vec![]);
        assert!(lru.is_empty());
        assert_eq!(lru.limiter().cost, 0);

        lru.assert_check_internal_state();
    }
2377
    // Reversed draining yields owned pairs oldest-first and empties the map.
    #[test]
    fn drain_in_reverse_works() {
        let mut lru = LruMap::new(ByValue::new(10));
        lru.insert(1, 1);
        lru.insert(2, 2);
        lru.insert(3, 3);
        assert_eq!(lru.limiter().cost, 6);

        let vec: Vec<_> = lru.drain().rev().collect();
        assert_eq!(vec, vec![(1, 1), (2, 2), (3, 3)]);
        assert_eq!(to_vec(&lru), vec![]);
        assert!(lru.is_empty());
        assert_eq!(lru.limiter().cost, 0);

        lru.assert_check_internal_state();
    }
2394
    // Consuming only part of a `Drain` still empties the map, because the
    // `Drop` impl clears whatever remains.
    #[test]
    fn dropping_drain_clears_the_map() {
        let mut lru = LruMap::new(ByValue::new(10));
        lru.insert(1, 1);
        lru.insert(2, 2);
        lru.insert(3, 3);
        assert_eq!(lru.limiter().cost, 6);

        let vec: Vec<_> = lru.drain().take(1).collect();
        assert_eq!(vec, vec![(3, 3)]);
        assert_eq!(to_vec(&lru), vec![]);
        assert!(lru.is_empty());
        assert_eq!(lru.limiter().cost, 0);

        lru.assert_check_internal_state();
    }
2411
    // With `u8` links the map holds at most 112 entries; the next insert
    // must evict rather than grow past what the link type can address.
    #[test]
    fn the_maximum_number_of_elements_is_bound_by_the_link_type_u8() {
        let mut lru = LruMap::with_seed(UnlimitedU8, [12, 34, 56, 78]);
        let limit = 112;

        for n in 0..limit {
            lru.insert(n, n);
            assert_eq!(lru.len(), n + 1);
            assert_eq!(lru.peek_oldest().unwrap(), (&0, &0));
            assert_eq!(lru.peek_newest().unwrap(), (&n, &n));
        }

        lru.insert(limit, limit);
        assert!(lru.len() <= limit as usize, "failed with length = {}", lru.len());
        assert_ne!(lru.peek_oldest().unwrap(), (&0, &0));
        assert_eq!(lru.peek_newest().unwrap(), (&limit, &limit));

        lru.assert_check_internal_state();
    }
2431
    // Same as the `u8` test, but for `u16` links (28672 entries); skipped
    // under Miri because of its size.
    #[test]
    #[cfg_attr(miri, ignore)] fn the_maximum_number_of_elements_is_bound_by_the_link_type_u16() {
        let mut lru = LruMap::with_seed(UnlimitedU16, [12, 34, 56, 78]);
        let limit = 28672;

        for n in 0..limit {
            lru.insert(n, n);
            assert_eq!(lru.len(), n + 1);
            assert_eq!(lru.peek_oldest().unwrap(), (&0, &0), "failed at {}", n);
            assert_eq!(lru.peek_newest().unwrap(), (&n, &n));
        }

        lru.insert(limit, limit);
        assert!(lru.len() <= limit as usize, "failed with length = {}", lru.len());
        assert_ne!(lru.peek_oldest().unwrap(), (&0, &0));
        assert_eq!(lru.peek_newest().unwrap(), (&limit, &limit));
        assert!(lru.get(&0).is_none());

        lru.assert_check_internal_state();
    }
2453
    // Exercises the absolute element limit (~1.9 billion entries); far too
    // expensive to run by default, hence `#[ignore]`.
    #[test]
    #[ignore] fn element_limit() {
        let mut lru = LruMap::with_seed(UnlimitedCompact, [12, 34, 56, 78]);
        let limit = 1879048192_u32;
        lru.reserve_or_panic(limit as usize);

        for n in 0..limit {
            lru.insert(n, ());
            assert_eq!(lru.len(), n as usize + 1, "failed at {}", n);
            assert_eq!(lru.peek_oldest().unwrap(), (&0, &()), "failed at {}", n);
            assert_eq!(lru.peek_newest().unwrap(), (&n, &()));
        }

        lru.insert(limit, ());
        assert!(lru.len() <= limit as usize, "failed with length = {}", lru.len());
        assert_ne!(lru.peek_oldest().unwrap(), (&0, &()));
        assert_eq!(lru.peek_newest().unwrap(), (&limit, &()));
        assert!(lru.get(&0).is_none());
    }
2474
    // `String`-keyed maps can be queried with `&str` (borrowed lookups) and
    // `get_or_insert` accepts both owned and borrowed keys.
    #[test]
    fn lru_with_a_string_key_works() {
        use alloc::string::ToString;
        let mut lru = LruMap::new(UnlimitedCompact);
        lru.insert("1".to_string(), 1);
        lru.insert("2".to_string(), 2);
        lru.insert("3".to_string(), 3);

        assert_eq!(lru.peek("2").unwrap(), &2);
        assert_eq!(lru.get("2").unwrap(), &2);

        let value = lru.get_or_insert("2".to_string(), || 20).unwrap();
        assert_eq!(value, &2);

        let value = lru.get_or_insert("4".to_string(), || 40).unwrap();
        assert_eq!(value, &40);

        let value = lru.get_or_insert("2", || 200).unwrap();
        assert_eq!(value, &2);

        let value = lru.get_or_insert("6", || 6).unwrap();
        assert_eq!(value, &6);

        lru.assert_check_internal_state();
    }
2502
    // A limiter whose `KeyToInsert` differs from the stored key type:
    // inserts take borrowed `InsertKey`s, lookups still work with `&str`.
    #[test]
    fn lru_with_a_custom_insert_key_works() {
        let mut lru = LruMap::new(WithCustomInsertKey);
        lru.insert(InsertKey("1"), 1);
        lru.insert(InsertKey("2"), 2);
        lru.insert(InsertKey("3"), 3);

        assert_eq!(lru.peek("2").unwrap(), &2);
        assert_eq!(lru.get("2").unwrap(), &2);

        let value = lru.get_or_insert(InsertKey("2"), || 20).unwrap();
        assert_eq!(value, &2);

        let value = lru.get_or_insert(InsertKey("4"), || 40).unwrap();
        assert_eq!(value, &40);

        lru.assert_check_internal_state();
    }
2523
    // The guaranteed capacity grows in the expected doubling steps (7/8 of
    // each power-of-two bucket count); skipped under Miri for speed.
    #[test]
    #[cfg_attr(miri, ignore)]
    fn capacity_growth() {
        let mut lru = LruMap::new(UnlimitedCompact);
        assert_eq!(lru.guaranteed_capacity(), 0);

        let sizes = [
            0, 3, 7, 14, 28, 56, 112, 224, 448, 896, 1792, 3584, 7168, 14336, 28672, 57344, 114688, 229376, 458752,
            917504, 1835008,
        ];

        for pair in sizes.windows(2) {
            for n in pair[0]..pair[1] {
                lru.insert(n, ());
                // Inserting the first entry past `pair[0]` must bump the
                // capacity straight to the next step, `pair[1]`.
                assert_eq!(lru.guaranteed_capacity(), pair[1]);
            }
        }
    }
2542
    // When the limiter reports "over the limit" unconditionally, both
    // `get_or_insert` and `insert` evict everything, including the new entry.
    #[test]
    fn inserting_can_evict_the_whole_cache() {
        let mut lru = LruMap::new(Manual { overflow: false });
        lru.insert(1, 10);
        lru.insert(2, 20);
        assert_eq!(lru.len(), 2);

        lru.limiter_mut().overflow = true;
        assert!(lru.get_or_insert(3, || 30).is_none());
        assert!(lru.is_empty());

        lru.limiter_mut().overflow = false;
        lru.insert(1, 10);
        lru.insert(2, 20);
        assert_eq!(lru.len(), 2);

        lru.limiter_mut().overflow = true;
        assert!(!lru.insert(3, 30));
        assert!(lru.is_empty());
    }
2563
2564 #[test]
2565 fn insert_and_remove_a_lot_of_elements() {
2566 let mut lru = LruMap::with_seed(UnlimitedCompact, [12, 34, 56, 78]);
2567 for n in 0..1024 {
2568 lru.insert(n % 256, 255);
2569 lru.insert(65535, 255);
2570 lru.remove(&65535);
2571
2572 if n % 1024 == 0 {
2573 lru.assert_check_internal_state();
2574 }
2575 }
2576 }
2577
    // Randomized stress test against a tight memory budget, with a fixed RNG
    // seed for reproducibility; the iteration count is reduced under Miri.
    #[test]
    fn randomly_insert_and_remove_elements_in_a_memory_bound_map() {
        let limiter = ByMemoryUsage::new(512);
        let mut lru = LruMap::with_seed(limiter, [12, 34, 56, 78]);

        let count = if cfg!(miri) { 1024 * 4 } else { 1024 * 64 };

        let mut rng = oorandom::Rand32::new(1234);
        for n in 0..count {
            let key = rng.rand_range(0..255) as u16;
            lru.insert(key, n as u8);

            let key = rng.rand_range(0..255) as u16;
            lru.remove(&key);

            // Full invariant checks are expensive, so only do them
            // periodically.
            if n % 1024 == 0 {
                lru.assert_check_internal_state();
            }
        }
    }
2598
    // `peek`/`peek_mut` read (and mutate) a value without promoting the
    // entry in the LRU order, unlike `get`.
    #[test]
    fn peek_works() {
        let mut lru = LruMap::new(UnlimitedCompact);
        lru.insert(1, 10);
        lru.insert(2, 20);
        lru.insert(3, 30);

        assert_eq!(to_vec(&lru), vec![(3, 30), (2, 20), (1, 10)]);
        assert_eq!(*lru.peek(&2).unwrap(), 20);
        assert_eq!(to_vec(&lru), vec![(3, 30), (2, 20), (1, 10)]);

        assert_eq!(*lru.peek_mut(&2).unwrap(), 20);
        assert_eq!(to_vec(&lru), vec![(3, 30), (2, 20), (1, 10)]);

        *lru.peek_mut(&2).unwrap() = 200;
        assert_eq!(to_vec(&lru), vec![(3, 30), (2, 200), (1, 10)]);
    }
2618
    // `iter_mut` allows in-place value mutation, iterates newest-first
    // without promoting entries, and supports `next_back`.
    #[test]
    fn iter_mut_works() {
        let mut lru = LruMap::new(UnlimitedCompact);
        assert!(lru.is_empty());
        assert_eq!(lru.len(), 0);
        assert_eq!(lru.iter_mut().collect::<Vec<_>>(), vec![]);

        lru.insert(0, 1);
        lru.insert(1, 2);
        lru.insert(2, 3);
        assert!(!lru.is_empty());
        assert_eq!(lru.len(), 3);
        // Double the values stored under even keys in place.
        for (key, value) in lru.iter_mut() {
            if key % 2 == 0 {
                *value *= 2;
            }
        }
        assert_eq!(lru.get(&2), Some(&mut 6));
        let keys: Vec<_> = lru.iter_mut().map(|(key, _value)| key.clone()).collect();
        assert_eq!(keys, vec![2, 1, 0]);

        let last_key = lru.iter_mut().next_back().unwrap().0;
        assert_eq!(last_key, &0);

        // Promoting key 0 via `get` must be reflected by the iterator order.
        lru.get(&0);
        let keys: Vec<_> = lru.iter_mut().map(|(key, _value)| key.clone()).collect();
        assert_eq!(keys, vec![0, 2, 1]);

        let last_key = lru.iter_mut().next_back().unwrap().0;
        assert_eq!(last_key, &1);
        lru.assert_check_internal_state();
    }
2658}