wasmtime_runtime/instance/allocator/pooling/index_allocator.rs

//! Index/slot allocator policies for the pooling allocator.

use crate::CompiledModuleId;
use std::collections::hash_map::{Entry, HashMap};
use std::mem;
use std::sync::Mutex;

/// A slot index. The job of this allocator is to hand out these
/// indices.
#[derive(Hash, Clone, Copy, Debug, PartialEq, Eq)]
pub struct SlotId(pub u32);
impl SlotId {
    /// The index of this slot.
    pub fn index(self) -> usize {
        self.0 as usize
    }
}

#[derive(Debug)]
pub struct IndexAllocator(Mutex<Inner>);

#[derive(Debug)]
struct Inner {
    /// Maximum number of "unused warm slots" which will be allowed during
    /// allocation.
    ///
    /// This is a user-configurable knob which can be used to influence the
    /// maximum number of unused slots at any one point in time. A "warm slot"
    /// is one that has been previously allocated.
    max_unused_warm_slots: u32,

    /// Current count of "unused warm slots": those that were previously
    /// allocated and are now no longer in use.
    ///
    /// This is the size of the `warm` list.
    unused_warm_slots: u32,

    /// A linked list (via indices) which enumerates all "warm and unused"
    /// slots, or those which have previously been allocated and then freed.
    warm: List,

    /// Last slot that was allocated for the first time ever.
    ///
    /// This is initially 0 and is incremented during `pick_cold`. If this
    /// matches the total number of slots (the length of `slot_state`), there
    /// are no more cold slots left.
    last_cold: u32,

    /// The state of any given slot.
    ///
    /// For each unused warm slot this records its links in the `warm` list
    /// and, if the slot has an affinity, in the corresponding `module_affine`
    /// list. These links are kept up-to-date to allow fast removal.
    slot_state: Vec<SlotState>,

    /// Affine slot management which tracks which slots are free and were last
    /// used with the specified `CompiledModuleId`.
    ///
    /// The `List` here is appended to during deallocation and removal happens
    /// from the tail during allocation.
    module_affine: HashMap<CompiledModuleId, List>,
}

/// A helper "linked list" data structure which is based on indices.
#[derive(Default, Debug)]
struct List {
    head: Option<SlotId>,
    tail: Option<SlotId>,
}

/// A helper data structure for an intrusive linked list, coupled with the
/// `List` type.
#[derive(Default, Debug, Copy, Clone)]
struct Link {
    prev: Option<SlotId>,
    next: Option<SlotId>,
}
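
// Pictorially, a `List` holding slots 3, 7, and 5 (in insertion order) looks
// like the sketch below, where each slot's `Link` lives inside its
// `SlotState::UnusedWarm` entry (the slot numbers here are hypothetical):
//
//   List { head: Some(3), tail: Some(5) }
//
//   slot 3: Link { prev: None,    next: Some(7) }
//   slot 7: Link { prev: Some(3), next: Some(5) }
//   slot 5: Link { prev: Some(7), next: None    }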

#[derive(Clone, Debug)]
enum SlotState {
    /// This slot is currently in use and is affine to the specified module.
    Used(Option<CompiledModuleId>),

    /// This slot is not currently used, and has never been used.
    UnusedCold,

    /// This slot is not currently used, but was previously allocated.
    ///
    /// The payload here is metadata about the lists that this slot is contained
    /// within.
    UnusedWarm(Unused),
}

impl SlotState {
    fn unwrap_unused(&mut self) -> &mut Unused {
        match self {
            SlotState::UnusedWarm(u) => u,
            _ => unreachable!(),
        }
    }
}

#[derive(Default, Copy, Clone, Debug)]
struct Unused {
    /// Which module this slot was historically affine to, if any.
    affinity: Option<CompiledModuleId>,

    /// Metadata about the linked list for all slots affine to `affinity`.
    affine_list_link: Link,

    /// Metadata within the `warm` list of the main allocator.
    unused_list_link: Link,
}

/// Which strategy `_alloc` should use when picking a slot.
enum AllocMode {
    /// Only pick a slot already affine to the requested module, clearing its
    /// affinity upon allocation. Used when draining a module's slots.
    ForceAffineAndClear,
    /// Pick any available slot, preferring affine slots and otherwise falling
    /// back to cold or warm slots depending on `max_unused_warm_slots`.
    AnySlot,
}

impl IndexAllocator {
    /// Create the default state for this strategy.
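    ///
    /// A usage sketch (marked `ignore` since this internal type isn't meant
    /// to be exercised as a doctest):
    ///
    /// ```ignore
    /// // 10 total slots; at most 2 unused "warm" slots are retained.
    /// let allocator = IndexAllocator::new(10, 2);
    /// let slot = allocator.alloc(None).unwrap();
    /// // ... the slot backs an instantiation ...
    /// allocator.free(slot);
    /// ```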
    pub fn new(max_instances: u32, max_unused_warm_slots: u32) -> Self {
        IndexAllocator(Mutex::new(Inner {
            last_cold: 0,
            max_unused_warm_slots,
            unused_warm_slots: 0,
            module_affine: HashMap::new(),
            slot_state: (0..max_instances).map(|_| SlotState::UnusedCold).collect(),
            warm: List::default(),
        }))
    }

    /// Allocate a new index from this allocator, optionally using `module_id`
    /// as an affinity request if the allocation strategy supports it.
    ///
    /// Returns `None` if no more slots are available.
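    ///
    /// A sketch of the affinity behavior, assuming `allocator` is an
    /// `IndexAllocator` and `id` is a live `CompiledModuleId`:
    ///
    /// ```ignore
    /// let slot = allocator.alloc(Some(id)).unwrap();
    /// allocator.free(slot);
    /// // A subsequent allocation for the same module prefers the same slot.
    /// assert_eq!(allocator.alloc(Some(id)), Some(slot));
    /// ```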
    pub fn alloc(&self, module_id: Option<CompiledModuleId>) -> Option<SlotId> {
        self._alloc(module_id, AllocMode::AnySlot)
    }

    /// Attempts to allocate a slot guaranteed to be affine to the specified
    /// `module_id`.
    ///
    /// Returns `None` if there are no slots affine to `module_id`. The
    /// allocation of this slot will not record the affinity to `module_id`,
    /// instead simply listing it as taken. This is intended to be used for
    /// clearing out all affine slots to a module.
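    ///
    /// A sketch of draining every slot affine to a module at teardown,
    /// assuming the same setup as the example on `alloc`:
    ///
    /// ```ignore
    /// while let Some(slot) = allocator.alloc_affine_and_clear_affinity(id) {
    ///     // ... reset the slot's backing state ...
    ///     allocator.free(slot); // returned to the pool with no affinity
    /// }
    /// ```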
    pub fn alloc_affine_and_clear_affinity(&self, module_id: CompiledModuleId) -> Option<SlotId> {
        self._alloc(Some(module_id), AllocMode::ForceAffineAndClear)
    }

    fn _alloc(&self, module_id: Option<CompiledModuleId>, mode: AllocMode) -> Option<SlotId> {
        let mut inner = self.0.lock().unwrap();
        let inner = &mut *inner;

        // As a first pass, always attempt an affine allocation. This will
        // succeed if any slots are considered affine to `module_id` (if it's
        // specified). Failing that, fall back to the mode-specific strategy
        // below.
        let slot_id = inner.pick_affine(module_id).or_else(|| {
            match mode {
                // If any slot is requested then this is a normal instantiation
                // looking for an index. Without any affine candidates there are
                // two options here:
                //
                // 1. Pick a slot amongst previously allocated slots
                // 2. Pick a slot that's never been used before
                //
                // The choice here is guided by the initial configuration of
                // `max_unused_warm_slots`. If the count of unused warm slots,
                // which are likely all affine, is below this threshold then the
                // affinity of the warm slots isn't tampered with and a cold
                // slot is chosen first. If the cold slot allocation fails,
                // however, a warm slot is evicted.
                //
                // The opposite happens when we're above our threshold for the
                // maximum number of warm slots, meaning that a warm slot is
                // tried first with a cold slot following that. Note that the
                // warm slot allocation in this case should only fail if
                // `max_unused_warm_slots` is 0; otherwise `pick_warm` will
                // always succeed.
                AllocMode::AnySlot => {
                    if inner.unused_warm_slots < inner.max_unused_warm_slots {
                        inner.pick_cold().or_else(|| inner.pick_warm())
                    } else {
                        inner.pick_warm().or_else(|| {
                            debug_assert!(inner.max_unused_warm_slots == 0);
                            inner.pick_cold()
                        })
                    }
                }

                // In this mode an affinity-based allocation is always performed
                // as the purpose here is to clear out slots relevant to
                // `module_id` during module teardown. This means that non-affine
                // slots are never consulted in this path.
                AllocMode::ForceAffineAndClear => None,
            }
        })?;

        inner.slot_state[slot_id.index()] = SlotState::Used(match mode {
            AllocMode::ForceAffineAndClear => None,
            AllocMode::AnySlot => module_id,
        });

        Some(slot_id)
    }

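    /// Frees the slot at `index`, returning it to the pool of unused warm
    /// slots (and, if it was allocated with a module affinity, appending it
    /// to that module's affine list).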
    pub(crate) fn free(&self, index: SlotId) {
        let mut inner = self.0.lock().unwrap();
        let inner = &mut *inner;
        let module = match inner.slot_state[index.index()] {
            SlotState::Used(module) => module,
            _ => unreachable!(),
        };

        // Bump the number of warm slots since this slot is now considered
        // previously used. Afterwards append it to the linked list of all
        // unused and warm slots.
        inner.unused_warm_slots += 1;
        let unused_list_link = inner
            .warm
            .append(index, &mut inner.slot_state, |s| &mut s.unused_list_link);

        let affine_list_link = match module {
            // If this slot is affine to a particular module then append this
            // index to the linked list for the affine module. Otherwise insert
            // a new one-element linked list.
            Some(module) => match inner.module_affine.entry(module) {
                Entry::Occupied(mut e) => e
                    .get_mut()
                    .append(index, &mut inner.slot_state, |s| &mut s.affine_list_link),
                Entry::Vacant(v) => {
                    v.insert(List::new(index));
                    Link::default()
                }
            },

            // If this slot has no affinity then the affine link is empty.
            None => Link::default(),
        };

        inner.slot_state[index.index()] = SlotState::UnusedWarm(Unused {
            affinity: module,
            affine_list_link,
            unused_list_link,
        });
    }

    /// For testing only: returns the contents of the `warm` freelist, in
    /// order, so tests can assert exactly what is on it.
    #[cfg(test)]
    pub(crate) fn testing_freelist(&self) -> Vec<SlotId> {
        let inner = self.0.lock().unwrap();
        inner
            .warm
            .iter(&inner.slot_state, |s| &s.unused_list_link)
            .collect()
    }

    /// For testing only, get the list of all modules with at least
    /// one slot with affinity for that module.
    #[cfg(test)]
    pub(crate) fn testing_module_affinity_list(&self) -> Vec<CompiledModuleId> {
        let inner = self.0.lock().unwrap();
        inner.module_affine.keys().copied().collect()
    }
}

impl Inner {
    /// Attempts to allocate a slot already affine to `module_id`, returning
    /// `None` if `module_id` is `None` or if there are no affine slots.
    fn pick_affine(&mut self, module_id: Option<CompiledModuleId>) -> Option<SlotId> {
        // Note that the `tail` of the affine list is chosen here as it's the
        // most recently used, which for affine allocations is what we want --
        // maximizing temporal reuse.
        let ret = self.module_affine.get(&module_id?)?.tail?;
        self.remove(ret);
        Some(ret)
    }

    fn pick_warm(&mut self) -> Option<SlotId> {
        // Insertions into the `warm` list happen at the `tail`, so the
        // least-recently-used item will be at the head. That's our goal here:
        // pick the least-recently-used slot, since something "warm" is being
        // evicted anyway.
        let head = self.warm.head?;
        self.remove(head);
        Some(head)
    }

    fn remove(&mut self, slot: SlotId) {
        // Decrement the count of unused warm slots, and additionally remove
        // this slot from the `warm` linked list.
        self.unused_warm_slots -= 1;
        self.warm
            .remove(slot, &mut self.slot_state, |u| &mut u.unused_list_link);

        // If this slot is affine to a module then additionally remove it from
        // that module's affinity linked list. Note that if the module's affine
        // list is empty then the module's entry in the map is completely
        // removed as well.
        let module = self.slot_state[slot.index()].unwrap_unused().affinity;
        if let Some(module) = module {
            let mut list = match self.module_affine.entry(module) {
                Entry::Occupied(e) => e,
                Entry::Vacant(_) => unreachable!(),
            };
            list.get_mut()
                .remove(slot, &mut self.slot_state, |u| &mut u.affine_list_link);

            if list.get_mut().head.is_none() {
                list.remove();
            }
        }
    }

    fn pick_cold(&mut self) -> Option<SlotId> {
        if (self.last_cold as usize) == self.slot_state.len() {
            None
        } else {
            let ret = Some(SlotId(self.last_cold));
            self.last_cold += 1;
            ret
        }
    }
}

impl List {
    /// Creates a new one-element list pointing at `id`.
    fn new(id: SlotId) -> List {
        List {
            head: Some(id),
            tail: Some(id),
        }
    }

    /// Appends the `id` to this list whose links are determined by `link`.
    fn append(
        &mut self,
        id: SlotId,
        states: &mut [SlotState],
        link: fn(&mut Unused) -> &mut Link,
    ) -> Link {
        // This `id` is the new tail...
        let tail = mem::replace(&mut self.tail, Some(id));

        // If the tail was present, then update its `next` field to ourselves as
        // we've been appended, otherwise update the `head` since the list was
        // previously empty.
        match tail {
            Some(tail) => link(states[tail.index()].unwrap_unused()).next = Some(id),
            None => self.head = Some(id),
        }
        Link {
            prev: tail,
            next: None,
        }
    }

    /// Removes `id` from this list whose links are determined by `link`.
    fn remove(
        &mut self,
        id: SlotId,
        slot_state: &mut [SlotState],
        link: fn(&mut Unused) -> &mut Link,
    ) -> Unused {
        let mut state = *slot_state[id.index()].unwrap_unused();
        let next = link(&mut state).next;
        let prev = link(&mut state).prev;

        // If a `next` node is present for this link, then its `prev` now
        // becomes our own `prev`. Otherwise we were the tail, so the new tail
        // is our `prev`.
        match next {
            Some(next) => link(slot_state[next.index()].unwrap_unused()).prev = prev,
            None => self.tail = prev,
        }

        // Same as the `next` node, except everything is in reverse.
        match prev {
            Some(prev) => link(slot_state[prev.index()].unwrap_unused()).next = next,
            None => self.head = next,
        }
        state
    }

    #[cfg(test)]
    fn iter<'a>(
        &'a self,
        states: &'a [SlotState],
        link: fn(&Unused) -> &Link,
    ) -> impl Iterator<Item = SlotId> + 'a {
        let mut cur = self.head;
        let mut prev = None;
        std::iter::from_fn(move || {
            if cur.is_none() {
                assert_eq!(prev, self.tail);
            }
            let ret = cur?;
            match &states[ret.index()] {
                SlotState::UnusedWarm(u) => {
                    assert_eq!(link(u).prev, prev);
                    prev = Some(ret);
                    cur = link(u).next
                }
                _ => unreachable!(),
            }
            Some(ret)
        })
    }
}

#[cfg(test)]
mod test {
    use super::{IndexAllocator, SlotId};
    use crate::CompiledModuleIdAllocator;

    #[test]
    fn test_next_available_allocation_strategy() {
        for size in 0..20 {
            let state = IndexAllocator::new(size, 0);
            for i in 0..size {
                assert_eq!(state.alloc(None).unwrap().index(), i as usize);
            }
            assert!(state.alloc(None).is_none());
        }
    }

    #[test]
    fn test_affinity_allocation_strategy() {
        let id_alloc = CompiledModuleIdAllocator::new();
        let id1 = id_alloc.alloc();
        let id2 = id_alloc.alloc();
        let state = IndexAllocator::new(100, 100);

        let index1 = state.alloc(Some(id1)).unwrap();
        assert_eq!(index1.index(), 0);
        let index2 = state.alloc(Some(id2)).unwrap();
        assert_eq!(index2.index(), 1);
        assert_ne!(index1, index2);

        state.free(index1);
        let index3 = state.alloc(Some(id1)).unwrap();
        assert_eq!(index3, index1);
        state.free(index3);

        state.free(index2);

        // Both id1 and id2 should have some slots with affinity.
        let affinity_modules = state.testing_module_affinity_list();
        assert_eq!(2, affinity_modules.len());
        assert!(affinity_modules.contains(&id1));
        assert!(affinity_modules.contains(&id2));

        // Now there is 1 free instance for id2 and 1 free instance
        // for id1, and 98 empty. Allocate 100 for id2. The first
        // should be equal to the one we know was previously used for
        // id2. The next 99 are arbitrary.

        let mut indices = vec![];
        for _ in 0..100 {
            indices.push(state.alloc(Some(id2)).unwrap());
        }
        assert!(state.alloc(None).is_none());
        assert_eq!(indices[0], index2);

        for i in indices {
            state.free(i);
        }

        // Now there should be no slots left with affinity for id1.
        let affinity_modules = state.testing_module_affinity_list();
        assert_eq!(1, affinity_modules.len());
        assert!(affinity_modules.contains(&id2));

        // Allocate an index we know previously had an instance but
        // now does not (list ran empty).
        let index = state.alloc(Some(id1)).unwrap();
        state.free(index);
    }

    #[test]
    fn clear_affine() {
        let id_alloc = CompiledModuleIdAllocator::new();
        let id = id_alloc.alloc();

        for max_unused_warm_slots in [0, 1, 2] {
            let state = IndexAllocator::new(100, max_unused_warm_slots);

            let index1 = state.alloc(Some(id)).unwrap();
            let index2 = state.alloc(Some(id)).unwrap();
            state.free(index2);
            state.free(index1);
            assert!(state.alloc_affine_and_clear_affinity(id).is_some());
            assert!(state.alloc_affine_and_clear_affinity(id).is_some());
            assert_eq!(state.alloc_affine_and_clear_affinity(id), None);
        }
    }

    #[test]
    fn test_affinity_allocation_strategy_random() {
        use rand::Rng;
        let mut rng = rand::thread_rng();

        let id_alloc = CompiledModuleIdAllocator::new();
        let ids = std::iter::repeat_with(|| id_alloc.alloc())
            .take(10)
            .collect::<Vec<_>>();
        let state = IndexAllocator::new(1000, 1000);
        let mut allocated: Vec<SlotId> = vec![];
        let mut last_id = vec![None; 1000];

        let mut hits = 0;
        for _ in 0..100_000 {
            loop {
                if !allocated.is_empty() && rng.gen_bool(0.5) {
                    let i = rng.gen_range(0..allocated.len());
                    let to_free_idx = allocated.swap_remove(i);
                    state.free(to_free_idx);
                } else {
                    let id = ids[rng.gen_range(0..ids.len())];
                    let index = match state.alloc(Some(id)) {
                        Some(id) => id,
                        None => continue,
                    };
                    if last_id[index.index()] == Some(id) {
                        hits += 1;
                    }
                    last_id[index.index()] = Some(id);
                    allocated.push(index);
                }
                break;
            }
        }

        // 10% reuse would be random chance (because we have 10 module
        // IDs). Check for at least double that to ensure some sort of
        // affinity is occurring.
        assert!(
            hits > 20000,
            "expected at least 20000 (20%) ID-reuses but got {}",
            hits
        );
    }

    #[test]
    fn test_affinity_threshold() {
        let id_alloc = CompiledModuleIdAllocator::new();
        let id1 = id_alloc.alloc();
        let id2 = id_alloc.alloc();
        let id3 = id_alloc.alloc();
        let state = IndexAllocator::new(10, 2);

        // Set some slot affinities.
        assert_eq!(state.alloc(Some(id1)), Some(SlotId(0)));
        state.free(SlotId(0));
        assert_eq!(state.alloc(Some(id2)), Some(SlotId(1)));
        state.free(SlotId(1));

        // Only 2 slots are allowed to be unused and warm, so we're at our
        // threshold, meaning one must now be evicted.
        assert_eq!(state.alloc(Some(id3)), Some(SlotId(0)));
        state.free(SlotId(0));

        // Pick up `id2` again; it should be affine.
        assert_eq!(state.alloc(Some(id2)), Some(SlotId(1)));

        // With only one warm slot available, allocation for `id1` should pick
        // a fresh slot.
        assert_eq!(state.alloc(Some(id1)), Some(SlotId(2)));

        state.free(SlotId(1));
        state.free(SlotId(2));

        // Ensure everything stays affine.
        assert_eq!(state.alloc(Some(id1)), Some(SlotId(2)));
        assert_eq!(state.alloc(Some(id2)), Some(SlotId(1)));
        assert_eq!(state.alloc(Some(id3)), Some(SlotId(0)));

        state.free(SlotId(1));
        state.free(SlotId(2));
        state.free(SlotId(0));

        // The LRU slot is 1, so that should be picked.
        assert_eq!(state.alloc(Some(id_alloc.alloc())), Some(SlotId(1)));

        // Pick another LRU entry, this time 2.
        assert_eq!(state.alloc(Some(id_alloc.alloc())), Some(SlotId(2)));

        // This should preserve slot `0` and pick up something new.
        assert_eq!(state.alloc(Some(id_alloc.alloc())), Some(SlotId(3)));

        state.free(SlotId(1));
        state.free(SlotId(2));
        state.free(SlotId(3));

        // For good measure, make sure `id3` is still affine.
        assert_eq!(state.alloc(Some(id3)), Some(SlotId(0)));
    }
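
    // Check the FIFO ordering of the `warm` freelist via the testing-only
    // accessor: freed slots are appended at the tail, so the earliest-freed
    // slot sits at the head and is the first candidate for `pick_warm`.
    #[test]
    fn test_freelist_order() {
        let state = IndexAllocator::new(4, 4);
        let a = state.alloc(None).unwrap();
        let b = state.alloc(None).unwrap();
        state.free(a);
        state.free(b);
        assert_eq!(state.testing_freelist(), vec![a, b]);
    }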
}