// cranelift_wasm/heap.rs

1//! Heaps to implement WebAssembly linear memories.
2
3use cranelift_codegen::ir::{GlobalValue, Type};
4use cranelift_entity::entity_impl;
5
/// An opaque reference to a [`HeapData`][crate::HeapData].
///
/// This is a plain `u32` index newtype; it carries no data itself and is only
/// meaningful as a key into the entity map holding the corresponding
/// [`HeapData`][crate::HeapData].
///
/// While the order is stable, it is arbitrary.
#[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[cfg_attr(feature = "enable-serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Heap(u32);
// Generates the entity-reference boilerplate for `Heap` (index conversions,
// and a `"heap"`-prefixed display form); macro provided by `cranelift_entity`.
entity_impl!(Heap, "heap");
13
/// A heap implementing a WebAssembly linear memory.
///
/// Code compiled from WebAssembly runs in a sandbox where it can't access all
/// process memory. Instead, it is given a small set of memory areas to work in,
/// and all accesses are bounds checked. `cranelift-wasm` models this through
/// the concept of *heaps*.
///
/// Heap addresses can be smaller than the native pointer size, for example
/// unsigned `i32` offsets on a 64-bit architecture.
///
/// A heap appears as three consecutive ranges of address space:
///
/// 1. The *mapped pages* are the accessible memory range in the heap. A heap
///    may have a minimum guaranteed size which means that some mapped pages are
///    always present.
///
/// 2. The *unmapped pages* is a possibly empty range of address space that may
///    be mapped in the future when the heap is grown. They are addressable
///    but not accessible.
///
/// 3. The *offset-guard pages* is a range of address space that is guaranteed
///    to always cause a trap when accessed. It is used to optimize bounds
///    checking for heap accesses with a shared base pointer. They are
///    addressable but not accessible.
///
/// The *heap bound* is the total size of the mapped and unmapped pages. This is
/// the bound that `heap_addr` checks against. Memory accesses inside the heap
/// bounds can trap if they hit an unmapped page (which is not accessible).
///
/// Two styles of heaps are supported, *static* and *dynamic*. They behave
/// differently when resized.
///
/// #### Static heaps
///
/// A *static heap* starts out with all the address space it will ever need, so it
/// never moves to a different address. At the base address is a number of mapped
/// pages corresponding to the heap's current size. Then follows a number of
/// unmapped pages where the heap can grow up to its maximum size. After the
/// unmapped pages follow the offset-guard pages which are also guaranteed to
/// generate a trap when accessed.
///
/// #### Dynamic heaps
///
/// A *dynamic heap* can be relocated to a different base address when it is
/// resized, and its bound can move dynamically. The offset-guard pages move
/// when the heap is resized. The bound of a dynamic heap is stored in a global
/// value.
#[derive(Clone, PartialEq, Hash)]
#[cfg_attr(feature = "enable-serde", derive(serde::Serialize, serde::Deserialize))]
pub struct HeapData {
    /// The address of the start of the heap's storage.
    pub base: GlobalValue,

    /// Guaranteed minimum heap size in bytes. Heap accesses before `min_size`
    /// don't need bounds checking.
    pub min_size: u64,

    /// Size in bytes of the offset-guard pages following the heap.
    /// These pages trap on access, letting bounds checks on small constant
    /// offsets be elided (see the type-level docs above).
    pub offset_guard_size: u64,

    /// Heap style, with additional style-specific info: static heaps carry a
    /// constant bound, dynamic heaps a global value holding the bound.
    pub style: HeapStyle,

    /// The index type for the heap.
    // NOTE(review): presumably `i32` for 32-bit wasm memories and `i64` for
    // memory64 — confirm against the code that constructs `HeapData`.
    pub index_type: Type,
}
80
/// Style of heap including style-specific information.
///
/// See the [`HeapData`] type-level documentation for how the two styles
/// differ when the heap is resized.
#[derive(Clone, PartialEq, Hash)]
#[cfg_attr(feature = "enable-serde", derive(serde::Serialize, serde::Deserialize))]
pub enum HeapStyle {
    /// A dynamic heap can be relocated to a different base address when it is
    /// grown.
    Dynamic {
        /// Global value providing the current bound of the heap in bytes.
        /// Because the bound can change at runtime, it must be read through
        /// this global value rather than baked in as a constant.
        bound_gv: GlobalValue,
    },

    /// A static heap has a fixed base address and a number of not-yet-allocated
    /// pages before the offset-guard pages.
    Static {
        /// Heap bound in bytes. The offset-guard pages are allocated after the
        /// bound.
        bound: u64,
    },
}
99}