wasmtime_runtime/instance/allocator.rs

use crate::imports::Imports;
use crate::instance::{Instance, InstanceHandle, RuntimeMemoryCreator};
use crate::memory::{DefaultMemoryCreator, Memory};
use crate::table::Table;
use crate::{CompiledModuleId, ModuleRuntimeInfo, Store};
use anyhow::{anyhow, bail, Result};
use std::alloc;
use std::any::Any;
use std::convert::TryFrom;
use std::ptr;
use std::sync::Arc;
use wasmtime_environ::{
    DefinedMemoryIndex, DefinedTableIndex, HostPtr, InitMemory, MemoryInitialization,
    MemoryInitializer, Module, PrimaryMap, TableInitialization, TableInitializer, Trap, VMOffsets,
    WasmType, WASM_PAGE_SIZE,
};

#[cfg(feature = "pooling-allocator")]
mod pooling;

#[cfg(feature = "pooling-allocator")]
pub use self::pooling::{InstanceLimits, PoolingInstanceAllocator, PoolingInstanceAllocatorConfig};

/// Represents a request for a new runtime instance.
pub struct InstanceAllocationRequest<'a> {
    /// The info related to the compiled version of this module,
    /// needed for instantiation: function metadata, JIT code
    /// addresses, precomputed images for lazy memory and table
    /// initialization, and the like. This `Arc` is cloned and held for
    /// the lifetime of the instance.
    pub runtime_info: &'a Arc<dyn ModuleRuntimeInfo>,

    /// The imports to use for the instantiation.
    pub imports: Imports<'a>,

    /// The host state to associate with the instance.
    pub host_state: Box<dyn Any + Send + Sync>,

    /// A pointer to the "store" for this instance to be allocated into. The
    /// store correlates with the `Store` in wasmtime itself, and lots of
    /// contextual information about the execution of wasm can be learned
    /// through the store.
    ///
    /// Note that this is a raw pointer and has a static lifetime, both of which
    /// are a bit of a lie. This is done purely so a store can learn about
    /// itself when it gets called as a host function, and additionally so this
    /// runtime can access internals as necessary (such as the
    /// `VMExternRefActivationsTable` or the resource limiter methods).
    ///
    /// Note that this ends up being a self-pointer to the instance when stored.
    /// The reason is that the instance itself is then stored within the store.
    /// We use a number of `PhantomPinned` declarations to indicate this to the
    /// compiler. More info on this in `wasmtime/src/store.rs`.
    pub store: StorePtr,
}
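
// An illustrative, hypothetical construction of a request (`runtime_info`,
// `imports`, and `store_ptr` stand in for values the caller already holds):
//
//     let req = InstanceAllocationRequest {
//         runtime_info: &runtime_info,
//         imports,
//         host_state: Box::new(()),
//         store: StorePtr::new(store_ptr),
//     };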

/// A pointer to a `Store`. This `Option<*mut dyn Store>` is wrapped in a
/// struct so that the function to create a `&mut dyn Store` is a method on a
/// member of `InstanceAllocationRequest`, rather than on
/// `&mut InstanceAllocationRequest` itself, because several use sites require
/// a split mutable borrow of the `InstanceAllocationRequest`.
pub struct StorePtr(Option<*mut dyn Store>);

impl StorePtr {
    /// A pointer to no `Store`.
    pub fn empty() -> Self {
        Self(None)
    }

    /// A pointer to a `Store`.
    pub fn new(ptr: *mut dyn Store) -> Self {
        Self(Some(ptr))
    }

    /// The raw contents of this struct.
    pub fn as_raw(&self) -> Option<*mut dyn Store> {
        self.0
    }

    /// Use the `StorePtr` as a mutable reference to the `Store`.
    ///
    /// # Safety
    ///
    /// Must not be used outside the original lifetime of the borrow.
    pub(crate) unsafe fn get(&mut self) -> Option<&mut dyn Store> {
        match self.0 {
            Some(ptr) => Some(&mut *ptr),
            None => None,
        }
    }
}
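
// Why the wrapper matters: with the pointer behind its own field, a caller can
// borrow `req.store` mutably while another field of the same request stays
// borrowed, since field borrows are disjoint. A minimal sketch (the function
// name `example` is hypothetical):
//
//     fn example(req: &mut InstanceAllocationRequest<'_>) {
//         let imports = &req.imports;             // shared borrow of one field
//         let store = unsafe { req.store.get() }; // mutable borrow of another;
//         let _ = (imports, store);               // disjoint borrows coexist
//     }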

/// Represents a runtime instance allocator.
///
/// # Safety
///
/// This trait is unsafe as it requires knowledge of Wasmtime's runtime
/// internals to implement correctly.
pub unsafe trait InstanceAllocator {
    /// Validates that a module is supported by the allocator.
    fn validate(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()> {
        drop((module, offsets));
        Ok(())
    }

    /// Allocates a fresh `InstanceHandle` for the `req` given.
    ///
    /// This will allocate memories and tables internally from this allocator
    /// and weave them all together into a final and complete `InstanceHandle`
    /// ready to be registered with a store.
    ///
    /// Note that the returned instance must still have `.initialize(..)` called
    /// on it to complete the instantiation process.
    fn allocate(&self, mut req: InstanceAllocationRequest) -> Result<InstanceHandle> {
        let index = self.allocate_index(&req)?;
        let module = req.runtime_info.module();
        let mut memories =
            PrimaryMap::with_capacity(module.memory_plans.len() - module.num_imported_memories);
        let mut tables =
            PrimaryMap::with_capacity(module.table_plans.len() - module.num_imported_tables);

        // If either allocation fails, unwind any partial allocations before
        // propagating the error.
        let result = self
            .allocate_memories(index, &mut req, &mut memories)
            .and_then(|()| self.allocate_tables(index, &mut req, &mut tables));
        if let Err(e) = result {
            self.deallocate_memories(index, &mut memories);
            self.deallocate_tables(index, &mut tables);
            self.deallocate_index(index);
            return Err(e);
        }

        unsafe { Ok(Instance::new(req, index, memories, tables)) }
    }

    /// Deallocates the provided instance.
    ///
    /// This will null out the pointer within `handle` and otherwise reclaim
    /// resources such as tables, memories, and the instance memory itself.
    fn deallocate(&self, handle: &mut InstanceHandle) {
        let index = handle.instance().index;
        self.deallocate_memories(index, &mut handle.instance_mut().memories);
        self.deallocate_tables(index, &mut handle.instance_mut().tables);
        unsafe {
            let layout = Instance::alloc_layout(handle.instance().offsets());
            ptr::drop_in_place(handle.instance);
            alloc::dealloc(handle.instance.cast(), layout);
            handle.instance = std::ptr::null_mut();
        }
        self.deallocate_index(index);
    }

    /// Optionally allocates an allocator-defined index for the `req` provided.
    ///
    /// The return value here, if successful, is passed to the various methods
    /// below for memory/table allocation/deallocation.
    fn allocate_index(&self, req: &InstanceAllocationRequest) -> Result<usize>;

    /// Deallocates indices allocated by `allocate_index`.
    fn deallocate_index(&self, index: usize);

    /// Attempts to allocate all defined linear memories for a module.
    ///
    /// Pushes all memories for `req` onto the `mems` storage provided, which is
    /// already appropriately allocated to contain all memories.
    ///
    /// Note that this is allowed to fail. Failure can additionally happen after
    /// some memories have already been successfully allocated. All memories
    /// pushed onto `mems` are guaranteed to one day make their way to
    /// `deallocate_memories`.
    fn allocate_memories(
        &self,
        index: usize,
        req: &mut InstanceAllocationRequest,
        mems: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
    ) -> Result<()>;

    /// Deallocates all memories provided, optionally reclaiming resources for
    /// the pooling allocator, for example.
    fn deallocate_memories(&self, index: usize, mems: &mut PrimaryMap<DefinedMemoryIndex, Memory>);

    /// Same as `allocate_memories`, but for tables.
    fn allocate_tables(
        &self,
        index: usize,
        req: &mut InstanceAllocationRequest,
        tables: &mut PrimaryMap<DefinedTableIndex, Table>,
    ) -> Result<()>;

    /// Same as `deallocate_memories`, but for tables.
    fn deallocate_tables(&self, index: usize, tables: &mut PrimaryMap<DefinedTableIndex, Table>);

    /// Allocates a fiber stack for calling async functions on.
    #[cfg(feature = "async")]
    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack>;

    /// Deallocates a fiber stack that was previously allocated with
    /// `allocate_fiber_stack`.
    ///
    /// # Safety
    ///
    /// The provided stack is required to have been allocated with
    /// `allocate_fiber_stack`.
    #[cfg(feature = "async")]
    unsafe fn deallocate_fiber_stack(&self, stack: &wasmtime_fiber::FiberStack);

    /// Purges all lingering resources related to `module` from within this
    /// allocator.
    ///
    /// Primarily present for the pooling allocator to remove mappings of
    /// this module from slots in linear memory.
    fn purge_module(&self, module: CompiledModuleId);
}
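
// A minimal sketch of the lifecycle this trait defines, assuming an
// `OnDemandInstanceAllocator` and a hypothetical, already-prepared `req`
// (`InstanceAllocationRequest`); in practice this sequence is driven by
// wasmtime's `Store` internals rather than written out by hand:
//
//     let allocator = OnDemandInstanceAllocator::default();
//     let mut handle = allocator.allocate(req)?;
//     // ... initialize the instance and run wasm through the handle ...
//     allocator.deallocate(&mut handle);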

/// Computes the starting offset of a table element segment, adding the
/// segment's constant offset to the current value of its base global (if
/// any), e.g. a segment declared as `(elem (offset (global.get $g)) ...)`.
fn get_table_init_start(init: &TableInitializer, instance: &mut Instance) -> Result<u32> {
    match init.base {
        Some(base) => {
            let val = unsafe { *(*instance.defined_or_imported_global_ptr(base)).as_u32() };

            init.offset
                .checked_add(val)
                .ok_or_else(|| anyhow!("element segment global base overflows"))
        }
        None => Ok(init.offset),
    }
}

fn check_table_init_bounds(instance: &mut Instance, module: &Module) -> Result<()> {
    match &module.table_initialization {
        TableInitialization::FuncTable { segments, .. }
        | TableInitialization::Segments { segments } => {
            for segment in segments {
                let table = unsafe { &*instance.get_table(segment.table_index) };
                let start = get_table_init_start(segment, instance)?;
                let start = usize::try_from(start).unwrap();
                let end = start.checked_add(segment.elements.len());

                match end {
                    Some(end) if end <= table.size() as usize => {
                        // Initializer is in bounds
                    }
                    _ => {
                        bail!("table out of bounds: elements segment does not fit")
                    }
                }
            }
        }
    }

    Ok(())
}

fn initialize_tables(instance: &mut Instance, module: &Module) -> Result<()> {
    // Note: if the module's table initializer state is in
    // FuncTable mode, we will lazily initialize tables based on
    // any statically-precomputed image of FuncIndexes, but there
    // may still be "leftover segments" that could not be
    // incorporated (e.g. segments whose base comes from a global
    // and so is not known until instantiation time). So we have a
    // unified handler here that iterates over all segments
    // (Segments mode) or leftover segments (FuncTable mode) to
    // initialize.
    match &module.table_initialization {
        TableInitialization::FuncTable { segments, .. }
        | TableInitialization::Segments { segments } => {
            for segment in segments {
                let start = get_table_init_start(segment, instance)?;
                instance.table_init_segment(
                    segment.table_index,
                    &segment.elements,
                    start,
                    0,
                    segment.elements.len() as u32,
                )?;
            }
        }
    }

    Ok(())
}

/// Computes the starting offset of a data segment, adding the segment's
/// constant offset to the current value of its base global (if any). For
/// 64-bit memories the base global is read as a `u64`; for 32-bit memories
/// it is read as a `u32` and zero-extended.
fn get_memory_init_start(init: &MemoryInitializer, instance: &mut Instance) -> Result<u64> {
    match init.base {
        Some(base) => {
            let mem64 = instance.module().memory_plans[init.memory_index]
                .memory
                .memory64;
            let val = unsafe {
                let global = instance.defined_or_imported_global_ptr(base);
                if mem64 {
                    *(*global).as_u64()
                } else {
                    u64::from(*(*global).as_u32())
                }
            };

            init.offset
                .checked_add(val)
                .ok_or_else(|| anyhow!("data segment global base overflows"))
        }
        None => Ok(init.offset),
    }
}

fn check_memory_init_bounds(
    instance: &mut Instance,
    initializers: &[MemoryInitializer],
) -> Result<()> {
    for init in initializers {
        let memory = instance.get_memory(init.memory_index);
        let start = get_memory_init_start(init, instance)?;
        let end = usize::try_from(start)
            .ok()
            .and_then(|start| start.checked_add(init.data.len()));

        match end {
            Some(end) if end <= memory.current_length() => {
                // Initializer is in bounds
            }
            _ => {
                bail!("memory out of bounds: data segment does not fit")
            }
        }
    }

    Ok(())
}

fn initialize_memories(instance: &mut Instance, module: &Module) -> Result<()> {
    let memory_size_in_pages = &|instance: &mut Instance, memory| {
        (instance.get_memory(memory).current_length() as u64) / u64::from(WASM_PAGE_SIZE)
    };

    // Loads the `global` value and returns it as a `u64`, zero-extending
    // 32-bit globals, which can be used as the base for 32-bit memories.
    let get_global_as_u64 = &mut |instance: &mut Instance, global| unsafe {
        let def = instance.defined_or_imported_global_ptr(global);
        if module.globals[global].wasm_ty == WasmType::I64 {
            *(*def).as_u64()
        } else {
            u64::from(*(*def).as_u32())
        }
    };

    // Delegates to the `init_memory` method, which is sort of a duplicate of
    // `instance.memory_init_segment` but is used at compile-time in other
    // contexts, so it is shared here to have only one method of memory
    // initialization.
    //
    // This call to `init_memory` notably implements all the bells and whistles,
    // so errors only happen if an out-of-bounds segment is found, in which case
    // a trap is returned.
    let ok = module.memory_initialization.init_memory(
        instance,
        InitMemory::Runtime {
            memory_size_in_pages,
            get_global_as_u64,
        },
        |instance, memory_index, init| {
            // If this initializer applies to a defined memory but that memory
            // doesn't need initialization, due to something like copy-on-write
            // pre-initializing it via mmap magic, then this initializer can be
            // skipped entirely.
            if let Some(memory_index) = module.defined_memory_index(memory_index) {
                if !instance.memories[memory_index].needs_init() {
                    return true;
                }
            }
            let memory = instance.get_memory(memory_index);

            unsafe {
                let src = instance.wasm_data(init.data.clone());
                let dst = memory.base.add(usize::try_from(init.offset).unwrap());
                // FIXME audit whether this is safe in the presence of shared
                // memory
                // (https://github.com/bytecodealliance/wasmtime/issues/4203).
                ptr::copy_nonoverlapping(src.as_ptr(), dst, src.len())
            }
            true
        },
    );
    if !ok {
        return Err(Trap::MemoryOutOfBounds.into());
    }

    Ok(())
}

fn check_init_bounds(instance: &mut Instance, module: &Module) -> Result<()> {
    check_table_init_bounds(instance, module)?;

    match &module.memory_initialization {
        MemoryInitialization::Segmented(initializers) => {
            check_memory_init_bounds(instance, initializers)?;
        }
        // Statically validated already to have everything in-bounds.
        MemoryInitialization::Static { .. } => {}
    }

    Ok(())
}

pub(super) fn initialize_instance(
    instance: &mut Instance,
    module: &Module,
    is_bulk_memory: bool,
) -> Result<()> {
    // If bulk memory is not enabled, bounds check the data and element segments
    // before making any changes. With bulk memory enabled, initializers are
    // processed in order and side effects are observed up to the point of an
    // out-of-bounds initializer, so the early checking is not desired.
    if !is_bulk_memory {
        check_init_bounds(instance, module)?;
    }

    // Initialize the tables
    initialize_tables(instance, module)?;

    // Initialize the memories
    initialize_memories(instance, module)?;

    Ok(())
}
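
// To illustrate the difference: given a module with two data segments where
// only the second is out of bounds, the non-bulk-memory path fails the
// up-front bounds check before any bytes are written, while the bulk-memory
// path copies the first segment into memory and only then traps on the second.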

/// Represents the on-demand instance allocator.
#[derive(Clone)]
pub struct OnDemandInstanceAllocator {
    mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
    #[cfg(feature = "async")]
    stack_size: usize,
}

impl OnDemandInstanceAllocator {
    /// Creates a new on-demand instance allocator.
    pub fn new(mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>, stack_size: usize) -> Self {
        drop(stack_size); // suppress unused warnings without the `async` feature
        Self {
            mem_creator,
            #[cfg(feature = "async")]
            stack_size,
        }
    }
}

impl Default for OnDemandInstanceAllocator {
    fn default() -> Self {
        Self {
            mem_creator: None,
            #[cfg(feature = "async")]
            stack_size: 0,
        }
    }
}
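
// Embedders typically select this allocator through wasmtime's public API
// rather than constructing it directly; a sketch, assuming the public
// `wasmtime::Config` and `wasmtime::InstanceAllocationStrategy` types:
//
//     let mut config = wasmtime::Config::new();
//     config.allocation_strategy(wasmtime::InstanceAllocationStrategy::OnDemand);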

unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
    fn allocate_index(&self, _req: &InstanceAllocationRequest) -> Result<usize> {
        // The on-demand allocator has no notion of an instance index; every
        // instance gets the same placeholder.
        Ok(0)
    }

    fn deallocate_index(&self, index: usize) {
        assert_eq!(index, 0);
    }

    fn allocate_memories(
        &self,
        _index: usize,
        req: &mut InstanceAllocationRequest,
        memories: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
    ) -> Result<()> {
        let module = req.runtime_info.module();
        let creator = self
            .mem_creator
            .as_deref()
            .unwrap_or_else(|| &DefaultMemoryCreator);
        let num_imports = module.num_imported_memories;
        for (memory_idx, plan) in module.memory_plans.iter().skip(num_imports) {
            let defined_memory_idx = module
                .defined_memory_index(memory_idx)
                .expect("Skipped imports, should never be None");
            let image = req.runtime_info.memory_image(defined_memory_idx)?;

            memories.push(Memory::new_dynamic(
                plan,
                creator,
                unsafe {
                    req.store
                        .get()
                        .expect("if module has memory plans, store is not empty")
                },
                image,
            )?);
        }
        Ok(())
    }

    fn deallocate_memories(
        &self,
        _index: usize,
        _mems: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
    ) {
        // Normal destructors do cleanup here.
    }

    fn allocate_tables(
        &self,
        _index: usize,
        req: &mut InstanceAllocationRequest,
        tables: &mut PrimaryMap<DefinedTableIndex, Table>,
    ) -> Result<()> {
        let module = req.runtime_info.module();
        let num_imports = module.num_imported_tables;
        for (_, table) in module.table_plans.iter().skip(num_imports) {
            tables.push(Table::new_dynamic(table, unsafe {
                req.store
                    .get()
                    .expect("if module has table plans, store is not empty")
            })?);
        }
        Ok(())
    }

    fn deallocate_tables(&self, _index: usize, _tables: &mut PrimaryMap<DefinedTableIndex, Table>) {
        // Normal destructors do cleanup here.
    }

    #[cfg(feature = "async")]
    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack> {
        if self.stack_size == 0 {
            bail!("fiber stacks are not supported by the allocator")
        }

        let stack = wasmtime_fiber::FiberStack::new(self.stack_size)?;
        Ok(stack)
    }
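
    // Sketch of fiber-stack round-tripping under the `async` feature (the
    // stack size here is purely illustrative):
    //
    //     let alloc = OnDemandInstanceAllocator::new(None, 0x10000);
    //     let stack = alloc.allocate_fiber_stack()?;
    //     // ... run an async call on the stack ...
    //     unsafe { alloc.deallocate_fiber_stack(&stack) };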

    #[cfg(feature = "async")]
    unsafe fn deallocate_fiber_stack(&self, _stack: &wasmtime_fiber::FiberStack) {
        // The on-demand allocator has no further bookkeeping for fiber stacks.
    }

    fn purge_module(&self, _: CompiledModuleId) {}
}