// wasmtime_runtime/instance.rs

1//! An `Instance` contains all the runtime state used by execution of a
2//! wasm module (except its callstack and register state). An
3//! `InstanceHandle` is a reference-counting handle for an `Instance`.
4
5use crate::export::Export;
6use crate::externref::VMExternRefActivationsTable;
7use crate::memory::{Memory, RuntimeMemoryCreator};
8use crate::table::{Table, TableElement, TableElementType};
9use crate::vmcontext::{
10    VMBuiltinFunctionsArray, VMCallerCheckedFuncRef, VMContext, VMFunctionImport,
11    VMGlobalDefinition, VMGlobalImport, VMMemoryDefinition, VMMemoryImport, VMOpaqueContext,
12    VMRuntimeLimits, VMTableDefinition, VMTableImport, VMCONTEXT_MAGIC,
13};
14use crate::{
15    ExportFunction, ExportGlobal, ExportMemory, ExportTable, Imports, ModuleRuntimeInfo, Store,
16    VMFunctionBody, VMSharedSignatureIndex, WasmFault,
17};
18use anyhow::Error;
19use anyhow::Result;
20use memoffset::offset_of;
21use std::alloc::{self, Layout};
22use std::any::Any;
23use std::convert::TryFrom;
24use std::hash::Hash;
25use std::ops::Range;
26use std::ptr::NonNull;
27use std::sync::atomic::AtomicU64;
28use std::sync::Arc;
29use std::{mem, ptr};
30use wasmtime_environ::{
31    packed_option::ReservedValue, DataIndex, DefinedGlobalIndex, DefinedMemoryIndex,
32    DefinedTableIndex, ElemIndex, EntityIndex, EntityRef, EntitySet, FuncIndex, GlobalIndex,
33    GlobalInit, HostPtr, MemoryIndex, Module, PrimaryMap, SignatureIndex, TableIndex,
34    TableInitialization, Trap, VMOffsets, WasmType,
35};
36
37mod allocator;
38
39pub use allocator::*;
40
/// A type that roughly corresponds to a WebAssembly instance, but is also used
/// for host-defined objects.
///
/// This structure is never allocated directly but is instead managed through
/// an `InstanceHandle`. This structure ends with a `VMContext` which has a
/// dynamic size corresponding to the `module` configured within. Memory
/// management of this structure is always externalized.
///
/// Instances here can correspond to actual instantiated modules, but it's also
/// used ubiquitously for host-defined objects. For example creating a
/// host-defined memory will have a `module` that looks like it exports a single
/// memory (and similar for other constructs).
///
/// This `Instance` type is used as a ubiquitous representation for WebAssembly
/// values, whether or not they were created on the host or through a module.
#[repr(C)] // ensure that the vmctx field is last.
pub(crate) struct Instance {
    /// The runtime info (corresponding to the "compiled module"
    /// abstraction in higher layers) that is retained and needed for
    /// lazy initialization. This provides access to the underlying
    /// Wasm module entities, the compiled JIT code, metadata about
    /// functions, lazy initialization state, etc.
    runtime_info: Arc<dyn ModuleRuntimeInfo>,

    /// WebAssembly linear memory data.
    ///
    /// This is where all runtime information about defined linear memories in
    /// this module lives.
    memories: PrimaryMap<DefinedMemoryIndex, Memory>,

    /// WebAssembly table data.
    ///
    /// Like memories, this is only for defined tables in the module and
    /// contains all of their runtime state.
    tables: PrimaryMap<DefinedTableIndex, Table>,

    /// Stores the dropped passive element segments in this instantiation by index.
    /// If the index is present in the set, the segment has been dropped.
    dropped_elements: EntitySet<ElemIndex>,

    /// Stores the dropped passive data segments in this instantiation by index.
    /// If the index is present in the set, the segment has been dropped.
    dropped_data: EntitySet<DataIndex>,

    /// Hosts can store arbitrary per-instance information here.
    ///
    /// Most of the time from Wasmtime this is `Box::new(())`, a noop
    /// allocation, but some host-defined objects will store their state here.
    host_state: Box<dyn Any + Send + Sync>,

    /// Instance of this instance within its `InstanceAllocator` trait
    /// implementation.
    ///
    /// This is always 0 for the on-demand instance allocator and it's the
    /// index of the slot in the pooling allocator.
    index: usize,

    /// Additional context used by compiled wasm code. This field is last, and
    /// represents a dynamically-sized array that extends beyond the nominal
    /// end of the struct (similar to a flexible array member).
    vmctx: VMContext,
}
103
104#[allow(clippy::cast_ptr_alignment)]
105impl Instance {
    /// Create an instance at the given memory address.
    ///
    /// It is assumed the memory was properly aligned and the
    /// allocation was `alloc_size` in bytes.
    unsafe fn new(
        req: InstanceAllocationRequest,
        index: usize,
        memories: PrimaryMap<DefinedMemoryIndex, Memory>,
        tables: PrimaryMap<DefinedTableIndex, Table>,
    ) -> InstanceHandle {
        // The allocation must be *at least* the size required of `Instance`.
        // The layout covers the fixed-size header plus the trailing
        // dynamically-sized vmctx area described by `offsets`.
        let layout = Self::alloc_layout(req.runtime_info.offsets());
        let ptr = alloc::alloc(layout);
        if ptr.is_null() {
            alloc::handle_alloc_error(layout);
        }
        let ptr = ptr.cast::<Instance>();

        // Passive segments start out "not dropped"; size the bitsets to this
        // module's segment counts.
        let module = req.runtime_info.module();
        let dropped_elements = EntitySet::with_capacity(module.passive_elements.len());
        let dropped_data = EntitySet::with_capacity(module.passive_data_map.len());

        // Write the header in place (the memory is uninitialized, so
        // `ptr::write` avoids dropping garbage); the vmctx area past the
        // header is filled in by `initialize_vmctx` below.
        ptr::write(
            ptr,
            Instance {
                runtime_info: req.runtime_info.clone(),
                index,
                memories,
                tables,
                dropped_elements,
                dropped_data,
                host_state: req.host_state,
                vmctx: VMContext {
                    _marker: std::marker::PhantomPinned,
                },
            },
        );

        (*ptr).initialize_vmctx(module, req.runtime_info.offsets(), req.store, req.imports);
        InstanceHandle { instance: ptr }
    }
147
    /// Helper function to access various locations offset from our `*mut
    /// VMContext` object.
    ///
    /// # Safety
    ///
    /// `offset` must be a valid, in-bounds offset (as produced by
    /// `VMOffsets`) into this instance's trailing vmctx area.
    unsafe fn vmctx_plus_offset<T>(&self, offset: u32) -> *const T {
        (std::ptr::addr_of!(self.vmctx).cast::<u8>())
            .add(usize::try_from(offset).unwrap())
            .cast()
    }
155
    /// Mutable variant of `vmctx_plus_offset`.
    ///
    /// # Safety
    ///
    /// `offset` must be a valid, in-bounds offset (as produced by
    /// `VMOffsets`) into this instance's trailing vmctx area.
    unsafe fn vmctx_plus_offset_mut<T>(&mut self, offset: u32) -> *mut T {
        (std::ptr::addr_of_mut!(self.vmctx).cast::<u8>())
            .add(usize::try_from(offset).unwrap())
            .cast()
    }
161
    /// Return the `Module` this instance was instantiated from.
    pub(crate) fn module(&self) -> &Arc<Module> {
        self.runtime_info.module()
    }
165
    /// Return the `VMOffsets` describing the layout of this instance's vmctx.
    fn offsets(&self) -> &VMOffsets<HostPtr> {
        self.runtime_info.offsets()
    }
169
    /// Return the indexed `VMFunctionImport`.
    fn imported_function(&self, index: FuncIndex) -> &VMFunctionImport {
        unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmfunction_import(index)) }
    }
174
    /// Return the indexed `VMTableImport`.
    fn imported_table(&self, index: TableIndex) -> &VMTableImport {
        unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmtable_import(index)) }
    }
179
    /// Return the indexed `VMMemoryImport`.
    fn imported_memory(&self, index: MemoryIndex) -> &VMMemoryImport {
        unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_import(index)) }
    }
184
    /// Return the indexed `VMGlobalImport`.
    fn imported_global(&self, index: GlobalIndex) -> &VMGlobalImport {
        unsafe { &*self.vmctx_plus_offset(self.offsets().vmctx_vmglobal_import(index)) }
    }
189
    /// Return a copy of the indexed `VMTableDefinition`.
    #[allow(dead_code)]
    fn table(&mut self, index: DefinedTableIndex) -> VMTableDefinition {
        unsafe { *self.table_ptr(index) }
    }
195
    /// Updates the value for a defined table to `VMTableDefinition`.
    ///
    /// Used to keep the vmctx copy in sync after table growth.
    fn set_table(&mut self, index: DefinedTableIndex, table: VMTableDefinition) {
        unsafe {
            *self.table_ptr(index) = table;
        }
    }
202
    /// Return a pointer to the indexed `VMTableDefinition` within the vmctx.
    fn table_ptr(&mut self, index: DefinedTableIndex) -> *mut VMTableDefinition {
        unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_vmtable_definition(index)) }
    }
207
208    /// Get a locally defined or imported memory.
209    pub(crate) fn get_memory(&self, index: MemoryIndex) -> VMMemoryDefinition {
210        if let Some(defined_index) = self.module().defined_memory_index(index) {
211            self.memory(defined_index)
212        } else {
213            let import = self.imported_memory(index);
214            unsafe { VMMemoryDefinition::load(import.from) }
215        }
216    }
217
    /// Get a locally defined or imported memory.
    ///
    /// For imports this chases the import's vmctx pointer to the owning
    /// instance and returns its defined memory.
    pub(crate) fn get_runtime_memory(&mut self, index: MemoryIndex) -> &mut Memory {
        if let Some(defined_index) = self.module().defined_memory_index(index) {
            unsafe { &mut *self.get_defined_memory(defined_index) }
        } else {
            let import = self.imported_memory(index);
            let ctx = unsafe { &mut *import.vmctx };
            unsafe { &mut *ctx.instance_mut().get_defined_memory(import.index) }
        }
    }
228
    /// Return a copy of the indexed `VMMemoryDefinition`.
    fn memory(&self, index: DefinedMemoryIndex) -> VMMemoryDefinition {
        unsafe { VMMemoryDefinition::load(self.memory_ptr(index)) }
    }
233
    /// Set the indexed memory to `VMMemoryDefinition`.
    ///
    /// Used to keep the vmctx copy in sync after memory growth.
    fn set_memory(&self, index: DefinedMemoryIndex, mem: VMMemoryDefinition) {
        unsafe {
            *self.memory_ptr(index) = mem;
        }
    }
240
    /// Return a pointer to the indexed `VMMemoryDefinition`.
    ///
    /// Note that the vmctx stores a *pointer* to the definition here, so this
    /// dereferences the vmctx slot rather than taking its address.
    fn memory_ptr(&self, index: DefinedMemoryIndex) -> *mut VMMemoryDefinition {
        unsafe { *self.vmctx_plus_offset(self.offsets().vmctx_vmmemory_pointer(index)) }
    }
245
    /// Return a shared reference to the indexed `VMGlobalDefinition`.
    fn global(&mut self, index: DefinedGlobalIndex) -> &VMGlobalDefinition {
        unsafe { &*self.global_ptr(index) }
    }
250
    /// Return a pointer to the indexed `VMGlobalDefinition` within the vmctx.
    fn global_ptr(&mut self, index: DefinedGlobalIndex) -> *mut VMGlobalDefinition {
        unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_vmglobal_definition(index)) }
    }
255
256    /// Get a raw pointer to the global at the given index regardless whether it
257    /// is defined locally or imported from another module.
258    ///
259    /// Panics if the index is out of bound or is the reserved value.
260    pub(crate) fn defined_or_imported_global_ptr(
261        &mut self,
262        index: GlobalIndex,
263    ) -> *mut VMGlobalDefinition {
264        if let Some(index) = self.module().defined_global_index(index) {
265            self.global_ptr(index)
266        } else {
267            self.imported_global(index).from
268        }
269    }
270
    /// Return a pointer to the vmctx slot holding the `VMRuntimeLimits`
    /// pointer (the structure compiled code consults for interrupts/limits).
    pub fn runtime_limits(&mut self) -> *mut *const VMRuntimeLimits {
        unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_runtime_limits()) }
    }
275
    /// Return a pointer to the vmctx slot holding the global epoch counter
    /// pointer used by this instance.
    pub fn epoch_ptr(&mut self) -> *mut *const AtomicU64 {
        unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_epoch_ptr()) }
    }
280
    /// Return a pointer to the vmctx slot holding the
    /// `VMExternRefActivationsTable` pointer.
    pub fn externref_activations_table(&mut self) -> *mut *mut VMExternRefActivationsTable {
        unsafe { self.vmctx_plus_offset_mut(self.offsets().vmctx_externref_activations_table()) }
    }
285
    /// Gets a pointer to this instance's `Store` which was originally
    /// configured on creation.
    ///
    /// # Panics
    ///
    /// This will panic if the originally configured store was `None`. That can
    /// happen for host functions so host functions can't be queried what their
    /// original `Store` was since it's just retained as null (since host
    /// functions are shared amongst threads and don't all share the same
    /// store).
    #[inline]
    pub fn store(&self) -> *mut dyn Store {
        // The store is stored as a fat pointer in the vmctx; `set_store(None)`
        // writes two null words there, which this assert catches.
        let ptr =
            unsafe { *self.vmctx_plus_offset::<*mut dyn Store>(self.offsets().vmctx_store()) };
        assert!(!ptr.is_null());
        ptr
    }
303
    /// Configure (or clear) the `Store` pointer recorded in the vmctx, along
    /// with the store-derived pointers compiled code reads (runtime limits,
    /// epoch counter, externref activations table).
    ///
    /// # Safety
    ///
    /// `store`, when `Some`, must point to a live store that outlives this
    /// instance's use of these raw pointers.
    pub unsafe fn set_store(&mut self, store: Option<*mut dyn Store>) {
        if let Some(store) = store {
            *self.vmctx_plus_offset_mut(self.offsets().vmctx_store()) = store;
            *self.runtime_limits() = (*store).vmruntime_limits();
            *self.epoch_ptr() = (*store).epoch_ptr();
            *self.externref_activations_table() = (*store).externref_activations_table().0;
        } else {
            // Clearing writes two null words, relying on `*mut dyn Store`
            // being a two-word fat pointer; the assert pins that assumption.
            assert_eq!(
                mem::size_of::<*mut dyn Store>(),
                mem::size_of::<[*mut (); 2]>()
            );
            *self.vmctx_plus_offset_mut::<[*mut (); 2]>(self.offsets().vmctx_store()) =
                [ptr::null_mut(), ptr::null_mut()];

            *self.runtime_limits() = ptr::null_mut();
            *self.epoch_ptr() = ptr::null_mut();
            *self.externref_activations_table() = ptr::null_mut();
        }
    }
323
    /// Record the callee function pointer in the vmctx (null when `None`).
    pub(crate) unsafe fn set_callee(&mut self, callee: Option<NonNull<VMFunctionBody>>) {
        *self.vmctx_plus_offset_mut(self.offsets().vmctx_callee()) =
            callee.map_or(ptr::null_mut(), |c| c.as_ptr());
    }
328
    /// Return a reference to the vmctx used by compiled wasm code.
    #[inline]
    pub fn vmctx(&self) -> &VMContext {
        &self.vmctx
    }
334
    /// Return a raw pointer to the vmctx used by compiled wasm code.
    #[inline]
    pub fn vmctx_ptr(&self) -> *mut VMContext {
        self.vmctx() as *const VMContext as *mut VMContext
    }
340
    /// Build the export representation of the function at `index`,
    /// (lazily) initializing its anyfunc in the process.
    fn get_exported_func(&mut self, index: FuncIndex) -> ExportFunction {
        // `unwrap` is fine: exported indices are never the reserved value.
        let anyfunc = self.get_caller_checked_anyfunc(index).unwrap();
        let anyfunc = NonNull::new(anyfunc as *const VMCallerCheckedFuncRef as *mut _).unwrap();
        ExportFunction { anyfunc }
    }
346
347    fn get_exported_table(&mut self, index: TableIndex) -> ExportTable {
348        let (definition, vmctx) = if let Some(def_index) = self.module().defined_table_index(index)
349        {
350            (self.table_ptr(def_index), self.vmctx_ptr())
351        } else {
352            let import = self.imported_table(index);
353            (import.from, import.vmctx)
354        };
355        ExportTable {
356            definition,
357            vmctx,
358            table: self.module().table_plans[index].clone(),
359        }
360    }
361
362    fn get_exported_memory(&mut self, index: MemoryIndex) -> ExportMemory {
363        let (definition, vmctx, def_index) =
364            if let Some(def_index) = self.module().defined_memory_index(index) {
365                (self.memory_ptr(def_index), self.vmctx_ptr(), def_index)
366            } else {
367                let import = self.imported_memory(index);
368                (import.from, import.vmctx, import.index)
369            };
370        ExportMemory {
371            definition,
372            vmctx,
373            memory: self.module().memory_plans[index].clone(),
374            index: def_index,
375        }
376    }
377
378    fn get_exported_global(&mut self, index: GlobalIndex) -> ExportGlobal {
379        ExportGlobal {
380            definition: if let Some(def_index) = self.module().defined_global_index(index) {
381                self.global_ptr(def_index)
382            } else {
383                self.imported_global(index).from
384            },
385            global: self.module().globals[index],
386        }
387    }
388
    /// Return an iterator over the exports of this instance.
    ///
    /// Specifically, it provides access to the key-value pairs, where the keys
    /// are export names, and the values are export declarations which can be
    /// resolved with `lookup_by_declaration`.
    pub fn exports(&self) -> indexmap::map::Iter<String, EntityIndex> {
        self.module().exports.iter()
    }
397
    /// Return a reference to the custom state attached to this instance.
    #[inline]
    pub fn host_state(&self) -> &dyn Any {
        &*self.host_state
    }
403
    /// Return the offset from the vmctx pointer to its containing Instance.
    ///
    /// Compiled code hands us `*mut VMContext`; subtracting this offset
    /// recovers the enclosing `Instance` (vmctx is the last field, repr(C)).
    #[inline]
    pub(crate) fn vmctx_offset() -> isize {
        offset_of!(Self, vmctx) as isize
    }
409
    /// Return the table index for the given `VMTableDefinition`.
    ///
    /// # Safety
    ///
    /// `table` must point into this instance's array of table definitions in
    /// the vmctx; the index is recovered via pointer subtraction from the
    /// first definition and bounds-checked by the assert.
    unsafe fn table_index(&mut self, table: &VMTableDefinition) -> DefinedTableIndex {
        let index = DefinedTableIndex::new(
            usize::try_from(
                (table as *const VMTableDefinition)
                    .offset_from(self.table_ptr(DefinedTableIndex::new(0))),
            )
            .unwrap(),
        );
        assert!(index.index() < self.tables.len());
        index
    }
422
    /// Grow memory by the specified amount of pages.
    ///
    /// Returns `None` if memory can't be grown by the specified amount
    /// of pages. Returns `Some` with the old size in bytes if growth was
    /// successful.
    pub(crate) fn memory_grow(
        &mut self,
        index: MemoryIndex,
        delta: u64,
    ) -> Result<Option<usize>, Error> {
        // For an imported memory the growth must happen on the instance that
        // actually owns it, so chase the import's vmctx to that instance.
        let (idx, instance) = if let Some(idx) = self.module().defined_memory_index(index) {
            (idx, self)
        } else {
            let import = self.imported_memory(index);
            unsafe {
                let foreign_instance = (*import.vmctx).instance_mut();
                (import.index, foreign_instance)
            }
        };
        let store = unsafe { &mut *instance.store() };
        let memory = &mut instance.memories[idx];

        let result = unsafe { memory.grow(delta, Some(store)) };

        // Update the state used by a non-shared Wasm memory in case the base
        // pointer and/or the length changed.
        if memory.as_shared_memory().is_none() {
            let vmmemory = memory.vmmemory();
            instance.set_memory(idx, vmmemory);
        }

        result
    }
456
    /// Return the element type of the (local or imported) table at
    /// `table_index`.
    pub(crate) fn table_element_type(&mut self, table_index: TableIndex) -> TableElementType {
        unsafe { (*self.get_table(table_index)).element_type() }
    }
460
    /// Grow table by the specified amount of elements, filling them with
    /// `init_value`.
    ///
    /// Returns `None` if table can't be grown by the specified amount of
    /// elements, or if `init_value` is the wrong type of table element.
    pub(crate) fn table_grow(
        &mut self,
        table_index: TableIndex,
        delta: u32,
        init_value: TableElement,
    ) -> Result<Option<u32>, Error> {
        // Delegate to the instance that actually owns the table (this one,
        // or the exporting instance for an import).
        let (defined_table_index, instance) =
            self.get_defined_table_index_and_instance(table_index);
        instance.defined_table_grow(defined_table_index, delta, init_value)
    }
476
    /// Grow a table defined in this instance, then refresh the vmctx's copy
    /// of its `VMTableDefinition` so compiled code observes the new state.
    fn defined_table_grow(
        &mut self,
        table_index: DefinedTableIndex,
        delta: u32,
        init_value: TableElement,
    ) -> Result<Option<u32>, Error> {
        let store = unsafe { &mut *self.store() };
        let table = self
            .tables
            .get_mut(table_index)
            .unwrap_or_else(|| panic!("no table for index {}", table_index.index()));

        let result = unsafe { table.grow(delta, init_value, store) };

        // Keep the `VMContext` pointers used by compiled Wasm code up to
        // date.
        let element = self.tables[table_index].vmtable();
        self.set_table(table_index, element);

        result
    }
498
499    fn alloc_layout(offsets: &VMOffsets<HostPtr>) -> Layout {
500        let size = mem::size_of::<Self>()
501            .checked_add(usize::try_from(offsets.size_of_vmctx()).unwrap())
502            .unwrap();
503        let align = mem::align_of::<Self>();
504        Layout::from_size_align(size, align).unwrap()
505    }
506
    /// Construct a new VMCallerCheckedFuncRef for the given function
    /// (imported or defined in this module) and store into the given
    /// location. Used during lazy initialization.
    ///
    /// Note that our current lazy-init scheme actually calls this every
    /// time the anyfunc pointer is fetched; this turns out to be better
    /// than tracking state related to whether it's been initialized
    /// before, because resetting that state on (re)instantiation is
    /// very expensive if there are many anyfuncs.
    fn construct_anyfunc(
        &mut self,
        index: FuncIndex,
        sig: SignatureIndex,
        into: *mut VMCallerCheckedFuncRef,
    ) {
        // Translate the module-local signature index into the engine-wide
        // shared signature index stored in the vmctx's signature-ids array.
        let type_index = unsafe {
            let base: *const VMSharedSignatureIndex =
                *self.vmctx_plus_offset_mut(self.offsets().vmctx_signature_ids_array());
            *base.add(sig.index())
        };

        let (func_ptr, vmctx) = if let Some(def_index) = self.module().defined_func_index(index) {
            (
                self.runtime_info.function(def_index),
                VMOpaqueContext::from_vmcontext(self.vmctx_ptr()),
            )
        } else {
            let import = self.imported_function(index);
            (import.body.as_ptr(), import.vmctx)
        };

        // Safety: we have a `&mut self`, so we have exclusive access
        // to this Instance.
        unsafe {
            *into = VMCallerCheckedFuncRef {
                vmctx,
                type_index,
                func_ptr: NonNull::new(func_ptr).expect("Non-null function pointer"),
            };
        }
    }
548
    /// Get a `&VMCallerCheckedFuncRef` for the given `FuncIndex`.
    ///
    /// Returns `None` if the index is the reserved index value.
    ///
    /// The returned reference is a stable reference that won't be moved and can
    /// be passed into JIT code.
    pub(crate) fn get_caller_checked_anyfunc(
        &mut self,
        index: FuncIndex,
    ) -> Option<*mut VMCallerCheckedFuncRef> {
        if index == FuncIndex::reserved_value() {
            return None;
        }

        // Safety: we have a `&mut self`, so we have exclusive access
        // to this Instance.
        unsafe {
            // For now, we eagerly initialize an anyfunc struct in-place
            // whenever asked for a reference to it. This is mostly
            // fine, because in practice each anyfunc is unlikely to be
            // requested more than a few times: once-ish for funcref
            // tables used for call_indirect (the usual compilation
            // strategy places each function in the table at most once),
            // and once or a few times when fetching exports via API.
            // Note that for any case driven by table accesses, the lazy
            // table init behaves like a higher-level cache layer that
            // protects this initialization from happening multiple
            // times, via that particular table at least.
            //
            // When `ref.func` becomes more commonly used or if we
            // otherwise see a use-case where this becomes a hotpath,
            // we can reconsider by using some state to track
            // "uninitialized" explicitly, for example by zeroing the
            // anyfuncs (perhaps together with other
            // zeroed-at-instantiate-time state) or using a separate
            // is-initialized bitmap.
            //
            // We arrived at this design because zeroing memory is
            // expensive, so it's better for instantiation performance
            // if we don't have to track "is-initialized" state at
            // all!
            let func = &self.module().functions[index];
            let sig = func.signature;
            // The anyfunc storage lives in the vmctx at a precomputed offset;
            // (re)construct it in place and return the stable pointer.
            let anyfunc: *mut VMCallerCheckedFuncRef = self
                .vmctx_plus_offset_mut::<VMCallerCheckedFuncRef>(
                    self.offsets().vmctx_anyfunc(func.anyfunc),
                );
            self.construct_anyfunc(index, sig, anyfunc);

            Some(anyfunc)
        }
    }
601
602    /// The `table.init` operation: initializes a portion of a table with a
603    /// passive element.
604    ///
605    /// # Errors
606    ///
607    /// Returns a `Trap` error when the range within the table is out of bounds
608    /// or the range within the passive element is out of bounds.
609    pub(crate) fn table_init(
610        &mut self,
611        table_index: TableIndex,
612        elem_index: ElemIndex,
613        dst: u32,
614        src: u32,
615        len: u32,
616    ) -> Result<(), Trap> {
617        // TODO: this `clone()` shouldn't be necessary but is used for now to
618        // inform `rustc` that the lifetime of the elements here are
619        // disconnected from the lifetime of `self`.
620        let module = self.module().clone();
621
622        let elements = match module.passive_elements_map.get(&elem_index) {
623            Some(index) if !self.dropped_elements.contains(elem_index) => {
624                module.passive_elements[*index].as_ref()
625            }
626            _ => &[],
627        };
628        self.table_init_segment(table_index, elements, dst, src, len)
629    }
630
    /// Initialize `len` table slots starting at `dst` from `elements[src..]`.
    ///
    /// Returns `Trap::TableOutOfBounds` if the source range exceeds the
    /// element segment (table bounds are checked by the table itself).
    pub(crate) fn table_init_segment(
        &mut self,
        table_index: TableIndex,
        elements: &[FuncIndex],
        dst: u32,
        src: u32,
        len: u32,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-table-init

        let table = unsafe { &mut *self.get_table(table_index) };

        // Slice out `elements[src..src + len]`, trapping (not panicking) if
        // either bound is out of range.
        let elements = match elements
            .get(usize::try_from(src).unwrap()..)
            .and_then(|s| s.get(..usize::try_from(len).unwrap()))
        {
            Some(elements) => elements,
            None => return Err(Trap::TableOutOfBounds),
        };

        match table.element_type() {
            TableElementType::Func => {
                table.init_funcs(
                    dst,
                    elements.iter().map(|idx| {
                        self.get_caller_checked_anyfunc(*idx)
                            .unwrap_or(std::ptr::null_mut())
                    }),
                )?;
            }

            TableElementType::Extern => {
                // Externref tables can only be seeded with null references.
                debug_assert!(elements.iter().all(|e| *e == FuncIndex::reserved_value()));
                table.fill(dst, TableElement::ExternRef(None), len)?;
            }
        }
        Ok(())
    }
669
    /// Drop an element segment (the `elem.drop` instruction).
    pub(crate) fn elem_drop(&mut self, elem_index: ElemIndex) {
        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-elem-drop

        self.dropped_elements.insert(elem_index);

        // Note that we don't check that we actually removed a segment because
        // dropping a non-passive segment is a no-op (not a trap).
    }
679
    /// Get a raw pointer to a locally-defined memory's runtime state.
    pub(crate) fn get_defined_memory(&mut self, index: DefinedMemoryIndex) -> *mut Memory {
        ptr::addr_of_mut!(self.memories[index])
    }
684
    /// Do a `memory.copy`
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error when the source or destination ranges are out of
    /// bounds.
    pub(crate) fn memory_copy(
        &mut self,
        dst_index: MemoryIndex,
        dst: u64,
        src_index: MemoryIndex,
        src: u64,
        len: u64,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/reference-types/core/exec/instructions.html#exec-memory-copy

        let src_mem = self.get_memory(src_index);
        let dst_mem = self.get_memory(dst_index);

        let src = self.validate_inbounds(src_mem.current_length(), src, len)?;
        let dst = self.validate_inbounds(dst_mem.current_length(), dst, len)?;

        // Bounds and casts are checked above, by this point we know that
        // everything is safe.
        unsafe {
            let dst = dst_mem.base.add(dst);
            let src = src_mem.base.add(src);
            // `ptr::copy` (memmove) is used because src and dst may be the
            // same memory and thus may overlap.
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::copy(src, dst, len as usize);
        }

        Ok(())
    }
719
720    fn validate_inbounds(&self, max: usize, ptr: u64, len: u64) -> Result<usize, Trap> {
721        let oob = || Trap::MemoryOutOfBounds;
722        let end = ptr
723            .checked_add(len)
724            .and_then(|i| usize::try_from(i).ok())
725            .ok_or_else(oob)?;
726        if end > max {
727            Err(oob())
728        } else {
729            Ok(ptr as usize)
730        }
731    }
732
    /// Perform the `memory.fill` operation on a locally defined memory.
    ///
    /// # Errors
    ///
    /// Returns a `Trap` error if the memory range is out of bounds.
    pub(crate) fn memory_fill(
        &mut self,
        memory_index: MemoryIndex,
        dst: u64,
        val: u8,
        len: u64,
    ) -> Result<(), Trap> {
        let memory = self.get_memory(memory_index);
        let dst = self.validate_inbounds(memory.current_length(), dst, len)?;

        // Bounds and casts are checked above, by this point we know that
        // everything is safe.
        unsafe {
            let dst = memory.base.add(dst);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::write_bytes(dst, val, len as usize);
        }

        Ok(())
    }
759
760    /// Performs the `memory.init` operation.
761    ///
762    /// # Errors
763    ///
764    /// Returns a `Trap` error if the destination range is out of this module's
765    /// memory's bounds or if the source range is outside the data segment's
766    /// bounds.
767    pub(crate) fn memory_init(
768        &mut self,
769        memory_index: MemoryIndex,
770        data_index: DataIndex,
771        dst: u64,
772        src: u32,
773        len: u32,
774    ) -> Result<(), Trap> {
775        let range = match self.module().passive_data_map.get(&data_index).cloned() {
776            Some(range) if !self.dropped_data.contains(data_index) => range,
777            _ => 0..0,
778        };
779        self.memory_init_segment(memory_index, range, dst, src, len)
780    }
781
    /// Return the slice of this module's wasm data described by `range`.
    pub(crate) fn wasm_data(&self, range: Range<u32>) -> &[u8] {
        &self.runtime_info.wasm_data()[range.start as usize..range.end as usize]
    }
785
    /// Copy `len` bytes of the data segment at `range` (starting at `src`)
    /// into memory `memory_index` at `dst`, trapping on any out-of-bounds
    /// range on either side.
    pub(crate) fn memory_init_segment(
        &mut self,
        memory_index: MemoryIndex,
        range: Range<u32>,
        dst: u64,
        src: u32,
        len: u32,
    ) -> Result<(), Trap> {
        // https://webassembly.github.io/bulk-memory-operations/core/exec/instructions.html#exec-memory-init

        let memory = self.get_memory(memory_index);
        let data = self.wasm_data(range);
        let dst = self.validate_inbounds(memory.current_length(), dst, len.into())?;
        let src = self.validate_inbounds(data.len(), src.into(), len.into())?;
        let len = len as usize;

        // Source (module data) and destination (linear memory) never overlap,
        // so the non-overlapping copy is valid here.
        unsafe {
            let src_start = data.as_ptr().add(src);
            let dst_start = memory.base.add(dst);
            // FIXME audit whether this is safe in the presence of shared memory
            // (https://github.com/bytecodealliance/wasmtime/issues/4203).
            ptr::copy_nonoverlapping(src_start, dst_start, len);
        }

        Ok(())
    }
812
813    /// Drop the given data segment, truncating its length to zero.
814    pub(crate) fn data_drop(&mut self, data_index: DataIndex) {
815        self.dropped_data.insert(data_index);
816
817        // Note that we don't check that we actually removed a segment because
818        // dropping a non-passive segment is a no-op (not a trap).
819    }
820
    /// Get a table by index regardless of whether it is locally-defined
    /// or an imported, foreign table. Ensure that the given range of
    /// elements in the table is lazily initialized.  We define this
    /// operation all-in-one for safety, to ensure the lazy-init
    /// happens.
    ///
    /// Takes an `Iterator` for the index-range to lazy-initialize,
    /// for flexibility. This can be a range, single item, or empty
    /// sequence, for example. The iterator should return indices in
    /// increasing order, so that the break-at-out-of-bounds behavior
    /// works correctly.
    ///
    /// Returns a raw pointer to the (possibly foreign) table; the pointer is
    /// into the instance that actually defines the table.
    pub(crate) fn get_table_with_lazy_init(
        &mut self,
        table_index: TableIndex,
        range: impl Iterator<Item = u32>,
    ) -> *mut Table {
        let (idx, instance) = self.get_defined_table_index_and_instance(table_index);
        let elt_ty = instance.tables[idx].element_type();

        // Only function tables need lazy initialization here; other element
        // types take the fall-through path straight to the pointer below.
        if elt_ty == TableElementType::Func {
            for i in range {
                let value = match instance.tables[idx].get(i) {
                    Some(value) => value,
                    None => {
                        // Out-of-bounds; caller will handle by likely
                        // throwing a trap. No work to do to lazy-init
                        // beyond the end.
                        break;
                    }
                };
                if value.is_uninit() {
                    let table_init = match &instance.module().table_initialization {
                        // We unfortunately can't borrow `tables`
                        // outside the loop because we need to call
                        // `get_caller_checked_anyfunc` (a `&mut`
                        // method) below; so unwrap it dynamically
                        // here.
                        TableInitialization::FuncTable { tables, .. } => tables,
                        _ => break,
                    }
                    .get(table_index);

                    // The TableInitialization::FuncTable elements table may
                    // be smaller than the current size of the table: it
                    // always matches the initial table size, if present. We
                    // want to iterate up through the end of the accessed
                    // index range so that we set an "initialized null" even
                    // if there is no initializer. We do a checked `get()` on
                    // the initializer table below and unwrap to a null if
                    // we're past its end.
                    let func_index =
                        table_init.and_then(|indices| indices.get(i as usize).cloned());
                    let anyfunc = func_index
                        .and_then(|func_index| instance.get_caller_checked_anyfunc(func_index))
                        .unwrap_or(std::ptr::null_mut());

                    let value = TableElement::FuncRef(anyfunc);

                    instance.tables[idx]
                        .set(i, value)
                        .expect("Table type should match and index should be in-bounds");
                }
            }
        }

        ptr::addr_of_mut!(instance.tables[idx])
    }
888
889    /// Get a table by index regardless of whether it is locally-defined or an
890    /// imported, foreign table.
891    pub(crate) fn get_table(&mut self, table_index: TableIndex) -> *mut Table {
892        let (idx, instance) = self.get_defined_table_index_and_instance(table_index);
893        ptr::addr_of_mut!(instance.tables[idx])
894    }
895
896    /// Get a locally-defined table.
897    pub(crate) fn get_defined_table(&mut self, index: DefinedTableIndex) -> *mut Table {
898        ptr::addr_of_mut!(self.tables[index])
899    }
900
901    pub(crate) fn get_defined_table_index_and_instance(
902        &mut self,
903        index: TableIndex,
904    ) -> (DefinedTableIndex, &mut Instance) {
905        if let Some(defined_table_index) = self.module().defined_table_index(index) {
906            (defined_table_index, self)
907        } else {
908            let import = self.imported_table(index);
909            unsafe {
910                let foreign_instance = (*import.vmctx).instance_mut();
911                let foreign_table_def = &*import.from;
912                let foreign_table_index = foreign_instance.table_index(foreign_table_def);
913                (foreign_table_index, foreign_instance)
914            }
915        }
916    }
917
    /// Initialize the VMContext data associated with this Instance.
    ///
    /// The `VMContext` memory is assumed to be uninitialized; any field
    /// that we need in a certain state will be explicitly written by this
    /// function.
    unsafe fn initialize_vmctx(
        &mut self,
        module: &Module,
        offsets: &VMOffsets<HostPtr>,
        store: StorePtr,
        imports: Imports,
    ) {
        // All offsets below are derived from `module`, so it must be the very
        // same module this instance was created from.
        assert!(std::ptr::eq(module, self.module().as_ref()));

        // Stamp the magic value first so the vmctx can be recognized as one
        // belonging to an `Instance`.
        *self.vmctx_plus_offset_mut(offsets.vmctx_magic()) = VMCONTEXT_MAGIC;
        self.set_callee(None);
        self.set_store(store.as_raw());

        // Initialize shared signatures
        let signatures = self.runtime_info.signature_ids();
        *self.vmctx_plus_offset_mut(offsets.vmctx_signature_ids_array()) = signatures.as_ptr();

        // Initialize the built-in functions
        *self.vmctx_plus_offset_mut(offsets.vmctx_builtin_functions()) =
            &VMBuiltinFunctionsArray::INIT;

        // Initialize the imports: each import table is a plain memcpy of the
        // caller-provided slices into the vmctx's inline arrays.
        debug_assert_eq!(imports.functions.len(), module.num_imported_funcs);
        ptr::copy_nonoverlapping(
            imports.functions.as_ptr(),
            self.vmctx_plus_offset_mut(offsets.vmctx_imported_functions_begin()),
            imports.functions.len(),
        );
        debug_assert_eq!(imports.tables.len(), module.num_imported_tables);
        ptr::copy_nonoverlapping(
            imports.tables.as_ptr(),
            self.vmctx_plus_offset_mut(offsets.vmctx_imported_tables_begin()),
            imports.tables.len(),
        );
        debug_assert_eq!(imports.memories.len(), module.num_imported_memories);
        ptr::copy_nonoverlapping(
            imports.memories.as_ptr(),
            self.vmctx_plus_offset_mut(offsets.vmctx_imported_memories_begin()),
            imports.memories.len(),
        );
        debug_assert_eq!(imports.globals.len(), module.num_imported_globals);
        ptr::copy_nonoverlapping(
            imports.globals.as_ptr(),
            self.vmctx_plus_offset_mut(offsets.vmctx_imported_globals_begin()),
            imports.globals.len(),
        );

        // N.B.: there is no need to initialize the anyfuncs array because
        // we eagerly construct each element in it whenever asked for a
        // reference to that element. In other words, there is no state
        // needed to track the lazy-init, so we don't need to initialize
        // any state now.

        // Initialize the defined tables
        let mut ptr = self.vmctx_plus_offset_mut(offsets.vmctx_tables_begin());
        for i in 0..module.table_plans.len() - module.num_imported_tables {
            ptr::write(ptr, self.tables[DefinedTableIndex::new(i)].vmtable());
            ptr = ptr.add(1);
        }

        // Initialize the defined memories. This fills in both the
        // `defined_memories` table and the `owned_memories` table at the same
        // time. Entries in `defined_memories` hold a pointer to a definition
        // (all memories) whereas the `owned_memories` hold the actual
        // definitions of memories owned (not shared) in the module.
        let mut ptr = self.vmctx_plus_offset_mut(offsets.vmctx_memories_begin());
        let mut owned_ptr = self.vmctx_plus_offset_mut(offsets.vmctx_owned_memories_begin());
        for i in 0..module.memory_plans.len() - module.num_imported_memories {
            let defined_memory_index = DefinedMemoryIndex::new(i);
            let memory_index = module.memory_index(defined_memory_index);
            if module.memory_plans[memory_index].memory.shared {
                // Shared memories carry their definition externally; only a
                // pointer to it is stored here, no `owned_memories` slot.
                let def_ptr = self.memories[defined_memory_index]
                    .as_shared_memory()
                    .unwrap()
                    .vmmemory_ptr();
                ptr::write(ptr, def_ptr.cast_mut());
            } else {
                // Owned memory: write the definition into `owned_memories`
                // and point the `defined_memories` entry at that slot.
                ptr::write(owned_ptr, self.memories[defined_memory_index].vmmemory());
                ptr::write(ptr, owned_ptr);
                owned_ptr = owned_ptr.add(1);
            }
            ptr = ptr.add(1);
        }

        // Initialize the defined globals
        self.initialize_vmctx_globals(module);
    }
1010
    /// Write the initial value of every locally-defined global into the
    /// vmctx.
    ///
    /// Imported globals are skipped: their definitions live in (and were
    /// initialized by) the exporting instance.
    unsafe fn initialize_vmctx_globals(&mut self, module: &Module) {
        let num_imports = module.num_imported_globals;
        for (index, global) in module.globals.iter().skip(num_imports) {
            let def_index = module.defined_global_index(index).unwrap();
            let to = self.global_ptr(def_index);

            // Initialize the global before writing to it
            ptr::write(to, VMGlobalDefinition::new());

            match global.initializer {
                GlobalInit::I32Const(x) => *(*to).as_i32_mut() = x,
                GlobalInit::I64Const(x) => *(*to).as_i64_mut() = x,
                GlobalInit::F32Const(x) => *(*to).as_f32_bits_mut() = x,
                GlobalInit::F64Const(x) => *(*to).as_f64_bits_mut() = x,
                GlobalInit::V128Const(x) => *(*to).as_u128_mut() = x,
                GlobalInit::GetGlobal(x) => {
                    // The source may itself be locally defined or imported.
                    let from = if let Some(def_x) = module.defined_global_index(x) {
                        self.global(def_x)
                    } else {
                        &*self.imported_global(x).from
                    };
                    // Globals of type `externref` need to manage the reference
                    // count as values move between globals, everything else is just
                    // copy-able bits.
                    match global.wasm_ty {
                        WasmType::ExternRef => {
                            *(*to).as_externref_mut() = from.as_externref().clone()
                        }
                        _ => ptr::copy_nonoverlapping(from, to, 1),
                    }
                }
                GlobalInit::RefFunc(f) => {
                    *(*to).as_anyfunc_mut() = self.get_caller_checked_anyfunc(f).unwrap()
                        as *const VMCallerCheckedFuncRef;
                }
                GlobalInit::RefNullConst => match global.wasm_ty {
                    // `VMGlobalDefinition::new()` already zeroed out the bits
                    WasmType::FuncRef => {}
                    WasmType::ExternRef => {}
                    ty => panic!("unsupported reference type for global: {:?}", ty),
                },
                GlobalInit::Import => panic!("locally-defined global initialized as import"),
            }
        }
    }
1056
1057    fn wasm_fault(&self, addr: usize) -> Option<WasmFault> {
1058        let mut fault = None;
1059        for (_, memory) in self.memories.iter() {
1060            let accessible = memory.wasm_accessible();
1061            if accessible.start <= addr && addr < accessible.end {
1062                // All linear memories should be disjoint so assert that no
1063                // prior fault has been found.
1064                assert!(fault.is_none());
1065                fault = Some(WasmFault {
1066                    memory_size: memory.byte_size(),
1067                    wasm_address: u64::try_from(addr - accessible.start).unwrap(),
1068                });
1069            }
1070        }
1071        fault
1072    }
1073}
1074
1075impl Drop for Instance {
1076    fn drop(&mut self) {
1077        // Drop any defined globals
1078        let module = self.module().clone();
1079        for (idx, global) in module.globals.iter() {
1080            let idx = match module.defined_global_index(idx) {
1081                Some(idx) => idx,
1082                None => continue,
1083            };
1084            match global.wasm_ty {
1085                // For now only externref globals need to get destroyed
1086                WasmType::ExternRef => {}
1087                _ => continue,
1088            }
1089            unsafe {
1090                drop((*self.global_ptr(idx)).as_externref_mut().take());
1091            }
1092        }
1093    }
1094}
1095
/// A handle holding an `Instance` of a WebAssembly module.
///
/// Internally this is a bare raw pointer, so the derived `Hash`/`PartialEq`/
/// `Eq` compare by instance identity (pointer value), not by contents.
/// Lifetime management of the pointed-to `Instance` is externalized; see
/// `clone` and the allocator for the validity rules.
#[derive(Hash, PartialEq, Eq)]
pub struct InstanceHandle {
    instance: *mut Instance,
}
1101
// These are only valid if the `Instance` type is send/sync, hence the
// assertion below.
unsafe impl Send for InstanceHandle {}
unsafe impl Sync for InstanceHandle {}

// Compile-time check backing the unsafe impls above: if `Instance` ever
// stops being `Send + Sync`, this function fails to compile.
fn _assert_send_sync() {
    fn _assert<T: Send + Sync>() {}
    _assert::<Instance>();
}
1111
impl InstanceHandle {
    /// Create a new `InstanceHandle` pointing at the instance
    /// pointed to by the given `VMContext` pointer.
    ///
    /// # Safety
    /// This is unsafe because it doesn't work on just any `VMContext`, it must
    /// be a `VMContext` allocated as part of an `Instance`.
    #[inline]
    pub unsafe fn from_vmctx(vmctx: *mut VMContext) -> Self {
        let instance = (&mut *vmctx).instance();
        Self {
            instance: instance as *const Instance as *mut Instance,
        }
    }

    /// Return a reference to the vmctx used by compiled wasm code.
    pub fn vmctx(&self) -> &VMContext {
        self.instance().vmctx()
    }

    /// Return a raw pointer to the vmctx used by compiled wasm code.
    #[inline]
    pub fn vmctx_ptr(&self) -> *mut VMContext {
        self.instance().vmctx_ptr()
    }

    /// Return a reference to a module.
    pub fn module(&self) -> &Arc<Module> {
        self.instance().module()
    }

    /// Lookup a function by index.
    pub fn get_exported_func(&mut self, export: FuncIndex) -> ExportFunction {
        self.instance_mut().get_exported_func(export)
    }

    /// Lookup a global by index.
    pub fn get_exported_global(&mut self, export: GlobalIndex) -> ExportGlobal {
        self.instance_mut().get_exported_global(export)
    }

    /// Lookup a memory by index.
    pub fn get_exported_memory(&mut self, export: MemoryIndex) -> ExportMemory {
        self.instance_mut().get_exported_memory(export)
    }

    /// Lookup a table by index.
    pub fn get_exported_table(&mut self, export: TableIndex) -> ExportTable {
        self.instance_mut().get_exported_table(export)
    }

    /// Lookup an item with the given index.
    pub fn get_export_by_index(&mut self, export: EntityIndex) -> Export {
        match export {
            EntityIndex::Function(i) => Export::Function(self.get_exported_func(i)),
            EntityIndex::Global(i) => Export::Global(self.get_exported_global(i)),
            EntityIndex::Table(i) => Export::Table(self.get_exported_table(i)),
            EntityIndex::Memory(i) => Export::Memory(self.get_exported_memory(i)),
        }
    }

    /// Return an iterator over the exports of this instance.
    ///
    /// Specifically, it provides access to the key-value pairs, where the keys
    /// are export names, and the values are export declarations which can be
    /// resolved with `lookup_by_declaration`.
    pub fn exports(&self) -> indexmap::map::Iter<String, EntityIndex> {
        self.instance().exports()
    }

    /// Return a reference to the custom state attached to this instance.
    pub fn host_state(&self) -> &dyn Any {
        self.instance().host_state()
    }

    /// Get a memory defined locally within this module.
    pub fn get_defined_memory(&mut self, index: DefinedMemoryIndex) -> *mut Memory {
        self.instance_mut().get_defined_memory(index)
    }

    /// Return the table index for the given `VMTableDefinition` in this instance.
    pub unsafe fn table_index(&mut self, table: &VMTableDefinition) -> DefinedTableIndex {
        self.instance_mut().table_index(table)
    }

    /// Get a table defined locally within this module.
    pub fn get_defined_table(&mut self, index: DefinedTableIndex) -> *mut Table {
        self.instance_mut().get_defined_table(index)
    }

    /// Get a table defined locally within this module, lazily
    /// initializing the given range first.
    pub fn get_defined_table_with_lazy_init(
        &mut self,
        index: DefinedTableIndex,
        range: impl Iterator<Item = u32>,
    ) -> *mut Table {
        let index = self.instance().module().table_index(index);
        self.instance_mut().get_table_with_lazy_init(index, range)
    }

    /// Return a reference to the contained `Instance`.
    #[inline]
    pub(crate) fn instance(&self) -> &Instance {
        unsafe { &*(self.instance as *const Instance) }
    }

    /// Return a mutable reference to the contained `Instance`.
    pub(crate) fn instance_mut(&mut self) -> &mut Instance {
        unsafe { &mut *self.instance }
    }

    /// Returns the `Store` pointer that was stored on creation
    #[inline]
    pub fn store(&self) -> *mut dyn Store {
        self.instance().store()
    }

    /// Configure the `*mut dyn Store` internal pointer after-the-fact.
    ///
    /// This is provided for the original `Store` itself to configure the first
    /// self-pointer after the original `Box` has been initialized.
    pub unsafe fn set_store(&mut self, store: *mut dyn Store) {
        self.instance_mut().set_store(Some(store));
    }

    /// Returns a clone of this instance.
    ///
    /// This is unsafe because the returned handle here is just a cheap clone
    /// of the internals, there's no lifetime tracking around its validity.
    /// You'll need to ensure that the returned handles all go out of scope at
    /// the same time.
    #[inline]
    pub unsafe fn clone(&self) -> InstanceHandle {
        InstanceHandle {
            instance: self.instance,
        }
    }

    /// Performs post-initialization of an instance after its handle has been
    /// created and registered with a store.
    ///
    /// Failure of this function means that the instance still must persist
    /// within the store since failure may indicate partial failure, or some
    /// state could be referenced by other instances.
    pub fn initialize(&mut self, module: &Module, is_bulk_memory: bool) -> Result<()> {
        allocator::initialize_instance(self.instance_mut(), module, is_bulk_memory)
    }

    /// Attempts to convert from the host `addr` specified to a WebAssembly
    /// based address recorded in `WasmFault`.
    ///
    /// This method will check all linear memories that this instance contains
    /// to see if any of them contain `addr`. If one does then `Some` is
    /// returned with metadata about the wasm fault. Otherwise `None` is
    /// returned and `addr` doesn't belong to this instance.
    pub fn wasm_fault(&self, addr: usize) -> Option<WasmFault> {
        self.instance().wasm_fault(addr)
    }
}