wasmtime/runtime/vm/vmcontext.rs

1//! This file declares `VMContext` and several related structs which contain
2//! fields that compiled wasm code accesses directly.
3
4mod vm_host_func_context;
5
6pub use self::vm_host_func_context::VMArrayCallHostFuncContext;
7use crate::prelude::*;
8use crate::runtime::vm::{GcStore, InterpreterRef, VMGcRef, VmPtr, VmSafe, f32x4, f64x2, i8x16};
9use crate::store::StoreOpaque;
10use crate::vm::stack_switching::VMStackChain;
11use core::cell::UnsafeCell;
12use core::ffi::c_void;
13use core::fmt;
14use core::marker;
15use core::mem::{self, MaybeUninit};
16use core::ops::Range;
17use core::ptr::{self, NonNull};
18use core::sync::atomic::{AtomicUsize, Ordering};
19use wasmtime_environ::{
20    BuiltinFunctionIndex, DefinedGlobalIndex, DefinedMemoryIndex, DefinedTableIndex,
21    DefinedTagIndex, Unsigned, VMCONTEXT_MAGIC, VMSharedTypeIndex, WasmHeapTopType, WasmValType,
22};
23
/// A function pointer that exposes the array calling convention.
///
/// Regardless of the underlying Wasm function type, all functions using the
/// array calling convention have the same Rust signature.
///
/// Arguments:
///
/// * Callee `vmctx` for the function itself.
///
/// * Caller's `vmctx` (so that host functions can access the linear memory of
///   their Wasm callers).
///
/// * A pointer to a buffer of `ValRaw`s where both arguments are passed into
///   this function, and where results are returned from this function.
///
/// * The capacity of the `ValRaw` buffer. Must always be at least
///   `max(len(wasm_params), len(wasm_results))`.
///
/// Return value:
///
/// * `true` if this call succeeded.
/// * `false` if this call failed and a trap was recorded in TLS.
pub type VMArrayCallNative = unsafe extern "C" fn(
    NonNull<VMOpaqueContext>, // callee vmctx
    NonNull<VMContext>,       // caller vmctx
    NonNull<ValRaw>,          // shared args-and-results buffer
    usize,                    // capacity of that buffer, in `ValRaw` elements
) -> bool;
52
/// An opaque function pointer which might be `VMArrayCallNative` or it might be
/// pulley bytecode. Requires external knowledge to determine what kind of
/// function pointer this is.
///
/// Byte-sized newtype over `VMFunctionBody`; values of this type are only
/// handled behind pointers (e.g. `VmPtr<VMArrayCallFunction>` below).
#[repr(transparent)]
pub struct VMArrayCallFunction(VMFunctionBody);

/// A function pointer that exposes the Wasm calling convention.
///
/// In practice, different Wasm function types end up mapping to different Rust
/// function types, so this isn't simply a type alias the way that
/// `VMArrayCallFunction` is. However, the exact details of the calling
/// convention are left to the Wasm compiler (e.g. Cranelift or Winch). Runtime
/// code never does anything with these function pointers except shuffle them
/// around and pass them back to Wasm.
#[repr(transparent)]
pub struct VMWasmCallFunction(VMFunctionBody);
69
/// An imported function.
///
/// This struct's layout is part of the VM ABI: compiled code accesses these
/// fields directly, and the field offsets are checked against
/// `wasmtime_environ::VMOffsets` in the tests below. Do not reorder fields.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// Function pointer to use when calling this imported function from Wasm.
    pub wasm_call: VmPtr<VMWasmCallFunction>,

    /// Function pointer to use when calling this imported function with the
    /// "array" calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// The VM state associated with this function.
    ///
    /// For Wasm functions defined by core wasm instances this will be `*mut
    /// VMContext`, but for lifted/lowered component model functions this will
    /// be a `VMComponentContext`, and for a host function it will be a
    /// `VMHostFuncContext`, etc.
    pub vmctx: VmPtr<VMOpaqueContext>,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFunctionImport {}
92
#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The native layout of `VMFunctionImport` must agree with the offsets
    /// that compiled code is generated against.
    #[test]
    fn check_vmfunction_import_offsets() {
        let m = Module::new();
        let offs = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            size_of::<VMFunctionImport>(),
            usize::from(offs.size_of_vmfunction_import())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, wasm_call),
            usize::from(offs.vmfunction_import_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, array_call),
            usize::from(offs.vmfunction_import_array_call())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, vmctx),
            usize::from(offs.vmfunction_import_vmctx())
        );
    }
}
122
/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
///
/// Its 1-byte size is asserted in the tests below.
#[repr(C)]
pub struct VMFunctionBody(u8);

// SAFETY: this structure is never read and is safe to pass to jit code.
unsafe impl VmSafe for VMFunctionBody {}
132
#[cfg(test)]
mod test_vmfunction_body {
    use super::VMFunctionBody;
    use core::mem::size_of;

    /// `VMFunctionBody` is a placeholder that function-body pointers point
    /// at; it must stay exactly one byte.
    #[test]
    fn check_vmfunction_body_offsets() {
        assert_eq!(size_of::<VMFunctionBody>(), 1);
    }
}
143
/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
///
/// Layout is part of the VM ABI and checked against
/// `wasmtime_environ::VMOffsets` in the tests below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    pub from: VmPtr<VMTableDefinition>,

    /// A pointer to the `VMContext` that owns the table description.
    pub vmctx: VmPtr<VMContext>,

    /// The table index, within `vmctx`, this definition resides at.
    pub index: DefinedTableIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableImport {}
161
#[cfg(test)]
mod test_vmtable {
    use super::VMTableImport;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::component::{Component, VMComponentOffsets};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The host layout of `VMTableImport` must match what compiled code was
    /// told via `VMOffsets`.
    #[test]
    fn check_vmtable_offsets() {
        let m = Module::new();
        let offs = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            size_of::<VMTableImport>(),
            usize::from(offs.size_of_vmtable_import())
        );
        assert_eq!(
            offset_of!(VMTableImport, from),
            usize::from(offs.vmtable_import_from())
        );
    }

    /// `VMTableImport` also records tables used by components, so the size
    /// computed by `VMComponentOffsets` must agree with the one from
    /// `VMOffsets`.
    #[test]
    fn ensure_sizes_match() {
        let m = Module::new();
        let module_offs = VMOffsets::new(HostPtr, &m);
        let c = Component::default();
        let component_offs = VMComponentOffsets::new(HostPtr, &c);
        assert_eq!(
            module_offs.size_of_vmtable_import(),
            component_offs.size_of_vmtable_import()
        );
    }
}
199
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
///
/// Layout is part of the VM ABI and checked against
/// `wasmtime_environ::VMOffsets` in the tests below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub from: VmPtr<VMMemoryDefinition>,

    /// A pointer to the `VMContext` that owns the memory description.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the memory in the containing `vmctx`.
    pub index: DefinedMemoryIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMMemoryImport {}
217
#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The host layout of `VMMemoryImport` must match the offsets that
    /// compiled code is generated against.
    #[test]
    fn check_vmmemory_import_offsets() {
        let m = Module::new();
        let offs = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            size_of::<VMMemoryImport>(),
            usize::from(offs.size_of_vmmemory_import())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, from),
            usize::from(offs.vmmemory_import_from())
        );
    }
}
239
/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
///
/// Note that unlike with functions, tables, and memories, the context pointer
/// here is optional rather than mandatory: globals are never resized, so a
/// `vmctx` is not needed to reach the value itself. The `vmctx` field below
/// only records the owner (when there is one), as dictated by `kind`.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub from: VmPtr<VMGlobalDefinition>,

    /// A pointer to the context that owns the global.
    ///
    /// Exactly what's stored here is dictated by `kind` below. This is `None`
    /// for `VMGlobalKind::Host`, it's a `VMContext` for
    /// `VMGlobalKind::Instance`, and it's `VMComponentContext` for
    /// `VMGlobalKind::ComponentFlags`.
    pub vmctx: Option<VmPtr<VMOpaqueContext>>,

    /// The kind of global, and extra location information in addition to
    /// `vmctx` above.
    pub kind: VMGlobalKind,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalImport {}
267
/// The kinds of globals that Wasmtime has.
///
/// `repr(C, u32)` gives this enum a stable layout with a 32-bit discriminant,
/// followed by the per-variant payload index.
#[derive(Debug, Copy, Clone)]
#[repr(C, u32)]
pub enum VMGlobalKind {
    /// Host globals, stored in a `StoreOpaque`.
    Host(DefinedGlobalIndex),
    /// Instance globals, stored in `VMContext`s
    Instance(DefinedGlobalIndex),
    /// Flags for a component instance, stored in `VMComponentContext`.
    #[cfg(feature = "component-model")]
    ComponentFlags(wasmtime_environ::component::RuntimeComponentInstanceIndex),
}

// SAFETY: the above enum is repr(C) and stores nothing else
unsafe impl VmSafe for VMGlobalKind {}
283
#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The host layout of `VMGlobalImport` must match the offsets that
    /// compiled code is generated against.
    #[test]
    fn check_vmglobal_import_offsets() {
        let m = Module::new();
        let offs = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            size_of::<VMGlobalImport>(),
            usize::from(offs.size_of_vmglobal_import())
        );
        assert_eq!(
            offset_of!(VMGlobalImport, from),
            usize::from(offs.vmglobal_import_from())
        );
    }
}
305
/// The fields compiled code needs to access to utilize a WebAssembly
/// tag imported from another instance.
///
/// Layout is part of the VM ABI and checked against
/// `wasmtime_environ::VMOffsets` in the tests below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTagImport {
    /// A pointer to the imported tag description.
    pub from: VmPtr<VMTagDefinition>,

    /// The instance that owns this tag.
    pub vmctx: VmPtr<VMContext>,

    /// The index of the tag in the containing `vmctx`.
    pub index: DefinedTagIndex,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTagImport {}
323
#[cfg(test)]
mod test_vmtag_import {
    use super::VMTagImport;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The host layout of `VMTagImport` must match the offsets that compiled
    /// code is generated against.
    #[test]
    fn check_vmtag_import_offsets() {
        let m = Module::new();
        let offs = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            size_of::<VMTagImport>(),
            usize::from(offs.size_of_vmtag_import())
        );
        assert_eq!(
            offset_of!(VMTagImport, from),
            usize::from(offs.vmtag_import_from())
        );
    }
}
344
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
///
/// Layout is part of the VM ABI; compiled code reads both fields directly.
#[derive(Debug)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address.
    pub base: VmPtr<u8>,

    /// The current logical size of this linear memory in bytes.
    ///
    /// This is atomic because shared memories must be able to grow their length
    /// atomically. For relaxed access, see
    /// [`VMMemoryDefinition::current_length()`].
    pub current_length: AtomicUsize,
}

// SAFETY: the above definition has `repr(C)` and each field individually
// implements `VmSafe`, which satisfies the requirements of this trait.
unsafe impl VmSafe for VMMemoryDefinition {}
365
impl VMMemoryDefinition {
    /// Return the current length (in bytes) of the [`VMMemoryDefinition`] by
    /// performing a relaxed load; do not use this function for situations in
    /// which a precise length is needed. Owned memories (i.e., non-shared) will
    /// always return a precise result (since no concurrent modification is
    /// possible) but shared memories may see an imprecise value--a
    /// `current_length` potentially smaller than what some other thread
    /// observes. Since Wasm memory only grows, this under-estimation may be
    /// acceptable in certain cases.
    #[inline]
    pub fn current_length(&self) -> usize {
        self.current_length.load(Ordering::Relaxed)
    }

    /// Return a copy of the [`VMMemoryDefinition`] using the relaxed value of
    /// `current_length`; see [`VMMemoryDefinition::current_length()`].
    ///
    /// # Unsafety
    ///
    /// `ptr` must be non-null, properly aligned, and point to a live
    /// `VMMemoryDefinition` for the duration of this call, since it is
    /// dereferenced here.
    #[inline]
    pub unsafe fn load(ptr: *mut Self) -> Self {
        let other = &*ptr;
        VMMemoryDefinition {
            base: other.base,
            current_length: other.current_length().into(),
        }
    }
}
391
#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// The host layout of `VMMemoryDefinition` must match the
    /// pointer-size-derived offsets handed to compiled code.
    #[test]
    fn check_vmmemory_definition_offsets() {
        let m = Module::new();
        let offs = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            size_of::<VMMemoryDefinition>(),
            usize::from(offs.ptr.size_of_vmmemory_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, base),
            usize::from(offs.ptr.vmmemory_definition_base())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, current_length),
            usize::from(offs.ptr.vmmemory_definition_current_length())
        );
        // TODO: additionally assert that the size of the `current_length`
        // field itself equals
        // `offsets.size_of_vmmemory_definition_current_length()`; there is
        // currently no straightforward way to take `size_of` of a single
        // field.
    }
}
423
/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
///
/// Layout is part of the VM ABI and checked against
/// `wasmtime_environ::VMOffsets` in the tests below.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: VmPtr<u8>,

    /// The current number of elements in the table.
    pub current_elements: usize,
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMTableDefinition {}
438
#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use core::mem::{offset_of, size_of};
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The host layout of `VMTableDefinition` must match the offsets that
    /// compiled code is generated against.
    #[test]
    fn check_vmtable_definition_offsets() {
        let m = Module::new();
        let offs = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            size_of::<VMTableDefinition>(),
            usize::from(offs.size_of_vmtable_definition())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, base),
            usize::from(offs.vmtable_definition_base())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, current_elements),
            usize::from(offs.vmtable_definition_current_elements())
        );
    }
}
464
/// The storage for a WebAssembly global defined within the instance.
///
/// The 16 bytes are large enough for any global value (up to and including
/// `v128`), and the 16-byte alignment satisfies every type the typed
/// accessors in the `impl` below reinterpret this buffer as.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    // Raw, untyped value bytes; interpreted via the unsafe accessors below.
    storage: [u8; 16],
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMGlobalDefinition {}
478
#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use core::mem::{align_of, size_of};
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// The global storage must be aligned for every payload type its
    /// accessors reinterpret it as.
    #[test]
    fn check_vmglobal_definition_alignment() {
        let align = align_of::<VMGlobalDefinition>();
        assert!(align >= align_of::<i32>());
        assert!(align >= align_of::<i64>());
        assert!(align >= align_of::<f32>());
        assert!(align >= align_of::<f64>());
        assert!(align >= align_of::<[u8; 16]>());
        assert!(align >= align_of::<[f32; 4]>());
        assert!(align >= align_of::<[f64; 2]>());
    }

    /// The host size of `VMGlobalDefinition` must match the
    /// pointer-size-derived value used by compiled code.
    #[test]
    fn check_vmglobal_definition_offsets() {
        let m = Module::new();
        let offs = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            size_of::<VMGlobalDefinition>(),
            usize::from(offs.ptr.size_of_vmglobal_definition())
        );
    }

    /// The globals area inside `VMContext` must start 16-byte aligned.
    #[test]
    fn check_vmglobal_begins_aligned() {
        let m = Module::new();
        let offs = VMOffsets::new(HostPtr, &m);
        assert_eq!(offs.vmctx_globals_begin() % 16, 0);
    }

    /// A GC reference must fit inside a global's 16-byte storage.
    #[test]
    #[cfg(feature = "gc")]
    fn check_vmglobal_can_contain_gc_ref() {
        assert!(size_of::<crate::runtime::vm::VMGcRef>() <= size_of::<VMGlobalDefinition>());
    }
}
519
impl VMGlobalDefinition {
    /// Construct a `VMGlobalDefinition`.
    ///
    /// The 16 bytes of storage start zero-filled.
    pub fn new() -> Self {
        Self { storage: [0; 16] }
    }

    /// Create a `VMGlobalDefinition` from a `ValRaw`.
    ///
    /// # Unsafety
    ///
    /// This raw value's type must match the given `WasmValType`.
    ///
    /// # Errors
    ///
    /// Reference-typed values propagate any error from accessing the store's
    /// GC heap; all other types always succeed.
    pub unsafe fn from_val_raw(
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
        raw: ValRaw,
    ) -> Result<Self> {
        let mut global = Self::new();
        match wasm_ty {
            WasmValType::I32 => *global.as_i32_mut() = raw.get_i32(),
            WasmValType::I64 => *global.as_i64_mut() = raw.get_i64(),
            WasmValType::F32 => *global.as_f32_bits_mut() = raw.get_f32(),
            WasmValType::F64 => *global.as_f64_bits_mut() = raw.get_f64(),
            WasmValType::V128 => global.set_u128(raw.get_v128()),
            WasmValType::Ref(r) => match r.heap_type.top() {
                WasmHeapTopType::Extern => {
                    let r = VMGcRef::from_raw_u32(raw.get_externref());
                    global.init_gc_ref(store.gc_store_mut()?, r.as_ref())
                }
                WasmHeapTopType::Any => {
                    let r = VMGcRef::from_raw_u32(raw.get_anyref());
                    global.init_gc_ref(store.gc_store_mut()?, r.as_ref())
                }
                WasmHeapTopType::Func => *global.as_func_ref_mut() = raw.get_funcref().cast(),
                WasmHeapTopType::Cont => *global.as_func_ref_mut() = raw.get_funcref().cast(), // TODO(#10248): temporary hack.
            },
        }
        Ok(global)
    }

    /// Get this global's value as a `ValRaw`.
    ///
    /// GC references are cloned via the store's GC heap before their raw
    /// representation is returned.
    ///
    /// # Unsafety
    ///
    /// This global's value's type must match the given `WasmValType`.
    ///
    /// # Errors
    ///
    /// Reference-typed values propagate any error from accessing the store's
    /// GC heap; all other types always succeed.
    pub unsafe fn to_val_raw(
        &self,
        store: &mut StoreOpaque,
        wasm_ty: WasmValType,
    ) -> Result<ValRaw> {
        Ok(match wasm_ty {
            WasmValType::I32 => ValRaw::i32(*self.as_i32()),
            WasmValType::I64 => ValRaw::i64(*self.as_i64()),
            WasmValType::F32 => ValRaw::f32(*self.as_f32_bits()),
            WasmValType::F64 => ValRaw::f64(*self.as_f64_bits()),
            WasmValType::V128 => ValRaw::v128(self.get_u128()),
            WasmValType::Ref(r) => match r.heap_type.top() {
                WasmHeapTopType::Extern => ValRaw::externref(match self.as_gc_ref() {
                    Some(r) => store.gc_store_mut()?.clone_gc_ref(r).as_raw_u32(),
                    None => 0,
                }),
                WasmHeapTopType::Any => ValRaw::anyref({
                    match self.as_gc_ref() {
                        Some(r) => store.gc_store_mut()?.clone_gc_ref(r).as_raw_u32(),
                        None => 0,
                    }
                }),
                WasmHeapTopType::Func => ValRaw::funcref(self.as_func_ref().cast()),
                WasmHeapTopType::Cont => todo!(), // FIXME: #10248 stack switching support.
            },
        })
    }

    // Typed accessors for `storage`.
    //
    // Every `as_*`/`get_*`/`set_*` method below is `unsafe` for the same
    // reason: it reinterprets the raw 16-byte `storage` buffer as the
    // requested type without any tag or check, so the caller must know (e.g.
    // from the global's wasm type) that the global currently holds a value of
    // that type. Alignment of each cast is always satisfied because the
    // struct is `repr(align(16))`.

    /// Return a reference to the value as an i32.
    pub unsafe fn as_i32(&self) -> &i32 {
        &*(self.storage.as_ref().as_ptr().cast::<i32>())
    }

    /// Return a mutable reference to the value as an i32.
    pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<i32>())
    }

    /// Return a reference to the value as a u32.
    pub unsafe fn as_u32(&self) -> &u32 {
        &*(self.storage.as_ref().as_ptr().cast::<u32>())
    }

    /// Return a mutable reference to the value as a u32.
    pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>())
    }

    /// Return a reference to the value as an i64.
    pub unsafe fn as_i64(&self) -> &i64 {
        &*(self.storage.as_ref().as_ptr().cast::<i64>())
    }

    /// Return a mutable reference to the value as an i64.
    pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<i64>())
    }

    /// Return a reference to the value as a u64.
    pub unsafe fn as_u64(&self) -> &u64 {
        &*(self.storage.as_ref().as_ptr().cast::<u64>())
    }

    /// Return a mutable reference to the value as a u64.
    pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>())
    }

    /// Return a reference to the value as an f32.
    pub unsafe fn as_f32(&self) -> &f32 {
        &*(self.storage.as_ref().as_ptr().cast::<f32>())
    }

    /// Return a mutable reference to the value as an f32.
    pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<f32>())
    }

    /// Return a reference to the value as f32 bits.
    pub unsafe fn as_f32_bits(&self) -> &u32 {
        &*(self.storage.as_ref().as_ptr().cast::<u32>())
    }

    /// Return a mutable reference to the value as f32 bits.
    pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>())
    }

    /// Return a reference to the value as an f64.
    pub unsafe fn as_f64(&self) -> &f64 {
        &*(self.storage.as_ref().as_ptr().cast::<f64>())
    }

    /// Return a mutable reference to the value as an f64.
    pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<f64>())
    }

    /// Return a reference to the value as f64 bits.
    pub unsafe fn as_f64_bits(&self) -> &u64 {
        &*(self.storage.as_ref().as_ptr().cast::<u64>())
    }

    /// Return a mutable reference to the value as f64 bits.
    pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>())
    }

    /// Gets the underlying 128-bit vector value.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn get_u128(&self) -> u128 {
        u128::from_le(*(self.storage.as_ref().as_ptr().cast::<u128>()))
    }

    /// Sets the 128-bit vector values.
    //
    // Note that vectors are stored in little-endian format while other types
    // are stored in native-endian format.
    pub unsafe fn set_u128(&mut self, val: u128) {
        *self.storage.as_mut().as_mut_ptr().cast::<u128>() = val.to_le();
    }

    /// Return a reference to the value as u128 bits.
    pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
        &*(self.storage.as_ref().as_ptr().cast::<[u8; 16]>())
    }

    /// Return a mutable reference to the value as u128 bits.
    pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<[u8; 16]>())
    }

    /// Return a reference to the global value as a borrowed GC reference.
    pub unsafe fn as_gc_ref(&self) -> Option<&VMGcRef> {
        let raw_ptr = self.storage.as_ref().as_ptr().cast::<Option<VMGcRef>>();
        let ret = (*raw_ptr).as_ref();
        // Without the `gc` feature only null GC references can ever be stored
        // here, so anything else indicates a bug.
        assert!(cfg!(feature = "gc") || ret.is_none());
        ret
    }

    /// Initialize a global to the given GC reference.
    ///
    /// The destination is treated as uninitialized (it is a fresh
    /// `MaybeUninit` slot), so no previously-stored reference is released.
    pub unsafe fn init_gc_ref(&mut self, gc_store: &mut GcStore, gc_ref: Option<&VMGcRef>) {
        assert!(cfg!(feature = "gc") || gc_ref.is_none());

        let dest = &mut *(self
            .storage
            .as_mut()
            .as_mut_ptr()
            .cast::<MaybeUninit<Option<VMGcRef>>>());

        gc_store.init_gc_ref(dest, gc_ref)
    }

    /// Write a GC reference into this global value.
    ///
    /// Unlike `init_gc_ref`, the destination must already hold a valid
    /// (possibly null) GC reference, which `gc_store.write_gc_ref` overwrites.
    pub unsafe fn write_gc_ref(&mut self, gc_store: &mut GcStore, gc_ref: Option<&VMGcRef>) {
        assert!(cfg!(feature = "gc") || gc_ref.is_none());

        let dest = &mut *(self.storage.as_mut().as_mut_ptr().cast::<Option<VMGcRef>>());
        assert!(cfg!(feature = "gc") || dest.is_none());

        gc_store.write_gc_ref(dest, gc_ref)
    }

    /// Return the value as a raw `*mut VMFuncRef` pointer (copied out of
    /// storage, not a reference into it).
    pub unsafe fn as_func_ref(&self) -> *mut VMFuncRef {
        *(self.storage.as_ref().as_ptr().cast::<*mut VMFuncRef>())
    }

    /// Return a mutable reference to the stored `*mut VMFuncRef` pointer.
    pub unsafe fn as_func_ref_mut(&mut self) -> &mut *mut VMFuncRef {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<*mut VMFuncRef>())
    }
}
739
#[cfg(test)]
mod test_vmshared_type_index {
    use super::VMSharedTypeIndex;
    use core::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, VMOffsets};

    /// The in-memory size of a shared type index must agree with the size
    /// that `VMOffsets` reports for it.
    #[test]
    fn check_vmshared_type_index() {
        let m = Module::new();
        let offs = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            size_of::<VMSharedTypeIndex>(),
            usize::from(offs.size_of_vmshared_type_index())
        );
    }
}
756
/// A WebAssembly tag defined within the instance.
#[derive(Debug)]
#[repr(C)]
pub struct VMTagDefinition {
    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,
}

impl VMTagDefinition {
    /// Creates a new tag definition for the given function signature type id.
    pub fn new(type_index: VMSharedTypeIndex) -> Self {
        Self { type_index }
    }
}

// SAFETY: the above structure is repr(C) and only contains VmSafe
// fields.
unsafe impl VmSafe for VMTagDefinition {}
775
#[cfg(test)]
mod test_vmtag_definition {
    use super::VMTagDefinition;
    use core::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// The host size of `VMTagDefinition` must match the
    /// pointer-size-derived value used by compiled code.
    #[test]
    fn check_vmtag_definition_offsets() {
        let m = Module::new();
        let offs = VMOffsets::new(HostPtr, &m);
        assert_eq!(
            size_of::<VMTagDefinition>(),
            usize::from(offs.ptr.size_of_vmtag_definition())
        );
    }

    /// The tags area inside `VMContext` must start 16-byte aligned.
    #[test]
    fn check_vmtag_begins_aligned() {
        let m = Module::new();
        let offs = VMOffsets::new(HostPtr, &m);
        assert_eq!(offs.vmctx_tags_begin() % 16, 0);
    }
}
799
/// The VM caller-checked "funcref" record, for caller-side signature checking.
///
/// It consists of function pointer(s), a type id to be checked by the
/// caller, and the vmctx closure associated with this function.
///
/// Layout is part of the VM ABI; compiled code reads these fields directly.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMFuncRef {
    /// Function pointer for this funcref if being called via the "array"
    /// calling convention that `Func::new` et al use.
    pub array_call: VmPtr<VMArrayCallFunction>,

    /// Function pointer for this funcref if being called via the calling
    /// convention we use when compiling Wasm.
    ///
    /// Most functions come with a function pointer that we can use when they
    /// are called from Wasm. The notable exception is when we `Func::wrap` a
    /// host function, and we don't have a Wasm compiler on hand to compile a
    /// Wasm-to-native trampoline for the function. In this case, we leave
    /// `wasm_call` empty until the function is passed as an import to Wasm (or
    /// otherwise exposed to Wasm via tables/globals). At this point, we look up
    /// a Wasm-to-native trampoline for the function in the Wasm's compiled
    /// module and use that to fill in `VMFunctionImport::wasm_call`. **However**
    /// there is no guarantee that the Wasm module has a trampoline for this
    /// function's signature. The Wasm module only has trampolines for its
    /// types, and if this function isn't of one of those types, then the Wasm
    /// module will not have a trampoline for it. This is actually okay, because
    /// it means that the Wasm cannot actually call this function. But it does
    /// mean that this field needs to be an `Option` even though it is non-null
    /// the vast vast vast majority of the time.
    pub wasm_call: Option<VmPtr<VMWasmCallFunction>>,

    /// Function signature's type id.
    pub type_index: VMSharedTypeIndex,

    /// The VM state associated with this function.
    ///
    /// The actual definition of what this pointer points to depends on the
    /// function being referenced: for core Wasm functions, this is a `*mut
    /// VMContext`, for host functions it is a `*mut VMHostFuncContext`, and for
    /// component functions it is a `*mut VMComponentContext`.
    pub vmctx: VmPtr<VMOpaqueContext>,
    // If more elements are added here, remember to add offset_of tests below!
}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMFuncRef {}
846
impl VMFuncRef {
    /// Invokes the `array_call` field of this `VMFuncRef` with the supplied
    /// arguments.
    ///
    /// This will invoke the function pointer in the `array_call` field with:
    ///
    /// * the `callee` vmctx as `self.vmctx`
    /// * the `caller` as `caller` specified here
    /// * the args pointer as `args_and_results`
    /// * the args length as the length of `args_and_results`
    ///
    /// The `args_and_results` area must be large enough to both load all
    /// arguments from and store all results to.
    ///
    /// Returns whether a trap was recorded in TLS for raising.
    ///
    /// # Unsafety
    ///
    /// This method is unsafe because it can be called with any pointers. They
    /// must all be valid for this wasm function call to proceed. For example
    /// the `caller` must be valid machine code if `pulley` is `None` or it must
    /// be valid bytecode if `pulley` is `Some`. Additionally `args_and_results`
    /// must be large enough to handle all the arguments/results for this call.
    ///
    /// Note that the unsafety invariants to maintain here are not currently
    /// exhaustively documented.
    #[inline]
    pub unsafe fn array_call(
        me: NonNull<VMFuncRef>,
        pulley: Option<InterpreterRef<'_>>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        match pulley {
            // An interpreter is available, so the callee may be Pulley
            // bytecode. (The interpreted path below still detects host
            // functions and falls back to a native call for them.)
            Some(vm) => Self::array_call_interpreted(me, vm, caller, args_and_results),
            // No interpreter is available, so the callee must be native code.
            None => Self::array_call_native(me, caller, args_and_results),
        }
    }

    /// Invokes this function reference through the Pulley interpreter `vm`.
    ///
    /// Safety: same contract as [`VMFuncRef::array_call`].
    unsafe fn array_call_interpreted(
        me: NonNull<VMFuncRef>,
        vm: InterpreterRef<'_>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // If the callee's `vmctx` is actually a `VMArrayCallHostFuncContext`
        // (detected via its leading magic value) then skip the interpreter,
        // even though it's available, as `array_call` will be native code.
        if me.as_ref().vmctx.as_non_null().as_ref().magic
            == wasmtime_environ::VM_ARRAY_CALL_HOST_FUNC_MAGIC
        {
            return Self::array_call_native(me, caller, args_and_results);
        }
        vm.call(
            me.as_ref().array_call.as_non_null().cast(),
            me.as_ref().vmctx.as_non_null(),
            caller,
            args_and_results,
        )
    }

    /// Invokes this function reference's `array_call` pointer as native code.
    ///
    /// Safety: same contract as [`VMFuncRef::array_call`], and additionally
    /// `array_call` must point at native machine code, not Pulley bytecode.
    #[inline]
    unsafe fn array_call_native(
        me: NonNull<VMFuncRef>,
        caller: NonNull<VMContext>,
        args_and_results: NonNull<[ValRaw]>,
    ) -> bool {
        // `array_call` is stored as an opaque pointer type; reinterpret it
        // here as a function pointer with the native array-call signature via
        // a union read.
        union GetNativePointer {
            native: VMArrayCallNative,
            ptr: NonNull<VMArrayCallFunction>,
        }
        let native = GetNativePointer {
            ptr: me.as_ref().array_call.as_non_null(),
        }
        .native;
        // The array calling convention takes the `args_and_results` slice
        // pointer split into its raw base pointer and its length.
        native(
            me.as_ref().vmctx.as_non_null(),
            caller,
            args_and_results.cast(),
            args_and_results.len(),
        )
    }
}
930
#[cfg(test)]
mod test_vm_func_ref {
    use super::VMFuncRef;
    use core::mem::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// Verify that the size and field offsets Rust computes for `VMFuncRef`
    /// agree with the layout that `VMOffsets` advertises to compiled code.
    #[test]
    fn check_vm_func_ref_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        let p = &offsets.ptr;

        assert_eq!(size_of::<VMFuncRef>(), usize::from(p.size_of_vm_func_ref()));
        assert_eq!(
            offset_of!(VMFuncRef, array_call),
            usize::from(p.vm_func_ref_array_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, wasm_call),
            usize::from(p.vm_func_ref_wasm_call())
        );
        assert_eq!(
            offset_of!(VMFuncRef, type_index),
            usize::from(p.vm_func_ref_type_index())
        );
        assert_eq!(
            offset_of!(VMFuncRef, vmctx),
            usize::from(p.vm_func_ref_vmctx())
        );
    }
}
964
macro_rules! define_builtin_array {
    (
        $(
            $( #[$attr:meta] )*
            $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
        )*
    ) => {
        /// An array that stores addresses of builtin functions. We translate code
        /// to use indirect calls. This way, we don't have to patch the code.
        ///
        /// Ignore improper ctypes to permit `__m128i` on x86_64.
        #[repr(C)]
        pub struct VMBuiltinFunctionsArray {
            $(
                // One `unsafe extern "C"` function-pointer field per builtin,
                // named after the builtin itself.
                #[allow(improper_ctypes_definitions)]
                $name: unsafe extern "C" fn(
                    $(define_builtin_array!(@ty $param)),*
                ) $( -> define_builtin_array!(@ty $result))?,
            )*
        }

        impl VMBuiltinFunctionsArray {
            // The canonical table of builtins, pointing each field at its
            // implementation in `crate::runtime::vm::libcalls::raw`.
            #[allow(unused_doc_comments)]
            pub const INIT: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray {
                $(
                    $name: crate::runtime::vm::libcalls::raw::$name,
                )*
            };

            /// Helper to call `expose_provenance()` on all contained pointers.
            ///
            /// This is required to be called at least once before entering wasm
            /// to inform the compiler that these function pointers may all be
            /// loaded/stored and used on the "other end" to reacquire
            /// provenance in Pulley. Pulley models hostcalls with a host
            /// pointer as the first parameter that's a function pointer under
            /// the hood, and this call ensures that the use of the function
            /// pointer is considered valid.
            pub fn expose_provenance(&self) -> NonNull<Self>{
                $(
                    (self.$name as *mut u8).expose_provenance();
                )*
                NonNull::from(self)
            }
        }
    };

    // Map the type identifiers used in builtin signatures (as declared by
    // `wasmtime_environ::foreach_builtin_function!`) to concrete Rust types.
    (@ty u32) => (u32);
    (@ty u64) => (u64);
    (@ty f32) => (f32);
    (@ty f64) => (f64);
    (@ty u8) => (u8);
    (@ty i8x16) => (i8x16);
    (@ty f32x4) => (f32x4);
    (@ty f64x2) => (f64x2);
    (@ty bool) => (bool);
    (@ty pointer) => (*mut u8);
    (@ty vmctx) => (NonNull<VMContext>);
}
1024
// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMBuiltinFunctionsArray {}

// Instantiate `VMBuiltinFunctionsArray` (and its `INIT` table) with one entry
// per builtin declared by `wasmtime_environ`.
wasmtime_environ::foreach_builtin_function!(define_builtin_array);

// Sanity check: the array must consist of exactly one pointer-sized entry per
// builtin, matching the indexing scheme of `BuiltinFunctionIndex`.
const _: () = {
    assert!(
        mem::size_of::<VMBuiltinFunctionsArray>()
            == mem::size_of::<usize>() * (BuiltinFunctionIndex::len() as usize)
    )
};
1036
/// Structure that holds all mutable context that is shared across all instances
/// in a store, for example data related to fuel or epochs.
///
/// `VMStoreContext`s are one-to-one with `wasmtime::Store`s, the same way that
/// `VMContext`s are one-to-one with `wasmtime::Instance`s. And the same way
/// that multiple `wasmtime::Instance`s may be associated with the same
/// `wasmtime::Store`, multiple `VMContext`s hold a pointer to the same
/// `VMStoreContext` when they are associated with the same `wasmtime::Store`.
#[derive(Debug)]
#[repr(C)]
pub struct VMStoreContext {
    // NB: 64-bit integer fields are located first with pointer-sized fields
    // trailing afterwards. That makes the offsets in this structure easier to
    // calculate on 32-bit platforms as we don't have to worry about the
    // alignment of 64-bit integers.
    //
    /// Indicator of how much fuel has been consumed and is remaining to
    /// WebAssembly.
    ///
    /// This field is typically negative and increments towards positive. Upon
    /// turning positive a wasm trap will be generated. This field is only
    /// modified if wasm is configured to consume fuel.
    pub fuel_consumed: UnsafeCell<i64>,

    /// Deadline epoch for interruption: if epoch-based interruption
    /// is enabled and the global (per engine) epoch counter is
    /// observed to reach or exceed this value, the guest code will
    /// yield if running asynchronously.
    pub epoch_deadline: UnsafeCell<u64>,

    /// Current stack limit of the wasm module.
    ///
    /// For more information see `crates/cranelift/src/lib.rs`.
    pub stack_limit: UnsafeCell<usize>,

    /// The `VMMemoryDefinition` for this store's GC heap.
    ///
    /// Initialized to a dangling base pointer and zero length by the `Default`
    /// impl below until a real GC heap is configured.
    pub gc_heap: VMMemoryDefinition,

    /// The value of the frame pointer register when we last called from Wasm to
    /// the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used to find the start of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_exit_fp: UnsafeCell<usize>,

    /// The last Wasm program counter before we called from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used when walking a contiguous sequence of Wasm frames.
    pub last_wasm_exit_pc: UnsafeCell<usize>,

    /// The last host stack pointer before we called into Wasm from the host.
    ///
    /// Maintained by our host-to-Wasm trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// When a host function is wrapped into a `wasmtime::Func`, and is then
    /// called from the host, then this member has the sentinel value of `-1 as
    /// usize`, meaning that this contiguous sequence of Wasm frames is the
    /// empty sequence, and it is not safe to dereference the
    /// `last_wasm_exit_fp`.
    ///
    /// Used to find the end of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_entry_fp: UnsafeCell<usize>,

    /// Stack information used by stack switching instructions. See documentation
    /// on `VMStackChain` for details.
    pub stack_chain: UnsafeCell<VMStackChain>,

    /// The range, in addresses, of the guard page that is currently in use.
    ///
    /// This field is used when signal handlers are run to determine whether a
    /// faulting address lies within the guard page of an async stack for
    /// example. If this happens then the signal handler aborts with a stack
    /// overflow message similar to what would happen had the stack overflow
    /// happened on the main thread. This field is, by default a null..null
    /// range indicating that no async guard is in use (aka no fiber). In such a
    /// situation while this field is read it'll never classify a fault as a
    /// guard page fault.
    pub async_guard_range: Range<*mut u8>,
}
1133
// The `VMStoreContext` type is a pod-type with no destructor, and we don't
// access any fields from other threads, so add in these trait impls which are
// otherwise not available due to the `fuel_consumed` and `epoch_deadline`
// variables in `VMStoreContext`. (The `UnsafeCell` fields are what suppress
// the automatic `Send`/`Sync` impls.)
unsafe impl Send for VMStoreContext {}
unsafe impl Sync for VMStoreContext {}

// SAFETY: the above structure is repr(C) and only contains `VmSafe` fields.
unsafe impl VmSafe for VMStoreContext {}
1143
1144impl Default for VMStoreContext {
1145    fn default() -> VMStoreContext {
1146        VMStoreContext {
1147            fuel_consumed: UnsafeCell::new(0),
1148            epoch_deadline: UnsafeCell::new(0),
1149            stack_limit: UnsafeCell::new(usize::max_value()),
1150            gc_heap: VMMemoryDefinition {
1151                base: NonNull::dangling().into(),
1152                current_length: AtomicUsize::new(0),
1153            },
1154            last_wasm_exit_fp: UnsafeCell::new(0),
1155            last_wasm_exit_pc: UnsafeCell::new(0),
1156            last_wasm_entry_fp: UnsafeCell::new(0),
1157            stack_chain: UnsafeCell::new(VMStackChain::Absent),
1158            async_guard_range: ptr::null_mut()..ptr::null_mut(),
1159        }
1160    }
1161}
1162
#[cfg(test)]
mod test_vmstore_context {
    use super::{VMMemoryDefinition, VMStoreContext};
    use core::mem::offset_of;
    use wasmtime_environ::{HostPtr, Module, PtrSize, VMOffsets};

    /// Verify that the field offsets Rust computes for `VMStoreContext` (and
    /// the embedded GC-heap `VMMemoryDefinition`) agree with the offsets
    /// `VMOffsets` advertises to compiled code.
    #[test]
    fn field_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(HostPtr, &module);
        let p = &offsets.ptr;
        let gc_heap = offset_of!(VMStoreContext, gc_heap);

        assert_eq!(
            offset_of!(VMStoreContext, stack_limit),
            usize::from(p.vmstore_context_stack_limit())
        );
        assert_eq!(
            offset_of!(VMStoreContext, fuel_consumed),
            usize::from(p.vmstore_context_fuel_consumed())
        );
        assert_eq!(
            offset_of!(VMStoreContext, epoch_deadline),
            usize::from(p.vmstore_context_epoch_deadline())
        );
        assert_eq!(gc_heap, usize::from(p.vmstore_context_gc_heap()));
        assert_eq!(
            gc_heap + offset_of!(VMMemoryDefinition, base),
            usize::from(p.vmstore_context_gc_heap_base())
        );
        assert_eq!(
            gc_heap + offset_of!(VMMemoryDefinition, current_length),
            usize::from(p.vmstore_context_gc_heap_current_length())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_exit_fp),
            usize::from(p.vmstore_context_last_wasm_exit_fp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_exit_pc),
            usize::from(p.vmstore_context_last_wasm_exit_pc())
        );
        assert_eq!(
            offset_of!(VMStoreContext, last_wasm_entry_fp),
            usize::from(p.vmstore_context_last_wasm_entry_fp())
        );
        assert_eq!(
            offset_of!(VMStoreContext, stack_chain),
            usize::from(p.vmstore_context_stack_chain())
        );
    }
}
1215
/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
///
/// Note that the 32-bit "magic" header checked by [`VMContext::from_opaque`]
/// lives at offset 0 of that dynamically-allocated memory.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext;
1226
impl VMContext {
    /// Helper function to cast between context types using a debug assertion to
    /// protect against some mistakes.
    ///
    /// # Safety
    ///
    /// `opaque` must point at a live context whose leading 32-bit `magic`
    /// field is readable, and the caller must ensure that the context really
    /// is a `VMContext` (debug builds verify this against `VMCONTEXT_MAGIC`).
    #[inline]
    pub unsafe fn from_opaque(opaque: NonNull<VMOpaqueContext>) -> NonNull<VMContext> {
        // Note that in general the offset of the "magic" field is stored in
        // `VMOffsets::vmctx_magic`. Given though that this is a sanity check
        // about converting this pointer to another type we ideally don't want
        // to read the offset from potentially corrupt memory. Instead it would
        // be better to catch errors here as soon as possible.
        //
        // To accomplish this the `VMContext` structure is laid out with the
        // magic field at a statically known offset (here it's 0 for now). This
        // static offset is asserted in `VMOffsets::from` and needs to be kept
        // in sync with this line for this debug assertion to work.
        //
        // Also note that this magic is only ever invalid in the presence of
        // bugs, meaning we don't actually read the magic and act differently
        // at runtime depending what it is, so this is a debug assertion as
        // opposed to a regular assertion.
        debug_assert_eq!(opaque.as_ref().magic, VMCONTEXT_MAGIC);
        opaque.cast()
    }
}
1251
/// A "raw" and unsafe representation of a WebAssembly value.
///
/// This is provided for use with the `Func::new_unchecked` and
/// `Func::call_unchecked` APIs. In general it's unlikely you should be using
/// this from Rust, rather using APIs like `Func::wrap` and `TypedFunc::call`.
///
/// This is notably an "unsafe" way to work with `Val` and it's recommended to
/// instead use `Val` where possible. An important note about this union is that
/// fields are all stored in little-endian format, regardless of the endianness
/// of the host system.
#[allow(missing_docs)]
#[repr(C)]
#[derive(Copy, Clone)]
pub union ValRaw {
    /// A WebAssembly `i32` value.
    ///
    /// Note that the payload here is a Rust `i32` but the WebAssembly `i32`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i32` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i32: i32,

    /// A WebAssembly `i64` value.
    ///
    /// Note that the payload here is a Rust `i64` but the WebAssembly `i64`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i64` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i64: i64,

    /// A WebAssembly `f32` value.
    ///
    /// Note that the payload here is a Rust `u32`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u32` value is the return value of `f32::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f32: u32,

    /// A WebAssembly `f64` value.
    ///
    /// Note that the payload here is a Rust `u64`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as it gets passed around the system. Otherwise though this
    /// `u64` value is the return value of `f64::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f64: u64,

    /// A WebAssembly `v128` value.
    ///
    /// The payload here is a Rust `[u8; 16]` which has the same number of bits
    /// but note that `v128` in WebAssembly is often considered a vector type
    /// such as `i32x4` or `f64x2`. This means that the actual interpretation
    /// of the underlying bits is left up to the instructions which consume
    /// this value.
    ///
    /// This value is always stored in a little-endian format.
    v128: [u8; 16],

    /// A WebAssembly `funcref` value (or one of its subtypes).
    ///
    /// The payload here is a pointer which is runtime-defined. This is one of
    /// the main points of unsafety about the `ValRaw` type as the validity of
    /// the pointer here is not easily verified and must be preserved by
    /// carefully calling the correct functions throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    funcref: *mut c_void,

    /// A WebAssembly `externref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    externref: u32,

    /// A WebAssembly `anyref` value (or one of its subtypes).
    ///
    /// The payload here is a compressed pointer value which is
    /// runtime-defined. This is one of the main points of unsafety about the
    /// `ValRaw` type as the validity of the pointer here is not easily verified
    /// and must be preserved by carefully calling the correct functions
    /// throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    anyref: u32,
}
1347
// The `ValRaw` type is matched as `wasmtime_val_raw_t` in the C API so these
// are some simple assertions about the shape of the type which are additionally
// matched in C. (16 bytes is the size of the largest member, `v128`.)
const _: () = {
    assert!(mem::size_of::<ValRaw>() == 16);
    assert!(mem::align_of::<ValRaw>() == mem::align_of::<u64>());
};

// This type is just a bag-of-bits so it's up to the caller to figure out how
// to safely deal with threading concerns and safely access interior bits.
unsafe impl Send for ValRaw {}
unsafe impl Sync for ValRaw {}
1360
1361impl fmt::Debug for ValRaw {
1362    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1363        struct Hex<T>(T);
1364        impl<T: fmt::LowerHex> fmt::Debug for Hex<T> {
1365            fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1366                let bytes = mem::size_of::<T>();
1367                let hex_digits_per_byte = 2;
1368                let hex_digits = bytes * hex_digits_per_byte;
1369                write!(f, "0x{:0width$x}", self.0, width = hex_digits)
1370            }
1371        }
1372
1373        unsafe {
1374            f.debug_struct("ValRaw")
1375                .field("i32", &Hex(self.i32))
1376                .field("i64", &Hex(self.i64))
1377                .field("f32", &Hex(self.f32))
1378                .field("f64", &Hex(self.f64))
1379                .field("v128", &Hex(u128::from_le_bytes(self.v128)))
1380                .field("funcref", &self.funcref)
1381                .field("externref", &Hex(self.externref))
1382                .field("anyref", &Hex(self.anyref))
1383                .finish()
1384        }
1385    }
1386}
1387
1388impl ValRaw {
1389    /// Create a null reference that is compatible with any of
1390    /// `{any,extern,func}ref`.
1391    pub fn null() -> ValRaw {
1392        unsafe {
1393            let raw = mem::MaybeUninit::<Self>::zeroed().assume_init();
1394            debug_assert_eq!(raw.get_anyref(), 0);
1395            debug_assert_eq!(raw.get_externref(), 0);
1396            debug_assert_eq!(raw.get_funcref(), ptr::null_mut());
1397            raw
1398        }
1399    }
1400
1401    /// Creates a WebAssembly `i32` value
1402    #[inline]
1403    pub fn i32(i: i32) -> ValRaw {
1404        // Note that this is intentionally not setting the `i32` field, instead
1405        // setting the `i64` field with a zero-extended version of `i`. For more
1406        // information on this see the comments on `Lower for Result` in the
1407        // `wasmtime` crate. Otherwise though all `ValRaw` constructors are
1408        // otherwise constrained to guarantee that the initial 64-bits are
1409        // always initialized.
1410        ValRaw::u64(i.unsigned().into())
1411    }
1412
1413    /// Creates a WebAssembly `i64` value
1414    #[inline]
1415    pub fn i64(i: i64) -> ValRaw {
1416        ValRaw { i64: i.to_le() }
1417    }
1418
1419    /// Creates a WebAssembly `i32` value
1420    #[inline]
1421    pub fn u32(i: u32) -> ValRaw {
1422        // See comments in `ValRaw::i32` for why this is setting the upper
1423        // 32-bits as well.
1424        ValRaw::u64(i.into())
1425    }
1426
1427    /// Creates a WebAssembly `i64` value
1428    #[inline]
1429    pub fn u64(i: u64) -> ValRaw {
1430        ValRaw::i64(i as i64)
1431    }
1432
1433    /// Creates a WebAssembly `f32` value
1434    #[inline]
1435    pub fn f32(i: u32) -> ValRaw {
1436        // See comments in `ValRaw::i32` for why this is setting the upper
1437        // 32-bits as well.
1438        ValRaw::u64(i.into())
1439    }
1440
1441    /// Creates a WebAssembly `f64` value
1442    #[inline]
1443    pub fn f64(i: u64) -> ValRaw {
1444        ValRaw { f64: i.to_le() }
1445    }
1446
1447    /// Creates a WebAssembly `v128` value
1448    #[inline]
1449    pub fn v128(i: u128) -> ValRaw {
1450        ValRaw {
1451            v128: i.to_le_bytes(),
1452        }
1453    }
1454
1455    /// Creates a WebAssembly `funcref` value
1456    #[inline]
1457    pub fn funcref(i: *mut c_void) -> ValRaw {
1458        ValRaw {
1459            funcref: i.map_addr(|i| i.to_le()),
1460        }
1461    }
1462
1463    /// Creates a WebAssembly `externref` value
1464    #[inline]
1465    pub fn externref(e: u32) -> ValRaw {
1466        assert!(cfg!(feature = "gc") || e == 0);
1467        ValRaw {
1468            externref: e.to_le(),
1469        }
1470    }
1471
1472    /// Creates a WebAssembly `anyref` value
1473    #[inline]
1474    pub fn anyref(r: u32) -> ValRaw {
1475        assert!(cfg!(feature = "gc") || r == 0);
1476        ValRaw { anyref: r.to_le() }
1477    }
1478
1479    /// Gets the WebAssembly `i32` value
1480    #[inline]
1481    pub fn get_i32(&self) -> i32 {
1482        unsafe { i32::from_le(self.i32) }
1483    }
1484
1485    /// Gets the WebAssembly `i64` value
1486    #[inline]
1487    pub fn get_i64(&self) -> i64 {
1488        unsafe { i64::from_le(self.i64) }
1489    }
1490
1491    /// Gets the WebAssembly `i32` value
1492    #[inline]
1493    pub fn get_u32(&self) -> u32 {
1494        self.get_i32().unsigned()
1495    }
1496
1497    /// Gets the WebAssembly `i64` value
1498    #[inline]
1499    pub fn get_u64(&self) -> u64 {
1500        self.get_i64().unsigned()
1501    }
1502
1503    /// Gets the WebAssembly `f32` value
1504    #[inline]
1505    pub fn get_f32(&self) -> u32 {
1506        unsafe { u32::from_le(self.f32) }
1507    }
1508
1509    /// Gets the WebAssembly `f64` value
1510    #[inline]
1511    pub fn get_f64(&self) -> u64 {
1512        unsafe { u64::from_le(self.f64) }
1513    }
1514
1515    /// Gets the WebAssembly `v128` value
1516    #[inline]
1517    pub fn get_v128(&self) -> u128 {
1518        unsafe { u128::from_le_bytes(self.v128) }
1519    }
1520
1521    /// Gets the WebAssembly `funcref` value
1522    #[inline]
1523    pub fn get_funcref(&self) -> *mut c_void {
1524        unsafe { self.funcref.map_addr(|i| usize::from_le(i)) }
1525    }
1526
1527    /// Gets the WebAssembly `externref` value
1528    #[inline]
1529    pub fn get_externref(&self) -> u32 {
1530        let externref = u32::from_le(unsafe { self.externref });
1531        assert!(cfg!(feature = "gc") || externref == 0);
1532        externref
1533    }
1534
1535    /// Gets the WebAssembly `anyref` value
1536    #[inline]
1537    pub fn get_anyref(&self) -> u32 {
1538        let anyref = u32::from_le(unsafe { self.anyref });
1539        assert!(cfg!(feature = "gc") || anyref == 0);
1540        anyref
1541    }
1542}
1543
/// An "opaque" version of `VMContext` which must be explicitly casted to a
/// target context.
///
/// This context is used to represent that contexts specified in
/// `VMFuncRef` can have any type and don't have an implicit
/// structure. Neither wasmtime nor cranelift-generated code can rely on the
/// structure of an opaque context in general and only the code which configured
/// the context is able to rely on a particular structure. This is because the
/// context pointer configured for `VMFuncRef` is guaranteed to be
/// the first parameter passed.
///
/// Note that Wasmtime currently has a layout where all contexts that are casted
/// to an opaque context start with a 32-bit "magic" which can be used in debug
/// mode to debug-assert that the casts here are correct and have at least a
/// little protection against incorrect casts.
pub struct VMOpaqueContext {
    /// The leading 32-bit discriminator shared by all context types; read by
    /// casts such as `VMContext::from_opaque` for debug assertions.
    pub(crate) magic: u32,
    // `PhantomPinned` opts this type out of `Unpin`; presumably because
    // context memory is referenced by raw pointers and must not move —
    // confirm against the context allocation code.
    _marker: marker::PhantomPinned,
}
1563
1564impl VMOpaqueContext {
1565    /// Helper function to clearly indicate that casts are desired.
1566    #[inline]
1567    pub fn from_vmcontext(ptr: NonNull<VMContext>) -> NonNull<VMOpaqueContext> {
1568        ptr.cast()
1569    }
1570
1571    /// Helper function to clearly indicate that casts are desired.
1572    #[inline]
1573    pub fn from_vm_array_call_host_func_context(
1574        ptr: NonNull<VMArrayCallHostFuncContext>,
1575    ) -> NonNull<VMOpaqueContext> {
1576        ptr.cast()
1577    }
1578}