// wasmtime_runtime/vmcontext.rs
1//! This file declares `VMContext` and several related structs which contain
2//! fields that compiled wasm code accesses directly.
3
4mod vm_host_func_context;
5
6use crate::externref::VMExternRef;
7use crate::instance::Instance;
8use std::any::Any;
9use std::cell::UnsafeCell;
10use std::marker;
11use std::ptr::NonNull;
12use std::sync::atomic::{AtomicUsize, Ordering};
13use std::u32;
14pub use vm_host_func_context::VMHostFuncContext;
15use wasmtime_environ::DefinedMemoryIndex;
16
/// Magic value stored at the start of a core wasm `VMContext`: the bytes
/// `"core"` interpreted as a little-endian `u32`. Used by debug assertions
/// (see `VMContext::from_opaque`) to catch casts from a `VMOpaqueContext`
/// that doesn't actually point at a core wasm context.
pub const VMCONTEXT_MAGIC: u32 = u32::from_le_bytes(*b"core");
18
/// An imported function.
///
/// This is the runtime representation of a function import: the raw code
/// pointer plus the context pointer the callee expects.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMFunctionImport {
    /// A pointer to the imported function body.
    pub body: NonNull<VMFunctionBody>,

    /// The VM state associated with this function.
    ///
    /// For core wasm instances this will be `*mut VMContext` but for the
    /// upcoming implementation of the component model this will be something
    /// else. The actual definition of what this pointer points to depends on
    /// the definition of `body` and what compiled it.
    pub vmctx: *mut VMOpaqueContext,
}

// Declare that this type is send/sync, it's the responsibility of users of
// `VMFunctionImport` to uphold this guarantee.
unsafe impl Send for VMFunctionImport {}
unsafe impl Sync for VMFunctionImport {}
39
#[cfg(test)]
mod test_vmfunction_import {
    use super::VMFunctionImport;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    // Verify that the Rust layout of `VMFunctionImport` matches the offsets
    // `VMOffsets` hands to compiled code; a mismatch would mean JIT code
    // reads the wrong fields at runtime.
    #[test]
    fn check_vmfunction_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMFunctionImport>(),
            usize::from(offsets.size_of_vmfunction_import())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, body),
            usize::from(offsets.vmfunction_import_body())
        );
        assert_eq!(
            offset_of!(VMFunctionImport, vmctx),
            usize::from(offsets.vmfunction_import_vmctx())
        );
    }
}
65
/// A placeholder byte-sized type which is just used to provide some amount of type
/// safety when dealing with pointers to JIT-compiled function bodies. Note that it's
/// deliberately not Copy, as we shouldn't be carelessly copying function body bytes
/// around.
// Byte-sized on purpose: pointer arithmetic on `*mut VMFunctionBody` then
// steps one byte at a time, matching raw code addresses.
#[repr(C)]
pub struct VMFunctionBody(u8);
72
#[cfg(test)]
mod test_vmfunction_body {
    use super::VMFunctionBody;
    use std::mem::size_of;

    // `VMFunctionBody` is a byte-sized placeholder for code bytes, so
    // pointer arithmetic on `*mut VMFunctionBody` must step one byte at a
    // time. (Renamed from `check_vmfunction_body_offsets`: this test checks
    // the type's size, not any field offsets.)
    #[test]
    fn check_vmfunction_body_size() {
        assert_eq!(size_of::<VMFunctionBody>(), 1);
    }
}
83
/// The fields compiled code needs to access to utilize a WebAssembly table
/// imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableImport {
    /// A pointer to the imported table description.
    pub from: *mut VMTableDefinition,

    /// A pointer to the `VMContext` that owns the table description.
    pub vmctx: *mut VMContext,
}

// Declare that this type is send/sync, it's the responsibility of users of
// `VMTableImport` to uphold this guarantee.
unsafe impl Send for VMTableImport {}
unsafe impl Sync for VMTableImport {}
100
#[cfg(test)]
mod test_vmtable_import {
    use super::VMTableImport;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    // Keep the Rust layout of `VMTableImport` in sync with the offsets
    // compiled code uses via `VMOffsets`.
    #[test]
    fn check_vmtable_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMTableImport>(),
            usize::from(offsets.size_of_vmtable_import())
        );
        assert_eq!(
            offset_of!(VMTableImport, from),
            usize::from(offsets.vmtable_import_from())
        );
        assert_eq!(
            offset_of!(VMTableImport, vmctx),
            usize::from(offsets.vmtable_import_vmctx())
        );
    }
}
126
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory imported from another instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMMemoryImport {
    /// A pointer to the imported memory description.
    pub from: *mut VMMemoryDefinition,

    /// A pointer to the `VMContext` that owns the memory description.
    pub vmctx: *mut VMContext,

    /// The index of the memory in the containing `vmctx`.
    pub index: DefinedMemoryIndex,
}

// Declare that this type is send/sync, it's the responsibility of users of
// `VMMemoryImport` to uphold this guarantee.
unsafe impl Send for VMMemoryImport {}
unsafe impl Sync for VMMemoryImport {}
146
#[cfg(test)]
mod test_vmmemory_import {
    use super::VMMemoryImport;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    // Keep the Rust layout of `VMMemoryImport` in sync with the offsets
    // compiled code uses via `VMOffsets`.
    // NOTE(review): the `index` field's offset is not asserted here —
    // confirm whether `VMOffsets` exposes it and add a check if so.
    #[test]
    fn check_vmmemory_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMMemoryImport>(),
            usize::from(offsets.size_of_vmmemory_import())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, from),
            usize::from(offsets.vmmemory_import_from())
        );
        assert_eq!(
            offset_of!(VMMemoryImport, vmctx),
            usize::from(offsets.vmmemory_import_vmctx())
        );
    }
}
172
/// The fields compiled code needs to access to utilize a WebAssembly global
/// variable imported from another instance.
///
/// Note that unlike with functions, tables, and memories, `VMGlobalImport`
/// doesn't include a `vmctx` pointer. Globals are never resized, and don't
/// require a `vmctx` pointer to access.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMGlobalImport {
    /// A pointer to the imported global variable description.
    pub from: *mut VMGlobalDefinition,
}

// Declare that this type is send/sync, it's the responsibility of users of
// `VMGlobalImport` to uphold this guarantee.
unsafe impl Send for VMGlobalImport {}
unsafe impl Sync for VMGlobalImport {}
190
#[cfg(test)]
mod test_vmglobal_import {
    use super::VMGlobalImport;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    // Keep the Rust layout of `VMGlobalImport` in sync with the offsets
    // compiled code uses via `VMOffsets`.
    #[test]
    fn check_vmglobal_import_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMGlobalImport>(),
            usize::from(offsets.size_of_vmglobal_import())
        );
        assert_eq!(
            offset_of!(VMGlobalImport, from),
            usize::from(offsets.vmglobal_import_from())
        );
    }
}
212
/// The fields compiled code needs to access to utilize a WebAssembly linear
/// memory defined within the instance, namely the start address and the
/// size in bytes.
#[derive(Debug)]
#[repr(C)]
pub struct VMMemoryDefinition {
    /// The start address.
    pub base: *mut u8,

    /// The current logical size of this linear memory in bytes.
    ///
    /// This is atomic because shared memories must be able to grow their length
    /// atomically. For relaxed access, see
    /// [`VMMemoryDefinition::current_length()`].
    pub current_length: AtomicUsize,
}
229
impl VMMemoryDefinition {
    /// Return the current length of the [`VMMemoryDefinition`] by performing a
    /// relaxed load; do not use this function for situations in which a precise
    /// length is needed. Owned memories (i.e., non-shared) will always return a
    /// precise result (since no concurrent modification is possible) but shared
    /// memories may see an imprecise value--a `current_length` potentially
    /// smaller than what some other thread observes. Since Wasm memory only
    /// grows, this under-estimation may be acceptable in certain cases.
    pub fn current_length(&self) -> usize {
        self.current_length.load(Ordering::Relaxed)
    }

    /// Return a copy of the [`VMMemoryDefinition`] using the relaxed value of
    /// `current_length`; see [`VMMemoryDefinition::current_length()`].
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null, properly aligned, and point to a live
    /// `VMMemoryDefinition` for the duration of this call.
    pub unsafe fn load(ptr: *mut Self) -> Self {
        let other = &*ptr;
        VMMemoryDefinition {
            base: other.base,
            current_length: other.current_length().into(),
        }
    }
}
252
#[cfg(test)]
mod test_vmmemory_definition {
    use super::VMMemoryDefinition;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, PtrSize, VMOffsets};

    // Keep the Rust layout of `VMMemoryDefinition` in sync with the offsets
    // compiled code uses via `VMOffsets`.
    #[test]
    fn check_vmmemory_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMMemoryDefinition>(),
            usize::from(offsets.ptr.size_of_vmmemory_definition())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, base),
            usize::from(offsets.ptr.vmmemory_definition_base())
        );
        assert_eq!(
            offset_of!(VMMemoryDefinition, current_length),
            usize::from(offsets.ptr.vmmemory_definition_current_length())
        );
        /* TODO: Assert that the size of `current_length` matches.
        assert_eq!(
            size_of::<VMMemoryDefinition::current_length>(),
            usize::from(offsets.size_of_vmmemory_definition_current_length())
        );
        */
    }
}
284
/// The fields compiled code needs to access to utilize a WebAssembly table
/// defined within the instance.
#[derive(Debug, Copy, Clone)]
#[repr(C)]
pub struct VMTableDefinition {
    /// Pointer to the table data.
    pub base: *mut u8,

    /// The current number of elements in the table.
    pub current_elements: u32,
}
296
#[cfg(test)]
mod test_vmtable_definition {
    use super::VMTableDefinition;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    // Keep the Rust layout of `VMTableDefinition` in sync with the offsets
    // compiled code uses via `VMOffsets`.
    #[test]
    fn check_vmtable_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMTableDefinition>(),
            usize::from(offsets.size_of_vmtable_definition())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, base),
            usize::from(offsets.vmtable_definition_base())
        );
        assert_eq!(
            offset_of!(VMTableDefinition, current_elements),
            usize::from(offsets.vmtable_definition_current_elements())
        );
    }
}
322
/// The storage for a WebAssembly global defined within the instance.
///
/// The 16-byte size and alignment accommodate the largest value a global can
/// hold (a `v128`); smaller types use a prefix of the buffer via the typed
/// accessors on the `impl` below.
///
/// TODO: Pack the globals more densely, rather than using the same size
/// for every type.
#[derive(Debug)]
#[repr(C, align(16))]
pub struct VMGlobalDefinition {
    // Untyped backing bytes; interpreted by the unsafe `as_*` accessors.
    storage: [u8; 16],
    // If more elements are added here, remember to add offset_of tests below!
}
333
#[cfg(test)]
mod test_vmglobal_definition {
    use super::VMGlobalDefinition;
    use crate::externref::VMExternRef;
    use std::mem::{align_of, size_of};
    use wasmtime_environ::{Module, PtrSize, VMOffsets};

    // The storage buffer must be aligned for every type the accessors
    // reinterpret it as.
    #[test]
    fn check_vmglobal_definition_alignment() {
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<i64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f32>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<f64>());
        assert!(align_of::<VMGlobalDefinition>() >= align_of::<[u8; 16]>());
    }

    // Keep the Rust size in sync with what `VMOffsets` reports to compiled code.
    #[test]
    fn check_vmglobal_definition_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMGlobalDefinition>(),
            usize::from(offsets.ptr.size_of_vmglobal_definition())
        );
    }

    // The globals area inside a `VMContext` must start 16-byte aligned so
    // each 16-byte-aligned `VMGlobalDefinition` lands correctly.
    #[test]
    fn check_vmglobal_begins_aligned() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(offsets.vmctx_globals_begin() % 16, 0);
    }

    // An externref global must fit in the 16-byte storage buffer.
    #[test]
    fn check_vmglobal_can_contain_externref() {
        assert!(size_of::<VMExternRef>() <= size_of::<VMGlobalDefinition>());
    }
}
372
impl VMGlobalDefinition {
    // All `as_*` accessors below reinterpret the untyped 16-byte `storage`
    // buffer as a particular type. They are `unsafe` because nothing here
    // tracks which type the global actually holds: the caller must only use
    // the accessor matching the global's wasm type. The struct's
    // `repr(C, align(16))` guarantees sufficient alignment for every type
    // used below.

    /// Construct a `VMGlobalDefinition` with all storage bytes zeroed.
    pub fn new() -> Self {
        Self { storage: [0; 16] }
    }

    /// Return a reference to the value as an i32.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_i32(&self) -> &i32 {
        &*(self.storage.as_ref().as_ptr().cast::<i32>())
    }

    /// Return a mutable reference to the value as an i32.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_i32_mut(&mut self) -> &mut i32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<i32>())
    }

    /// Return a reference to the value as a u32.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_u32(&self) -> &u32 {
        &*(self.storage.as_ref().as_ptr().cast::<u32>())
    }

    /// Return a mutable reference to the value as an u32.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_u32_mut(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>())
    }

    /// Return a reference to the value as an i64.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_i64(&self) -> &i64 {
        &*(self.storage.as_ref().as_ptr().cast::<i64>())
    }

    /// Return a mutable reference to the value as an i64.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_i64_mut(&mut self) -> &mut i64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<i64>())
    }

    /// Return a reference to the value as an u64.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_u64(&self) -> &u64 {
        &*(self.storage.as_ref().as_ptr().cast::<u64>())
    }

    /// Return a mutable reference to the value as an u64.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_u64_mut(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>())
    }

    /// Return a reference to the value as an f32.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_f32(&self) -> &f32 {
        &*(self.storage.as_ref().as_ptr().cast::<f32>())
    }

    /// Return a mutable reference to the value as an f32.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_f32_mut(&mut self) -> &mut f32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<f32>())
    }

    /// Return a reference to the value as f32 bits (the raw `u32` bit pattern).
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_f32_bits(&self) -> &u32 {
        &*(self.storage.as_ref().as_ptr().cast::<u32>())
    }

    /// Return a mutable reference to the value as f32 bits.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_f32_bits_mut(&mut self) -> &mut u32 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u32>())
    }

    /// Return a reference to the value as an f64.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_f64(&self) -> &f64 {
        &*(self.storage.as_ref().as_ptr().cast::<f64>())
    }

    /// Return a mutable reference to the value as an f64.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_f64_mut(&mut self) -> &mut f64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<f64>())
    }

    /// Return a reference to the value as f64 bits (the raw `u64` bit pattern).
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_f64_bits(&self) -> &u64 {
        &*(self.storage.as_ref().as_ptr().cast::<u64>())
    }

    /// Return a mutable reference to the value as f64 bits.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_f64_bits_mut(&mut self) -> &mut u64 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u64>())
    }

    /// Return a reference to the value as an u128.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_u128(&self) -> &u128 {
        &*(self.storage.as_ref().as_ptr().cast::<u128>())
    }

    /// Return a mutable reference to the value as an u128.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_u128_mut(&mut self) -> &mut u128 {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<u128>())
    }

    /// Return a reference to the value as u128 bits (the raw 16-byte array).
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_u128_bits(&self) -> &[u8; 16] {
        &*(self.storage.as_ref().as_ptr().cast::<[u8; 16]>())
    }

    /// Return a mutable reference to the value as u128 bits.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_u128_bits_mut(&mut self) -> &mut [u8; 16] {
        &mut *(self.storage.as_mut().as_mut_ptr().cast::<[u8; 16]>())
    }

    /// Return a reference to the value as an externref.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_externref(&self) -> &Option<VMExternRef> {
        &*(self.storage.as_ref().as_ptr().cast::<Option<VMExternRef>>())
    }

    /// Return a mutable reference to the value as an externref.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_externref_mut(&mut self) -> &mut Option<VMExternRef> {
        &mut *(self
            .storage
            .as_mut()
            .as_mut_ptr()
            .cast::<Option<VMExternRef>>())
    }

    /// Return the value as a raw anyfunc pointer.
    ///
    /// Note that unlike the other accessors this returns the stored pointer
    /// by value, not a reference to it.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_anyfunc(&self) -> *const VMCallerCheckedFuncRef {
        *(self
            .storage
            .as_ref()
            .as_ptr()
            .cast::<*const VMCallerCheckedFuncRef>())
    }

    /// Return a mutable reference to the value as an anyfunc pointer.
    #[allow(clippy::cast_ptr_alignment)]
    pub unsafe fn as_anyfunc_mut(&mut self) -> &mut *const VMCallerCheckedFuncRef {
        &mut *(self
            .storage
            .as_mut()
            .as_mut_ptr()
            .cast::<*const VMCallerCheckedFuncRef>())
    }
}
535
/// An index into the shared signature registry, usable for checking signatures
/// at indirect calls.
// `repr(C)` because compiled code compares these indices directly; see the
// size check in `test_vmshared_signature_index` below.
#[repr(C)]
#[derive(Debug, Eq, PartialEq, Clone, Copy, Hash)]
pub struct VMSharedSignatureIndex(u32);
541
#[cfg(test)]
mod test_vmshared_signature_index {
    use super::VMSharedSignatureIndex;
    use std::mem::size_of;
    use wasmtime_environ::{Module, VMOffsets};

    // Keep the Rust size of `VMSharedSignatureIndex` in sync with what
    // `VMOffsets` reports to compiled code.
    #[test]
    fn check_vmshared_signature_index() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMSharedSignatureIndex>(),
            usize::from(offsets.size_of_vmshared_signature_index())
        );
    }
}
558
559impl VMSharedSignatureIndex {
560 /// Create a new `VMSharedSignatureIndex`.
561 #[inline]
562 pub fn new(value: u32) -> Self {
563 Self(value)
564 }
565
566 /// Returns the underlying bits of the index.
567 #[inline]
568 pub fn bits(&self) -> u32 {
569 self.0
570 }
571}
572
573impl Default for VMSharedSignatureIndex {
574 #[inline]
575 fn default() -> Self {
576 Self::new(u32::MAX)
577 }
578}
579
/// The VM caller-checked "anyfunc" record, for caller-side signature checking.
/// It consists of the actual function pointer and a signature id to be checked
/// by the caller.
#[derive(Debug, Clone)]
#[repr(C)]
pub struct VMCallerCheckedFuncRef {
    /// Function body.
    pub func_ptr: NonNull<VMFunctionBody>,
    /// Function signature id.
    pub type_index: VMSharedSignatureIndex,
    /// The VM state associated with this function.
    ///
    /// For core wasm instances this will be `*mut VMContext` but for the
    /// upcoming implementation of the component model this will be something
    /// else. The actual definition of what this pointer points to depends on
    /// the definition of `func_ptr` and what compiled it.
    pub vmctx: *mut VMOpaqueContext,
    // If more elements are added here, remember to add offset_of tests below!
}

// Declare that this type is send/sync, it's the responsibility of users of
// `VMCallerCheckedFuncRef` to uphold this guarantee.
unsafe impl Send for VMCallerCheckedFuncRef {}
unsafe impl Sync for VMCallerCheckedFuncRef {}
602
#[cfg(test)]
mod test_vmcaller_checked_anyfunc {
    use super::VMCallerCheckedFuncRef;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, PtrSize, VMOffsets};

    // Keep the Rust layout of `VMCallerCheckedFuncRef` in sync with the
    // offsets compiled code uses via `VMOffsets`.
    #[test]
    fn check_vmcaller_checked_anyfunc_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMCallerCheckedFuncRef>(),
            usize::from(offsets.ptr.size_of_vmcaller_checked_func_ref())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedFuncRef, func_ptr),
            usize::from(offsets.ptr.vmcaller_checked_func_ref_func_ptr())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedFuncRef, type_index),
            usize::from(offsets.ptr.vmcaller_checked_func_ref_type_index())
        );
        assert_eq!(
            offset_of!(VMCallerCheckedFuncRef, vmctx),
            usize::from(offsets.ptr.vmcaller_checked_func_ref_vmctx())
        );
    }
}
632
// Expands a list of builtin-function signatures (supplied by
// `wasmtime_environ::foreach_builtin_function!` below) into the
// `VMBuiltinFunctionsArray` struct: one `extern "C"` function pointer field
// per builtin, initialized from `crate::libcalls::trampolines`.
macro_rules! define_builtin_array {
    (
        $(
            $( #[$attr:meta] )*
            $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
        )*
    ) => {
        /// An array that stores addresses of builtin functions. We translate code
        /// to use indirect calls. This way, we don't have to patch the code.
        #[repr(C)]
        pub struct VMBuiltinFunctionsArray {
            $(
                $name: unsafe extern "C" fn(
                    $(define_builtin_array!(@ty $param)),*
                ) $( -> define_builtin_array!(@ty $result))?,
            )*
        }

        impl VMBuiltinFunctionsArray {
            pub const INIT: VMBuiltinFunctionsArray = VMBuiltinFunctionsArray {
                $($name: crate::libcalls::trampolines::$name,)*
            };
        }
    };

    // Helper rules mapping the environ-level type names used in builtin
    // signatures to the Rust types of the trampoline ABI.
    (@ty i32) => (u32);
    (@ty i64) => (u64);
    (@ty reference) => (*mut u8);
    (@ty pointer) => (*mut u8);
    (@ty vmctx) => (*mut VMContext);
}

wasmtime_environ::foreach_builtin_function!(define_builtin_array);
666
/// The storage for a WebAssembly invocation argument
///
/// Sized and aligned to 16 bytes, matching `VMGlobalDefinition` (see the
/// size check in `test_vm_invoke_argument` below).
///
/// TODO: These could be packed more densely, rather than using the same size for every type.
#[derive(Debug, Copy, Clone)]
#[repr(C, align(16))]
pub struct VMInvokeArgument([u8; 16]);
673
#[cfg(test)]
mod test_vm_invoke_argument {
    use super::VMInvokeArgument;
    use std::mem::{align_of, size_of};
    use wasmtime_environ::{Module, PtrSize, VMOffsets};

    // Arguments must be 16-byte aligned, like `VMGlobalDefinition`.
    #[test]
    fn check_vm_invoke_argument_alignment() {
        assert_eq!(align_of::<VMInvokeArgument>(), 16);
    }

    // An invocation argument must be exactly as large as a global definition
    // slot. (Renamed from `check_vmglobal_definition_offsets`, a copy-paste
    // from the `test_vmglobal_definition` module: this test is about
    // `VMInvokeArgument`.)
    #[test]
    fn check_vm_invoke_argument_size() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            size_of::<VMInvokeArgument>(),
            usize::from(offsets.ptr.size_of_vmglobal_definition())
        );
    }
}
695
696impl VMInvokeArgument {
697 /// Create a new invocation argument filled with zeroes
698 pub fn new() -> Self {
699 Self([0; 16])
700 }
701}
702
/// Structure used to control interrupting wasm code.
///
/// All fields are `UnsafeCell` because compiled wasm code and the runtime
/// mutate them directly through raw pointers.
#[derive(Debug)]
#[repr(C)]
pub struct VMRuntimeLimits {
    /// Current stack limit of the wasm module.
    ///
    /// For more information see `crates/cranelift/src/lib.rs`.
    pub stack_limit: UnsafeCell<usize>,

    /// Indicator of how much fuel has been consumed and is remaining to
    /// WebAssembly.
    ///
    /// This field is typically negative and increments towards positive. Upon
    /// turning positive a wasm trap will be generated. This field is only
    /// modified if wasm is configured to consume fuel.
    pub fuel_consumed: UnsafeCell<i64>,

    /// Deadline epoch for interruption: if epoch-based interruption
    /// is enabled and the global (per engine) epoch counter is
    /// observed to reach or exceed this value, the guest code will
    /// yield if running asynchronously.
    pub epoch_deadline: UnsafeCell<u64>,

    /// The value of the frame pointer register when we last called from Wasm to
    /// the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used to find the start of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_exit_fp: UnsafeCell<usize>,

    /// The last Wasm program counter before we called from Wasm to the host.
    ///
    /// Maintained by our Wasm-to-host trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// Used when walking a contiguous sequence of Wasm frames.
    pub last_wasm_exit_pc: UnsafeCell<usize>,

    /// The last host stack pointer before we called into Wasm from the host.
    ///
    /// Maintained by our host-to-Wasm trampoline, and cleared just before
    /// calling into Wasm in `catch_traps`.
    ///
    /// This member is `0` when Wasm is actively running and has not called out
    /// to the host.
    ///
    /// When a host function is wrapped into a `wasmtime::Func`, and is then
    /// called from the host, then this member has the sentinel value of `-1 as
    /// usize`, meaning that this contiguous sequence of Wasm frames is the
    /// empty sequence, and it is not safe to dereference the
    /// `last_wasm_exit_fp`.
    ///
    /// Used to find the end of a contiguous sequence of Wasm frames when
    /// walking the stack.
    pub last_wasm_entry_sp: UnsafeCell<usize>,
}

// The `VMRuntimeLimits` type is a pod-type with no destructor, and we don't
// access any fields from other threads, so add in these trait impls which are
// otherwise not available due to the `fuel_consumed` and `epoch_deadline`
// variables in `VMRuntimeLimits`.
unsafe impl Send for VMRuntimeLimits {}
unsafe impl Sync for VMRuntimeLimits {}
775
776impl Default for VMRuntimeLimits {
777 fn default() -> VMRuntimeLimits {
778 VMRuntimeLimits {
779 stack_limit: UnsafeCell::new(usize::max_value()),
780 fuel_consumed: UnsafeCell::new(0),
781 epoch_deadline: UnsafeCell::new(0),
782 last_wasm_exit_fp: UnsafeCell::new(0),
783 last_wasm_exit_pc: UnsafeCell::new(0),
784 last_wasm_entry_sp: UnsafeCell::new(0),
785 }
786 }
787}
788
#[cfg(test)]
mod test_vmruntime_limits {
    use super::VMRuntimeLimits;
    use memoffset::offset_of;
    use std::mem::size_of;
    use wasmtime_environ::{Module, PtrSize, VMOffsets};

    // Keep the Rust layout of `VMRuntimeLimits` in sync with the offsets
    // compiled code uses via `VMOffsets`.
    #[test]
    fn field_offsets() {
        let module = Module::new();
        let offsets = VMOffsets::new(size_of::<*mut u8>() as u8, &module);
        assert_eq!(
            offset_of!(VMRuntimeLimits, stack_limit),
            usize::from(offsets.ptr.vmruntime_limits_stack_limit())
        );
        assert_eq!(
            offset_of!(VMRuntimeLimits, fuel_consumed),
            usize::from(offsets.ptr.vmruntime_limits_fuel_consumed())
        );
        assert_eq!(
            offset_of!(VMRuntimeLimits, epoch_deadline),
            usize::from(offsets.ptr.vmruntime_limits_epoch_deadline())
        );
        assert_eq!(
            offset_of!(VMRuntimeLimits, last_wasm_exit_fp),
            usize::from(offsets.ptr.vmruntime_limits_last_wasm_exit_fp())
        );
        assert_eq!(
            offset_of!(VMRuntimeLimits, last_wasm_exit_pc),
            usize::from(offsets.ptr.vmruntime_limits_last_wasm_exit_pc())
        );
        assert_eq!(
            offset_of!(VMRuntimeLimits, last_wasm_entry_sp),
            usize::from(offsets.ptr.vmruntime_limits_last_wasm_entry_sp())
        );
    }
}
826
/// The VM "context", which is pointed to by the `vmctx` arg in Cranelift.
/// This has information about globals, memories, tables, and other runtime
/// state associated with the current instance.
///
/// The struct here is empty, as the sizes of these fields are dynamic, and
/// we can't describe them in Rust's type system. Sufficient memory is
/// allocated at runtime.
#[derive(Debug)]
#[repr(C, align(16))] // align 16 since globals are aligned to that and contained inside
pub struct VMContext {
    /// There's some more discussion about this within `wasmtime/src/lib.rs` but
    /// the idea is that we want to tell the compiler that this contains
    /// pointers which transitively refers to itself, to suppress some
    /// optimizations that might otherwise assume this doesn't exist.
    ///
    /// The self-referential pointer we care about is the `*mut Store` pointer
    /// early on in this context, which if you follow through enough levels of
    /// nesting, eventually can refer back to this `VMContext`
    pub _marker: marker::PhantomPinned,
}
847
impl VMContext {
    /// Helper function to cast between context types using a debug assertion to
    /// protect against some mistakes.
    ///
    /// # Safety
    /// `opaque` must actually point at a core wasm `VMContext` (one whose
    /// leading magic field is `VMCONTEXT_MAGIC`); this is only checked via a
    /// debug assertion.
    #[inline]
    pub unsafe fn from_opaque(opaque: *mut VMOpaqueContext) -> *mut VMContext {
        // Note that in general the offset of the "magic" field is stored in
        // `VMOffsets::vmctx_magic`. Given though that this is a sanity check
        // about converting this pointer to another type we ideally don't want
        // to read the offset from potentially corrupt memory. Instead it would
        // be better to catch errors here as soon as possible.
        //
        // To accomplish this the `VMContext` structure is laid out with the
        // magic field at a statically known offset (here it's 0 for now). This
        // static offset is asserted in `VMOffsets::from` and needs to be kept
        // in sync with this line for this debug assertion to work.
        //
        // Also note that this magic is only ever invalid in the presence of
        // bugs, meaning we don't actually read the magic and act differently
        // at runtime depending what it is, so this is a debug assertion as
        // opposed to a regular assertion.
        debug_assert_eq!((*opaque).magic, VMCONTEXT_MAGIC);
        opaque.cast()
    }

    /// Return a mutable reference to the associated `Instance`.
    ///
    /// # Safety
    /// This is unsafe because it doesn't work on just any `VMContext`, it must
    /// be a `VMContext` allocated as part of an `Instance`.
    // The `VMContext` lives at a fixed offset inside its owning `Instance`,
    // so subtracting `Instance::vmctx_offset()` recovers the instance.
    #[allow(clippy::cast_ptr_alignment)]
    #[inline]
    pub(crate) unsafe fn instance(&self) -> &Instance {
        &*((self as *const Self as *mut u8).offset(-Instance::vmctx_offset()) as *const Instance)
    }

    // Mutable counterpart of `instance`; same safety requirements.
    #[inline]
    pub(crate) unsafe fn instance_mut(&mut self) -> &mut Instance {
        &mut *((self as *const Self as *mut u8).offset(-Instance::vmctx_offset()) as *mut Instance)
    }

    /// Return a reference to the host state associated with this `Instance`.
    ///
    /// # Safety
    /// This is unsafe because it doesn't work on just any `VMContext`, it must
    /// be a `VMContext` allocated as part of an `Instance`.
    #[inline]
    pub unsafe fn host_state(&self) -> &dyn Any {
        self.instance().host_state()
    }
}
898
/// A "raw" and unsafe representation of a WebAssembly value.
///
/// This is provided for use with the `Func::new_unchecked` and
/// `Func::call_unchecked` APIs. In general it's unlikely you should be using
/// this from Rust, rather using APIs like `Func::wrap` and `TypedFunc::call`.
///
/// This is notably an "unsafe" way to work with `Val` and it's recommended to
/// instead use `Val` where possible. An important note about this union is that
/// fields are all stored in little-endian format, regardless of the endianness
/// of the host system.
///
/// Note that the union carries no tag: which member is active is determined
/// entirely by the wasm type of the value being passed, which the caller must
/// track externally.
#[allow(missing_docs)]
#[repr(C)]
#[derive(Copy, Clone)]
pub union ValRaw {
    /// A WebAssembly `i32` value.
    ///
    /// Note that the payload here is a Rust `i32` but the WebAssembly `i32`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i32` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i32: i32,

    /// A WebAssembly `i64` value.
    ///
    /// Note that the payload here is a Rust `i64` but the WebAssembly `i64`
    /// type does not assign an interpretation of the upper bit as either signed
    /// or unsigned. The Rust type `i64` is simply chosen for convenience.
    ///
    /// This value is always stored in a little-endian format.
    i64: i64,

    /// A WebAssembly `f32` value.
    ///
    /// Note that the payload here is a Rust `u32`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as its gets passed around the system. Otherwise though this
    /// `u32` value is the return value of `f32::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f32: u32,

    /// A WebAssembly `f64` value.
    ///
    /// Note that the payload here is a Rust `u64`. This is to allow passing any
    /// representation of NaN into WebAssembly without risk of changing NaN
    /// payload bits as its gets passed around the system. Otherwise though this
    /// `u64` value is the return value of `f64::to_bits` in Rust.
    ///
    /// This value is always stored in a little-endian format.
    f64: u64,

    /// A WebAssembly `v128` value.
    ///
    /// The payload here is a Rust `u128` which has the same number of bits but
    /// note that `v128` in WebAssembly is often considered a vector type such
    /// as `i32x4` or `f64x2`. This means that the actual interpretation of the
    /// underlying bits is left up to the instructions which consume this value.
    ///
    /// This value is always stored in a little-endian format.
    v128: u128,

    /// A WebAssembly `funcref` value.
    ///
    /// The payload here is a pointer which is runtime-defined. This is one of
    /// the main points of unsafety about the `ValRaw` type as the validity of
    /// the pointer here is not easily verified and must be preserved by
    /// carefully calling the correct functions throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    funcref: usize,

    /// A WebAssembly `externref` value.
    ///
    /// The payload here is a pointer which is runtime-defined. This is one of
    /// the main points of unsafety about the `ValRaw` type as the validity of
    /// the pointer here is not easily verified and must be preserved by
    /// carefully calling the correct functions throughout the runtime.
    ///
    /// This value is always stored in a little-endian format.
    externref: usize,
}
981
982impl ValRaw {
983 /// Creates a WebAssembly `i32` value
984 #[inline]
985 pub fn i32(i: i32) -> ValRaw {
986 // Note that this is intentionally not setting the `i32` field, instead
987 // setting the `i64` field with a zero-extended version of `i`. For more
988 // information on this see the comments on `Lower for Result` in the
989 // `wasmtime` crate. Otherwise though all `ValRaw` constructors are
990 // otherwise constrained to guarantee that the initial 64-bits are
991 // always initialized.
992 ValRaw::u64((i as u32).into())
993 }
994
995 /// Creates a WebAssembly `i64` value
996 #[inline]
997 pub fn i64(i: i64) -> ValRaw {
998 ValRaw { i64: i.to_le() }
999 }
1000
1001 /// Creates a WebAssembly `i32` value
1002 #[inline]
1003 pub fn u32(i: u32) -> ValRaw {
1004 // See comments in `ValRaw::i32` for why this is setting the upper
1005 // 32-bits as well.
1006 ValRaw::u64(i.into())
1007 }
1008
1009 /// Creates a WebAssembly `i64` value
1010 #[inline]
1011 pub fn u64(i: u64) -> ValRaw {
1012 ValRaw::i64(i as i64)
1013 }
1014
1015 /// Creates a WebAssembly `f32` value
1016 #[inline]
1017 pub fn f32(i: u32) -> ValRaw {
1018 // See comments in `ValRaw::i32` for why this is setting the upper
1019 // 32-bits as well.
1020 ValRaw::u64(i.into())
1021 }
1022
1023 /// Creates a WebAssembly `f64` value
1024 #[inline]
1025 pub fn f64(i: u64) -> ValRaw {
1026 ValRaw { f64: i.to_le() }
1027 }
1028
1029 /// Creates a WebAssembly `v128` value
1030 #[inline]
1031 pub fn v128(i: u128) -> ValRaw {
1032 ValRaw { v128: i.to_le() }
1033 }
1034
1035 /// Creates a WebAssembly `funcref` value
1036 #[inline]
1037 pub fn funcref(i: usize) -> ValRaw {
1038 ValRaw { funcref: i.to_le() }
1039 }
1040
1041 /// Creates a WebAssembly `externref` value
1042 #[inline]
1043 pub fn externref(i: usize) -> ValRaw {
1044 ValRaw {
1045 externref: i.to_le(),
1046 }
1047 }
1048
1049 /// Gets the WebAssembly `i32` value
1050 #[inline]
1051 pub fn get_i32(&self) -> i32 {
1052 unsafe { i32::from_le(self.i32) }
1053 }
1054
1055 /// Gets the WebAssembly `i64` value
1056 #[inline]
1057 pub fn get_i64(&self) -> i64 {
1058 unsafe { i64::from_le(self.i64) }
1059 }
1060
1061 /// Gets the WebAssembly `i32` value
1062 #[inline]
1063 pub fn get_u32(&self) -> u32 {
1064 self.get_i32() as u32
1065 }
1066
1067 /// Gets the WebAssembly `i64` value
1068 #[inline]
1069 pub fn get_u64(&self) -> u64 {
1070 self.get_i64() as u64
1071 }
1072
1073 /// Gets the WebAssembly `f32` value
1074 #[inline]
1075 pub fn get_f32(&self) -> u32 {
1076 unsafe { u32::from_le(self.f32) }
1077 }
1078
1079 /// Gets the WebAssembly `f64` value
1080 #[inline]
1081 pub fn get_f64(&self) -> u64 {
1082 unsafe { u64::from_le(self.f64) }
1083 }
1084
1085 /// Gets the WebAssembly `v128` value
1086 #[inline]
1087 pub fn get_v128(&self) -> u128 {
1088 unsafe { u128::from_le(self.v128) }
1089 }
1090
1091 /// Gets the WebAssembly `funcref` value
1092 #[inline]
1093 pub fn get_funcref(&self) -> usize {
1094 unsafe { usize::from_le(self.funcref) }
1095 }
1096
1097 /// Gets the WebAssembly `externref` value
1098 #[inline]
1099 pub fn get_externref(&self) -> usize {
1100 unsafe { usize::from_le(self.externref) }
1101 }
1102}
1103
/// Type definition of the trampoline used to enter WebAssembly from the host.
///
/// This function type is what's generated for the entry trampolines that are
/// compiled into a WebAssembly module's image. Note that trampolines are not
/// always used by Wasmtime since the `TypedFunc` API allows bypassing the
/// trampoline and directly calling the underlying wasm function (at the time of
/// this writing).
///
/// The trampoline's arguments here are:
///
/// * `*mut VMOpaqueContext` - this is a contextual pointer defined within the
///   context of the receiving function pointer. For now this is always `*mut
///   VMContext` but with the component model it may be the case that this is a
///   different type of pointer.
///
/// * `*mut VMContext` - this is the "caller" context, which at this time is
///   always unconditionally core wasm (even in the component model). This
///   contextual pointer cannot be `NULL` and provides information necessary to
///   resolve the caller's context for the `Caller` API in Wasmtime.
///
/// * `*const VMFunctionBody` - this is the indirect function pointer which is
///   the actual target function to invoke. This function uses the System-V ABI
///   for its arguments and a semi-custom ABI for the return values (one return
///   value is returned directly, multiple return values have the first one
///   returned directly and remaining ones returned indirectly through a
///   stack pointer). This function pointer may be Cranelift-compiled code or it
///   may also be a host-compiled trampoline (e.g. when a host function calls a
///   host function through the `wasmtime::Func` wrapper). The definition of the
///   first argument of this function depends on what this receiving function
///   pointer desires.
///
/// * `*mut ValRaw` - this is storage space for both arguments and results of
///   the function. The trampoline will read the arguments from this array to
///   pass to the function pointer provided. The results are then written to the
///   array afterwards (both reads and writes start at index 0). It's the
///   caller's responsibility to make sure this array is appropriately sized.
pub type VMTrampoline =
    unsafe extern "C" fn(*mut VMOpaqueContext, *mut VMContext, *const VMFunctionBody, *mut ValRaw);
1142
/// An "opaque" version of `VMContext` which must be explicitly casted to a
/// target context.
///
/// This context is used to represent that contexts specified in
/// `VMCallerCheckedFuncRef` can have any type and don't have an implicit
/// structure. Neither wasmtime nor cranelift-generated code can rely on the
/// structure of an opaque context in general and only the code which configured
/// the context is able to rely on a particular structure. This is because the
/// context pointer configured for `VMCallerCheckedFuncRef` is guaranteed to be
/// the first parameter passed.
///
/// Note that Wasmtime currently has a layout where all contexts that are casted
/// to an opaque context start with a 32-bit "magic" which can be used in debug
/// mode to debug-assert that the casts here are correct and have at least a
/// little protection against incorrect casts.
pub struct VMOpaqueContext {
    /// The 32-bit "magic" discriminant described above (e.g. `VMCONTEXT_MAGIC`
    /// for a core wasm `VMContext`), usable to debug-assert that a cast to a
    /// concrete context type is valid.
    pub(crate) magic: u32,
    // `PhantomPinned` makes this type `!Unpin`: raw pointers to contexts are
    // passed to compiled code (see `VMFunctionImport::vmctx`), so the pointee
    // must not be moved.
    _marker: marker::PhantomPinned,
}
1162
1163impl VMOpaqueContext {
1164 /// Helper function to clearly indicate that casts are desired.
1165 #[inline]
1166 pub fn from_vmcontext(ptr: *mut VMContext) -> *mut VMOpaqueContext {
1167 ptr.cast()
1168 }
1169
1170 /// Helper function to clearly indicate that casts are desired.
1171 #[inline]
1172 pub fn from_vm_host_func_context(ptr: *mut VMHostFuncContext) -> *mut VMOpaqueContext {
1173 ptr.cast()
1174 }
1175}