// wasmtime_cranelift/func_environ.rs

use cranelift_codegen::cursor::FuncCursor;
use cranelift_codegen::ir;
use cranelift_codegen::ir::condcodes::*;
use cranelift_codegen::ir::immediates::{Imm64, Offset32, Uimm64};
use cranelift_codegen::ir::types::*;
use cranelift_codegen::ir::{AbiParam, ArgumentPurpose, Function, InstBuilder, Signature};
use cranelift_codegen::isa::{self, TargetFrontendConfig, TargetIsa};
use cranelift_entity::{EntityRef, PrimaryMap};
use cranelift_frontend::FunctionBuilder;
use cranelift_frontend::Variable;
use cranelift_wasm::{
    self, FuncIndex, FuncTranslationState, GlobalIndex, GlobalVariable, Heap, HeapData, HeapStyle,
    MemoryIndex, TableIndex, TargetEnvironment, TypeIndex, WasmError, WasmResult, WasmType,
};
use std::convert::TryFrom;
use std::mem;
use wasmparser::Operator;
use wasmtime_environ::{
    BuiltinFunctionIndex, MemoryPlan, MemoryStyle, Module, ModuleTranslation, ModuleTypes, PtrSize,
    TableStyle, Tunables, VMOffsets, WASM_PAGE_SIZE,
};
use wasmtime_environ::{FUNCREF_INIT_BIT, FUNCREF_MASK};

macro_rules! declare_function_signatures {
    (
        $(
            $( #[$attr:meta] )*
            $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
        )*
    ) => {
        /// A struct with an `Option<ir::SigRef>` member for every builtin
        /// function, to de-duplicate constructing/getting its signature.
        struct BuiltinFunctionSignatures {
            pointer_type: ir::Type,
            reference_type: ir::Type,
            call_conv: isa::CallConv,
            $(
                $name: Option<ir::SigRef>,
            )*
        }

        impl BuiltinFunctionSignatures {
            fn new(
                pointer_type: ir::Type,
                reference_type: ir::Type,
                call_conv: isa::CallConv,
            ) -> Self {
                Self {
                    pointer_type,
                    reference_type,
                    call_conv,
                    $(
                        $name: None,
                    )*
                }
            }

            fn vmctx(&self) -> AbiParam {
                AbiParam::special(self.pointer_type, ArgumentPurpose::VMContext)
            }

            fn reference(&self) -> AbiParam {
                AbiParam::new(self.reference_type)
            }

            fn pointer(&self) -> AbiParam {
                AbiParam::new(self.pointer_type)
            }

            fn i32(&self) -> AbiParam {
                // Some platform ABIs require i32 values to be zero- or sign-
                // extended to the full register width.  We need to indicate
                // this here by using the appropriate .uext or .sext attribute.
                // The attribute can be added unconditionally; platforms whose
                // ABI does not require such extensions will simply ignore it.
                // Note that currently all i32 arguments or return values used
                // by builtin functions are unsigned, so we always use .uext.
                // If that ever changes, we will have to add a second type
                // marker here.
                AbiParam::new(I32).uext()
            }

            fn i64(&self) -> AbiParam {
                AbiParam::new(I64)
            }

            $(
                fn $name(&mut self, func: &mut Function) -> ir::SigRef {
                    let sig = self.$name.unwrap_or_else(|| {
                        func.import_signature(Signature {
                            params: vec![ $( self.$param() ),* ],
                            returns: vec![ $( self.$result() )? ],
                            call_conv: self.call_conv,
                        })
                    });
                    self.$name = Some(sig);
                    sig
                }
            )*
        }
    };
}

wasmtime_environ::foreach_builtin_function!(declare_function_signatures);

/// The `FuncEnvironment` implementation for use by the `ModuleEnvironment`.
pub struct FuncEnvironment<'module_environment> {
    isa: &'module_environment (dyn TargetIsa + 'module_environment),
    module: &'module_environment Module,
    translation: &'module_environment ModuleTranslation<'module_environment>,
    types: &'module_environment ModuleTypes,

    /// Heaps implementing WebAssembly linear memories.
    heaps: PrimaryMap<Heap, HeapData>,

    /// The Cranelift global holding the vmctx address.
    vmctx: Option<ir::GlobalValue>,

    /// Caches of signatures for builtin functions.
    builtin_function_signatures: BuiltinFunctionSignatures,

    /// Offsets to struct fields accessed by JIT code.
    pub(crate) offsets: VMOffsets<u8>,

    tunables: &'module_environment Tunables,

    /// A function-local variable which stores the cached value of the amount of
    /// fuel remaining to execute. If used this is modified frequently so it's
    /// stored locally as a variable instead of always referenced from the field
    /// in `*const VMRuntimeLimits`
    fuel_var: cranelift_frontend::Variable,

    /// A function-local variable which caches the value of `*const
    /// VMRuntimeLimits` for this function's vmctx argument. This pointer is stored
    /// in the vmctx itself, but never changes for the lifetime of the function,
    /// so if we load it up front we can continue to use it throughout.
    vmruntime_limits_ptr: cranelift_frontend::Variable,

    /// A cached epoch deadline value, when performing epoch-based
    /// interruption. Loaded from `VMRuntimeLimits` and reloaded after
    /// any yield.
    epoch_deadline_var: cranelift_frontend::Variable,

    /// A cached pointer to the per-Engine epoch counter, when
    /// performing epoch-based interruption. Initialized in the
    /// function prologue. We prefer to use a variable here rather
    /// than reload on each check because it's better to let the
    /// regalloc keep it in a register if able; if not, it can always
    /// spill, and this isn't any worse than reloading each time.
    epoch_ptr_var: cranelift_frontend::Variable,

    fuel_consumed: i64,
}

impl<'module_environment> FuncEnvironment<'module_environment> {
    pub fn new(
        isa: &'module_environment (dyn TargetIsa + 'module_environment),
        translation: &'module_environment ModuleTranslation<'module_environment>,
        types: &'module_environment ModuleTypes,
        tunables: &'module_environment Tunables,
    ) -> Self {
        let builtin_function_signatures = BuiltinFunctionSignatures::new(
            isa.pointer_type(),
            match isa.pointer_type() {
                ir::types::I32 => ir::types::R32,
                ir::types::I64 => ir::types::R64,
                _ => panic!(),
            },
            crate::wasmtime_call_conv(isa),
        );
        Self {
            isa,
            module: &translation.module,
            translation,
            types,
            heaps: PrimaryMap::default(),
            vmctx: None,
            builtin_function_signatures,
            offsets: VMOffsets::new(isa.pointer_bytes(), &translation.module),
            tunables,
            fuel_var: Variable::new(0),
            epoch_deadline_var: Variable::new(0),
            epoch_ptr_var: Variable::new(0),
            vmruntime_limits_ptr: Variable::new(0),

            // Start with at least one fuel being consumed because even empty
            // functions should consume at least some fuel.
            fuel_consumed: 1,
        }
    }

    fn pointer_type(&self) -> ir::Type {
        self.isa.pointer_type()
    }

    fn vmctx(&mut self, func: &mut Function) -> ir::GlobalValue {
        self.vmctx.unwrap_or_else(|| {
            let vmctx = func.create_global_value(ir::GlobalValueData::VMContext);
            self.vmctx = Some(vmctx);
            vmctx
        })
    }

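    /// Returns the signature, destination and source table indices, and
    /// builtin function index used to lower `table.copy` to the `table_copy`
    /// libcall.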
    fn get_table_copy_func(
        &mut self,
        func: &mut Function,
        dst_table_index: TableIndex,
        src_table_index: TableIndex,
    ) -> (ir::SigRef, usize, usize, BuiltinFunctionIndex) {
        let sig = self.builtin_function_signatures.table_copy(func);
        (
            sig,
            dst_table_index.as_u32() as usize,
            src_table_index.as_u32() as usize,
            BuiltinFunctionIndex::table_copy(),
        )
    }

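    /// Returns the signature, table index, and builtin function index used to
    /// lower `table.init` to the `table_init` libcall.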
    fn get_table_init_func(
        &mut self,
        func: &mut Function,
        table_index: TableIndex,
    ) -> (ir::SigRef, usize, BuiltinFunctionIndex) {
        let sig = self.builtin_function_signatures.table_init(func);
        let table_index = table_index.as_u32() as usize;
        (sig, table_index, BuiltinFunctionIndex::table_init())
    }

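    /// Returns the signature and builtin function index of the `elem_drop`
    /// libcall.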
    fn get_elem_drop_func(&mut self, func: &mut Function) -> (ir::SigRef, BuiltinFunctionIndex) {
        let sig = self.builtin_function_signatures.elem_drop(func);
        (sig, BuiltinFunctionIndex::elem_drop())
    }

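    /// Returns the signature, memory index, and builtin function index of the
    /// `memory_atomic_wait32` or `memory_atomic_wait64` libcall, selected by
    /// the operand type `ty`.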
    fn get_memory_atomic_wait(
        &mut self,
        func: &mut Function,
        memory_index: MemoryIndex,
        ty: ir::Type,
    ) -> (ir::SigRef, usize, BuiltinFunctionIndex) {
        match ty {
            I32 => (
                self.builtin_function_signatures.memory_atomic_wait32(func),
                memory_index.index(),
                BuiltinFunctionIndex::memory_atomic_wait32(),
            ),
            I64 => (
                self.builtin_function_signatures.memory_atomic_wait64(func),
                memory_index.index(),
                BuiltinFunctionIndex::memory_atomic_wait64(),
            ),
            x => panic!("get_memory_atomic_wait unsupported type: {:?}", x),
        }
    }

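    /// Returns the signature and builtin function index of the `memory_init`
    /// libcall.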
    fn get_memory_init_func(&mut self, func: &mut Function) -> (ir::SigRef, BuiltinFunctionIndex) {
        (
            self.builtin_function_signatures.memory_init(func),
            BuiltinFunctionIndex::memory_init(),
        )
    }

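    /// Returns the signature and builtin function index of the `data_drop`
    /// libcall.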
    fn get_data_drop_func(&mut self, func: &mut Function) -> (ir::SigRef, BuiltinFunctionIndex) {
        (
            self.builtin_function_signatures.data_drop(func),
            BuiltinFunctionIndex::data_drop(),
        )
    }

    /// Translates a load of a builtin function's address and returns a pair of
    /// values: the `vmctx` and the address of the loaded function.
    fn translate_load_builtin_function_address(
        &mut self,
        pos: &mut FuncCursor<'_>,
        callee_func_idx: BuiltinFunctionIndex,
    ) -> (ir::Value, ir::Value) {
        // We use an indirect call so that we don't have to patch the code at runtime.
        let pointer_type = self.pointer_type();
        let vmctx = self.vmctx(&mut pos.func);
        let base = pos.ins().global_value(pointer_type, vmctx);

        let mem_flags = ir::MemFlags::trusted().with_readonly();

        // Load the base of the array of builtin functions
        let array_offset = i32::try_from(self.offsets.vmctx_builtin_functions()).unwrap();
        let array_addr = pos.ins().load(pointer_type, mem_flags, base, array_offset);

        // Load the callee address.
        let body_offset = i32::try_from(callee_func_idx.index() * pointer_type.bytes()).unwrap();
        let func_addr = pos
            .ins()
            .load(pointer_type, mem_flags, array_addr, body_offset);

        (base, func_addr)
    }

    /// Generate code to increment or decrement the given `externref`'s
    /// reference count.
    ///
    /// The new reference count is returned.
    fn mutate_externref_ref_count(
        &mut self,
        builder: &mut FunctionBuilder,
        externref: ir::Value,
        delta: i64,
    ) -> ir::Value {
        debug_assert!(delta == -1 || delta == 1);

        let pointer_type = self.pointer_type();

        // If this changes that's ok, the `atomic_rmw` below just needs to be
        // preceded with an add instruction of `externref` and the offset.
        assert_eq!(self.offsets.vm_extern_data_ref_count(), 0);
        let delta = builder.ins().iconst(pointer_type, delta);
        builder.ins().atomic_rmw(
            pointer_type,
            ir::MemFlags::trusted(),
            ir::AtomicRmwOp::Add,
            externref,
            delta,
        )
    }

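    /// Returns the global value and constant offset at which the wasm global
    /// `index` lives, resolving imported globals through the pointer stored in
    /// the vmctx.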
    fn get_global_location(
        &mut self,
        func: &mut ir::Function,
        index: GlobalIndex,
    ) -> (ir::GlobalValue, i32) {
        let pointer_type = self.pointer_type();
        let vmctx = self.vmctx(func);
        if let Some(def_index) = self.module.defined_global_index(index) {
            let offset = i32::try_from(self.offsets.vmctx_vmglobal_definition(def_index)).unwrap();
            (vmctx, offset)
        } else {
            let from_offset = self.offsets.vmctx_vmglobal_import_from(index);
            let global = func.create_global_value(ir::GlobalValueData::Load {
                base: vmctx,
                offset: Offset32::new(i32::try_from(from_offset).unwrap()),
                global_type: pointer_type,
                readonly: true,
            });
            (global, 0)
        }
    }

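    /// Loads the `*const VMRuntimeLimits` pointer out of the vmctx and caches
    /// it in `self.vmruntime_limits_ptr` for reuse throughout the function.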
    fn declare_vmruntime_limits_ptr(&mut self, builder: &mut FunctionBuilder<'_>) {
        // We load the `*const VMRuntimeLimits` value stored within vmctx at the
        // head of the function and reuse the same value across the entire
        // function. This is possible since we know that the pointer never
        // changes for the lifetime of the function.
        let pointer_type = self.pointer_type();
        builder.declare_var(self.vmruntime_limits_ptr, pointer_type);
        let vmctx = self.vmctx(builder.func);
        let base = builder.ins().global_value(pointer_type, vmctx);
        let offset = i32::try_from(self.offsets.vmctx_runtime_limits()).unwrap();
        let interrupt_ptr = builder
            .ins()
            .load(pointer_type, ir::MemFlags::trusted(), base, offset);
        builder.def_var(self.vmruntime_limits_ptr, interrupt_ptr);
    }

    fn fuel_function_entry(&mut self, builder: &mut FunctionBuilder<'_>) {
        // On function entry we load the amount of fuel into a function-local
        // `self.fuel_var` to make fuel modifications fast locally. This cache
        // is then periodically flushed to the Store-defined location in
        // `VMRuntimeLimits` later.
        builder.declare_var(self.fuel_var, ir::types::I64);
        self.fuel_load_into_var(builder);
        self.fuel_check(builder);
    }

    fn fuel_function_exit(&mut self, builder: &mut FunctionBuilder<'_>) {
        // On exiting the function we need to be sure to save the fuel we have
        // cached locally in `self.fuel_var` back into the Store-defined
        // location.
        self.fuel_save_from_var(builder);
    }

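    /// Updates fuel bookkeeping before `op` is translated: accounts the
    /// operator's cost in `self.fuel_consumed` and, where the control flow
    /// requires it, adds the buffered cost to the cached fuel variable and
    /// saves it back to `VMRuntimeLimits` around calls and returns.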
    fn fuel_before_op(
        &mut self,
        op: &Operator<'_>,
        builder: &mut FunctionBuilder<'_>,
        reachable: bool,
    ) {
        if !reachable {
            // In unreachable code we shouldn't have any leftover fuel we
            // haven't accounted for since the reason for us to become
            // unreachable should have already added it to `self.fuel_var`.
            debug_assert_eq!(self.fuel_consumed, 0);
            return;
        }

        self.fuel_consumed += match op {
            // Nop and drop generate no code, so don't consume fuel for them.
            Operator::Nop | Operator::Drop => 0,

            // Control flow may create branches, but is generally cheap and
            // free, so don't consume fuel. Note the lack of `if` since some
            // cost is incurred with the conditional check.
            Operator::Block { .. }
            | Operator::Loop { .. }
            | Operator::Unreachable
            | Operator::Return
            | Operator::Else
            | Operator::End => 0,

            // everything else, just call it one operation.
            _ => 1,
        };

        match op {
            // Exiting a function (via a return or unreachable) or otherwise
            // entering a different function (via a call) means that we need to
            // update the fuel consumption in `VMRuntimeLimits` because we're
            // about to move control out of this function itself and the fuel
            // may need to be read.
            //
            // Before this we need to update the fuel counter from our own cost
            // leading up to this function call, and then we can store
            // `self.fuel_var` into `VMRuntimeLimits`.
            Operator::Unreachable
            | Operator::Return
            | Operator::CallIndirect { .. }
            | Operator::Call { .. }
            | Operator::ReturnCall { .. }
            | Operator::ReturnCallIndirect { .. } => {
                self.fuel_increment_var(builder);
                self.fuel_save_from_var(builder);
            }

            // To ensure all code preceding a loop is only counted once we
            // update the fuel variable on entry.
            Operator::Loop { .. }

            // Entering into an `if` block means that the edge we take isn't
            // known until runtime, so we need to update our fuel consumption
            // before we take the branch.
            | Operator::If { .. }

            // Control-flow instructions mean that we're moving to the end/exit
            // of a block somewhere else. That means we need to update the fuel
            // counter since we're effectively terminating our basic block.
            | Operator::Br { .. }
            | Operator::BrIf { .. }
            | Operator::BrTable { .. }

            // Exiting a scope means that we need to update the fuel
            // consumption because there are multiple ways to exit a scope and
            // this is the only time we have to account for instructions
            // executed so far.
            | Operator::End

            // This is similar to `end`, except that it's only the terminator
            // for an `if` block. The same reasoning applies though in that we
            // are terminating a basic block and need to update the fuel
            // variable.
            | Operator::Else => self.fuel_increment_var(builder),

            // This is a normal instruction where the fuel is buffered to later
            // get added to `self.fuel_var`.
            //
            // Note that we generally ignore instructions which may trap and
            // therefore result in exiting a block early. Current usage of fuel
            // means that it's not too important to account for a precise amount
            // of fuel consumed but rather "close to the actual amount" is good
            // enough. For 100% precise counting, however, we'd probably need to
            // not only increment but also save the fuel amount more often
            // around trapping instructions. (see the `unreachable` instruction
            // case above)
            //
            // Note that `Block` is specifically omitted from incrementing the
            // fuel variable. Control flow entering a `block` is unconditional
            // which means it's effectively executing straight-line code. We'll
            // update the counter when exiting a block, but we shouldn't need to
            // do so upon entering a block.
            _ => {}
        }
    }

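    /// Performs fuel fixups after `op` has been translated; currently this
    /// reloads the cached fuel variable after calls, since the callee may have
    /// changed the stored fuel amount.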
    fn fuel_after_op(&mut self, op: &Operator<'_>, builder: &mut FunctionBuilder<'_>) {
        // After a function call we need to reload our fuel value since the
        // function may have changed it.
        match op {
            Operator::Call { .. } | Operator::CallIndirect { .. } => {
                self.fuel_load_into_var(builder);
            }
            _ => {}
        }
    }

    /// Adds `self.fuel_consumed` to the `fuel_var`, zero-ing out the amount of
    /// fuel consumed at that point.
    fn fuel_increment_var(&mut self, builder: &mut FunctionBuilder<'_>) {
        let consumption = mem::replace(&mut self.fuel_consumed, 0);
        if consumption == 0 {
            return;
        }

        let fuel = builder.use_var(self.fuel_var);
        let fuel = builder.ins().iadd_imm(fuel, consumption);
        builder.def_var(self.fuel_var, fuel);
    }

    /// Loads the fuel consumption value from `VMRuntimeLimits` into `self.fuel_var`
    fn fuel_load_into_var(&mut self, builder: &mut FunctionBuilder<'_>) {
        let (addr, offset) = self.fuel_addr_offset(builder);
        let fuel = builder
            .ins()
            .load(ir::types::I64, ir::MemFlags::trusted(), addr, offset);
        builder.def_var(self.fuel_var, fuel);
    }

    /// Stores the fuel consumption value from `self.fuel_var` into
    /// `VMRuntimeLimits`.
    fn fuel_save_from_var(&mut self, builder: &mut FunctionBuilder<'_>) {
        let (addr, offset) = self.fuel_addr_offset(builder);
        let fuel_consumed = builder.use_var(self.fuel_var);
        builder
            .ins()
            .store(ir::MemFlags::trusted(), fuel_consumed, addr, offset);
    }

    /// Returns the `(address, offset)` of the fuel consumption within
    /// `VMRuntimeLimits`, used to perform loads/stores later.
    fn fuel_addr_offset(
        &mut self,
        builder: &mut FunctionBuilder<'_>,
    ) -> (ir::Value, ir::immediates::Offset32) {
        (
            builder.use_var(self.vmruntime_limits_ptr),
            i32::from(self.offsets.ptr.vmruntime_limits_fuel_consumed()).into(),
        )
    }

    /// Checks the amount of fuel remaining and, if we've run out, calls the
    /// out-of-fuel function.
    fn fuel_check(&mut self, builder: &mut FunctionBuilder) {
        self.fuel_increment_var(builder);
        let out_of_gas_block = builder.create_block();
        let continuation_block = builder.create_block();

        // Note that our fuel is encoded as adding positive values to a
        // negative number. Whenever the negative number goes positive that
        // means we ran out of fuel.
        //
        // Compare to see if our fuel is positive, and if so we ran out of gas.
        // Otherwise we can continue on like usual.
        let zero = builder.ins().iconst(ir::types::I64, 0);
        let fuel = builder.use_var(self.fuel_var);
        let cmp = builder
            .ins()
            .icmp(IntCC::SignedGreaterThanOrEqual, fuel, zero);
        builder
            .ins()
            .brif(cmp, out_of_gas_block, &[], continuation_block, &[]);
        builder.seal_block(out_of_gas_block);

        // If we ran out of gas then we call our out-of-gas intrinsic and it
        // figures out what to do. Note that this may raise a trap, or do
        // something like yield to an async runtime. In either case we don't
        // assume what happens and handle the case the intrinsic returns.
        //
        // Note that we save/reload fuel around this since the out-of-gas
        // intrinsic may alter how much fuel is in the system.
        builder.switch_to_block(out_of_gas_block);
        self.fuel_save_from_var(builder);
        let out_of_gas_sig = self.builtin_function_signatures.out_of_gas(builder.func);
        let (vmctx, out_of_gas) = self.translate_load_builtin_function_address(
            &mut builder.cursor(),
            BuiltinFunctionIndex::out_of_gas(),
        );
        builder
            .ins()
            .call_indirect(out_of_gas_sig, out_of_gas, &[vmctx]);
        self.fuel_load_into_var(builder);
        builder.ins().jump(continuation_block, &[]);
        builder.seal_block(continuation_block);

        builder.switch_to_block(continuation_block);
    }

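    /// Emits the epoch-interruption prologue: declares the cached deadline and
    /// epoch-pointer variables, loads their initial values, and performs an
    /// entry check.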
    fn epoch_function_entry(&mut self, builder: &mut FunctionBuilder<'_>) {
        builder.declare_var(self.epoch_deadline_var, ir::types::I64);
        self.epoch_load_deadline_into_var(builder);
        builder.declare_var(self.epoch_ptr_var, self.pointer_type());
        let epoch_ptr = self.epoch_ptr(builder);
        builder.def_var(self.epoch_ptr_var, epoch_ptr);

        // We must check for an epoch change when entering a
        // function. Why? Why aren't checks at loops sufficient to
        // bound runtime to O(|static program size|)?
        //
        // The reason is that one can construct a "zip-bomb-like"
        // program with exponential-in-program-size runtime, with no
        // backedges (loops), by building a tree of function calls: f0
        // calls f1 ten times, f1 calls f2 ten times, etc. E.g., nine
        // levels of this yields a billion function calls with no
        // backedges. So we can't do checks only at backedges.
        //
        // In this "call-tree" scenario, and in fact in any program
        // that uses calls as a sort of control flow to try to evade
        // backedge checks, a check at every function entry is
        // sufficient. Then, combined with checks at every backedge
        // (loop) the longest runtime between checks is bounded by the
        // straightline length of any function body.
        self.epoch_check(builder);
    }

    fn epoch_ptr(&mut self, builder: &mut FunctionBuilder<'_>) -> ir::Value {
        let vmctx = self.vmctx(builder.func);
        let pointer_type = self.pointer_type();
        let base = builder.ins().global_value(pointer_type, vmctx);
        let offset = i32::try_from(self.offsets.vmctx_epoch_ptr()).unwrap();
        let epoch_ptr = builder
            .ins()
            .load(pointer_type, ir::MemFlags::trusted(), base, offset);
        epoch_ptr
    }

    fn epoch_load_current(&mut self, builder: &mut FunctionBuilder<'_>) -> ir::Value {
        let addr = builder.use_var(self.epoch_ptr_var);
        builder.ins().load(
            ir::types::I64,
            ir::MemFlags::trusted(),
            addr,
            ir::immediates::Offset32::new(0),
        )
    }

    fn epoch_load_deadline_into_var(&mut self, builder: &mut FunctionBuilder<'_>) {
        let interrupts = builder.use_var(self.vmruntime_limits_ptr);
        let deadline =
            builder.ins().load(
                ir::types::I64,
                ir::MemFlags::trusted(),
                interrupts,
                ir::immediates::Offset32::new(
                    self.offsets.ptr.vmruntime_limits_epoch_deadline() as i32
                ),
            );
        builder.def_var(self.epoch_deadline_var, deadline);
    }

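    /// Emits an epoch check against the cached deadline, with a cold path that
    /// reloads the deadline and, if it is still exceeded, calls the
    /// `new_epoch` libcall.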
    fn epoch_check(&mut self, builder: &mut FunctionBuilder<'_>) {
        let new_epoch_block = builder.create_block();
        let new_epoch_doublecheck_block = builder.create_block();
        let continuation_block = builder.create_block();
        builder.set_cold_block(new_epoch_block);
        builder.set_cold_block(new_epoch_doublecheck_block);

        let epoch_deadline = builder.use_var(self.epoch_deadline_var);
        // Load new epoch and check against cached deadline. The
        // deadline may be out of date if it was updated (within
        // another yield) during some function that we called; this is
        // fine, as we'll reload it and check again before yielding in
        // the cold path.
        let cur_epoch_value = self.epoch_load_current(builder);
        let cmp = builder.ins().icmp(
            IntCC::UnsignedGreaterThanOrEqual,
            cur_epoch_value,
            epoch_deadline,
        );
        builder
            .ins()
            .brif(cmp, new_epoch_block, &[], continuation_block, &[]);
        builder.seal_block(new_epoch_block);

        // In the "new epoch block", we've noticed that the epoch has
        // exceeded our cached deadline. However the real deadline may
        // have been moved in the meantime. We keep the cached value
        // in a register to speed the checks in the common case
        // (between epoch ticks) but we want to do a precise check
        // here, on the cold path, by reloading the latest value
        // first.
        builder.switch_to_block(new_epoch_block);
        self.epoch_load_deadline_into_var(builder);
        let fresh_epoch_deadline = builder.use_var(self.epoch_deadline_var);
        let fresh_cmp = builder.ins().icmp(
            IntCC::UnsignedGreaterThanOrEqual,
            cur_epoch_value,
            fresh_epoch_deadline,
        );
        builder.ins().brif(
            fresh_cmp,
            new_epoch_doublecheck_block,
            &[],
            continuation_block,
            &[],
        );
        builder.seal_block(new_epoch_doublecheck_block);

        builder.switch_to_block(new_epoch_doublecheck_block);
        let new_epoch_sig = self.builtin_function_signatures.new_epoch(builder.func);
        let (vmctx, new_epoch) = self.translate_load_builtin_function_address(
            &mut builder.cursor(),
            BuiltinFunctionIndex::new_epoch(),
        );
        // new_epoch() returns the new deadline, so we don't have to
        // reload it.
        let call = builder
            .ins()
            .call_indirect(new_epoch_sig, new_epoch, &[vmctx]);
        let new_deadline = *builder.func.dfg.inst_results(call).first().unwrap();
        builder.def_var(self.epoch_deadline_var, new_deadline);
        builder.ins().jump(continuation_block, &[]);
        builder.seal_block(continuation_block);

        builder.switch_to_block(continuation_block);
    }

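    /// Returns the IR type used to index the linear memory at `index`: `I64`
    /// for 64-bit memories and `I32` otherwise.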
    fn memory_index_type(&self, index: MemoryIndex) -> ir::Type {
        if self.module.memory_plans[index].memory.memory64 {
            I64
        } else {
            I32
        }
    }

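    /// Casts a pointer-sized value (a page count used by `memory.size` and
    /// `memory.grow`) to the index type of the given memory.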
    fn cast_pointer_to_memory_index(
        &self,
        mut pos: FuncCursor<'_>,
        val: ir::Value,
        index: MemoryIndex,
    ) -> ir::Value {
        let desired_type = self.memory_index_type(index);
        let pointer_type = self.pointer_type();
        assert_eq!(pos.func.dfg.value_type(val), pointer_type);

        // The current length is of type `pointer_type` but we need to fit it
        // into `desired_type`. We are guaranteed that the result will always
        // fit, so we just need to do the right ireduce/sextend here.
        if pointer_type == desired_type {
            val
        } else if pointer_type.bits() > desired_type.bits() {
            pos.ins().ireduce(desired_type, val)
        } else {
            // Note that we `sextend` instead of the probably expected
            // `uextend`. This function is only used within the contexts of
            // `memory.size` and `memory.grow` where we're working with units of
            // pages instead of actual bytes, so we know that the upper bit is
            // always cleared for "valid values". The one case where we care
            // about sign extension is when the return value of `memory.grow` is `-1`,
            // in which case we want to copy the sign bit.
            //
            // This should only come up on 32-bit hosts running wasm64 modules,
            // which at some point also makes you question various assumptions
            // made along the way...
            pos.ins().sextend(desired_type, val)
        }
    }

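    /// Extends a memory index value to `I64`, zero-extending when the memory
    /// uses 32-bit indices.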
    fn cast_memory_index_to_i64(
        &self,
        pos: &mut FuncCursor<'_>,
        val: ir::Value,
        index: MemoryIndex,
    ) -> ir::Value {
        if self.memory_index_type(index) == I64 {
            val
        } else {
            pos.ins().uextend(I64, val)
        }
    }

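    /// Loads a `funcref` element from the given table, lazily initializing it
    /// via the `table_get_lazy_init_funcref` libcall when the entry is still
    /// null.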
    fn get_or_init_funcref_table_elem(
        &mut self,
        builder: &mut FunctionBuilder,
        table_index: TableIndex,
        table: ir::Table,
        index: ir::Value,
    ) -> ir::Value {
        let pointer_type = self.pointer_type();

        // To support lazy initialization of table
        // contents, we check for a null entry here, and
        // if null, we take a slow-path that invokes a
        // libcall.
        let table_entry_addr = builder.ins().table_addr(pointer_type, table, index, 0);
        let flags = ir::MemFlags::trusted().with_table();
        let value = builder.ins().load(pointer_type, flags, table_entry_addr, 0);
        // Mask off the "initialized bit". See documentation on
        // FUNCREF_INIT_BIT in crates/environ/src/ref_bits.rs for more
        // details.
        let value_masked = builder
            .ins()
            .band_imm(value, Imm64::from(FUNCREF_MASK as i64));

        let null_block = builder.create_block();
        let continuation_block = builder.create_block();
        let result_param = builder.append_block_param(continuation_block, pointer_type);
        builder.set_cold_block(null_block);

        builder
            .ins()
            .brif(value, continuation_block, &[value_masked], null_block, &[]);
        builder.seal_block(null_block);

        builder.switch_to_block(null_block);
        let table_index = builder.ins().iconst(I32, table_index.index() as i64);
        let builtin_idx = BuiltinFunctionIndex::table_get_lazy_init_funcref();
        let builtin_sig = self
            .builtin_function_signatures
            .table_get_lazy_init_funcref(builder.func);
        let (vmctx, builtin_addr) =
            self.translate_load_builtin_function_address(&mut builder.cursor(), builtin_idx);
        let call_inst =
            builder
                .ins()
                .call_indirect(builtin_sig, builtin_addr, &[vmctx, table_index, index]);
        let returned_entry = builder.func.dfg.inst_results(call_inst)[0];
        builder.ins().jump(continuation_block, &[returned_entry]);
        builder.seal_block(continuation_block);

        builder.switch_to_block(continuation_block);
        result_param
    }
}

impl<'module_environment> TargetEnvironment for FuncEnvironment<'module_environment> {
    fn target_config(&self) -> TargetFrontendConfig {
        self.isa.frontend_config()
    }

    fn reference_type(&self, ty: WasmType) -> ir::Type {
        crate::reference_type(ty, self.pointer_type())
    }

    fn heap_access_spectre_mitigation(&self) -> bool {
        self.isa.flags().enable_heap_access_spectre_mitigation()
    }
}

impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'module_environment> {
    fn heaps(&self) -> &PrimaryMap<Heap, HeapData> {
        &self.heaps
    }

    fn is_wasm_parameter(&self, _signature: &ir::Signature, index: usize) -> bool {
        // The first two parameters are the vmctx and caller vmctx. The rest are
        // the wasm parameters.
        index >= 2
    }

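    // Note: these `Variable`s are numbered directly after the wasm locals so
    // that their indices don't collide with the locals that `cranelift_wasm`
    // declares.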
    fn after_locals(&mut self, num_locals: usize) {
        self.vmruntime_limits_ptr = Variable::new(num_locals);
        self.fuel_var = Variable::new(num_locals + 1);
        self.epoch_deadline_var = Variable::new(num_locals + 2);
        self.epoch_ptr_var = Variable::new(num_locals + 3);
    }

    fn make_table(&mut self, func: &mut ir::Function, index: TableIndex) -> WasmResult<ir::Table> {
        let pointer_type = self.pointer_type();

        let (ptr, base_offset, current_elements_offset) = {
            let vmctx = self.vmctx(func);
            if let Some(def_index) = self.module.defined_table_index(index) {
                let base_offset =
                    i32::try_from(self.offsets.vmctx_vmtable_definition_base(def_index)).unwrap();
                let current_elements_offset = i32::try_from(
                    self.offsets
                        .vmctx_vmtable_definition_current_elements(def_index),
                )
                .unwrap();
                (vmctx, base_offset, current_elements_offset)
            } else {
                let from_offset = self.offsets.vmctx_vmtable_import_from(index);
                let table = func.create_global_value(ir::GlobalValueData::Load {
                    base: vmctx,
                    offset: Offset32::new(i32::try_from(from_offset).unwrap()),
                    global_type: pointer_type,
                    readonly: true,
                });
                let base_offset = i32::from(self.offsets.vmtable_definition_base());
                let current_elements_offset =
                    i32::from(self.offsets.vmtable_definition_current_elements());
                (table, base_offset, current_elements_offset)
            }
        };

        let base_gv = func.create_global_value(ir::GlobalValueData::Load {
            base: ptr,
            offset: Offset32::new(base_offset),
            global_type: pointer_type,
            readonly: false,
        });
        let bound_gv = func.create_global_value(ir::GlobalValueData::Load {
            base: ptr,
            offset: Offset32::new(current_elements_offset),
            global_type: ir::Type::int(
                u16::from(self.offsets.size_of_vmtable_definition_current_elements()) * 8,
            )
            .unwrap(),
            readonly: false,
        });

        let element_size = u64::from(
            self.reference_type(self.module.table_plans[index].table.wasm_ty)
                .bytes(),
        );

        Ok(func.create_table(ir::TableData {
            base_gv,
            min_size: Uimm64::new(0),
            bound_gv,
            element_size: Uimm64::new(element_size),
            index_type: I32,
        }))
    }

    fn translate_table_grow(
        &mut self,
        mut pos: cranelift_codegen::cursor::FuncCursor<'_>,
        table_index: TableIndex,
        _table: ir::Table,
        delta: ir::Value,
        init_value: ir::Value,
    ) -> WasmResult<ir::Value> {
        let (func_idx, func_sig) =
            match self.module.table_plans[table_index].table.wasm_ty {
                WasmType::FuncRef => (
                    BuiltinFunctionIndex::table_grow_funcref(),
                    self.builtin_function_signatures
                        .table_grow_funcref(&mut pos.func),
                ),
                WasmType::ExternRef => (
                    BuiltinFunctionIndex::table_grow_externref(),
                    self.builtin_function_signatures
                        .table_grow_externref(&mut pos.func),
                ),
                _ => return Err(WasmError::Unsupported(
                    "`table.grow` with a table element type that is not `funcref` or `externref`"
                        .into(),
                )),
            };

        let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx);

        let table_index_arg = pos.ins().iconst(I32, table_index.as_u32() as i64);
        let call_inst = pos.ins().call_indirect(
            func_sig,
            func_addr,
            &[vmctx, table_index_arg, delta, init_value],
        );

        Ok(pos.func.dfg.first_result(call_inst))
    }

    fn translate_table_get(
        &mut self,
        builder: &mut FunctionBuilder,
        table_index: TableIndex,
        table: ir::Table,
        index: ir::Value,
    ) -> WasmResult<ir::Value> {
        let pointer_type = self.pointer_type();

        let plan = &self.module.table_plans[table_index];
        match plan.table.wasm_ty {
            WasmType::FuncRef => match plan.style {
                TableStyle::CallerChecksSignature => {
                    Ok(self.get_or_init_funcref_table_elem(builder, table_index, table, index))
                }
            },
            WasmType::ExternRef => {
                // Our read barrier for `externref` tables is roughly equivalent
                // to the following pseudocode:
                //
                // ```
                // let elem = table[index]
                // if elem is not null:
                //     let (next, end) = VMExternRefActivationsTable bump region
                //     if next != end:
                //         elem.ref_count += 1
                //         *next = elem
                //         next += 1
                //     else:
                //         call activations_table_insert_with_gc(elem)
                // return elem
                // ```
                //
                // This ensures that all `externref`s coming out of tables and
                // onto the stack are safely held alive by the
                // `VMExternRefActivationsTable`.

                let reference_type = self.reference_type(WasmType::ExternRef);

                builder.ensure_inserted_block();
                let continue_block = builder.create_block();
                let non_null_elem_block = builder.create_block();
                let gc_block = builder.create_block();
                let no_gc_block = builder.create_block();
                let current_block = builder.current_block().unwrap();
                builder.insert_block_after(non_null_elem_block, current_block);
                builder.insert_block_after(no_gc_block, non_null_elem_block);
                builder.insert_block_after(gc_block, no_gc_block);
                builder.insert_block_after(continue_block, gc_block);

                // Load the table element.
                let elem_addr = builder.ins().table_addr(pointer_type, table, index, 0);
                let flags = ir::MemFlags::trusted().with_table();
                let elem = builder.ins().load(reference_type, flags, elem_addr, 0);

                let elem_is_null = builder.ins().is_null(elem);
                builder
                    .ins()
                    .brif(elem_is_null, continue_block, &[], non_null_elem_block, &[]);

                // Load the `VMExternRefActivationsTable::next` bump finger and
                // the `VMExternRefActivationsTable::end` bump boundary.
                builder.switch_to_block(non_null_elem_block);
                let vmctx = self.vmctx(&mut builder.func);
                let vmctx = builder.ins().global_value(pointer_type, vmctx);
                let activations_table = builder.ins().load(
                    pointer_type,
                    ir::MemFlags::trusted(),
                    vmctx,
                    i32::try_from(self.offsets.vmctx_externref_activations_table()).unwrap(),
                );
                let next = builder.ins().load(
                    pointer_type,
                    ir::MemFlags::trusted(),
                    activations_table,
                    i32::try_from(self.offsets.vm_extern_ref_activation_table_next()).unwrap(),
                );
                let end = builder.ins().load(
                    pointer_type,
                    ir::MemFlags::trusted(),
                    activations_table,
                    i32::try_from(self.offsets.vm_extern_ref_activation_table_end()).unwrap(),
                );

                // If `next == end`, then we are at full capacity. Call a
                // builtin to do a GC and insert this reference into the
                // just-swept table for us.
                let at_capacity = builder.ins().icmp(ir::condcodes::IntCC::Equal, next, end);
                builder
                    .ins()
                    .brif(at_capacity, gc_block, &[], no_gc_block, &[]);
                builder.switch_to_block(gc_block);
                let builtin_idx = BuiltinFunctionIndex::activations_table_insert_with_gc();
                let builtin_sig = self
                    .builtin_function_signatures
                    .activations_table_insert_with_gc(builder.func);
                let (vmctx, builtin_addr) = self
                    .translate_load_builtin_function_address(&mut builder.cursor(), builtin_idx);
                builder
                    .ins()
                    .call_indirect(builtin_sig, builtin_addr, &[vmctx, elem]);
                builder.ins().jump(continue_block, &[]);

                // If `next != end`, then:
                //
                // * increment this reference's ref count,
                // * store the reference into the bump table at `*next`,
                // * and finally increment the `next` bump finger.
                builder.switch_to_block(no_gc_block);
                self.mutate_externref_ref_count(builder, elem, 1);
                builder.ins().store(ir::MemFlags::trusted(), elem, next, 0);

                let new_next = builder
                    .ins()
                    .iadd_imm(next, i64::from(reference_type.bytes()));
                builder.ins().store(
                    ir::MemFlags::trusted(),
                    new_next,
                    activations_table,
                    i32::try_from(self.offsets.vm_extern_ref_activation_table_next()).unwrap(),
                );

                builder.ins().jump(continue_block, &[]);
                builder.switch_to_block(continue_block);

                builder.seal_block(non_null_elem_block);
                builder.seal_block(gc_block);
                builder.seal_block(no_gc_block);
                builder.seal_block(continue_block);

                Ok(elem)
            }
            ty => Err(WasmError::Unsupported(format!(
                "unsupported table type for `table.get` instruction: {:?}",
                ty
            ))),
        }
    }

    fn translate_table_set(
        &mut self,
        builder: &mut FunctionBuilder,
        table_index: TableIndex,
        table: ir::Table,
        value: ir::Value,
        index: ir::Value,
    ) -> WasmResult<()> {
        let pointer_type = self.pointer_type();

        let plan = &self.module.table_plans[table_index];
        match plan.table.wasm_ty {
            WasmType::FuncRef => match plan.style {
                TableStyle::CallerChecksSignature => {
                    let table_entry_addr = builder.ins().table_addr(pointer_type, table, index, 0);
                    // Set the "initialized bit". See doc-comment on
                    // `FUNCREF_INIT_BIT` in
                    // crates/environ/src/ref_bits.rs for details.
                    let value_with_init_bit = builder
                        .ins()
                        .bor_imm(value, Imm64::from(FUNCREF_INIT_BIT as i64));
                    let flags = ir::MemFlags::trusted().with_table();
                    builder
                        .ins()
                        .store(flags, value_with_init_bit, table_entry_addr, 0);
                    Ok(())
                }
            },
            WasmType::ExternRef => {
                // Our write barrier for `externref`s being copied out of the
                // stack and into a table is roughly equivalent to the following
                // pseudocode:
                //
                // ```
                // if value != null:
                //     value.ref_count += 1
                // let current_elem = table[index]
                // table[index] = value
                // if current_elem != null:
                //     current_elem.ref_count -= 1
                //     if current_elem.ref_count == 0:
                //         call drop_externref(current_elem)
                // ```
                //
                // This write barrier is responsible for ensuring that:
                //
                // 1. The value's ref count is incremented now that the
                //    table is holding onto it. This is required for memory safety.
                //
                // 2. The old table element, if any, has its ref count
                //    decremented, and the wrapped data is dropped if the
                //    ref count reaches zero. This is not required for memory
                //    safety, but is required to avoid leaks. Furthermore, the
                //    destructor might GC or touch this table, so we must only
                //    drop the old table element *after* we've replaced it with
                //    the new `value`!

                builder.ensure_inserted_block();
                let current_block = builder.current_block().unwrap();
                let inc_ref_count_block = builder.create_block();
                builder.insert_block_after(inc_ref_count_block, current_block);
                let check_current_elem_block = builder.create_block();
                builder.insert_block_after(check_current_elem_block, inc_ref_count_block);
                let dec_ref_count_block = builder.create_block();
                builder.insert_block_after(dec_ref_count_block, check_current_elem_block);
                let drop_block = builder.create_block();
                builder.insert_block_after(drop_block, dec_ref_count_block);
                let continue_block = builder.create_block();
                builder.insert_block_after(continue_block, drop_block);

                // Calculate the table address of the current element and do
                // bounds checks. This is the first thing we do, because we
                // don't want to modify any ref counts if this `table.set` is
                // going to trap.
                let table_entry_addr = builder.ins().table_addr(pointer_type, table, index, 0);

                // If value is not null, increment `value`'s ref count.
                //
                // This has to come *before* decrementing the current table
                // element's ref count, because it might reach ref count == zero,
                // causing us to deallocate the current table element. However,
                // if `value` *is* the current table element (and therefore this
                // whole `table.set` is a no-op), then we would incorrectly
                // deallocate `value` and leave it in the table, leading to use
                // after free.
                let value_is_null = builder.ins().is_null(value);
                builder.ins().brif(
                    value_is_null,
                    check_current_elem_block,
                    &[],
                    inc_ref_count_block,
                    &[],
                );
                builder.switch_to_block(inc_ref_count_block);
                self.mutate_externref_ref_count(builder, value, 1);
                builder.ins().jump(check_current_elem_block, &[]);

                // Grab the current element from the table, and store the new
                // `value` into the table.
                //
                // Note that we load the current element as a pointer, not a
                // reference. This is so that if we call out-of-line to run its
                // destructor, and its destructor triggers GC, this reference is
                // not recorded in the stack map (which would lead to the GC
                // saving a reference to a deallocated object, and then using it
                // after it's been freed).
1192                builder.switch_to_block(check_current_elem_block);
1193                let flags = ir::MemFlags::trusted().with_table();
1194                let current_elem = builder.ins().load(pointer_type, flags, table_entry_addr, 0);
1195                builder.ins().store(flags, value, table_entry_addr, 0);
1196
1197                // If the current element is non-null, decrement its reference
1198                // count. And if its reference count has reached zero, then make
1199                // an out-of-line call to deallocate it.
1200                let current_elem_is_null =
1201                    builder
1202                        .ins()
1203                        .icmp_imm(ir::condcodes::IntCC::Equal, current_elem, 0);
1204                builder.ins().brif(
1205                    current_elem_is_null,
1206                    continue_block,
1207                    &[],
1208                    dec_ref_count_block,
1209                    &[],
1210                );
1211
1212                builder.switch_to_block(dec_ref_count_block);
1213                let prev_ref_count = self.mutate_externref_ref_count(builder, current_elem, -1);
1214                let one = builder.ins().iconst(pointer_type, 1);
1215                let cond = builder.ins().icmp(IntCC::Equal, one, prev_ref_count);
1216                builder
1217                    .ins()
1218                    .brif(cond, drop_block, &[], continue_block, &[]);
1219
1220                // Call the `drop_externref` builtin to (you guessed it) drop
1221                // the `externref`.
1222                builder.switch_to_block(drop_block);
1223                let builtin_idx = BuiltinFunctionIndex::drop_externref();
1224                let builtin_sig = self
1225                    .builtin_function_signatures
1226                    .drop_externref(builder.func);
1227                let (vmctx, builtin_addr) = self
1228                    .translate_load_builtin_function_address(&mut builder.cursor(), builtin_idx);
1229                builder
1230                    .ins()
1231                    .call_indirect(builtin_sig, builtin_addr, &[vmctx, current_elem]);
1232                builder.ins().jump(continue_block, &[]);
1233
1234                builder.switch_to_block(continue_block);
1235
1236                builder.seal_block(inc_ref_count_block);
1237                builder.seal_block(check_current_elem_block);
1238                builder.seal_block(dec_ref_count_block);
1239                builder.seal_block(drop_block);
1240                builder.seal_block(continue_block);
1241
1242                Ok(())
1243            }
1244            ty => Err(WasmError::Unsupported(format!(
1245                "unsupported table type for `table.set` instruction: {:?}",
1246                ty
1247            ))),
1248        }
1249    }
1250
1251    fn translate_table_fill(
1252        &mut self,
1253        mut pos: cranelift_codegen::cursor::FuncCursor<'_>,
1254        table_index: TableIndex,
1255        dst: ir::Value,
1256        val: ir::Value,
1257        len: ir::Value,
1258    ) -> WasmResult<()> {
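            // `table.fill` is always lowered to an out-of-line builtin call: pick
            // `table_fill_funcref` or `table_fill_externref` based on the table's
            // element type.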
1259        let (builtin_idx, builtin_sig) =
1260            match self.module.table_plans[table_index].table.wasm_ty {
1261                WasmType::FuncRef => (
1262                    BuiltinFunctionIndex::table_fill_funcref(),
1263                    self.builtin_function_signatures
1264                        .table_fill_funcref(&mut pos.func),
1265                ),
1266                WasmType::ExternRef => (
1267                    BuiltinFunctionIndex::table_fill_externref(),
1268                    self.builtin_function_signatures
1269                        .table_fill_externref(&mut pos.func),
1270                ),
1271                _ => return Err(WasmError::Unsupported(
1272                    "`table.fill` with a table element type that is not `funcref` or `externref`"
1273                        .into(),
1274                )),
1275            };
1276
1277        let (vmctx, builtin_addr) =
1278            self.translate_load_builtin_function_address(&mut pos, builtin_idx);
1279
1280        let table_index_arg = pos.ins().iconst(I32, table_index.as_u32() as i64);
1281        pos.ins().call_indirect(
1282            builtin_sig,
1283            builtin_addr,
1284            &[vmctx, table_index_arg, dst, val, len],
1285        );
1286
1287        Ok(())
1288    }
1289
1290    fn translate_ref_null(
1291        &mut self,
1292        mut pos: cranelift_codegen::cursor::FuncCursor,
1293        ty: WasmType,
1294    ) -> WasmResult<ir::Value> {
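            // A null `funcref` is a zero pointer-sized integer, while a null
            // `externref` uses Cranelift's reference-typed `null` value.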
1295        Ok(match ty {
1296            WasmType::FuncRef => pos.ins().iconst(self.pointer_type(), 0),
1297            WasmType::ExternRef => pos.ins().null(self.reference_type(ty)),
1298            _ => {
1299                return Err(WasmError::Unsupported(
1300                    "`ref.null T` that is not a `funcref` or an `externref`".into(),
1301                ));
1302            }
1303        })
1304    }
1305
1306    fn translate_ref_is_null(
1307        &mut self,
1308        mut pos: cranelift_codegen::cursor::FuncCursor,
1309        value: ir::Value,
1310    ) -> WasmResult<ir::Value> {
1311        let bool_is_null = match pos.func.dfg.value_type(value) {
1312            // `externref`
1313            ty if ty.is_ref() => pos.ins().is_null(value),
1314            // `funcref`
1315            ty if ty == self.pointer_type() => {
1316                pos.ins()
1317                    .icmp_imm(cranelift_codegen::ir::condcodes::IntCC::Equal, value, 0)
1318            }
1319            _ => unreachable!(),
1320        };
1321
1322        Ok(pos.ins().uextend(ir::types::I32, bool_is_null))
1323    }
1324
1325    fn translate_ref_func(
1326        &mut self,
1327        mut pos: cranelift_codegen::cursor::FuncCursor<'_>,
1328        func_index: FuncIndex,
1329    ) -> WasmResult<ir::Value> {
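            // The value for `ref.func` is produced at runtime by the `ref_func`
            // builtin; call it with the function index and use its single result.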
1330        let func_index = pos.ins().iconst(I32, func_index.as_u32() as i64);
1331        let builtin_index = BuiltinFunctionIndex::ref_func();
1332        let builtin_sig = self.builtin_function_signatures.ref_func(&mut pos.func);
1333        let (vmctx, builtin_addr) =
1334            self.translate_load_builtin_function_address(&mut pos, builtin_index);
1335
1336        let call_inst = pos
1337            .ins()
1338            .call_indirect(builtin_sig, builtin_addr, &[vmctx, func_index]);
1339        Ok(pos.func.dfg.first_result(call_inst))
1340    }
1341
1342    fn translate_custom_global_get(
1343        &mut self,
1344        mut pos: cranelift_codegen::cursor::FuncCursor<'_>,
1345        index: cranelift_wasm::GlobalIndex,
1346    ) -> WasmResult<ir::Value> {
1347        debug_assert_eq!(
1348            self.module.globals[index].wasm_ty,
1349            WasmType::ExternRef,
1350            "We only use GlobalVariable::Custom for externref"
1351        );
1352
1353        let builtin_index = BuiltinFunctionIndex::externref_global_get();
1354        let builtin_sig = self
1355            .builtin_function_signatures
1356            .externref_global_get(&mut pos.func);
1357
1358        let (vmctx, builtin_addr) =
1359            self.translate_load_builtin_function_address(&mut pos, builtin_index);
1360
1361        let global_index_arg = pos.ins().iconst(I32, index.as_u32() as i64);
1362        let call_inst =
1363            pos.ins()
1364                .call_indirect(builtin_sig, builtin_addr, &[vmctx, global_index_arg]);
1365
1366        Ok(pos.func.dfg.first_result(call_inst))
1367    }
1368
1369    fn translate_custom_global_set(
1370        &mut self,
1371        mut pos: cranelift_codegen::cursor::FuncCursor<'_>,
1372        index: cranelift_wasm::GlobalIndex,
1373        value: ir::Value,
1374    ) -> WasmResult<()> {
1375        debug_assert_eq!(
1376            self.module.globals[index].wasm_ty,
1377            WasmType::ExternRef,
1378            "We only use GlobalVariable::Custom for externref"
1379        );
1380
1381        let builtin_index = BuiltinFunctionIndex::externref_global_set();
1382        let builtin_sig = self
1383            .builtin_function_signatures
1384            .externref_global_set(&mut pos.func);
1385
1386        let (vmctx, builtin_addr) =
1387            self.translate_load_builtin_function_address(&mut pos, builtin_index);
1388
1389        let global_index_arg = pos.ins().iconst(I32, index.as_u32() as i64);
1390        pos.ins()
1391            .call_indirect(builtin_sig, builtin_addr, &[vmctx, global_index_arg, value]);
1392
1393        Ok(())
1394    }
1395
1396    fn make_heap(&mut self, func: &mut ir::Function, index: MemoryIndex) -> WasmResult<Heap> {
1397        let pointer_type = self.pointer_type();
1398        let is_shared = self.module.memory_plans[index].memory.shared;
1399
1400        let min_size = self.module.memory_plans[index]
1401            .memory
1402            .minimum
1403            .checked_mul(u64::from(WASM_PAGE_SIZE))
1404            .unwrap_or_else(|| {
1405                // The only valid Wasm memory size that won't fit in a 64-bit
1406                // integer is the maximum memory64 size (2^64) which is one
1407                // larger than `u64::MAX` (2^64 - 1). In this case, just say the
1408                // minimum heap size is `u64::MAX`.
1409                debug_assert_eq!(self.module.memory_plans[index].memory.minimum, 1 << 48);
1410                u64::MAX
1411            });
1412
1413        let (ptr, base_offset, current_length_offset) = {
1414            let vmctx = self.vmctx(func);
1415            if let Some(def_index) = self.module.defined_memory_index(index) {
1416                if is_shared {
1417                    // As with imported memory, the `VMMemoryDefinition` for a
1418                    // shared memory is stored elsewhere. We store a `*mut
1419                    // VMMemoryDefinition` pointing to it and dereference that when
1420                    // atomically growing it.
1421                    let from_offset = self.offsets.vmctx_vmmemory_pointer(def_index);
1422                    let memory = func.create_global_value(ir::GlobalValueData::Load {
1423                        base: vmctx,
1424                        offset: Offset32::new(i32::try_from(from_offset).unwrap()),
1425                        global_type: pointer_type,
1426                        readonly: true,
1427                    });
1428                    let base_offset = i32::from(self.offsets.ptr.vmmemory_definition_base());
1429                    let current_length_offset =
1430                        i32::from(self.offsets.ptr.vmmemory_definition_current_length());
1431                    (memory, base_offset, current_length_offset)
1432                } else {
1433                    let owned_index = self.module.owned_memory_index(def_index);
1434                    let owned_base_offset =
1435                        self.offsets.vmctx_vmmemory_definition_base(owned_index);
1436                    let owned_length_offset = self
1437                        .offsets
1438                        .vmctx_vmmemory_definition_current_length(owned_index);
1439                    let current_base_offset = i32::try_from(owned_base_offset).unwrap();
1440                    let current_length_offset = i32::try_from(owned_length_offset).unwrap();
1441                    (vmctx, current_base_offset, current_length_offset)
1442                }
1443            } else {
1444                let from_offset = self.offsets.vmctx_vmmemory_import_from(index);
1445                let memory = func.create_global_value(ir::GlobalValueData::Load {
1446                    base: vmctx,
1447                    offset: Offset32::new(i32::try_from(from_offset).unwrap()),
1448                    global_type: pointer_type,
1449                    readonly: true,
1450                });
1451                let base_offset = i32::from(self.offsets.ptr.vmmemory_definition_base());
1452                let current_length_offset =
1453                    i32::from(self.offsets.ptr.vmmemory_definition_current_length());
1454                (memory, base_offset, current_length_offset)
1455            }
1456        };
1457
1458        // If we have a declared maximum, we can make this a "static" heap, which is
1459        // allocated up front and never moved.
1460        let (offset_guard_size, heap_style, readonly_base) = match self.module.memory_plans[index] {
1461            MemoryPlan {
1462                style: MemoryStyle::Dynamic { .. },
1463                offset_guard_size,
1464                pre_guard_size: _,
1465                memory: _,
1466            } => {
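                    // A dynamic memory's length can change at runtime, so its bound
                    // is loaded from the `VMMemoryDefinition` each time it is needed
                    // rather than being treated as a readonly value.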
1467                let heap_bound = func.create_global_value(ir::GlobalValueData::Load {
1468                    base: ptr,
1469                    offset: Offset32::new(current_length_offset),
1470                    global_type: pointer_type,
1471                    readonly: false,
1472                });
1473                (
1474                    offset_guard_size,
1475                    HeapStyle::Dynamic {
1476                        bound_gv: heap_bound,
1477                    },
1478                    false,
1479                )
1480            }
1481            MemoryPlan {
1482                style: MemoryStyle::Static { bound },
1483                offset_guard_size,
1484                pre_guard_size: _,
1485                memory: _,
1486            } => (
1487                offset_guard_size,
1488                HeapStyle::Static {
1489                    bound: u64::from(bound) * u64::from(WASM_PAGE_SIZE),
1490                },
1491                true,
1492            ),
1493        };
1494
1495        let heap_base = func.create_global_value(ir::GlobalValueData::Load {
1496            base: ptr,
1497            offset: Offset32::new(base_offset),
1498            global_type: pointer_type,
1499            readonly: readonly_base,
1500        });
1501        Ok(self.heaps.push(HeapData {
1502            base: heap_base,
1503            min_size,
1504            offset_guard_size,
1505            style: heap_style,
1506            index_type: self.memory_index_type(index),
1507        }))
1508    }
1509
1510    fn make_global(
1511        &mut self,
1512        func: &mut ir::Function,
1513        index: GlobalIndex,
1514    ) -> WasmResult<GlobalVariable> {
1515        // Although an `externref` global lives at the same memory location as a
1516        // global of any other type at the same index would, getting or setting
1517        // it requires ref counting barriers. Therefore, we need to use
1518        // `GlobalVariable::Custom`, as that is the only kind of
1519        // `GlobalVariable` for which `cranelift-wasm` supports custom access
1520        // translation.
1521        if self.module.globals[index].wasm_ty == WasmType::ExternRef {
1522            return Ok(GlobalVariable::Custom);
1523        }
1524
1525        let (gv, offset) = self.get_global_location(func, index);
1526        Ok(GlobalVariable::Memory {
1527            gv,
1528            offset: offset.into(),
1529            ty: super::value_type(self.isa, self.module.globals[index].wasm_ty),
1530        })
1531    }
1532
1533    fn make_indirect_sig(
1534        &mut self,
1535        func: &mut ir::Function,
1536        index: TypeIndex,
1537    ) -> WasmResult<ir::SigRef> {
1538        let index = self.module.types[index].unwrap_function();
1539        let sig = crate::indirect_signature(self.isa, &self.types[index]);
1540        Ok(func.import_signature(sig))
1541    }
1542
1543    fn make_direct_func(
1544        &mut self,
1545        func: &mut ir::Function,
1546        index: FuncIndex,
1547    ) -> WasmResult<ir::FuncRef> {
1548        let sig = crate::func_signature(self.isa, self.translation, self.types, index);
1549        let signature = func.import_signature(sig);
1550        let name =
1551            ir::ExternalName::User(func.declare_imported_user_function(ir::UserExternalName {
1552                namespace: 0,
1553                index: index.as_u32(),
1554            }));
1555        Ok(func.import_function(ir::ExtFuncData {
1556            name,
1557            signature,
1558
1559            // The value of this flag determines the codegen for calls to this
1560            // function. If this flag is `false` then absolute relocations will
1561            // be generated for references to the function, which requires
1562            // load-time relocation resolution. If this flag is set to `true`
1563            // then relative relocations are emitted which can be resolved at
1564            // object-link-time, just after all functions are compiled.
1565            //
1566            // This flag is set to `true` for functions defined in the object
1567            // we'll be defining in this compilation unit, or everything local
1568            // to the wasm module. This means that between functions in a wasm
1569            // module there's relative calls encoded. All calls external to a
1570            // wasm module (e.g. imports or libcalls) are either encoded through
1571            // the `VMContext` as indirect calls (hence no relocations) or
1572            // they're libcalls with absolute relocations.
1573            colocated: self.module.defined_func_index(index).is_some(),
1574        }))
1575    }
1576
1577    fn translate_call_indirect(
1578        &mut self,
1579        builder: &mut FunctionBuilder,
1580        table_index: TableIndex,
1581        table: ir::Table,
1582        ty_index: TypeIndex,
1583        sig_ref: ir::SigRef,
1584        callee: ir::Value,
1585        call_args: &[ir::Value],
1586    ) -> WasmResult<ir::Inst> {
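            // An indirect call has four steps: load the funcref from the table,
            // trap if it is null, check signatures if the table requires a
            // caller-side check, and finally call through the loaded function
            // pointer with the callee and caller vmctx values prepended.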
1587        let pointer_type = self.pointer_type();
1588
1589        // Get the funcref pointer from the table.
1590        let funcref_ptr = self.get_or_init_funcref_table_elem(builder, table_index, table, callee);
1591
1592        // Check whether the table element is null and trap if so.
1593        builder
1594            .ins()
1595            .trapz(funcref_ptr, ir::TrapCode::IndirectCallToNull);
1596
1597        // Dereference the funcref pointer to get the function address.
1598        let mem_flags = ir::MemFlags::trusted();
1599        let func_addr = builder.ins().load(
1600            pointer_type,
1601            mem_flags,
1602            funcref_ptr,
1603            i32::from(self.offsets.ptr.vmcaller_checked_func_ref_func_ptr()),
1604        );
1605
1606        // If necessary, check the signature.
1607        match self.module.table_plans[table_index].style {
1608            TableStyle::CallerChecksSignature => {
1609                let sig_id_size = self.offsets.size_of_vmshared_signature_index();
1610                let sig_id_type = Type::int(u16::from(sig_id_size) * 8).unwrap();
1611                let vmctx = self.vmctx(builder.func);
1612                let base = builder.ins().global_value(pointer_type, vmctx);
1613
1614                // Load the caller ID. This requires loading the
1615                // `*mut VMSharedSignatureIndex` base pointer from `VMContext`
1616                // and then loading, based on `SignatureIndex`, the
1617                // corresponding entry.
1618                let mem_flags = ir::MemFlags::trusted().with_readonly();
1619                let signatures = builder.ins().load(
1620                    pointer_type,
1621                    mem_flags,
1622                    base,
1623                    i32::try_from(self.offsets.vmctx_signature_ids_array()).unwrap(),
1624                );
1625                let sig_index = self.module.types[ty_index].unwrap_function();
1626                let offset =
1627                    i32::try_from(sig_index.as_u32().checked_mul(sig_id_type.bytes()).unwrap())
1628                        .unwrap();
1629                let caller_sig_id = builder
1630                    .ins()
1631                    .load(sig_id_type, mem_flags, signatures, offset);
1632
1633                // Load the callee ID.
1634                let mem_flags = ir::MemFlags::trusted();
1635                let callee_sig_id = builder.ins().load(
1636                    sig_id_type,
1637                    mem_flags,
1638                    funcref_ptr,
1639                    i32::from(self.offsets.ptr.vmcaller_checked_func_ref_type_index()),
1640                );
1641
1642                // Check that they match.
1643                let cmp = builder
1644                    .ins()
1645                    .icmp(IntCC::Equal, callee_sig_id, caller_sig_id);
1646                builder.ins().trapz(cmp, ir::TrapCode::BadSignature);
1647            }
1648        }
1649
1650        let mut real_call_args = Vec::with_capacity(call_args.len() + 2);
1651        let caller_vmctx = builder
1652            .func
1653            .special_param(ArgumentPurpose::VMContext)
1654            .unwrap();
1655
1656        // First append the callee vmctx address.
1657        let vmctx = builder.ins().load(
1658            pointer_type,
1659            mem_flags,
1660            funcref_ptr,
1661            i32::from(self.offsets.ptr.vmcaller_checked_func_ref_vmctx()),
1662        );
1663        real_call_args.push(vmctx);
1664        real_call_args.push(caller_vmctx);
1665
1666        // Then append the regular call arguments.
1667        real_call_args.extend_from_slice(call_args);
1668
1669        Ok(builder
1670            .ins()
1671            .call_indirect(sig_ref, func_addr, &real_call_args))
1672    }
1673
1674    fn translate_call(
1675        &mut self,
1676        mut pos: FuncCursor<'_>,
1677        callee_index: FuncIndex,
1678        callee: ir::FuncRef,
1679        call_args: &[ir::Value],
1680    ) -> WasmResult<ir::Inst> {
1681        let mut real_call_args = Vec::with_capacity(call_args.len() + 2);
1682        let caller_vmctx = pos.func.special_param(ArgumentPurpose::VMContext).unwrap();
1683
1684        // Handle direct calls to locally-defined functions.
1685        if !self.module.is_imported_function(callee_index) {
1686            // First append the callee vmctx address, which is the same as the caller vmctx in
1687            // this case.
1688            real_call_args.push(caller_vmctx);
1689
1690            // Then append the caller vmctx address.
1691            real_call_args.push(caller_vmctx);
1692
1693            // Then append the regular call arguments.
1694            real_call_args.extend_from_slice(call_args);
1695
1696            return Ok(pos.ins().call(callee, &real_call_args));
1697        }
1698
1699        // Handle direct calls to imported functions. We use an indirect call
1700        // so that we don't have to patch the code at runtime.
1701        let pointer_type = self.pointer_type();
1702        let sig_ref = pos.func.dfg.ext_funcs[callee].signature;
1703        let vmctx = self.vmctx(&mut pos.func);
1704        let base = pos.ins().global_value(pointer_type, vmctx);
1705
1706        let mem_flags = ir::MemFlags::trusted();
1707
1708        // Load the callee address.
1709        let body_offset =
1710            i32::try_from(self.offsets.vmctx_vmfunction_import_body(callee_index)).unwrap();
1711        let func_addr = pos.ins().load(pointer_type, mem_flags, base, body_offset);
1712
1713        // First append the callee vmctx address.
1714        let vmctx_offset =
1715            i32::try_from(self.offsets.vmctx_vmfunction_import_vmctx(callee_index)).unwrap();
1716        let vmctx = pos.ins().load(pointer_type, mem_flags, base, vmctx_offset);
1717        real_call_args.push(vmctx);
1718        real_call_args.push(caller_vmctx);
1719
1720        // Then append the regular call arguments.
1721        real_call_args.extend_from_slice(call_args);
1722
1723        Ok(pos.ins().call_indirect(sig_ref, func_addr, &real_call_args))
1724    }
1725
1726    fn translate_memory_grow(
1727        &mut self,
1728        mut pos: FuncCursor<'_>,
1729        index: MemoryIndex,
1730        _heap: Heap,
1731        val: ir::Value,
1732    ) -> WasmResult<ir::Value> {
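            // `memory.grow` always calls out to the `memory32_grow` builtin: the
            // page delta is widened to i64 for memory64 compatibility and the
            // pointer-sized result is cast back to the memory's index type.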
1733        let func_sig = self
1734            .builtin_function_signatures
1735            .memory32_grow(&mut pos.func);
1736        let index_arg = index.index();
1737
1738        let memory_index = pos.ins().iconst(I32, index_arg as i64);
1739        let (vmctx, func_addr) = self.translate_load_builtin_function_address(
1740            &mut pos,
1741            BuiltinFunctionIndex::memory32_grow(),
1742        );
1743
1744        let val = self.cast_memory_index_to_i64(&mut pos, val, index);
1745        let call_inst = pos
1746            .ins()
1747            .call_indirect(func_sig, func_addr, &[vmctx, val, memory_index]);
1748        let result = *pos.func.dfg.inst_results(call_inst).first().unwrap();
1749        Ok(self.cast_pointer_to_memory_index(pos, result, index))
1750    }
1751
1752    fn translate_memory_size(
1753        &mut self,
1754        mut pos: FuncCursor<'_>,
1755        index: MemoryIndex,
1756        _heap: Heap,
1757    ) -> WasmResult<ir::Value> {
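            // Find `VMMemoryDefinition::current_length` for this memory: owned
            // memories read it directly out of the `VMContext`, while shared and
            // imported memories go through a pointer to their definition. The byte
            // length is then converted to pages.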
1758        let pointer_type = self.pointer_type();
1759        let vmctx = self.vmctx(&mut pos.func);
1760        let is_shared = self.module.memory_plans[index].memory.shared;
1761        let base = pos.ins().global_value(pointer_type, vmctx);
1762        let current_length_in_bytes = match self.module.defined_memory_index(index) {
1763            Some(def_index) => {
1764                if is_shared {
1765                    let offset =
1766                        i32::try_from(self.offsets.vmctx_vmmemory_pointer(def_index)).unwrap();
1767                    let vmmemory_ptr =
1768                        pos.ins()
1769                            .load(pointer_type, ir::MemFlags::trusted(), base, offset);
1770                    let vmmemory_definition_offset =
1771                        i64::from(self.offsets.ptr.vmmemory_definition_current_length());
1772                    let vmmemory_definition_ptr =
1773                        pos.ins().iadd_imm(vmmemory_ptr, vmmemory_definition_offset);
1774                    // This atomic access of the
1775                    // `VMMemoryDefinition::current_length` is direct; no bounds
1776                    // check is needed. This is possible because shared memory
1777                    // has a static size (the maximum is always known). Shared
1778                    // memory is thus built with a static memory plan and no
1779                    // bounds-checked version of this is implemented.
1780                    pos.ins().atomic_load(
1781                        pointer_type,
1782                        ir::MemFlags::trusted(),
1783                        vmmemory_definition_ptr,
1784                    )
1785                } else {
1786                    let owned_index = self.module.owned_memory_index(def_index);
1787                    let offset = i32::try_from(
1788                        self.offsets
1789                            .vmctx_vmmemory_definition_current_length(owned_index),
1790                    )
1791                    .unwrap();
1792                    pos.ins()
1793                        .load(pointer_type, ir::MemFlags::trusted(), base, offset)
1794                }
1795            }
1796            None => {
1797                let offset = i32::try_from(self.offsets.vmctx_vmmemory_import_from(index)).unwrap();
1798                let vmmemory_ptr =
1799                    pos.ins()
1800                        .load(pointer_type, ir::MemFlags::trusted(), base, offset);
1801                if is_shared {
1802                    let vmmemory_definition_offset =
1803                        i64::from(self.offsets.ptr.vmmemory_definition_current_length());
1804                    let vmmemory_definition_ptr =
1805                        pos.ins().iadd_imm(vmmemory_ptr, vmmemory_definition_offset);
1806                    pos.ins().atomic_load(
1807                        pointer_type,
1808                        ir::MemFlags::trusted(),
1809                        vmmemory_definition_ptr,
1810                    )
1811                } else {
1812                    pos.ins().load(
1813                        pointer_type,
1814                        ir::MemFlags::trusted(),
1815                        vmmemory_ptr,
1816                        i32::from(self.offsets.ptr.vmmemory_definition_current_length()),
1817                    )
1818                }
1819            }
1820        };
1821        let current_length_in_pages = pos
1822            .ins()
1823            .udiv_imm(current_length_in_bytes, i64::from(WASM_PAGE_SIZE));
1824
1825        Ok(self.cast_pointer_to_memory_index(pos, current_length_in_pages, index))
1826    }
1827
1828    fn translate_memory_copy(
1829        &mut self,
1830        mut pos: FuncCursor,
1831        src_index: MemoryIndex,
1832        _src_heap: Heap,
1833        dst_index: MemoryIndex,
1834        _dst_heap: Heap,
1835        dst: ir::Value,
1836        src: ir::Value,
1837        len: ir::Value,
1838    ) -> WasmResult<()> {
1839        let (vmctx, func_addr) = self
1840            .translate_load_builtin_function_address(&mut pos, BuiltinFunctionIndex::memory_copy());
1841
1842        let func_sig = self.builtin_function_signatures.memory_copy(&mut pos.func);
1843        let dst = self.cast_memory_index_to_i64(&mut pos, dst, dst_index);
1844        let src = self.cast_memory_index_to_i64(&mut pos, src, src_index);
1845        // The length is 32-bit if either memory is 32-bit, and it's only 64-bit
1846        // when both memories are 64-bit. Our intrinsic takes a 64-bit length for
1847        // compatibility across all memories, so make sure that it's cast
1848        // correctly here (this case is a bit special, so there's no generic
1849        // helper for it as there is for `dst`/`src` above).
1850        let len = if self.memory_index_type(dst_index) == I64
1851            && self.memory_index_type(src_index) == I64
1852        {
1853            len
1854        } else {
1855            pos.ins().uextend(I64, len)
1856        };
1857        let src_index = pos.ins().iconst(I32, i64::from(src_index.as_u32()));
1858        let dst_index = pos.ins().iconst(I32, i64::from(dst_index.as_u32()));
1859        pos.ins().call_indirect(
1860            func_sig,
1861            func_addr,
1862            &[vmctx, dst_index, dst, src_index, src, len],
1863        );
1864
1865        Ok(())
1866    }
1867
1868    fn translate_memory_fill(
1869        &mut self,
1870        mut pos: FuncCursor,
1871        memory_index: MemoryIndex,
1872        _heap: Heap,
1873        dst: ir::Value,
1874        val: ir::Value,
1875        len: ir::Value,
1876    ) -> WasmResult<()> {
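            // `memory.fill` is lowered to a call to the `memory_fill` builtin; the
            // destination and length are widened to i64 first so the same builtin
            // serves both 32- and 64-bit memories.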
1877        let func_sig = self.builtin_function_signatures.memory_fill(&mut pos.func);
1878        let dst = self.cast_memory_index_to_i64(&mut pos, dst, memory_index);
1879        let len = self.cast_memory_index_to_i64(&mut pos, len, memory_index);
1880        let memory_index_arg = pos.ins().iconst(I32, i64::from(memory_index.as_u32()));
1881
1882        let (vmctx, func_addr) = self
1883            .translate_load_builtin_function_address(&mut pos, BuiltinFunctionIndex::memory_fill());
1884
1885        pos.ins().call_indirect(
1886            func_sig,
1887            func_addr,
1888            &[vmctx, memory_index_arg, dst, val, len],
1889        );
1890
1891        Ok(())
1892    }
1893
1894    fn translate_memory_init(
1895        &mut self,
1896        mut pos: FuncCursor,
1897        memory_index: MemoryIndex,
1898        _heap: Heap,
1899        seg_index: u32,
1900        dst: ir::Value,
1901        src: ir::Value,
1902        len: ir::Value,
1903    ) -> WasmResult<()> {
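            // `memory.init` copies out of a passive data segment at runtime, so it
            // is lowered to a builtin call with the memory and segment indices
            // passed as constants.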
1904        let (func_sig, func_idx) = self.get_memory_init_func(&mut pos.func);
1905
1906        let memory_index_arg = pos.ins().iconst(I32, memory_index.index() as i64);
1907        let seg_index_arg = pos.ins().iconst(I32, seg_index as i64);
1908
1909        let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx);
1910
1911        let dst = self.cast_memory_index_to_i64(&mut pos, dst, memory_index);
1912
1913        pos.ins().call_indirect(
1914            func_sig,
1915            func_addr,
1916            &[vmctx, memory_index_arg, seg_index_arg, dst, src, len],
1917        );
1918
1919        Ok(())
1920    }
1921
1922    fn translate_data_drop(&mut self, mut pos: FuncCursor, seg_index: u32) -> WasmResult<()> {
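            // Dropping a passive data segment only updates runtime state, so defer
            // to the builtin returned by `get_data_drop_func`.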
1923        let (func_sig, func_idx) = self.get_data_drop_func(&mut pos.func);
1924        let seg_index_arg = pos.ins().iconst(I32, seg_index as i64);
1925        let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx);
1926        pos.ins()
1927            .call_indirect(func_sig, func_addr, &[vmctx, seg_index_arg]);
1928        Ok(())
1929    }
1930
1931    fn translate_table_size(
1932        &mut self,
1933        mut pos: FuncCursor,
1934        _table_index: TableIndex,
1935        table: ir::Table,
1936    ) -> WasmResult<ir::Value> {
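            // The table's current element count is tracked by the bound global
            // value attached to the `ir::Table`, so it can be read without a
            // runtime call.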
1937        let size_gv = pos.func.tables[table].bound_gv;
1938        Ok(pos.ins().global_value(ir::types::I32, size_gv))
1939    }
1940
1941    fn translate_table_copy(
1942        &mut self,
1943        mut pos: FuncCursor,
1944        dst_table_index: TableIndex,
1945        _dst_table: ir::Table,
1946        src_table_index: TableIndex,
1947        _src_table: ir::Table,
1948        dst: ir::Value,
1949        src: ir::Value,
1950        len: ir::Value,
1951    ) -> WasmResult<()> {
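            // `table.copy` is lowered to a builtin call; `get_table_copy_func`
            // selects the builtin and the table index arguments based on the two
            // tables involved.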
1952        let (func_sig, dst_table_index_arg, src_table_index_arg, func_idx) =
1953            self.get_table_copy_func(&mut pos.func, dst_table_index, src_table_index);
1954
1955        let dst_table_index_arg = pos.ins().iconst(I32, dst_table_index_arg as i64);
1956        let src_table_index_arg = pos.ins().iconst(I32, src_table_index_arg as i64);
1957
1958        let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx);
1959
1960        pos.ins().call_indirect(
1961            func_sig,
1962            func_addr,
1963            &[
1964                vmctx,
1965                dst_table_index_arg,
1966                src_table_index_arg,
1967                dst,
1968                src,
1969                len,
1970            ],
1971        );
1972
1973        Ok(())
1974    }
1975
1976    fn translate_table_init(
1977        &mut self,
1978        mut pos: FuncCursor,
1979        seg_index: u32,
1980        table_index: TableIndex,
1981        _table: ir::Table,
1982        dst: ir::Value,
1983        src: ir::Value,
1984        len: ir::Value,
1985    ) -> WasmResult<()> {
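            // `table.init` reads from a passive element segment at runtime, so it
            // is lowered to a builtin call with the table and segment indices
            // passed as constants.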
1986        let (func_sig, table_index_arg, func_idx) =
1987            self.get_table_init_func(&mut pos.func, table_index);
1988
1989        let table_index_arg = pos.ins().iconst(I32, table_index_arg as i64);
1990        let seg_index_arg = pos.ins().iconst(I32, seg_index as i64);
1991
1992        let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx);
1993
1994        pos.ins().call_indirect(
1995            func_sig,
1996            func_addr,
1997            &[vmctx, table_index_arg, seg_index_arg, dst, src, len],
1998        );
1999
2000        Ok(())
2001    }
2002
2003    fn translate_elem_drop(&mut self, mut pos: FuncCursor, elem_index: u32) -> WasmResult<()> {
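            // As with `data.drop`, dropping an element segment is deferred to a
            // builtin call.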
2004        let (func_sig, func_idx) = self.get_elem_drop_func(&mut pos.func);
2005
2006        let elem_index_arg = pos.ins().iconst(I32, elem_index as i64);
2007
2008        let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx);
2009
2010        pos.ins()
2011            .call_indirect(func_sig, func_addr, &[vmctx, elem_index_arg]);
2012
2013        Ok(())
2014    }
2015
2016    fn translate_atomic_wait(
2017        &mut self,
2018        mut pos: FuncCursor,
2019        memory_index: MemoryIndex,
2020        _heap: Heap,
2021        addr: ir::Value,
2022        expected: ir::Value,
2023        timeout: ir::Value,
2024    ) -> WasmResult<ir::Value> {
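            // `memory.atomic.wait32`/`wait64` are implemented in the runtime; the
            // builtin is chosen from the type of `expected` (i32 vs i64) and its
            // single result is returned.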
2025        let addr = self.cast_memory_index_to_i64(&mut pos, addr, memory_index);
2026        let implied_ty = pos.func.dfg.value_type(expected);
2027        let (func_sig, memory_index, func_idx) =
2028            self.get_memory_atomic_wait(&mut pos.func, memory_index, implied_ty);
2029
2030        let memory_index_arg = pos.ins().iconst(I32, memory_index as i64);
2031
2032        let (vmctx, func_addr) = self.translate_load_builtin_function_address(&mut pos, func_idx);
2033
2034        let call_inst = pos.ins().call_indirect(
2035            func_sig,
2036            func_addr,
2037            &[vmctx, memory_index_arg, addr, expected, timeout],
2038        );
2039
2040        Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap())
2041    }
2042
2043    fn translate_atomic_notify(
2044        &mut self,
2045        mut pos: FuncCursor,
2046        memory_index: MemoryIndex,
2047        _heap: Heap,
2048        addr: ir::Value,
2049        count: ir::Value,
2050    ) -> WasmResult<ir::Value> {
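            // `memory.atomic.notify` also calls into the runtime via the
            // `memory_atomic_notify` builtin and returns its single result (the
            // number of waiters woken).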
2051        let addr = self.cast_memory_index_to_i64(&mut pos, addr, memory_index);
2052        let func_sig = self
2053            .builtin_function_signatures
2054            .memory_atomic_notify(&mut pos.func);
2055
2056        let memory_index_arg = pos.ins().iconst(I32, memory_index.index() as i64);
2057
2058        let (vmctx, func_addr) = self.translate_load_builtin_function_address(
2059            &mut pos,
2060            BuiltinFunctionIndex::memory_atomic_notify(),
2061        );
2062
2063        let call_inst =
2064            pos.ins()
2065                .call_indirect(func_sig, func_addr, &[vmctx, memory_index_arg, addr, count]);
2066
2067        Ok(*pos.func.dfg.inst_results(call_inst).first().unwrap())
2068    }
2069
2070    fn translate_loop_header(&mut self, builder: &mut FunctionBuilder) -> WasmResult<()> {
2071        // If fuel consumption is enabled, check how much fuel we have remaining
2072        // to see if we've run out by this point.
2073        if self.tunables.consume_fuel {
2074            self.fuel_check(builder);
2075        }
2076
2077        // If we are performing epoch-based interruption, check to see
2078        // if the epoch counter has changed.
2079        if self.tunables.epoch_interruption {
2080            self.epoch_check(builder);
2081        }
2082
2083        Ok(())
2084    }
2085
2086    fn before_translate_operator(
2087        &mut self,
2088        op: &Operator,
2089        builder: &mut FunctionBuilder,
2090        state: &FuncTranslationState,
2091    ) -> WasmResult<()> {
2092        if self.tunables.consume_fuel {
2093            self.fuel_before_op(op, builder, state.reachable());
2094        }
2095        Ok(())
2096    }
2097
2098    fn after_translate_operator(
2099        &mut self,
2100        op: &Operator,
2101        builder: &mut FunctionBuilder,
2102        state: &FuncTranslationState,
2103    ) -> WasmResult<()> {
2104        if self.tunables.consume_fuel && state.reachable() {
2105            self.fuel_after_op(op, builder);
2106        }
2107        Ok(())
2108    }
2109
2110    fn before_unconditionally_trapping_memory_access(
2111        &mut self,
2112        builder: &mut FunctionBuilder,
2113    ) -> WasmResult<()> {
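            // The memory access that follows will unconditionally trap, so account
            // for and flush the locally-tracked fuel back to memory first.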
2114        if self.tunables.consume_fuel {
2115            self.fuel_increment_var(builder);
2116            self.fuel_save_from_var(builder);
2117        }
2118        Ok(())
2119    }
2120
2121    fn before_translate_function(
2122        &mut self,
2123        builder: &mut FunctionBuilder,
2124        _state: &FuncTranslationState,
2125    ) -> WasmResult<()> {
2126        // If the `vmruntime_limits_ptr` variable will get used, then we
2127        // initialize it here.
2128        if self.tunables.consume_fuel || self.tunables.epoch_interruption {
2129            self.declare_vmruntime_limits_ptr(builder);
2130        }
2131        // Additionally we initialize `fuel_var` if it will get used.
2132        if self.tunables.consume_fuel {
2133            self.fuel_function_entry(builder);
2134        }
2135        // Initialize `epoch_var` with the current epoch.
2136        if self.tunables.epoch_interruption {
2137            self.epoch_function_entry(builder);
2138        }
2139        Ok(())
2140    }
2141
2142    fn after_translate_function(
2143        &mut self,
2144        builder: &mut FunctionBuilder,
2145        state: &FuncTranslationState,
2146    ) -> WasmResult<()> {
2147        if self.tunables.consume_fuel && state.reachable() {
2148            self.fuel_function_exit(builder);
2149        }
2150        Ok(())
2151    }
2152
2153    fn unsigned_add_overflow_condition(&self) -> ir::condcodes::IntCC {
2154        self.isa.unsigned_add_overflow_condition()
2155    }
2156
2157    fn relaxed_simd_deterministic(&self) -> bool {
2158        self.tunables.relaxed_simd_deterministic
2159    }
2160
2161    fn has_native_fma(&self) -> bool {
2162        self.isa.has_native_fma()
2163    }
2164
2165    fn is_x86(&self) -> bool {
2166        self.isa.triple().architecture == target_lexicon::Architecture::X86_64
2167    }
2168}