polkavm/compiler/amd64.rs

use polkavm_assembler::amd64::addr::*;
use polkavm_assembler::amd64::inst::*;
use polkavm_assembler::amd64::RegIndex as NativeReg;
use polkavm_assembler::amd64::RegIndex::*;
use polkavm_assembler::amd64::Reg::rsp;
use polkavm_assembler::amd64::{Condition, LoadKind, RegSize, Size, MemOp};
use polkavm_assembler::Label;

use polkavm_common::program::{InstructionVisitor, Reg};
use polkavm_common::abi::VM_CODE_ADDRESS_ALIGNMENT;
use polkavm_common::zygote::VM_ADDR_VMCTX;

use crate::api::VisitorWrapper;
use crate::config::GasMeteringKind;
use crate::compiler::{Compiler, SandboxKind};
use crate::utils::RegImm;

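// Scratch register for intermediate values. Note that `rcx` is deliberately left out
// of the guest register map below, since it's also needed as the shift-count register
// for the variable shift instructions.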
const TMP_REG: NativeReg = rcx;

/// The register used for the embedded sandbox to hold the base address of the guest's linear memory.
const GENERIC_SANDBOX_MEMORY_REG: NativeReg = r15;

/// The register used for the linux sandbox to hold the address of the VM context.
const LINUX_SANDBOX_VMCTX_REG: NativeReg = r15;

const fn conv_reg_const(reg: Reg) -> NativeReg {
    // NOTE: This is sorted roughly by how commonly each register is used.
    // We try to assign the native registers which result in more compact code to the most commonly used RISC-V registers.
    match reg {
        Reg::A0 => rdi,
        Reg::A1 => rsi,
        Reg::SP => rax,
        Reg::RA => rbx,
        Reg::A2 => rdx,
        Reg::A3 => rbp,
        Reg::S0 => r8,
        Reg::S1 => r9,
        Reg::A4 => r10,
        Reg::A5 => r11,
        Reg::T0 => r13,
        Reg::T1 => r14,
        Reg::T2 => r12,
    }
}

static REG_MAP: [NativeReg; Reg::ALL.len()] = {
    let mut output = [NativeReg::rcx; Reg::ALL.len()];
    let mut index = 0;
    while index < Reg::ALL.len() {
        assert!(Reg::ALL[index] as usize == index);
        output[index] = conv_reg_const(Reg::ALL[index]);
        index += 1;
    }
    output
};

#[inline]
fn conv_reg(reg: Reg) -> NativeReg {
    REG_MAP[reg as usize]
}

#[test]
fn test_conv_reg() {
    for reg in Reg::ALL {
        assert_eq!(conv_reg(reg), conv_reg_const(reg));
    }
}

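// Expands `$body` once for each sandbox kind, with `$kind` bound to a constant,
// so that kind-specific code inside the body can be resolved at compile time.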
macro_rules! with_sandbox_kind {
    ($input:expr, |$kind:ident| $body:expr) => {
        match $input {
            SandboxKind::Linux => {
                #[allow(non_upper_case_globals)]
                const $kind: SandboxKind = SandboxKind::Linux;
                $body
            },
            SandboxKind::Generic => {
                #[allow(non_upper_case_globals)]
                const $kind: SandboxKind = SandboxKind::Generic;
                $body
            }
        }
    }
}

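// Builds the native memory operand for a guest load/store. For the Linux sandbox a guest
// address is used directly as a 32-bit native address, while for the generic sandbox every
// access is made relative to the guest memory base held in GENERIC_SANDBOX_MEMORY_REG.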
macro_rules! load_store_operand {
    ($self:ident, $base:ident, $offset:expr, |$op:ident| $body:expr) => {
        with_sandbox_kind!($self.sandbox_kind, |sandbox_kind| {
            match sandbox_kind {
                SandboxKind::Linux => {
                    if let Some($base) = $base {
                        // [address + offset]
                        let $op = reg_indirect(RegSize::R32, conv_reg($base) + $offset as i32);
                        $body
                    } else {
                        // [address] = ..
                        let $op = abs(RegSize::R32, $offset as i32);
                        $body
                    }
                },
                SandboxKind::Generic => {
                    match ($base, $offset) {
                        // [address] = ..
                        // (address is in the lower 2GB of the address space)
                        (None, _) if $offset as i32 >= 0 => {
                            let $op = reg_indirect(RegSize::R64, GENERIC_SANDBOX_MEMORY_REG + $offset as i32);
                            $body
                        },

                        // [address] = ..
                        (None, _) => {
                            $self.push(mov_imm(TMP_REG, imm32($offset)));
                            let $op = base_index(RegSize::R64, GENERIC_SANDBOX_MEMORY_REG, TMP_REG);
                            $body
                        },

                        // [base] = ..
                        (Some($base), 0) => {
                            // NOTE: This assumes that `base` has its upper 32-bits clear.
                            let $op = base_index(RegSize::R64, GENERIC_SANDBOX_MEMORY_REG, conv_reg($base));
                            $body
                        },

                        // [base + offset] = ..
                        (Some($base), _) => {
                            $self.push(lea(RegSize::R32, TMP_REG, reg_indirect(RegSize::R32, conv_reg($base) + $offset as i32)));
                            let $op = base_index(RegSize::R64, GENERIC_SANDBOX_MEMORY_REG, TMP_REG);
                            $body
                        }
                    }
                }
            }
        })
    }
}

enum Signedness {
    Signed,
    Unsigned,
}

enum DivRem {
    Div,
    Rem,
}

enum ShiftKind {
    LogicalLeft,
    LogicalRight,
    ArithmeticRight,
}

impl<'a> Compiler<'a> {
    pub const PADDING_BYTE: u8 = 0x90; // NOP

    #[allow(clippy::unused_self)]
    #[cfg_attr(not(debug_assertions), inline(always))]
    fn reg_size(&self) -> RegSize {
        RegSize::R32
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn load_immediate(&mut self, dst: Reg, value: u32) {
        self.push(mov_imm(conv_reg(dst), imm32(value)));
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn store(&mut self, src: impl Into<RegImm>, base: Option<Reg>, offset: u32, kind: Size) {
        let src = src.into();
        load_store_operand!(self, base, offset, |dst| {
            match src {
                RegImm::Reg(src) => self.push(store(kind, dst, conv_reg(src))),
                RegImm::Imm(value) => {
                    match kind {
                        Size::U8 => self.push(mov_imm(dst, imm8(value as u8))),
                        Size::U16 => self.push(mov_imm(dst, imm16(value as u16))),
                        Size::U32 => self.push(mov_imm(dst, imm32(value))),
                        Size::U64 => unreachable!(),
                    }
                },
            }
        });
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn load(&mut self, dst: Reg, base: Option<Reg>, offset: u32, kind: LoadKind) {
        load_store_operand!(self, base, offset, |src| {
            self.push(load(kind, conv_reg(dst), src));
        });
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn clear_reg(&mut self, reg: Reg) {
        let reg = conv_reg(reg);
        self.push(xor((RegSize::R32, reg, reg)));
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn fill_with_ones(&mut self, reg: Reg) {
        match self.reg_size() {
            RegSize::R32 => {
                self.push(mov_imm(conv_reg(reg), imm32(0xffffffff)));
            },
            RegSize::R64 => {
                self.clear_reg(reg);
                self.push(not(Size::U64, conv_reg(reg)));
            }
        }
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn compare_reg_reg(&mut self, d: Reg, s1: Reg, s2: Reg, condition: Condition) {
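        // `setcc` only writes the lowest byte of the destination, so the rest of the
        // register has to be zeroed up front, or masked afterwards when the destination
        // aliases one of the sources and can't be clobbered before the `cmp`.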
        if d == s1 || d == s2 {
            self.push(cmp((self.reg_size(), conv_reg(s1), conv_reg(s2))));
            self.push(setcc(condition, conv_reg(d)));
            self.push(and((conv_reg(d), imm32(1))));
        } else {
            self.clear_reg(d);
            self.push(cmp((self.reg_size(), conv_reg(s1), conv_reg(s2))));
            self.push(setcc(condition, conv_reg(d)));
        }
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn compare_reg_imm(&mut self, d: Reg, s1: Reg, s2: u32, condition: Condition) {
        if d != s1 {
            self.clear_reg(d);
        }

        if condition == Condition::Below && s2 == 1 {
            // d = s1 <u 1  =>  d = s1 == 0
            self.push(test((self.reg_size(), conv_reg(s1), conv_reg(s1))));
            self.push(setcc(Condition::Equal, conv_reg(d)));
        } else if condition == Condition::Above && s2 == 0 {
            // d = s1 >u 0  =>  d = s1 != 0
            self.push(test((self.reg_size(), conv_reg(s1), conv_reg(s1))));
            self.push(setcc(Condition::NotEqual, conv_reg(d)));
        } else {
            match self.reg_size() {
                RegSize::R32 => {
                    self.push(cmp((conv_reg(s1), imm32(s2))));
                },
                RegSize::R64 => {
                    self.push(cmp((conv_reg(s1), imm64(s2 as i32))));
                }
            }
            self.push(setcc(condition, conv_reg(d)));
        }

        if d == s1 {
            self.push(and((conv_reg(d), imm32(1))));
        }
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn shift_imm(&mut self, d: Reg, s1: Reg, s2: u32, kind: ShiftKind) {
        if s2 >= 32 {
            // d = s1 shifted by 32 or more  =>  d = 0
            self.clear_reg(d);
            return;
        }

        if d != s1 {
            self.mov(d, s1);
        }

        // d = d << s2
        match kind {
            ShiftKind::LogicalLeft => self.push(shl_imm(self.reg_size(), conv_reg(d), s2 as u8)),
            ShiftKind::LogicalRight => self.push(shr_imm(self.reg_size(), conv_reg(d), s2 as u8)),
            ShiftKind::ArithmeticRight => self.push(sar_imm(self.reg_size(), conv_reg(d), s2 as u8)),
        }
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn shift(&mut self, d: Reg, s1: impl Into<RegImm>, s2: Reg, kind: ShiftKind) {
        // TODO: Consider using shlx/shrx/sarx when BMI2 is available.
        self.push(mov(self.reg_size(), rcx, conv_reg(s2)));

        match s1.into() {
            RegImm::Reg(s1) => {
                if s1 != d {
                    self.mov(d, s1);
                }
            },
            RegImm::Imm(s1) => {
                self.load_immediate(d, s1);
            }
        }

        // d = d << s2
        match kind {
            ShiftKind::LogicalLeft => self.push(shl_cl(self.reg_size(), conv_reg(d))),
            ShiftKind::LogicalRight => self.push(shr_cl(self.reg_size(), conv_reg(d))),
            ShiftKind::ArithmeticRight => self.push(sar_cl(self.reg_size(), conv_reg(d))),
        }
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn mov(&mut self, dst: Reg, src: Reg) {
        self.push(mov(self.reg_size(), conv_reg(dst), conv_reg(src)))
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn calculate_label_offset(&self, rel8_len: usize, rel32_len: usize, offset: isize) -> Result<i8, i32> {
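        // Returns `Ok` with a rel8 displacement if the label is close enough for a short
        // jump, and `Err` with the rel32 displacement otherwise.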
        let offset_near = offset - (self.asm.len() as isize + rel8_len as isize);
        if offset_near <= i8::MAX as isize && offset_near >= i8::MIN as isize {
            Ok(offset_near as i8)
        } else {
            let offset = offset - (self.asm.len() as isize + rel32_len as isize);
            Err(offset as i32)
        }
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn jump_to_label(&mut self, label: Label) {
        if let Some(offset) = self.asm.get_label_origin_offset(label) {
            let offset = self.calculate_label_offset(
                jmp_rel8(i8::MAX).len(),
                jmp_rel32(i32::MAX).len(),
                offset
            );

            match offset {
                Ok(offset) => self.push(jmp_rel8(offset)),
                Err(offset) => self.push(jmp_rel32(offset))
            }
        } else {
            self.push(jmp_label32(label));
        }
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn branch(&mut self, s1: Reg, s2: impl Into<RegImm>, target: u32, condition: Condition) {
        match s2.into() {
            RegImm::Reg(s2) => self.push(cmp((self.reg_size(), conv_reg(s1), conv_reg(s2)))),
            RegImm::Imm(s2) => self.push(cmp((conv_reg(s1), imm32(s2)))),
        }

        let label = self.get_or_forward_declare_label(target);
        if let Some(offset) = self.asm.get_label_origin_offset(label) {
            let offset = self.calculate_label_offset(
                jcc_rel8(condition, i8::MAX).len(),
                jcc_rel32(condition, i32::MAX).len(),
                offset
            );

            match offset {
                Ok(offset) => self.push(jcc_rel8(condition, offset)),
                Err(offset) => self.push(jcc_rel32(condition, offset))
            }
        } else {
            self.push(jcc_label32(condition, label));
        }

        self.start_new_basic_block();
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn cmov(&mut self, d: Reg, s: Reg, c: Reg, condition: Condition) {
        if d == s {
            return;
        }

        let d = conv_reg(d);
        let s = conv_reg(s);
        let c = conv_reg(c);

        self.push(test((self.reg_size(), c, c)));
        self.push(cmov(condition, self.reg_size(), d, s));
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn cmov_imm(&mut self, d: Reg, s: u32, c: Reg, condition: Condition) {
        let d = conv_reg(d);
        let c = conv_reg(c);

        self.push(test((self.reg_size(), c, c)));
        self.push(mov_imm(TMP_REG, imm32(s)));
        self.push(cmov(condition, self.reg_size(), d, TMP_REG));
    }

    fn div_rem(&mut self, d: Reg, s1: Reg, s2: Reg, div_rem: DivRem, kind: Signedness) {
        // Unlike most other architectures, RISC-V doesn't trap on division by zero
        // nor on division with overflow, and has well-defined results in such cases.

        let label_divisor_is_zero = self.asm.forward_declare_label();
        let label_next = self.asm.forward_declare_label();

        self.push(test((self.reg_size(), conv_reg(s2), conv_reg(s2))));
        self.push(jcc_label8(Condition::Equal, label_divisor_is_zero));

        if matches!(kind, Signedness::Signed) {
            let label_normal = self.asm.forward_declare_label();
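            // Signed division can overflow in exactly one case: i32::MIN / -1. RISC-V
            // defines the result (quotient = the dividend, remainder = 0), while x86's
            // `idiv` would raise #DE, so that case has to be handled up front.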
            match self.reg_size() {
                RegSize::R32 => {
                    self.push(cmp((conv_reg(s1), imm32(i32::MIN as u32))));
                    self.push(jcc_label8(Condition::NotEqual, label_normal));
                    self.push(cmp((conv_reg(s2), imm32(-1_i32 as u32))));
                    self.push(jcc_label8(Condition::NotEqual, label_normal));
                    match div_rem {
                        DivRem::Div => self.mov(d, s1),
                        DivRem::Rem => self.clear_reg(d),
                    }
                    self.push(jmp_label8(label_next));
                }
                RegSize::R64 => todo!(),
            }

            self.define_label(label_normal);
        }

        // The division instruction takes its dividend in rdx:rax and also returns the result
        // there (quotient in rax, remainder in rdx). This isn't great because we're using
        // these registers for the VM's registers, so we need to do all of this in such a way
        // that they won't be accidentally overwritten.

        const _: () = {
            assert!(TMP_REG as u32 != rdx as u32);
            assert!(TMP_REG as u32 != rax as u32);
        };

        // Push the registers that will be clobbered.
        self.push(push(rdx));
        self.push(push(rax));

        // Push the operands.
        self.push(push(conv_reg(s1)));
        self.push(push(conv_reg(s2)));

        // Pop the divisor.
        self.push(pop(TMP_REG));

        // Pop the dividend.
        self.push(xor((RegSize::R32, rdx, rdx)));
        self.push(pop(rax));

        match kind {
            Signedness::Unsigned => self.push(div(self.reg_size(), TMP_REG)),
            Signedness::Signed => {
                self.push(cdq());
                self.push(idiv(self.reg_size(), TMP_REG))
            }
        }

        // Move the result to the temporary register.
        match div_rem {
            DivRem::Div => self.push(mov(self.reg_size(), TMP_REG, rax)),
            DivRem::Rem => self.push(mov(self.reg_size(), TMP_REG, rdx)),
        }

        // Restore the original registers.
        self.push(pop(rax));
        self.push(pop(rdx));

        // Move the output into the destination register.
        self.push(mov(self.reg_size(), conv_reg(d), TMP_REG));

        // Go to the next instruction.
        self.push(jmp_label8(label_next));

        self.define_label(label_divisor_is_zero);
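        // RISC-V defines division by zero: the quotient is all ones and the remainder is the dividend.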
        match div_rem {
            DivRem::Div => self.fill_with_ones(d),
            DivRem::Rem if d == s1 => {}
            DivRem::Rem => self.mov(d, s1),
        }

        self.define_label(label_next);
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn vmctx_field(&self, offset: usize) -> MemOp {
        match self.sandbox_kind {
            SandboxKind::Linux => {
                reg_indirect(RegSize::R64, LINUX_SANDBOX_VMCTX_REG + offset as i32)
            },
            SandboxKind::Generic => {
                let offset = crate::sandbox::generic::GUEST_MEMORY_TO_VMCTX_OFFSET as i32 + offset as i32;
                reg_indirect(RegSize::R64, GENERIC_SANDBOX_MEMORY_REG + offset)
            }
        }
    }

    fn load_vmctx_field_address(&mut self, offset: usize) -> NativeReg {
        if offset == 0 && matches!(self.sandbox_kind, SandboxKind::Linux) {
            LINUX_SANDBOX_VMCTX_REG
        } else {
            self.push(lea(RegSize::R64, TMP_REG, self.vmctx_field(offset)));
            TMP_REG
        }
    }

    fn save_registers_to_vmctx(&mut self) {
        let regs_base = self.load_vmctx_field_address(self.vmctx_regs_offset);
        for (nth, reg) in Reg::ALL.iter().copied().enumerate() {
            self.push(store(Size::U32, reg_indirect(RegSize::R64, regs_base + nth as i32 * 4), conv_reg(reg)));
        }
    }

    fn restore_registers_from_vmctx(&mut self) {
        let regs_base = self.load_vmctx_field_address(self.vmctx_regs_offset);
        for (nth, reg) in Reg::ALL.iter().copied().enumerate() {
            self.push(load(LoadKind::U32, conv_reg(reg), reg_indirect(RegSize::R64, regs_base + nth as i32 * 4)));
        }
    }

    pub(crate) fn emit_export_trampolines(&mut self) {
        for export in self.exports {
            log::trace!("Emitting trampoline: export: {}", export.symbol());

            let trampoline_label = self.asm.create_label();
            self.export_to_label.insert(export.jump_target(), trampoline_label);

            if matches!(self.sandbox_kind, SandboxKind::Linux) {
                self.push(mov_imm64(LINUX_SANDBOX_VMCTX_REG, VM_ADDR_VMCTX));
            }
            self.restore_registers_from_vmctx();

            if self.gas_metering.is_some() {
                // Did we enter again after running out of gas? If so, don't even bother running anything; just immediately trap.
                self.push(cmp((self.vmctx_field(self.vmctx_gas_offset), imm64(0))));
                self.push(jcc_label32(Condition::Sign, self.trap_label));
            }

            let target_label = self.get_or_forward_declare_label(export.jump_target());
            self.push(jmp_label32(target_label));
        }
    }

    pub(crate) fn emit_sysreturn(&mut self) -> Label {
        log::trace!("Emitting trampoline: sysreturn");
        let label = self.asm.create_label();

        self.save_registers_to_vmctx();
        self.push(mov_imm64(TMP_REG, self.address_table.syscall_return));
        self.push(jmp(TMP_REG));

        label
    }

    pub(crate) fn emit_ecall_trampoline(&mut self) {
        log::trace!("Emitting trampoline: ecall");
        self.define_label(self.ecall_label);

        self.push(push(TMP_REG)); // Save the ecall number.
        self.save_registers_to_vmctx();
        self.push(mov_imm64(TMP_REG, self.address_table.syscall_hostcall));
        self.push(pop(rdi)); // Pop the ecall number as an argument.
        self.push(call(TMP_REG));
        self.restore_registers_from_vmctx();
        self.push(ret());
    }

    pub(crate) fn emit_trace_trampoline(&mut self) {
        log::trace!("Emitting trampoline: trace");
        self.define_label(self.trace_label);

        self.push(push(TMP_REG)); // Save the instruction number.
        self.save_registers_to_vmctx();
        self.push(mov_imm64(TMP_REG, self.address_table.syscall_trace));
        self.push(pop(rdi)); // Pop the instruction number as an argument.
        self.push(load(LoadKind::U64, rsi, reg_indirect(RegSize::R64, rsp - 8))); // Grab the return address.
        self.push(call(TMP_REG));
        self.restore_registers_from_vmctx();
        self.push(ret());
    }

    pub(crate) fn emit_trap_trampoline(&mut self) {
        log::trace!("Emitting trampoline: trap");
        self.define_label(self.trap_label);

        self.save_registers_to_vmctx();
        self.push(mov_imm64(TMP_REG, self.address_table.syscall_trap));
        self.push(jmp(TMP_REG));
    }

    pub(crate) fn emit_sbrk_trampoline(&mut self) {
        log::trace!("Emitting trampoline: sbrk");
        self.define_label(self.sbrk_label);

        self.push(push(TMP_REG));
        self.save_registers_to_vmctx();
        self.push(mov_imm64(TMP_REG, self.address_table.syscall_sbrk));
        self.push(pop(rdi));
        self.push(call(TMP_REG));
        self.push(push(rax));
        self.restore_registers_from_vmctx();
        self.push(pop(TMP_REG));
        self.push(ret());
    }

    #[cold]
    pub(crate) fn trace_execution(&mut self, nth_instruction: usize) {
        self.push(mov_imm(TMP_REG, imm32(nth_instruction as u32)));
        self.push(call_label32(self.trace_label));
    }

    pub(crate) fn emit_gas_metering_stub(&mut self, kind: GasMeteringKind) {
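        // The actual cost isn't known yet at this point, so emit the `sub` with an
        // `i32::MAX` placeholder immediate; `emit_weight` patches in the real cost later.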
        self.push(sub((self.vmctx_field(self.vmctx_gas_offset), imm64(i32::MAX))));
        if matches!(kind, GasMeteringKind::Sync) {
            self.push(cmp((self.vmctx_field(self.vmctx_gas_offset), imm64(0))));
            self.push(jcc_label32(Condition::Sign, self.trap_label));
        }
    }

    pub(crate) fn emit_weight(&mut self, offset: usize, cost: u32) {
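        // Patch the 32-bit immediate of the `sub` emitted by `emit_gas_metering_stub`
        // at `offset` with the actual gas cost.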
        let length = sub((self.vmctx_field(self.vmctx_gas_offset), imm64(i32::MAX))).len();
        let xs = cost.to_le_bytes();
        self.asm.code_mut()[offset + length - 4..offset + length].copy_from_slice(&xs);
    }

    #[cfg_attr(not(debug_assertions), inline(always))]
    fn get_return_address(&self) -> u32 {
        let index = self.jump_table_index_by_basic_block.get(self.next_basic_block() as usize).copied().unwrap_or(0);
        if index == 0 {
            panic!("internal error: couldn't fetch the jump table index for the return basic block");
        }

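        // Return addresses are guest-visible jump table addresses, i.e. the slot index
        // scaled by the code address alignment.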
        index * VM_CODE_ADDRESS_ALIGNMENT
    }

    fn indirect_jump_or_call(&mut self, ra: Option<Reg>, base: Reg, offset: u32) {
        let return_address = ra.map(|ra| (ra, self.get_return_address()));
        match self.sandbox_kind {
            SandboxKind::Linux => {
                use polkavm_assembler::amd64::{SegReg, Scale};

                let target = if offset != 0 || ra == Some(base) {
                    self.push(lea(RegSize::R32, TMP_REG, reg_indirect(RegSize::R32, conv_reg(base) + offset as i32)));
                    TMP_REG
                } else {
                    conv_reg(base)
                };

                if let Some((return_register, return_address)) = return_address {
                    self.load_immediate(return_register, return_address);
                }

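                // Jump indirectly through the jump table: on the Linux sandbox the table is
                // reachable through the %gs segment, so the guest target can be used directly
                // as a scaled index into it.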
                self.asm.push(jmp(MemOp::IndexScaleOffset(Some(SegReg::gs), RegSize::R64, target, Scale::x8, 0)));
            },
            SandboxKind::Generic => {
                // TODO: This also could be more efficient.
                self.push(lea_rip_label(TMP_REG, self.jump_table_label));
                self.push(push(conv_reg(base)));
                self.push(shl_imm(RegSize::R64, conv_reg(base), 3));
                if offset > 0 {
                    let offset = offset.wrapping_mul(8);
                    self.push(add((conv_reg(base), imm32(offset))));
                }
                self.push(add((RegSize::R64, TMP_REG, conv_reg(base))));
                self.push(pop(conv_reg(base)));
                self.push(load(LoadKind::U64, TMP_REG, reg_indirect(RegSize::R64, TMP_REG)));

                if let Some((return_register, return_address)) = return_address {
                    self.load_immediate(return_register, return_address);
                }

                self.push(jmp(TMP_REG));
            }
        }

        self.start_new_basic_block();
    }
}

impl<'a> InstructionVisitor for VisitorWrapper<'a, Compiler<'a>> {
    type ReturnTy = ();

    #[inline(always)]
    fn trap(&mut self) -> Self::ReturnTy {
        let trap_label = self.trap_label;
        self.push(jmp_label32(trap_label));
        self.start_new_basic_block();
    }

    #[inline(always)]
    fn fallthrough(&mut self) -> Self::ReturnTy {
        self.start_new_basic_block();
    }

    #[inline(always)]
    fn sbrk(&mut self, dst: Reg, size: Reg) -> Self::ReturnTy {
        let label_bump_only = self.asm.forward_declare_label();
        let label_continue = self.asm.forward_declare_label();
        let sbrk_label = self.sbrk_label;

        let dst = conv_reg(dst);
        let size = conv_reg(size);
        if dst != size {
            self.push(mov(RegSize::R32, dst, size));
        }

        let offset = self.vmctx_heap_info_offset;
        let heap_info_base = self.load_vmctx_field_address(offset);

        // Calculate the new top-of-the-heap pointer.
        self.push(add((RegSize::R64, dst, reg_indirect(RegSize::R64, heap_info_base))));
        // Compare it to the current threshold.
        self.push(cmp((RegSize::R64, dst, reg_indirect(RegSize::R64, heap_info_base + 8))));
        // If it was less than or equal to the threshold then no extra action is necessary (bump only!).
        self.push(jcc_label8(Condition::BelowOrEqual, label_bump_only));

        // The new top-of-the-heap pointer crossed the threshold, so more involved handling is necessary.
        // We'll either allocate new memory, or return a null pointer.
        self.push(mov(RegSize::R64, TMP_REG, dst));
        self.push(call_label32(sbrk_label));
        self.push(mov(RegSize::R32, dst, TMP_REG));
        // Note: `dst` can be zero here, which is why we do the pointer bump from within the handler.
        self.push(jmp_label8(label_continue));

        self.define_label(label_bump_only);
        // Only a bump was necessary, so just update the pointer and continue.
        self.push(store(Size::U64, reg_indirect(RegSize::R64, heap_info_base), dst));

        self.define_label(label_continue);
    }

    #[inline(always)]
    fn ecalli(&mut self, imm: u32) -> Self::ReturnTy {
        let ecall_label = self.ecall_label;
        self.push(mov_imm(TMP_REG, imm32(imm)));
        self.push(call_label32(ecall_label));
    }

    #[inline(always)]
    fn set_less_than_unsigned(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
        self.compare_reg_reg(d, s1, s2, Condition::Below);
    }

    #[inline(always)]
    fn set_less_than_unsigned_imm(&mut self, d: Reg, s1: Reg, s2: u32) -> Self::ReturnTy {
        self.compare_reg_imm(d, s1, s2, Condition::Below);
    }

    #[inline(always)]
    fn set_greater_than_unsigned_imm(&mut self, d: Reg, s1: Reg, s2: u32) -> Self::ReturnTy {
        self.compare_reg_imm(d, s1, s2, Condition::Above);
    }

    #[inline(always)]
    fn set_less_than_signed(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
        self.compare_reg_reg(d, s1, s2, Condition::Less);
    }

    #[inline(always)]
    fn set_less_than_signed_imm(&mut self, d: Reg, s1: Reg, s2: u32) -> Self::ReturnTy {
        self.compare_reg_imm(d, s1, s2, Condition::Less);
    }

    #[inline(always)]
    fn set_greater_than_signed_imm(&mut self, d: Reg, s1: Reg, s2: u32) -> Self::ReturnTy {
        self.compare_reg_imm(d, s1, s2, Condition::Greater);
    }

    #[inline(always)]
    fn shift_logical_right(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
        self.shift(d, s1, s2, ShiftKind::LogicalRight);
    }

    #[inline(always)]
    fn shift_arithmetic_right(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
        self.shift(d, s1, s2, ShiftKind::ArithmeticRight);
    }

    #[inline(always)]
    fn shift_logical_left(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
        self.shift(d, s1, s2, ShiftKind::LogicalLeft);
    }

    #[inline(always)]
    fn shift_logical_right_imm_alt(&mut self, d: Reg, s2: Reg, s1: u32) -> Self::ReturnTy {
        self.shift(d, s1, s2, ShiftKind::LogicalRight);
    }

    #[inline(always)]
    fn shift_arithmetic_right_imm_alt(&mut self, d: Reg, s2: Reg, s1: u32) -> Self::ReturnTy {
        self.shift(d, s1, s2, ShiftKind::ArithmeticRight);
    }

    #[inline(always)]
    fn shift_logical_left_imm_alt(&mut self, d: Reg, s2: Reg, s1: u32) -> Self::ReturnTy {
        self.shift(d, s1, s2, ShiftKind::LogicalLeft);
    }

    #[inline(always)]
    fn xor(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
        let reg_size = self.reg_size();
        match (d, s1, s2) {
            // d = d ^ s2
            (_, _, _) if d == s1 => self.push(xor((reg_size, conv_reg(d), conv_reg(s2)))),
            // d = s1 ^ d
            (_, _, _) if d == s2 => self.push(xor((reg_size, conv_reg(d), conv_reg(s1)))),
            // d = s1 ^ s2
            _ => {
                self.mov(d, s1);
                self.push(xor((reg_size, conv_reg(d), conv_reg(s2))));
            }
        }
    }

    #[inline(always)]
    fn and(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
        let reg_size = self.reg_size();
        match (d, s1, s2) {
            // d = d & s2
            (_, _, _) if d == s1 => self.push(and((reg_size, conv_reg(d), conv_reg(s2)))),
            // d = s1 & d
            (_, _, _) if d == s2 => self.push(and((reg_size, conv_reg(d), conv_reg(s1)))),
            // d = s1 & s2
            _ => {
                self.mov(d, s1);
                self.push(and((reg_size, conv_reg(d), conv_reg(s2))));
            }
        }
    }

    #[inline(always)]
    fn or(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
        let reg_size = self.reg_size();
        match (d, s1, s2) {
            // d = d | s2
            (_, _, _) if d == s1 => self.push(or((reg_size, conv_reg(d), conv_reg(s2)))),
            // d = s1 | d
            (_, _, _) if d == s2 => self.push(or((reg_size, conv_reg(d), conv_reg(s1)))),
            // d = s1 | s2
            _ => {
                self.mov(d, s1);
                self.push(or((reg_size, conv_reg(d), conv_reg(s2))));
            }
        }
    }

    #[inline(always)]
    fn add(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
        let reg_size = self.reg_size();
        match (d, s1, s2) {
            // d = d + s2
            (_, _, _) if d == s1 => self.push(add((reg_size, conv_reg(d), conv_reg(s2)))),
            // d = s1 + d
            (_, _, _) if d == s2 => self.push(add((reg_size, conv_reg(d), conv_reg(s1)))),
            // d = s1 + s2
            _ => {
                if d != s1 {
                    self.mov(d, s1);
                }
                self.push(add((reg_size, conv_reg(d), conv_reg(s2))));
            }
        }
    }

    #[inline(always)]
    fn sub(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
        let reg_size = self.reg_size();
        match (d, s1, s2) {
            // d = d - s2
            (_, _, _) if d == s1 => self.push(sub((reg_size, conv_reg(d), conv_reg(s2)))),
            // d = s1 - d
            (_, _, _) if d == s2 => {
                self.push(neg(reg_size, conv_reg(d)));
                self.push(add((reg_size, conv_reg(d), conv_reg(s1))));
            }
            // d = s1 - s2
            _ => {
                self.mov(d, s1);
                self.push(sub((reg_size, conv_reg(d), conv_reg(s2))));
            }
        }
    }

    #[inline(always)]
    fn negate_and_add_imm(&mut self, d: Reg, s1: Reg, s2: u32) -> Self::ReturnTy {
        if d == s1 {
            // d = -d + s2
            self.push(neg(RegSize::R32, conv_reg(d)));
            if s2 != 0 {
                self.push(add((conv_reg(d), imm32(s2))));
            }
        } else {
            // d = -s1 + s2  =>  d = s2 - s1
            if s2 == 0 {
                self.mov(d, s1);
                self.push(neg(RegSize::R32, conv_reg(d)));
            } else {
                self.push(mov_imm(conv_reg(d), imm32(s2)));
                self.push(sub((RegSize::R32, conv_reg(d), conv_reg(s1))));
            }
        }
    }

    #[inline(always)]
    fn mul(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
        let reg_size = self.reg_size();
        if d == s1 {
            // d = d * s2
            self.push(imul(reg_size, conv_reg(d), conv_reg(s2)))
        } else if d == s2 {
            // d = s1 * d
            self.push(imul(reg_size, conv_reg(d), conv_reg(s1)))
        } else {
            // d = s1 * s2
            self.mov(d, s1);
            self.push(imul(reg_size, conv_reg(d), conv_reg(s2)));
        }
    }

    #[inline(always)]
    fn mul_imm(&mut self, d: Reg, s1: Reg, s2: u32) -> Self::ReturnTy {
        self.push(imul_imm(RegSize::R32, conv_reg(d), conv_reg(s1), s2 as i32));
    }

    #[inline(always)]
    fn mul_upper_signed_signed(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
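        // Sign-extend both operands to 64 bits, do a full 64-bit multiply,
        // and keep the upper 32 bits of the product.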
        self.push(movsxd_32_to_64(TMP_REG, conv_reg(s2)));
        self.push(movsxd_32_to_64(conv_reg(d), conv_reg(s1)));
        self.push(imul(RegSize::R64, conv_reg(d), TMP_REG));
        self.push(shr_imm(RegSize::R64, conv_reg(d), 32));
    }

    #[inline(always)]
    fn mul_upper_signed_signed_imm(&mut self, d: Reg, s1: Reg, s2: u32) -> Self::ReturnTy {
        self.push(mov_imm(TMP_REG, imm64(s2 as i32)));
        self.push(movsxd_32_to_64(conv_reg(d), conv_reg(s1)));
        self.push(imul(RegSize::R64, conv_reg(d), TMP_REG));
        self.push(shr_imm(RegSize::R64, conv_reg(d), 32));
    }

    #[inline(always)]
    fn mul_upper_unsigned_unsigned(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
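        // The guest registers are assumed to have their upper 32 bits clear (the same
        // assumption `load_store_operand!` makes), so a plain 64-bit multiply followed
        // by a right shift by 32 yields the upper half of the 32x32 product.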
        if d == s1 {
            // d = d * s2
            self.push(imul(RegSize::R64, conv_reg(d), conv_reg(s2)));
        } else if d == s2 {
            // d = s1 * d
            self.push(imul(RegSize::R64, conv_reg(d), conv_reg(s1)));
        } else {
            // d = s1 * s2
            self.push(mov(RegSize::R32, conv_reg(d), conv_reg(s1)));
            self.push(imul(RegSize::R64, conv_reg(d), conv_reg(s2)));
        }

        self.push(shr_imm(RegSize::R64, conv_reg(d), 32));
    }

    #[inline(always)]
    fn mul_upper_unsigned_unsigned_imm(&mut self, d: Reg, s1: Reg, s2: u32) -> Self::ReturnTy {
        self.push(mov_imm(TMP_REG, imm32(s2)));
        if d != s1 {
            self.push(mov(RegSize::R32, conv_reg(d), conv_reg(s1)));
        }

        self.push(imul(RegSize::R64, conv_reg(d), TMP_REG));
        self.push(shr_imm(RegSize::R64, conv_reg(d), 32));
    }

    #[inline(always)]
    fn mul_upper_signed_unsigned(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
        // This instruction is equivalent to:
        //   let s1: i32;
        //   let s2: u32;
        //   let s1: i64 = s1 as i64;
        //   let s2: i64 = s2 as u64 as i64;
        //   let d: u32 = ((s1 * s2) >> 32) as u32;
        //
        // So, basically:
        //   1) sign-extend s1 to 64 bits,
        //   2) zero-extend s2 to 64 bits,
        //   3) multiply,
        //   4) return the upper 32 bits.

        if d == s2 {
            // d = s1 * d
            self.push(mov(RegSize::R32, TMP_REG, conv_reg(s2)));
            self.push(movsxd_32_to_64(conv_reg(d), conv_reg(s1)));
            self.push(imul(RegSize::R64, conv_reg(d), TMP_REG));
        } else {
            // d = s1 * s2
            self.push(movsxd_32_to_64(conv_reg(d), conv_reg(s1)));
            self.push(imul(RegSize::R64, conv_reg(d), conv_reg(s2)));
        }

        self.push(shr_imm(RegSize::R64, conv_reg(d), 32));
    }

    #[inline(always)]
    fn div_unsigned(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
        self.div_rem(d, s1, s2, DivRem::Div, Signedness::Unsigned);
    }

    #[inline(always)]
    fn div_signed(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
        self.div_rem(d, s1, s2, DivRem::Div, Signedness::Signed);
    }

    #[inline(always)]
    fn rem_unsigned(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
        self.div_rem(d, s1, s2, DivRem::Rem, Signedness::Unsigned);
    }

    #[inline(always)]
    fn rem_signed(&mut self, d: Reg, s1: Reg, s2: Reg) -> Self::ReturnTy {
        self.div_rem(d, s1, s2, DivRem::Rem, Signedness::Signed);
    }

    #[inline(always)]
    fn shift_logical_right_imm(&mut self, d: Reg, s1: Reg, s2: u32) -> Self::ReturnTy {
        self.shift_imm(d, s1, s2, ShiftKind::LogicalRight);
    }

    #[inline(always)]
    fn shift_arithmetic_right_imm(&mut self, d: Reg, s1: Reg, s2: u32) -> Self::ReturnTy {
        self.shift_imm(d, s1, s2, ShiftKind::ArithmeticRight);
    }

    #[inline(always)]
    fn shift_logical_left_imm(&mut self, d: Reg, s1: Reg, s2: u32) -> Self::ReturnTy {
        self.shift_imm(d, s1, s2, ShiftKind::LogicalLeft);
    }

    #[inline(always)]
    fn or_imm(&mut self, d: Reg, s1: Reg, s2: u32) -> Self::ReturnTy {
        if d != s1 {
            self.mov(d, s1);
        }

        // d = s1 | s2
        self.push(or((conv_reg(d), imm32(s2))));
    }

    #[inline(always)]
    fn and_imm(&mut self, d: Reg, s1: Reg, s2: u32) -> Self::ReturnTy {
        if d != s1 {
            self.mov(d, s1);
        }

        // d = s1 & s2
        self.push(and((conv_reg(d), imm32(s2))));
    }

    #[inline(always)]
    fn xor_imm(&mut self, d: Reg, s1: Reg, s2: u32) -> Self::ReturnTy {
        let reg_size = self.reg_size();
        if d != s1 {
            self.mov(d, s1);
        }

        if s2 != !0 {
            // d = s1 ^ s2
            self.push(xor((conv_reg(d), imm32(s2))));
        } else {
            // d = s1 ^ 0xffffffff
            self.push(not(reg_size, conv_reg(d)));
        }
    }

    #[inline(always)]
    fn load_imm(&mut self, dst: Reg, s2: u32) -> Self::ReturnTy {
        self.load_immediate(dst, s2);
    }

    #[inline(always)]
    fn move_reg(&mut self, d: Reg, s: Reg) -> Self::ReturnTy {
        self.mov(d, s);
    }

    #[inline(always)]
    fn cmov_if_zero(&mut self, d: Reg, s: Reg, c: Reg) -> Self::ReturnTy {
        self.cmov(d, s, c, Condition::Equal);
    }

    #[inline(always)]
    fn cmov_if_not_zero(&mut self, d: Reg, s: Reg, c: Reg) -> Self::ReturnTy {
        self.cmov(d, s, c, Condition::NotEqual);
    }

    #[inline(always)]
    fn cmov_if_zero_imm(&mut self, d: Reg, c: Reg, s: u32) -> Self::ReturnTy {
        self.cmov_imm(d, s, c, Condition::Equal);
    }

    #[inline(always)]
    fn cmov_if_not_zero_imm(&mut self, d: Reg, c: Reg, s: u32) -> Self::ReturnTy {
        self.cmov_imm(d, s, c, Condition::NotEqual);
    }

    #[inline(always)]
    fn add_imm(&mut self, d: Reg, s1: Reg, s2: u32) -> Self::ReturnTy {
        let reg_size = self.reg_size();
        let d = conv_reg(d);
        let s1 = conv_reg(s1);
        match (d, s1, s2) {
            // d = d + 1
            (_, _, 1) if d == s1 => self.push(inc(reg_size, d)),
            // d = d + s2
            (_, _, _) if d == s1 => self.push(add((d, imm32(s2)))),
            // d = s1 + s2
            (_, _, _) => {
                self.push(lea(reg_size, d, reg_indirect(reg_size, s1 + s2 as i32)));
            }
        }
    }

    #[inline(always)]
    fn store_u8(&mut self, src: Reg, offset: u32) -> Self::ReturnTy {
        self.store(src, None, offset, Size::U8);
    }

    #[inline(always)]
    fn store_u16(&mut self, src: Reg, offset: u32) -> Self::ReturnTy {
        self.store(src, None, offset, Size::U16);
    }

    #[inline(always)]
    fn store_u32(&mut self, src: Reg, offset: u32) -> Self::ReturnTy {
        self.store(src, None, offset, Size::U32);
    }

    #[inline(always)]
    fn store_indirect_u8(&mut self, src: Reg, base: Reg, offset: u32) -> Self::ReturnTy {
        self.store(src, Some(base), offset, Size::U8);
    }

    #[inline(always)]
    fn store_indirect_u16(&mut self, src: Reg, base: Reg, offset: u32) -> Self::ReturnTy {
        self.store(src, Some(base), offset, Size::U16);
    }

    #[inline(always)]
    fn store_indirect_u32(&mut self, src: Reg, base: Reg, offset: u32) -> Self::ReturnTy {
        self.store(src, Some(base), offset, Size::U32);
    }

    #[inline(always)]
    fn store_imm_indirect_u8(&mut self, base: Reg, offset: u32, value: u32) -> Self::ReturnTy {
        self.store(value, Some(base), offset, Size::U8);
    }

    #[inline(always)]
    fn store_imm_indirect_u16(&mut self, base: Reg, offset: u32, value: u32) -> Self::ReturnTy {
        self.store(value, Some(base), offset, Size::U16);
    }

    #[inline(always)]
    fn store_imm_indirect_u32(&mut self, base: Reg, offset: u32, value: u32) -> Self::ReturnTy {
        self.store(value, Some(base), offset, Size::U32);
    }

    #[inline(always)]
    fn store_imm_u8(&mut self, value: u32, offset: u32) -> Self::ReturnTy {
        self.store(value, None, offset, Size::U8);
    }

    #[inline(always)]
    fn store_imm_u16(&mut self, value: u32, offset: u32) -> Self::ReturnTy {
        self.store(value, None, offset, Size::U16);
    }

    #[inline(always)]
    fn store_imm_u32(&mut self, value: u32, offset: u32) -> Self::ReturnTy {
        self.store(value, None, offset, Size::U32);
    }

    #[inline(always)]
    fn load_indirect_u8(&mut self, dst: Reg, base: Reg, offset: u32) -> Self::ReturnTy {
        self.load(dst, Some(base), offset, LoadKind::U8);
    }

    #[inline(always)]
    fn load_indirect_i8(&mut self, dst: Reg, base: Reg, offset: u32) -> Self::ReturnTy {
        self.load(dst, Some(base), offset, LoadKind::I8);
    }

    #[inline(always)]
    fn load_indirect_u16(&mut self, dst: Reg, base: Reg, offset: u32) -> Self::ReturnTy {
        self.load(dst, Some(base), offset, LoadKind::U16);
    }

    #[inline(always)]
    fn load_indirect_i16(&mut self, dst: Reg, base: Reg, offset: u32) -> Self::ReturnTy {
        self.load(dst, Some(base), offset, LoadKind::I16);
    }

    #[inline(always)]
    fn load_indirect_u32(&mut self, dst: Reg, base: Reg, offset: u32) -> Self::ReturnTy {
        self.load(dst, Some(base), offset, LoadKind::U32);
    }

    #[inline(always)]
    fn load_u8(&mut self, dst: Reg, offset: u32) -> Self::ReturnTy {
        self.load(dst, None, offset, LoadKind::U8);
    }

    #[inline(always)]
    fn load_i8(&mut self, dst: Reg, offset: u32) -> Self::ReturnTy {
        self.load(dst, None, offset, LoadKind::I8);
    }

    #[inline(always)]
    fn load_u16(&mut self, dst: Reg, offset: u32) -> Self::ReturnTy {
        self.load(dst, None, offset, LoadKind::U16);
    }

    #[inline(always)]
    fn load_i16(&mut self, dst: Reg, offset: u32) -> Self::ReturnTy {
        self.load(dst, None, offset, LoadKind::I16);
    }

    #[inline(always)]
    fn load_u32(&mut self, dst: Reg, offset: u32) -> Self::ReturnTy {
        self.load(dst, None, offset, LoadKind::U32);
    }

    #[inline(always)]
    fn branch_less_unsigned(&mut self, s1: Reg, s2: Reg, target: u32) -> Self::ReturnTy {
        self.branch(s1, s2, target, Condition::Below);
    }

    #[inline(always)]
    fn branch_less_signed(&mut self, s1: Reg, s2: Reg, target: u32) -> Self::ReturnTy {
        self.branch(s1, s2, target, Condition::Less);
    }

    #[inline(always)]
    fn branch_greater_or_equal_unsigned(&mut self, s1: Reg, s2: Reg, target: u32) -> Self::ReturnTy {
        self.branch(s1, s2, target, Condition::AboveOrEqual);
    }

    #[inline(always)]
    fn branch_greater_or_equal_signed(&mut self, s1: Reg, s2: Reg, target: u32) -> Self::ReturnTy {
        self.branch(s1, s2, target, Condition::GreaterOrEqual);
    }

    #[inline(always)]
    fn branch_eq(&mut self, s1: Reg, s2: Reg, target: u32) -> Self::ReturnTy {
        self.branch(s1, s2, target, Condition::Equal);
    }

    #[inline(always)]
    fn branch_not_eq(&mut self, s1: Reg, s2: Reg, target: u32) -> Self::ReturnTy {
        self.branch(s1, s2, target, Condition::NotEqual);
    }

    #[inline(always)]
    fn branch_eq_imm(&mut self, s1: Reg, s2: u32, target: u32) -> Self::ReturnTy {
        self.branch(s1, s2, target, Condition::Equal);
    }

    #[inline(always)]
    fn branch_not_eq_imm(&mut self, s1: Reg, s2: u32, target: u32) -> Self::ReturnTy {
        self.branch(s1, s2, target, Condition::NotEqual);
    }

    #[inline(always)]
    fn branch_less_unsigned_imm(&mut self, s1: Reg, s2: u32, target: u32) -> Self::ReturnTy {
        self.branch(s1, s2, target, Condition::Below);
    }

    #[inline(always)]
    fn branch_less_signed_imm(&mut self, s1: Reg, s2: u32, target: u32) -> Self::ReturnTy {
        self.branch(s1, s2, target, Condition::Less);
    }

    #[inline(always)]
    fn branch_greater_or_equal_unsigned_imm(&mut self, s1: Reg, s2: u32, target: u32) -> Self::ReturnTy {
        self.branch(s1, s2, target, Condition::AboveOrEqual);
    }

    #[inline(always)]
    fn branch_greater_or_equal_signed_imm(&mut self, s1: Reg, s2: u32, target: u32) -> Self::ReturnTy {
        self.branch(s1, s2, target, Condition::GreaterOrEqual);
    }

    #[inline(always)]
    fn branch_less_or_equal_unsigned_imm(&mut self, s1: Reg, s2: u32, target: u32) -> Self::ReturnTy {
        self.branch(s1, s2, target, Condition::BelowOrEqual);
    }

    #[inline(always)]
    fn branch_less_or_equal_signed_imm(&mut self, s1: Reg, s2: u32, target: u32) -> Self::ReturnTy {
        self.branch(s1, s2, target, Condition::LessOrEqual);
    }

    #[inline(always)]
    fn branch_greater_unsigned_imm(&mut self, s1: Reg, s2: u32, target: u32) -> Self::ReturnTy {
        self.branch(s1, s2, target, Condition::Above);
    }

    #[inline(always)]
    fn branch_greater_signed_imm(&mut self, s1: Reg, s2: u32, target: u32) -> Self::ReturnTy {
        self.branch(s1, s2, target, Condition::Greater);
    }

    #[inline(always)]
    fn jump(&mut self, target: u32) -> Self::ReturnTy {
        let label = self.get_or_forward_declare_label(target);
        self.jump_to_label(label);
        self.start_new_basic_block();
    }

    #[inline(always)]
    fn call(&mut self, ra: Reg, target: u32) -> Self::ReturnTy {
        let label = self.get_or_forward_declare_label(target);
        let return_address = self.get_return_address();
        self.load_immediate(ra, return_address);
        self.jump_to_label(label);
        self.start_new_basic_block();
    }

    #[inline(always)]
    fn jump_indirect(&mut self, base: Reg, offset: u32) -> Self::ReturnTy {
        self.indirect_jump_or_call(None, base, offset)
    }

    #[inline(always)]
    fn call_indirect(&mut self, ra: Reg, base: Reg, offset: u32) -> Self::ReturnTy {
        self.indirect_jump_or_call(Some(ra), base, offset)
    }
}