//! Lowering rules for x64.

// ISLE integration glue.
pub(super) mod isle;

use crate::ir::{types, ExternalName, Inst as IRInst, LibCall, Opcode, Type};
use crate::isa::x64::abi::*;
use crate::isa::x64::inst::args::*;
use crate::isa::x64::inst::*;
use crate::isa::{x64::X64Backend, CallConv};
use crate::machinst::abi::SmallInstVec;
use crate::machinst::lower::*;
use crate::machinst::*;
use crate::result::CodegenResult;
use crate::settings::Flags;
use smallvec::smallvec;
use target_lexicon::Triple;
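
/// Returns whether the given type is a scalar integer (up to 64 bits) or a
/// 64-bit reference type, i.e. a value that fits in a single general-purpose
/// register.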
fn is_int_or_ref_ty(ty: Type) -> bool {
    match ty {
        types::I8 | types::I16 | types::I32 | types::I64 | types::R64 => true,
        types::R32 => panic!("shouldn't have 32-bits refs on x64"),
        _ => false,
    }
}
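
/// Returns the instruction producing the given `input`, if that producer has
/// opcode `op`; otherwise returns `None`.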
fn matches_input(ctx: &mut Lower<Inst>, input: InsnInput, op: Opcode) -> Option<IRInst> {
    let inputs = ctx.get_input_as_source_or_const(input.insn, input.input);
    inputs.inst.as_inst().and_then(|(src_inst, _)| {
        let data = ctx.data(src_inst);
        if data.opcode() == op {
            return Some(src_inst);
        }
        None
    })
}
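
/// Put the given input into possibly multiple registers, and mark it as used
/// (side-effect).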
fn put_input_in_regs(ctx: &mut Lower<Inst>, spec: InsnInput) -> ValueRegs<Reg> {
    let ty = ctx.input_ty(spec.insn, spec.input);
    let input = ctx.get_input_as_source_or_const(spec.insn, spec.input);

    if let Some(c) = input.constant {
        // Generate constants fresh at each use to minimize long-range register
        // pressure.
        let from_bits = ty_bits(ty);
        let (size, c) = if from_bits < 64 {
            (OperandSize::Size32, c & ((1u64 << from_bits) - 1))
        } else {
            (OperandSize::Size64, c)
        };
        assert!(is_int_or_ref_ty(ty)); // Only used for integers and references.
        let cst_copy = ctx.alloc_tmp(ty);
        ctx.emit(Inst::imm(size, c, cst_copy.only_reg().unwrap()));
        non_writable_value_regs(cst_copy)
    } else {
        ctx.put_input_in_regs(spec.insn, spec.input)
    }
}
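
/// Put the given input into a single register, and mark it as used
/// (side-effect).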
fn put_input_in_reg(ctx: &mut Lower<Inst>, spec: InsnInput) -> Reg {
    put_input_in_regs(ctx, spec)
        .only_reg()
        .expect("Multi-register value not expected")
}
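
/// The allowed widths of a load that may be merged into another operation.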
enum MergeableLoadSize {
    /// The consuming operation reads exactly as many bits as the load
    /// produces, so loads of any width may be merged.
    Exact,
    /// The consuming operation accesses at least 32 bits of the merged
    /// operand, so loads narrower than 32 bits must not be merged.
    Min32,
}
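
/// Determines whether a load operation (indicated by `src_insn`) can be merged
/// into the current lowering point. If so, returns the address-base source (as
/// an `InsnInput`) and an offset from that address from which to perform the
/// load.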
fn is_mergeable_load(
    ctx: &mut Lower<Inst>,
    src_insn: IRInst,
    size: MergeableLoadSize,
) -> Option<(InsnInput, i32)> {
    let insn_data = ctx.data(src_insn);
    let inputs = ctx.num_inputs(src_insn);
    if inputs != 1 {
        return None;
    }

    // If the load's result is narrower than 32 bits, only merge it when the
    // consumer allows an exact-width access.
    let load_ty = ctx.output_ty(src_insn, 0);
    if ty_bits(load_ty) < 32 {
        match size {
            MergeableLoadSize::Exact => {}
            MergeableLoadSize::Min32 => return None,
        }
    }

    if insn_data.opcode() == Opcode::Load {
        let offset = insn_data
            .load_store_offset()
            .expect("load should have offset");
        Some((
            InsnInput {
                insn: src_insn,
                input: 0,
            },
            offset,
        ))
    } else {
        None
    }
}
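
/// Returns the constant value of the given input, if it is a constant;
/// otherwise `None`.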
fn input_to_imm(ctx: &mut Lower<Inst>, spec: InsnInput) -> Option<u64> {
    ctx.get_input_as_source_or_const(spec.insn, spec.input)
        .constant
}
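
/// Emits a call to a well-known libcall, moving `inputs` into argument
/// registers and `outputs` out of return-value registers according to the
/// libcall's ABI signature.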
fn emit_vm_call(
    ctx: &mut Lower<Inst>,
    flags: &Flags,
    triple: &Triple,
    libcall: LibCall,
    inputs: &[Reg],
    outputs: &[Writable<Reg>],
) -> CodegenResult<()> {
    let extname = ExternalName::LibCall(libcall);

    // Colocated libcalls can be reached with near (PC-relative) relocations;
    // otherwise the call must go through a far reference.
    let dist = if flags.use_colocated_libcalls() {
        RelocDistance::Near
    } else {
        RelocDistance::Far
    };

    let call_conv = CallConv::for_libcall(flags, CallConv::triple_default(triple));
    let sig = libcall.signature(call_conv);
    let caller_conv = ctx.abi().call_conv(ctx.sigs());

    // Build the ABI signature for this IR signature on first use.
    if !ctx.sigs().have_abi_sig_for_signature(&sig) {
        ctx.sigs_mut()
            .make_abi_sig_from_ir_signature::<X64ABIMachineSpec>(sig.clone(), flags)?;
    }

    let mut abi =
        X64Caller::from_libcall(ctx.sigs(), &sig, &extname, dist, caller_conv, flags.clone())?;

    abi.emit_stack_pre_adjust(ctx);

    assert_eq!(inputs.len(), abi.num_args(ctx.sigs()));

    for (i, input) in inputs.iter().enumerate() {
        for inst in abi.gen_arg(ctx, i, ValueRegs::one(*input)) {
            ctx.emit(inst);
        }
    }

    let mut retval_insts: SmallInstVec<_> = smallvec![];
    for (i, output) in outputs.iter().enumerate() {
        retval_insts.extend(abi.gen_retval(ctx, i, ValueRegs::one(*output)).into_iter());
    }
    abi.emit_call(ctx);
    for inst in retval_insts {
        ctx.emit(inst);
    }
    abi.emit_stack_post_adjust(ctx);

    Ok(())
}
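
/// Returns whether the given input is a shift by a constant value less or
/// equal than 3. The goal is to embed it within an address mode.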
fn matches_small_constant_shift(ctx: &mut Lower<Inst>, spec: InsnInput) -> Option<(InsnInput, u8)> {
    matches_input(ctx, spec, Opcode::Ishl).and_then(|shift| {
        match input_to_imm(
            ctx,
            InsnInput {
                insn: shift,
                input: 1,
            },
        ) {
            Some(shift_amt) if shift_amt <= 3 => Some((
                InsnInput {
                    insn: shift,
                    input: 0,
                },
                shift_amt as u8,
            )),
            _ => None,
        }
    })
}
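
/// Lowers an instruction to one of the x86 addressing modes.
///
/// Note: the 32-bit offset in Cranelift has to be sign-extended, which matches
/// x86's behavior.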
fn lower_to_amode(ctx: &mut Lower<Inst>, spec: InsnInput, offset: i32) -> Amode {
    let flags = ctx
        .memflags(spec.insn)
        .expect("Instruction with amode should have memflags");

    // We now either have an add that we must materialize, or some other input;
    // as well as the final offset.
    if let Some(add) = matches_input(ctx, spec, Opcode::Iadd) {
        debug_assert_eq!(ctx.output_ty(add, 0), types::I64);
        let add_inputs = &[
            InsnInput {
                insn: add,
                input: 0,
            },
            InsnInput {
                insn: add,
                input: 1,
            },
        ];

        // If either operand of the add is a small constant left shift, use it
        // as the scaled index register of the address mode.
        let (base, index, shift) = if let Some((shift_input, shift_amt)) =
            matches_small_constant_shift(ctx, add_inputs[0])
        {
            (
                put_input_in_reg(ctx, add_inputs[1]),
                put_input_in_reg(ctx, shift_input),
                shift_amt,
            )
        } else if let Some((shift_input, shift_amt)) =
            matches_small_constant_shift(ctx, add_inputs[1])
        {
            (
                put_input_in_reg(ctx, add_inputs[0]),
                put_input_in_reg(ctx, shift_input),
                shift_amt,
            )
        } else {
            for i in 0..=1 {
                // Try to pierce through a uextend of a constant.
                if let Some(uextend) = matches_input(
                    ctx,
                    InsnInput {
                        insn: add,
                        input: i,
                    },
                    Opcode::Uextend,
                ) {
                    if let Some(cst) = ctx.get_input_as_source_or_const(uextend, 0).constant {
                        // Zero the upper bits.
                        let input_size = ctx.input_ty(uextend, 0).bits() as u64;
                        let shift: u64 = 64 - input_size;
                        let uext_cst: u64 = (cst << shift) >> shift;

                        let final_offset = (offset as i64).wrapping_add(uext_cst as i64);
                        if low32_will_sign_extend_to_64(final_offset as u64) {
                            let base = put_input_in_reg(ctx, add_inputs[1 - i]);
                            return Amode::imm_reg(final_offset as u32, base).with_flags(flags);
                        }
                    }
                }

                // If the operand is a constant, fold it into the offset.
                if let Some(cst) = ctx.get_input_as_source_or_const(add, i).constant {
                    let final_offset = (offset as i64).wrapping_add(cst as i64);
                    if low32_will_sign_extend_to_64(final_offset as u64) {
                        let base = put_input_in_reg(ctx, add_inputs[1 - i]);
                        return Amode::imm_reg(final_offset as u32, base).with_flags(flags);
                    }
                }
            }

            (
                put_input_in_reg(ctx, add_inputs[0]),
                put_input_in_reg(ctx, add_inputs[1]),
                0,
            )
        };

        return Amode::imm_reg_reg_shift(
            offset as u32,
            Gpr::new(base).unwrap(),
            Gpr::new(index).unwrap(),
            shift,
        )
        .with_flags(flags);
    }

    let input = put_input_in_reg(ctx, spec);
    Amode::imm_reg(offset as u32, input).with_flags(flags)
}
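
// Top-level instruction lowering entry point, for one instruction.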
impl LowerBackend for X64Backend {
    type MInst = Inst;

    fn lower(&self, ctx: &mut Lower<Inst>, ir_inst: IRInst) -> Option<InstOutput> {
        isle::lower(ctx, self, ir_inst)
    }

    fn lower_branch(
        &self,
        ctx: &mut Lower<Inst>,
        ir_inst: IRInst,
        targets: &[MachLabel],
    ) -> Option<()> {
        isle::lower_branch(ctx, self, ir_inst, targets)
    }

    fn maybe_pinned_reg(&self) -> Option<Reg> {
        Some(regs::pinned_reg())
    }
}