mod bounds_checks;
use super::{hash_map, HashMap};
use crate::environ::{FuncEnvironment, GlobalVariable};
use crate::state::{ControlStackFrame, ElseData, FuncTranslationState};
use crate::translation_utils::{
block_with_params, blocktype_params_results, f32_translation, f64_translation,
};
use crate::wasm_unsupported;
use crate::{FuncIndex, GlobalIndex, MemoryIndex, TableIndex, TypeIndex, WasmResult};
use core::convert::TryInto;
use core::{i32, u32};
use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
use cranelift_codegen::ir::immediates::Offset32;
use cranelift_codegen::ir::types::*;
use cranelift_codegen::ir::{
self, AtomicRmwOp, ConstantData, InstBuilder, JumpTableData, MemFlags, Value, ValueLabel,
};
use cranelift_codegen::packed_option::ReservedValue;
use cranelift_frontend::{FunctionBuilder, Variable};
use itertools::Itertools;
use smallvec::SmallVec;
use std::convert::TryFrom;
use std::vec::Vec;
use wasmparser::{FuncValidator, MemArg, Operator, WasmModuleResources};
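/// Unwrap a `Reachability<T>`: yield the inner value when reachable, otherwise mark
/// the translation state as unreachable and return `Ok(())` from the enclosing function.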
macro_rules! unwrap_or_return_unreachable_state {
($state:ident, $value:expr) => {
match $value {
Reachability::Reachable(x) => x,
Reachability::Unreachable => {
$state.reachable = false;
return Ok(());
}
}
};
}
#[cfg_attr(
feature = "cargo-clippy",
allow(clippy::unneeded_field_pattern, clippy::cognitive_complexity)
)]
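/// Translate a single Wasm operator into Cranelift IR, updating the value and
/// control stacks tracked in `state`.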
pub fn translate_operator<FE: FuncEnvironment + ?Sized>(
validator: &mut FuncValidator<impl WasmModuleResources>,
op: &Operator,
builder: &mut FunctionBuilder,
state: &mut FuncTranslationState,
environ: &mut FE,
) -> WasmResult<()> {
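    // Unreachable code still needs its control-stack bookkeeping kept in sync, so it is
    // handled by a dedicated helper rather than by the dispatch below.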
if !state.reachable {
translate_unreachable_operator(validator, &op, builder, state, environ)?;
return Ok(());
}
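    // Given that the state says this code is reachable, the builder should agree.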
debug_assert!(!builder.is_unreachable());
match op {
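        // Locals: `local.get`/`local.set`/`local.tee` map onto cranelift-frontend
        // `Variable`s, which the SSA construction later eliminates. The `ValueLabel`
        // calls only attach debug-info labels to the values.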
Operator::LocalGet { local_index } => {
let val = builder.use_var(Variable::from_u32(*local_index));
state.push1(val);
let label = ValueLabel::from_u32(*local_index);
builder.set_val_label(val, label);
}
Operator::LocalSet { local_index } => {
let mut val = state.pop1();
let ty = builder.func.dfg.value_type(val);
if ty.is_vector() {
val = optionally_bitcast_vector(val, I8X16, builder);
}
builder.def_var(Variable::from_u32(*local_index), val);
let label = ValueLabel::from_u32(*local_index);
builder.set_val_label(val, label);
}
Operator::LocalTee { local_index } => {
let mut val = state.peek1();
let ty = builder.func.dfg.value_type(val);
if ty.is_vector() {
val = optionally_bitcast_vector(val, I8X16, builder);
}
builder.def_var(Variable::from_u32(*local_index), val);
let label = ValueLabel::from_u32(*local_index);
builder.set_val_label(val, label);
}
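        // Globals are either compile-time constants, values stored in memory at an address
        // described by a global value, or custom globals handled entirely by the environment.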
Operator::GlobalGet { global_index } => {
let val = match state.get_global(builder.func, *global_index, environ)? {
GlobalVariable::Const(val) => val,
GlobalVariable::Memory { gv, offset, ty } => {
let addr = builder.ins().global_value(environ.pointer_type(), gv);
let mut flags = ir::MemFlags::trusted();
flags.set_table();
builder.ins().load(ty, flags, addr, offset)
}
GlobalVariable::Custom => environ.translate_custom_global_get(
builder.cursor(),
GlobalIndex::from_u32(*global_index),
)?,
};
state.push1(val);
}
Operator::GlobalSet { global_index } => {
match state.get_global(builder.func, *global_index, environ)? {
GlobalVariable::Const(_) => panic!("global #{} is a constant", *global_index),
GlobalVariable::Memory { gv, offset, ty } => {
let addr = builder.ins().global_value(environ.pointer_type(), gv);
let mut flags = ir::MemFlags::trusted();
flags.set_table();
let mut val = state.pop1();
if ty.is_vector() {
val = optionally_bitcast_vector(val, I8X16, builder);
}
debug_assert_eq!(ty, builder.func.dfg.value_type(val));
builder.ins().store(flags, val, addr, offset);
}
GlobalVariable::Custom => {
let val = state.pop1();
environ.translate_custom_global_set(
builder.cursor(),
GlobalIndex::from_u32(*global_index),
val,
)?;
}
}
}
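        // `drop` and the `select` variants operate on the value stack only; vector operands
        // are canonicalised to I8X16 so that both arms of a `select` have the same type.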
Operator::Drop => {
state.pop1();
}
Operator::Select => {
let (mut arg1, mut arg2, cond) = state.pop3();
if builder.func.dfg.value_type(arg1).is_vector() {
arg1 = optionally_bitcast_vector(arg1, I8X16, builder);
}
if builder.func.dfg.value_type(arg2).is_vector() {
arg2 = optionally_bitcast_vector(arg2, I8X16, builder);
}
state.push1(builder.ins().select(cond, arg1, arg2));
}
Operator::TypedSelect { ty: _ } => {
let (mut arg1, mut arg2, cond) = state.pop3();
if builder.func.dfg.value_type(arg1).is_vector() {
arg1 = optionally_bitcast_vector(arg1, I8X16, builder);
}
if builder.func.dfg.value_type(arg2).is_vector() {
arg2 = optionally_bitcast_vector(arg2, I8X16, builder);
}
state.push1(builder.ins().select(cond, arg1, arg2));
}
Operator::Nop => {
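            // We do nothing.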
}
Operator::Unreachable => {
builder.ins().trap(ir::TrapCode::UnreachableCodeReached);
state.reachable = false;
}
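        // Structured control flow: each `block`/`loop`/`if` pushes a frame on the control
        // stack together with a Cranelift block that receives the frame's results; `end`
        // pops the frame, jumps to that block, and seals it once every branch targeting it
        // has been emitted. The `canonicalise_*` helpers take care of bitcasting any vector
        // block arguments to the canonical I8X16 representation.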
Operator::Block { blockty } => {
let (params, results) = blocktype_params_results(validator, *blockty)?;
let next = block_with_params(builder, results.clone(), environ)?;
state.push_block(next, params.len(), results.len());
}
Operator::Loop { blockty } => {
let (params, results) = blocktype_params_results(validator, *blockty)?;
let loop_body = block_with_params(builder, params.clone(), environ)?;
let next = block_with_params(builder, results.clone(), environ)?;
canonicalise_then_jump(builder, loop_body, state.peekn(params.len()));
state.push_loop(loop_body, next, params.len(), results.len());
state.popn(params.len());
state
.stack
.extend_from_slice(builder.block_params(loop_body));
builder.switch_to_block(loop_body);
environ.translate_loop_header(builder)?;
}
Operator::If { blockty } => {
let val = state.pop1();
let next_block = builder.create_block();
let (params, results) = blocktype_params_results(validator, *blockty)?;
let (destination, else_data) = if params.clone().eq(results.clone()) {
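                // When the `if` has identical parameter and result types, an `else`-less
                // `if` can branch straight to the destination block. We record the branch
                // instruction so that, if an `else` does show up later, its target can be
                // patched to point at a freshly created else block.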
let destination = block_with_params(builder, results.clone(), environ)?;
let branch_inst = canonicalise_brif(
builder,
val,
next_block,
&[],
destination,
state.peekn(params.len()),
);
(
destination,
ElseData::NoElse {
branch_inst,
placeholder: destination,
},
)
} else {
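                // Parameter and result types differ, so the destination block cannot double
                // as the `else` target: allocate a dedicated else block up front.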
let destination = block_with_params(builder, results.clone(), environ)?;
let else_block = block_with_params(builder, params.clone(), environ)?;
canonicalise_brif(
builder,
val,
next_block,
&[],
else_block,
state.peekn(params.len()),
);
builder.seal_block(else_block);
(destination, ElseData::WithElse { else_block })
};
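            // The conditional branch above is `next_block`'s only predecessor, so it can be
            // sealed immediately; translation of the consequent continues there.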
            builder.seal_block(next_block);
            builder.switch_to_block(next_block);
state.push_if(
destination,
else_data,
params.len(),
results.len(),
*blockty,
);
}
Operator::Else => {
let i = state.control_stack.len() - 1;
match state.control_stack[i] {
ControlStackFrame::If {
ref else_data,
head_is_reachable,
ref mut consequent_ends_reachable,
num_return_values,
blocktype,
destination,
..
} => {
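                    // We are done translating the consequent, so record whether it ended
                    // reachable before switching to the alternative.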
debug_assert!(consequent_ends_reachable.is_none());
*consequent_ends_reachable = Some(state.reachable);
if head_is_reachable {
state.reachable = true;
let else_block = match *else_data {
ElseData::NoElse {
branch_inst,
placeholder,
} => {
let (params, _results) =
blocktype_params_results(validator, blocktype)?;
debug_assert_eq!(params.len(), num_return_values);
let else_block =
block_with_params(builder, params.clone(), environ)?;
canonicalise_then_jump(
builder,
destination,
state.peekn(params.len()),
);
state.popn(params.len());
builder.change_jump_destination(
branch_inst,
placeholder,
else_block,
);
builder.seal_block(else_block);
else_block
}
ElseData::WithElse { else_block } => {
canonicalise_then_jump(
builder,
destination,
state.peekn(num_return_values),
);
state.popn(num_return_values);
else_block
}
};
builder.switch_to_block(else_block);
}
}
_ => unreachable!(),
}
}
Operator::End => {
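            // Pop the innermost frame, forward its results to the following code, and seal
            // that block now that every branch targeting it has been emitted. A loop's
            // header is also sealed here, since no further back-edges can appear.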
let frame = state.control_stack.pop().unwrap();
let next_block = frame.following_code();
let return_count = frame.num_return_values();
let return_args = state.peekn_mut(return_count);
canonicalise_then_jump(builder, next_block, return_args);
builder.switch_to_block(next_block);
builder.seal_block(next_block);
if let ControlStackFrame::Loop { header, .. } = frame {
builder.seal_block(header)
}
frame.truncate_value_stack_to_original_size(&mut state.stack);
state
.stack
.extend_from_slice(builder.block_params(next_block));
}
Operator::Br { relative_depth } => {
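            // `relative_depth` counts outward from the innermost frame. Branching to a loop
            // passes the loop's parameters; branching out of any other frame passes its
            // results.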
let i = state.control_stack.len() - 1 - (*relative_depth as usize);
let (return_count, br_destination) = {
let frame = &mut state.control_stack[i];
frame.set_branched_to_exit();
let return_count = if frame.is_loop() {
frame.num_param_values()
} else {
frame.num_return_values()
};
(return_count, frame.br_destination())
};
let destination_args = state.peekn_mut(return_count);
canonicalise_then_jump(builder, br_destination, destination_args);
state.popn(return_count);
state.reachable = false;
}
Operator::BrIf { relative_depth } => translate_br_if(*relative_depth, builder, state),
Operator::BrTable { targets } => {
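            // Two strategies: if none of the targets take arguments, the jump table can
            // reference the destination blocks directly. Otherwise, each distinct depth
            // gets an intermediate block that forwards the arguments with a regular jump.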
let default = targets.default();
let mut min_depth = default;
for depth in targets.targets() {
let depth = depth?;
if depth < min_depth {
min_depth = depth;
}
}
let jump_args_count = {
let i = state.control_stack.len() - 1 - (min_depth as usize);
let min_depth_frame = &state.control_stack[i];
if min_depth_frame.is_loop() {
min_depth_frame.num_param_values()
} else {
min_depth_frame.num_return_values()
}
};
let val = state.pop1();
let mut data = Vec::with_capacity(targets.len() as usize);
if jump_args_count == 0 {
for depth in targets.targets() {
let depth = depth?;
let block = {
let i = state.control_stack.len() - 1 - (depth as usize);
let frame = &mut state.control_stack[i];
frame.set_branched_to_exit();
frame.br_destination()
};
data.push(builder.func.dfg.block_call(block, &[]));
}
let block = {
let i = state.control_stack.len() - 1 - (default as usize);
let frame = &mut state.control_stack[i];
frame.set_branched_to_exit();
frame.br_destination()
};
let block = builder.func.dfg.block_call(block, &[]);
let jt = builder.create_jump_table(JumpTableData::new(block, &data));
builder.ins().br_table(val, jt);
} else {
let return_count = jump_args_count;
let mut dest_block_sequence = vec![];
let mut dest_block_map = HashMap::new();
for depth in targets.targets() {
let depth = depth?;
let branch_block = match dest_block_map.entry(depth as usize) {
hash_map::Entry::Occupied(entry) => *entry.get(),
hash_map::Entry::Vacant(entry) => {
let block = builder.create_block();
dest_block_sequence.push((depth as usize, block));
*entry.insert(block)
}
};
data.push(builder.func.dfg.block_call(branch_block, &[]));
}
let default_branch_block = match dest_block_map.entry(default as usize) {
hash_map::Entry::Occupied(entry) => *entry.get(),
hash_map::Entry::Vacant(entry) => {
let block = builder.create_block();
dest_block_sequence.push((default as usize, block));
*entry.insert(block)
}
};
let default_branch_block = builder.func.dfg.block_call(default_branch_block, &[]);
let jt = builder.create_jump_table(JumpTableData::new(default_branch_block, &data));
builder.ins().br_table(val, jt);
for (depth, dest_block) in dest_block_sequence {
builder.switch_to_block(dest_block);
builder.seal_block(dest_block);
let real_dest_block = {
let i = state.control_stack.len() - 1 - depth;
let frame = &mut state.control_stack[i];
frame.set_branched_to_exit();
frame.br_destination()
};
let destination_args = state.peekn_mut(return_count);
canonicalise_then_jump(builder, real_dest_block, destination_args);
}
state.popn(return_count);
}
state.reachable = false;
}
Operator::Return => {
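            // Frame 0 is the function's outermost frame, so its return count is the
            // function's result count.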
let return_count = {
let frame = &mut state.control_stack[0];
frame.num_return_values()
};
{
let return_args = state.peekn_mut(return_count);
bitcast_wasm_returns(environ, return_args, builder);
builder.ins().return_(return_args);
}
state.popn(return_count);
state.reachable = false;
}
Operator::Try { .. }
| Operator::Catch { .. }
| Operator::Throw { .. }
| Operator::Rethrow { .. }
| Operator::Delegate { .. }
| Operator::CatchAll => {
return Err(wasm_unsupported!(
"proposed exception handling operator {:?}",
op
));
}
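        // Calls: the arguments stay on the value stack while they are bitcast to the exact
        // Cranelift signature types, then the environment emits the call and the call's
        // results replace the arguments on the stack.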
Operator::Call { function_index } => {
let (fref, num_args) = state.get_direct_func(builder.func, *function_index, environ)?;
let args = state.peekn_mut(num_args);
bitcast_wasm_params(
environ,
builder.func.dfg.ext_funcs[fref].signature,
args,
builder,
);
let call = environ.translate_call(
builder.cursor(),
FuncIndex::from_u32(*function_index),
fref,
args,
)?;
let inst_results = builder.inst_results(call);
debug_assert_eq!(
inst_results.len(),
builder.func.dfg.signatures[builder.func.dfg.ext_funcs[fref].signature]
.returns
.len(),
"translate_call results should match the call signature"
);
state.popn(num_args);
state.pushn(inst_results);
}
Operator::CallIndirect {
type_index,
table_index,
table_byte: _,
} => {
let (sigref, num_args) = state.get_indirect_sig(builder.func, *type_index, environ)?;
let table = state.get_or_create_table(builder.func, *table_index, environ)?;
let callee = state.pop1();
let args = state.peekn_mut(num_args);
bitcast_wasm_params(environ, sigref, args, builder);
let call = environ.translate_call_indirect(
builder,
TableIndex::from_u32(*table_index),
table,
TypeIndex::from_u32(*type_index),
sigref,
callee,
state.peekn(num_args),
)?;
let inst_results = builder.inst_results(call);
debug_assert_eq!(
inst_results.len(),
builder.func.dfg.signatures[sigref].returns.len(),
"translate_call_indirect results should match the call signature"
);
state.popn(num_args);
state.pushn(inst_results);
}
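        // Linear-memory operators: the environment resolves each memory index to a heap,
        // and `memory.grow`/`memory.size` are delegated to it.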
Operator::MemoryGrow { mem, mem_byte: _ } => {
let heap_index = MemoryIndex::from_u32(*mem);
let heap = state.get_heap(builder.func, *mem, environ)?;
let val = state.pop1();
state.push1(environ.translate_memory_grow(builder.cursor(), heap_index, heap, val)?)
}
Operator::MemorySize { mem, mem_byte: _ } => {
let heap_index = MemoryIndex::from_u32(*mem);
let heap = state.get_heap(builder.func, *mem, environ)?;
state.push1(environ.translate_memory_size(builder.cursor(), heap_index, heap)?);
}
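        // Loads: the width and signedness of narrow loads are encoded in the Cranelift
        // opcode handed to `translate_load`; the extending vector loads further below call
        // `prepare_addr` directly to obtain the memory flags and the effective address.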
Operator::I32Load8U { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(memarg, ir::Opcode::Uload8, I32, builder, state, environ)?
);
}
Operator::I32Load16U { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(memarg, ir::Opcode::Uload16, I32, builder, state, environ)?
);
}
Operator::I32Load8S { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(memarg, ir::Opcode::Sload8, I32, builder, state, environ)?
);
}
Operator::I32Load16S { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(memarg, ir::Opcode::Sload16, I32, builder, state, environ)?
);
}
Operator::I64Load8U { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(memarg, ir::Opcode::Uload8, I64, builder, state, environ)?
);
}
Operator::I64Load16U { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(memarg, ir::Opcode::Uload16, I64, builder, state, environ)?
);
}
Operator::I64Load8S { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(memarg, ir::Opcode::Sload8, I64, builder, state, environ)?
);
}
Operator::I64Load16S { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(memarg, ir::Opcode::Sload16, I64, builder, state, environ)?
);
}
Operator::I64Load32S { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(memarg, ir::Opcode::Sload32, I64, builder, state, environ)?
);
}
Operator::I64Load32U { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(memarg, ir::Opcode::Uload32, I64, builder, state, environ)?
);
}
Operator::I32Load { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(memarg, ir::Opcode::Load, I32, builder, state, environ)?
);
}
Operator::F32Load { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(memarg, ir::Opcode::Load, F32, builder, state, environ)?
);
}
Operator::I64Load { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(memarg, ir::Opcode::Load, I64, builder, state, environ)?
);
}
Operator::F64Load { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(memarg, ir::Opcode::Load, F64, builder, state, environ)?
);
}
Operator::V128Load { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(memarg, ir::Opcode::Load, I8X16, builder, state, environ)?
);
}
Operator::V128Load8x8S { memarg } => {
let (flags, base) = unwrap_or_return_unreachable_state!(
state,
prepare_addr(memarg, 8, builder, state, environ)?
);
let loaded = builder.ins().sload8x8(flags, base, 0);
state.push1(loaded);
}
Operator::V128Load8x8U { memarg } => {
let (flags, base) = unwrap_or_return_unreachable_state!(
state,
prepare_addr(memarg, 8, builder, state, environ)?
);
let loaded = builder.ins().uload8x8(flags, base, 0);
state.push1(loaded);
}
Operator::V128Load16x4S { memarg } => {
let (flags, base) = unwrap_or_return_unreachable_state!(
state,
prepare_addr(memarg, 8, builder, state, environ)?
);
let loaded = builder.ins().sload16x4(flags, base, 0);
state.push1(loaded);
}
Operator::V128Load16x4U { memarg } => {
let (flags, base) = unwrap_or_return_unreachable_state!(
state,
prepare_addr(memarg, 8, builder, state, environ)?
);
let loaded = builder.ins().uload16x4(flags, base, 0);
state.push1(loaded);
}
Operator::V128Load32x2S { memarg } => {
let (flags, base) = unwrap_or_return_unreachable_state!(
state,
prepare_addr(memarg, 8, builder, state, environ)?
);
let loaded = builder.ins().sload32x2(flags, base, 0);
state.push1(loaded);
}
Operator::V128Load32x2U { memarg } => {
let (flags, base) = unwrap_or_return_unreachable_state!(
state,
prepare_addr(memarg, 8, builder, state, environ)?
);
let loaded = builder.ins().uload32x2(flags, base, 0);
state.push1(loaded);
}
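        // Stores mirror the loads: narrow stores use `Istore8`/`Istore16`/`Istore32`.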
Operator::I32Store { memarg }
| Operator::I64Store { memarg }
| Operator::F32Store { memarg }
| Operator::F64Store { memarg } => {
translate_store(memarg, ir::Opcode::Store, builder, state, environ)?;
}
Operator::I32Store8 { memarg } | Operator::I64Store8 { memarg } => {
translate_store(memarg, ir::Opcode::Istore8, builder, state, environ)?;
}
Operator::I32Store16 { memarg } | Operator::I64Store16 { memarg } => {
translate_store(memarg, ir::Opcode::Istore16, builder, state, environ)?;
}
Operator::I64Store32 { memarg } => {
translate_store(memarg, ir::Opcode::Istore32, builder, state, environ)?;
}
Operator::V128Store { memarg } => {
translate_store(memarg, ir::Opcode::Store, builder, state, environ)?;
}
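        // Constants and scalar integer/floating-point operators.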
Operator::I32Const { value } => state.push1(builder.ins().iconst(I32, i64::from(*value))),
Operator::I64Const { value } => state.push1(builder.ins().iconst(I64, *value)),
Operator::F32Const { value } => {
state.push1(builder.ins().f32const(f32_translation(*value)));
}
Operator::F64Const { value } => {
state.push1(builder.ins().f64const(f64_translation(*value)));
}
Operator::I32Clz | Operator::I64Clz => {
let arg = state.pop1();
state.push1(builder.ins().clz(arg));
}
Operator::I32Ctz | Operator::I64Ctz => {
let arg = state.pop1();
state.push1(builder.ins().ctz(arg));
}
Operator::I32Popcnt | Operator::I64Popcnt => {
let arg = state.pop1();
state.push1(builder.ins().popcnt(arg));
}
Operator::I64ExtendI32S => {
let val = state.pop1();
state.push1(builder.ins().sextend(I64, val));
}
Operator::I64ExtendI32U => {
let val = state.pop1();
state.push1(builder.ins().uextend(I64, val));
}
Operator::I32WrapI64 => {
let val = state.pop1();
state.push1(builder.ins().ireduce(I32, val));
}
Operator::F32Sqrt | Operator::F64Sqrt => {
let arg = state.pop1();
state.push1(builder.ins().sqrt(arg));
}
Operator::F32Ceil | Operator::F64Ceil => {
let arg = state.pop1();
state.push1(builder.ins().ceil(arg));
}
Operator::F32Floor | Operator::F64Floor => {
let arg = state.pop1();
state.push1(builder.ins().floor(arg));
}
Operator::F32Trunc | Operator::F64Trunc => {
let arg = state.pop1();
state.push1(builder.ins().trunc(arg));
}
Operator::F32Nearest | Operator::F64Nearest => {
let arg = state.pop1();
state.push1(builder.ins().nearest(arg));
}
Operator::F32Abs | Operator::F64Abs => {
let val = state.pop1();
state.push1(builder.ins().fabs(val));
}
Operator::F32Neg | Operator::F64Neg => {
let arg = state.pop1();
state.push1(builder.ins().fneg(arg));
}
Operator::F64ConvertI64U | Operator::F64ConvertI32U => {
let val = state.pop1();
state.push1(builder.ins().fcvt_from_uint(F64, val));
}
Operator::F64ConvertI64S | Operator::F64ConvertI32S => {
let val = state.pop1();
state.push1(builder.ins().fcvt_from_sint(F64, val));
}
Operator::F32ConvertI64S | Operator::F32ConvertI32S => {
let val = state.pop1();
state.push1(builder.ins().fcvt_from_sint(F32, val));
}
Operator::F32ConvertI64U | Operator::F32ConvertI32U => {
let val = state.pop1();
state.push1(builder.ins().fcvt_from_uint(F32, val));
}
Operator::F64PromoteF32 => {
let val = state.pop1();
state.push1(builder.ins().fpromote(F64, val));
}
Operator::F32DemoteF64 => {
let val = state.pop1();
state.push1(builder.ins().fdemote(F32, val));
}
Operator::I64TruncF64S | Operator::I64TruncF32S => {
let val = state.pop1();
state.push1(builder.ins().fcvt_to_sint(I64, val));
}
Operator::I32TruncF64S | Operator::I32TruncF32S => {
let val = state.pop1();
state.push1(builder.ins().fcvt_to_sint(I32, val));
}
Operator::I64TruncF64U | Operator::I64TruncF32U => {
let val = state.pop1();
state.push1(builder.ins().fcvt_to_uint(I64, val));
}
Operator::I32TruncF64U | Operator::I32TruncF32U => {
let val = state.pop1();
state.push1(builder.ins().fcvt_to_uint(I32, val));
}
Operator::I64TruncSatF64S | Operator::I64TruncSatF32S => {
let val = state.pop1();
state.push1(builder.ins().fcvt_to_sint_sat(I64, val));
}
Operator::I32TruncSatF64S | Operator::I32TruncSatF32S => {
let val = state.pop1();
state.push1(builder.ins().fcvt_to_sint_sat(I32, val));
}
Operator::I64TruncSatF64U | Operator::I64TruncSatF32U => {
let val = state.pop1();
state.push1(builder.ins().fcvt_to_uint_sat(I64, val));
}
Operator::I32TruncSatF64U | Operator::I32TruncSatF32U => {
let val = state.pop1();
state.push1(builder.ins().fcvt_to_uint_sat(I32, val));
}
Operator::F32ReinterpretI32 => {
let val = state.pop1();
state.push1(builder.ins().bitcast(F32, MemFlags::new(), val));
}
Operator::F64ReinterpretI64 => {
let val = state.pop1();
state.push1(builder.ins().bitcast(F64, MemFlags::new(), val));
}
Operator::I32ReinterpretF32 => {
let val = state.pop1();
state.push1(builder.ins().bitcast(I32, MemFlags::new(), val));
}
Operator::I64ReinterpretF64 => {
let val = state.pop1();
state.push1(builder.ins().bitcast(I64, MemFlags::new(), val));
}
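        // The `extendN_s` operators are expressed as an `ireduce` to the narrow type
        // followed by an `sextend` back to the operand width.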
Operator::I32Extend8S => {
let val = state.pop1();
state.push1(builder.ins().ireduce(I8, val));
let val = state.pop1();
state.push1(builder.ins().sextend(I32, val));
}
Operator::I32Extend16S => {
let val = state.pop1();
state.push1(builder.ins().ireduce(I16, val));
let val = state.pop1();
state.push1(builder.ins().sextend(I32, val));
}
Operator::I64Extend8S => {
let val = state.pop1();
state.push1(builder.ins().ireduce(I8, val));
let val = state.pop1();
state.push1(builder.ins().sextend(I64, val));
}
Operator::I64Extend16S => {
let val = state.pop1();
state.push1(builder.ins().ireduce(I16, val));
let val = state.pop1();
state.push1(builder.ins().sextend(I64, val));
}
Operator::I64Extend32S => {
let val = state.pop1();
state.push1(builder.ins().ireduce(I32, val));
let val = state.pop1();
state.push1(builder.ins().sextend(I64, val));
}
Operator::I32Add | Operator::I64Add => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().iadd(arg1, arg2));
}
Operator::I32And | Operator::I64And => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().band(arg1, arg2));
}
Operator::I32Or | Operator::I64Or => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().bor(arg1, arg2));
}
Operator::I32Xor | Operator::I64Xor => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().bxor(arg1, arg2));
}
Operator::I32Shl | Operator::I64Shl => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().ishl(arg1, arg2));
}
Operator::I32ShrS | Operator::I64ShrS => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().sshr(arg1, arg2));
}
Operator::I32ShrU | Operator::I64ShrU => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().ushr(arg1, arg2));
}
Operator::I32Rotl | Operator::I64Rotl => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().rotl(arg1, arg2));
}
Operator::I32Rotr | Operator::I64Rotr => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().rotr(arg1, arg2));
}
Operator::F32Add | Operator::F64Add => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().fadd(arg1, arg2));
}
Operator::I32Sub | Operator::I64Sub => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().isub(arg1, arg2));
}
Operator::F32Sub | Operator::F64Sub => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().fsub(arg1, arg2));
}
Operator::I32Mul | Operator::I64Mul => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().imul(arg1, arg2));
}
Operator::F32Mul | Operator::F64Mul => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().fmul(arg1, arg2));
}
Operator::F32Div | Operator::F64Div => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().fdiv(arg1, arg2));
}
Operator::I32DivS | Operator::I64DivS => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().sdiv(arg1, arg2));
}
Operator::I32DivU | Operator::I64DivU => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().udiv(arg1, arg2));
}
Operator::I32RemS | Operator::I64RemS => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().srem(arg1, arg2));
}
Operator::I32RemU | Operator::I64RemU => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().urem(arg1, arg2));
}
Operator::F32Min | Operator::F64Min => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().fmin(arg1, arg2));
}
Operator::F32Max | Operator::F64Max => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().fmax(arg1, arg2));
}
Operator::F32Copysign | Operator::F64Copysign => {
let (arg1, arg2) = state.pop2();
state.push1(builder.ins().fcopysign(arg1, arg2));
}
Operator::I32LtS | Operator::I64LtS => {
translate_icmp(IntCC::SignedLessThan, builder, state)
}
Operator::I32LtU | Operator::I64LtU => {
translate_icmp(IntCC::UnsignedLessThan, builder, state)
}
Operator::I32LeS | Operator::I64LeS => {
translate_icmp(IntCC::SignedLessThanOrEqual, builder, state)
}
Operator::I32LeU | Operator::I64LeU => {
translate_icmp(IntCC::UnsignedLessThanOrEqual, builder, state)
}
Operator::I32GtS | Operator::I64GtS => {
translate_icmp(IntCC::SignedGreaterThan, builder, state)
}
Operator::I32GtU | Operator::I64GtU => {
translate_icmp(IntCC::UnsignedGreaterThan, builder, state)
}
Operator::I32GeS | Operator::I64GeS => {
translate_icmp(IntCC::SignedGreaterThanOrEqual, builder, state)
}
Operator::I32GeU | Operator::I64GeU => {
translate_icmp(IntCC::UnsignedGreaterThanOrEqual, builder, state)
}
Operator::I32Eqz | Operator::I64Eqz => {
let arg = state.pop1();
let val = builder.ins().icmp_imm(IntCC::Equal, arg, 0);
state.push1(builder.ins().uextend(I32, val));
}
Operator::I32Eq | Operator::I64Eq => translate_icmp(IntCC::Equal, builder, state),
Operator::F32Eq | Operator::F64Eq => translate_fcmp(FloatCC::Equal, builder, state),
Operator::I32Ne | Operator::I64Ne => translate_icmp(IntCC::NotEqual, builder, state),
Operator::F32Ne | Operator::F64Ne => translate_fcmp(FloatCC::NotEqual, builder, state),
Operator::F32Gt | Operator::F64Gt => translate_fcmp(FloatCC::GreaterThan, builder, state),
Operator::F32Ge | Operator::F64Ge => {
translate_fcmp(FloatCC::GreaterThanOrEqual, builder, state)
}
Operator::F32Lt | Operator::F64Lt => translate_fcmp(FloatCC::LessThan, builder, state),
Operator::F32Le | Operator::F64Le => {
translate_fcmp(FloatCC::LessThanOrEqual, builder, state)
}
Operator::RefNull { hty } => {
state.push1(environ.translate_ref_null(builder.cursor(), (*hty).try_into()?)?)
}
Operator::RefIsNull => {
let value = state.pop1();
state.push1(environ.translate_ref_is_null(builder.cursor(), value)?);
}
Operator::RefFunc { function_index } => {
let index = FuncIndex::from_u32(*function_index);
state.push1(environ.translate_ref_func(builder.cursor(), index)?);
}
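        // Threads proposal: atomic waits/notifies go through the environment, while the
        // remaining atomics are lowered by shared helpers parameterised on the result type,
        // the in-memory access type, and (for read-modify-write ops) the operation.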
Operator::MemoryAtomicWait32 { memarg } | Operator::MemoryAtomicWait64 { memarg } => {
let implied_ty = match op {
Operator::MemoryAtomicWait64 { .. } => I64,
Operator::MemoryAtomicWait32 { .. } => I32,
_ => unreachable!(),
};
let heap_index = MemoryIndex::from_u32(memarg.memory);
let heap = state.get_heap(builder.func, memarg.memory, environ)?;
            let timeout = state.pop1();
            let expected = state.pop1();
            assert!(builder.func.dfg.value_type(expected) == implied_ty);
let addr = state.pop1();
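            // Fold the static offset into the address up front, trapping (rather than
            // wrapping) if the addition overflows the index type.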
let effective_addr = if memarg.offset == 0 {
addr
} else {
let index_type = environ.heaps()[heap].index_type;
let offset = builder.ins().iconst(index_type, memarg.offset as i64);
builder
.ins()
.uadd_overflow_trap(addr, offset, ir::TrapCode::HeapOutOfBounds)
};
let res = environ.translate_atomic_wait(
builder.cursor(),
heap_index,
heap,
effective_addr,
expected,
timeout,
)?;
state.push1(res);
}
Operator::MemoryAtomicNotify { memarg } => {
let heap_index = MemoryIndex::from_u32(memarg.memory);
let heap = state.get_heap(builder.func, memarg.memory, environ)?;
            let count = state.pop1();
            let addr = state.pop1();
let effective_addr = if memarg.offset == 0 {
addr
} else {
let index_type = environ.heaps()[heap].index_type;
let offset = builder.ins().iconst(index_type, memarg.offset as i64);
builder
.ins()
.uadd_overflow_trap(addr, offset, ir::TrapCode::HeapOutOfBounds)
};
let res = environ.translate_atomic_notify(
builder.cursor(),
heap_index,
heap,
effective_addr,
count,
)?;
state.push1(res);
}
Operator::I32AtomicLoad { memarg } => {
translate_atomic_load(I32, I32, memarg, builder, state, environ)?
}
Operator::I64AtomicLoad { memarg } => {
translate_atomic_load(I64, I64, memarg, builder, state, environ)?
}
Operator::I32AtomicLoad8U { memarg } => {
translate_atomic_load(I32, I8, memarg, builder, state, environ)?
}
Operator::I32AtomicLoad16U { memarg } => {
translate_atomic_load(I32, I16, memarg, builder, state, environ)?
}
Operator::I64AtomicLoad8U { memarg } => {
translate_atomic_load(I64, I8, memarg, builder, state, environ)?
}
Operator::I64AtomicLoad16U { memarg } => {
translate_atomic_load(I64, I16, memarg, builder, state, environ)?
}
Operator::I64AtomicLoad32U { memarg } => {
translate_atomic_load(I64, I32, memarg, builder, state, environ)?
}
Operator::I32AtomicStore { memarg } => {
translate_atomic_store(I32, memarg, builder, state, environ)?
}
Operator::I64AtomicStore { memarg } => {
translate_atomic_store(I64, memarg, builder, state, environ)?
}
Operator::I32AtomicStore8 { memarg } => {
translate_atomic_store(I8, memarg, builder, state, environ)?
}
Operator::I32AtomicStore16 { memarg } => {
translate_atomic_store(I16, memarg, builder, state, environ)?
}
Operator::I64AtomicStore8 { memarg } => {
translate_atomic_store(I8, memarg, builder, state, environ)?
}
Operator::I64AtomicStore16 { memarg } => {
translate_atomic_store(I16, memarg, builder, state, environ)?
}
Operator::I64AtomicStore32 { memarg } => {
translate_atomic_store(I32, memarg, builder, state, environ)?
}
Operator::I32AtomicRmwAdd { memarg } => {
translate_atomic_rmw(I32, I32, AtomicRmwOp::Add, memarg, builder, state, environ)?
}
Operator::I64AtomicRmwAdd { memarg } => {
translate_atomic_rmw(I64, I64, AtomicRmwOp::Add, memarg, builder, state, environ)?
}
Operator::I32AtomicRmw8AddU { memarg } => {
translate_atomic_rmw(I32, I8, AtomicRmwOp::Add, memarg, builder, state, environ)?
}
Operator::I32AtomicRmw16AddU { memarg } => {
translate_atomic_rmw(I32, I16, AtomicRmwOp::Add, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw8AddU { memarg } => {
translate_atomic_rmw(I64, I8, AtomicRmwOp::Add, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw16AddU { memarg } => {
translate_atomic_rmw(I64, I16, AtomicRmwOp::Add, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw32AddU { memarg } => {
translate_atomic_rmw(I64, I32, AtomicRmwOp::Add, memarg, builder, state, environ)?
}
Operator::I32AtomicRmwSub { memarg } => {
translate_atomic_rmw(I32, I32, AtomicRmwOp::Sub, memarg, builder, state, environ)?
}
Operator::I64AtomicRmwSub { memarg } => {
translate_atomic_rmw(I64, I64, AtomicRmwOp::Sub, memarg, builder, state, environ)?
}
Operator::I32AtomicRmw8SubU { memarg } => {
translate_atomic_rmw(I32, I8, AtomicRmwOp::Sub, memarg, builder, state, environ)?
}
Operator::I32AtomicRmw16SubU { memarg } => {
translate_atomic_rmw(I32, I16, AtomicRmwOp::Sub, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw8SubU { memarg } => {
translate_atomic_rmw(I64, I8, AtomicRmwOp::Sub, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw16SubU { memarg } => {
translate_atomic_rmw(I64, I16, AtomicRmwOp::Sub, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw32SubU { memarg } => {
translate_atomic_rmw(I64, I32, AtomicRmwOp::Sub, memarg, builder, state, environ)?
}
Operator::I32AtomicRmwAnd { memarg } => {
translate_atomic_rmw(I32, I32, AtomicRmwOp::And, memarg, builder, state, environ)?
}
Operator::I64AtomicRmwAnd { memarg } => {
translate_atomic_rmw(I64, I64, AtomicRmwOp::And, memarg, builder, state, environ)?
}
Operator::I32AtomicRmw8AndU { memarg } => {
translate_atomic_rmw(I32, I8, AtomicRmwOp::And, memarg, builder, state, environ)?
}
Operator::I32AtomicRmw16AndU { memarg } => {
translate_atomic_rmw(I32, I16, AtomicRmwOp::And, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw8AndU { memarg } => {
translate_atomic_rmw(I64, I8, AtomicRmwOp::And, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw16AndU { memarg } => {
translate_atomic_rmw(I64, I16, AtomicRmwOp::And, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw32AndU { memarg } => {
translate_atomic_rmw(I64, I32, AtomicRmwOp::And, memarg, builder, state, environ)?
}
Operator::I32AtomicRmwOr { memarg } => {
translate_atomic_rmw(I32, I32, AtomicRmwOp::Or, memarg, builder, state, environ)?
}
Operator::I64AtomicRmwOr { memarg } => {
translate_atomic_rmw(I64, I64, AtomicRmwOp::Or, memarg, builder, state, environ)?
}
Operator::I32AtomicRmw8OrU { memarg } => {
translate_atomic_rmw(I32, I8, AtomicRmwOp::Or, memarg, builder, state, environ)?
}
Operator::I32AtomicRmw16OrU { memarg } => {
translate_atomic_rmw(I32, I16, AtomicRmwOp::Or, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw8OrU { memarg } => {
translate_atomic_rmw(I64, I8, AtomicRmwOp::Or, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw16OrU { memarg } => {
translate_atomic_rmw(I64, I16, AtomicRmwOp::Or, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw32OrU { memarg } => {
translate_atomic_rmw(I64, I32, AtomicRmwOp::Or, memarg, builder, state, environ)?
}
Operator::I32AtomicRmwXor { memarg } => {
translate_atomic_rmw(I32, I32, AtomicRmwOp::Xor, memarg, builder, state, environ)?
}
Operator::I64AtomicRmwXor { memarg } => {
translate_atomic_rmw(I64, I64, AtomicRmwOp::Xor, memarg, builder, state, environ)?
}
Operator::I32AtomicRmw8XorU { memarg } => {
translate_atomic_rmw(I32, I8, AtomicRmwOp::Xor, memarg, builder, state, environ)?
}
Operator::I32AtomicRmw16XorU { memarg } => {
translate_atomic_rmw(I32, I16, AtomicRmwOp::Xor, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw8XorU { memarg } => {
translate_atomic_rmw(I64, I8, AtomicRmwOp::Xor, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw16XorU { memarg } => {
translate_atomic_rmw(I64, I16, AtomicRmwOp::Xor, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw32XorU { memarg } => {
translate_atomic_rmw(I64, I32, AtomicRmwOp::Xor, memarg, builder, state, environ)?
}
Operator::I32AtomicRmwXchg { memarg } => {
translate_atomic_rmw(I32, I32, AtomicRmwOp::Xchg, memarg, builder, state, environ)?
}
Operator::I64AtomicRmwXchg { memarg } => {
translate_atomic_rmw(I64, I64, AtomicRmwOp::Xchg, memarg, builder, state, environ)?
}
Operator::I32AtomicRmw8XchgU { memarg } => {
translate_atomic_rmw(I32, I8, AtomicRmwOp::Xchg, memarg, builder, state, environ)?
}
Operator::I32AtomicRmw16XchgU { memarg } => {
translate_atomic_rmw(I32, I16, AtomicRmwOp::Xchg, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw8XchgU { memarg } => {
translate_atomic_rmw(I64, I8, AtomicRmwOp::Xchg, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw16XchgU { memarg } => {
translate_atomic_rmw(I64, I16, AtomicRmwOp::Xchg, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw32XchgU { memarg } => {
translate_atomic_rmw(I64, I32, AtomicRmwOp::Xchg, memarg, builder, state, environ)?
}
Operator::I32AtomicRmwCmpxchg { memarg } => {
translate_atomic_cas(I32, I32, memarg, builder, state, environ)?
}
Operator::I64AtomicRmwCmpxchg { memarg } => {
translate_atomic_cas(I64, I64, memarg, builder, state, environ)?
}
Operator::I32AtomicRmw8CmpxchgU { memarg } => {
translate_atomic_cas(I32, I8, memarg, builder, state, environ)?
}
Operator::I32AtomicRmw16CmpxchgU { memarg } => {
translate_atomic_cas(I32, I16, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw8CmpxchgU { memarg } => {
translate_atomic_cas(I64, I8, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw16CmpxchgU { memarg } => {
translate_atomic_cas(I64, I16, memarg, builder, state, environ)?
}
Operator::I64AtomicRmw32CmpxchgU { memarg } => {
translate_atomic_cas(I64, I32, memarg, builder, state, environ)?
}
Operator::AtomicFence { .. } => {
builder.ins().fence();
}
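        // Bulk-memory operators (`memory.copy`/`fill`/`init`, `data.drop`) are delegated to
        // the environment.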
Operator::MemoryCopy { src_mem, dst_mem } => {
let src_index = MemoryIndex::from_u32(*src_mem);
let dst_index = MemoryIndex::from_u32(*dst_mem);
let src_heap = state.get_heap(builder.func, *src_mem, environ)?;
let dst_heap = state.get_heap(builder.func, *dst_mem, environ)?;
let len = state.pop1();
let src_pos = state.pop1();
let dst_pos = state.pop1();
environ.translate_memory_copy(
builder.cursor(),
src_index,
src_heap,
dst_index,
dst_heap,
dst_pos,
src_pos,
len,
)?;
}
Operator::MemoryFill { mem } => {
let heap_index = MemoryIndex::from_u32(*mem);
let heap = state.get_heap(builder.func, *mem, environ)?;
let len = state.pop1();
let val = state.pop1();
let dest = state.pop1();
environ.translate_memory_fill(builder.cursor(), heap_index, heap, dest, val, len)?;
}
Operator::MemoryInit { data_index, mem } => {
let heap_index = MemoryIndex::from_u32(*mem);
let heap = state.get_heap(builder.func, *mem, environ)?;
let len = state.pop1();
let src = state.pop1();
let dest = state.pop1();
environ.translate_memory_init(
builder.cursor(),
heap_index,
heap,
*data_index,
dest,
src,
len,
)?;
}
Operator::DataDrop { data_index } => {
environ.translate_data_drop(builder.cursor(), *data_index)?;
}
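        // Table operators: table references are obtained through `state.get_or_create_table`
        // and the actual semantics are provided by the environment.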
Operator::TableSize { table: index } => {
let table = state.get_or_create_table(builder.func, *index, environ)?;
state.push1(environ.translate_table_size(
builder.cursor(),
TableIndex::from_u32(*index),
table,
)?);
}
Operator::TableGrow { table: index } => {
let table_index = TableIndex::from_u32(*index);
let table = state.get_or_create_table(builder.func, *index, environ)?;
let delta = state.pop1();
let init_value = state.pop1();
state.push1(environ.translate_table_grow(
builder.cursor(),
table_index,
table,
delta,
init_value,
)?);
}
Operator::TableGet { table: index } => {
let table_index = TableIndex::from_u32(*index);
let table = state.get_or_create_table(builder.func, *index, environ)?;
let index = state.pop1();
state.push1(environ.translate_table_get(builder, table_index, table, index)?);
}
Operator::TableSet { table: index } => {
let table_index = TableIndex::from_u32(*index);
let table = state.get_or_create_table(builder.func, *index, environ)?;
let value = state.pop1();
let index = state.pop1();
environ.translate_table_set(builder, table_index, table, value, index)?;
}
Operator::TableCopy {
dst_table: dst_table_index,
src_table: src_table_index,
} => {
let dst_table = state.get_or_create_table(builder.func, *dst_table_index, environ)?;
let src_table = state.get_or_create_table(builder.func, *src_table_index, environ)?;
let len = state.pop1();
let src = state.pop1();
let dest = state.pop1();
environ.translate_table_copy(
builder.cursor(),
TableIndex::from_u32(*dst_table_index),
dst_table,
TableIndex::from_u32(*src_table_index),
src_table,
dest,
src,
len,
)?;
}
Operator::TableFill { table } => {
let table_index = TableIndex::from_u32(*table);
let len = state.pop1();
let val = state.pop1();
let dest = state.pop1();
environ.translate_table_fill(builder.cursor(), table_index, dest, val, len)?;
}
Operator::TableInit {
elem_index,
table: table_index,
} => {
let table = state.get_or_create_table(builder.func, *table_index, environ)?;
let len = state.pop1();
let src = state.pop1();
let dest = state.pop1();
environ.translate_table_init(
builder.cursor(),
*elem_index,
TableIndex::from_u32(*table_index),
table,
dest,
src,
len,
)?;
}
Operator::ElemDrop { elem_index } => {
environ.translate_elem_drop(builder.cursor(), *elem_index)?;
}
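        // SIMD: a Wasm v128 value may currently be held as any 16-byte vector type, so
        // operands are bitcast to the type implied by the operator (`type_of(op)`) before use.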
Operator::V128Const { value } => {
let data = value.bytes().to_vec().into();
let handle = builder.func.dfg.constants.insert(data);
let value = builder.ins().vconst(I8X16, handle);
state.push1(value)
}
Operator::I8x16Splat | Operator::I16x8Splat => {
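            // The Wasm `i8x16.splat`/`i16x8.splat` operand is an `i32`, so it is reduced to
            // the lane type before being broadcast.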
let reduced = builder.ins().ireduce(type_of(op).lane_type(), state.pop1());
let splatted = builder.ins().splat(type_of(op), reduced);
state.push1(splatted)
}
Operator::I32x4Splat
| Operator::I64x2Splat
| Operator::F32x4Splat
| Operator::F64x2Splat => {
let splatted = builder.ins().splat(type_of(op), state.pop1());
state.push1(splatted)
}
Operator::V128Load8Splat { memarg }
| Operator::V128Load16Splat { memarg }
| Operator::V128Load32Splat { memarg }
| Operator::V128Load64Splat { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(
memarg,
ir::Opcode::Load,
type_of(op).lane_type(),
builder,
state,
environ,
)?
);
let splatted = builder.ins().splat(type_of(op), state.pop1());
state.push1(splatted)
}
Operator::V128Load32Zero { memarg } | Operator::V128Load64Zero { memarg } => {
unwrap_or_return_unreachable_state!(
state,
translate_load(
memarg,
ir::Opcode::Load,
type_of(op).lane_type(),
builder,
state,
environ,
)?
);
let as_vector = builder.ins().scalar_to_vector(type_of(op), state.pop1());
state.push1(as_vector)
}
Operator::V128Load8Lane { memarg, lane }
| Operator::V128Load16Lane { memarg, lane }
| Operator::V128Load32Lane { memarg, lane }
| Operator::V128Load64Lane { memarg, lane } => {
let vector = pop1_with_bitcast(state, type_of(op), builder);
unwrap_or_return_unreachable_state!(
state,
translate_load(
memarg,
ir::Opcode::Load,
type_of(op).lane_type(),
builder,
state,
environ,
)?
);
let replacement = state.pop1();
state.push1(builder.ins().insertlane(vector, replacement, *lane))
}
Operator::V128Store8Lane { memarg, lane }
| Operator::V128Store16Lane { memarg, lane }
| Operator::V128Store32Lane { memarg, lane }
| Operator::V128Store64Lane { memarg, lane } => {
let vector = pop1_with_bitcast(state, type_of(op), builder);
            state.push1(builder.ins().extractlane(vector, *lane));
translate_store(memarg, ir::Opcode::Store, builder, state, environ)?;
}
Operator::I8x16ExtractLaneS { lane } | Operator::I16x8ExtractLaneS { lane } => {
let vector = pop1_with_bitcast(state, type_of(op), builder);
            let extracted = builder.ins().extractlane(vector, *lane);
state.push1(builder.ins().sextend(I32, extracted))
}
Operator::I8x16ExtractLaneU { lane } | Operator::I16x8ExtractLaneU { lane } => {
let vector = pop1_with_bitcast(state, type_of(op), builder);
            let extracted = builder.ins().extractlane(vector, *lane);
state.push1(builder.ins().uextend(I32, extracted));
}
Operator::I32x4ExtractLane { lane }
| Operator::I64x2ExtractLane { lane }
| Operator::F32x4ExtractLane { lane }
| Operator::F64x2ExtractLane { lane } => {
let vector = pop1_with_bitcast(state, type_of(op), builder);
            state.push1(builder.ins().extractlane(vector, *lane))
}
Operator::I8x16ReplaceLane { lane } | Operator::I16x8ReplaceLane { lane } => {
let (vector, replacement) = state.pop2();
let ty = type_of(op);
let reduced = builder.ins().ireduce(ty.lane_type(), replacement);
let vector = optionally_bitcast_vector(vector, ty, builder);
state.push1(builder.ins().insertlane(vector, reduced, *lane))
}
Operator::I32x4ReplaceLane { lane }
| Operator::I64x2ReplaceLane { lane }
| Operator::F32x4ReplaceLane { lane }
| Operator::F64x2ReplaceLane { lane } => {
let (vector, replacement) = state.pop2();
let vector = optionally_bitcast_vector(vector, type_of(op), builder);
state.push1(builder.ins().insertlane(vector, replacement, *lane))
}
Operator::I8x16Shuffle { lanes, .. } => {
let (a, b) = pop2_with_bitcast(state, I8X16, builder);
let lanes = ConstantData::from(lanes.as_ref());
let mask = builder.func.dfg.immediates.push(lanes);
let shuffled = builder.ins().shuffle(a, b, mask);
state.push1(shuffled)
}
Operator::I8x16Swizzle => {
let (a, b) = pop2_with_bitcast(state, I8X16, builder);
state.push1(builder.ins().swizzle(a, b))
}
Operator::I8x16Add | Operator::I16x8Add | Operator::I32x4Add | Operator::I64x2Add => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().iadd(a, b))
}
Operator::I8x16AddSatS | Operator::I16x8AddSatS => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().sadd_sat(a, b))
}
Operator::I8x16AddSatU | Operator::I16x8AddSatU => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().uadd_sat(a, b))
}
Operator::I8x16Sub | Operator::I16x8Sub | Operator::I32x4Sub | Operator::I64x2Sub => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().isub(a, b))
}
Operator::I8x16SubSatS | Operator::I16x8SubSatS => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().ssub_sat(a, b))
}
Operator::I8x16SubSatU | Operator::I16x8SubSatU => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().usub_sat(a, b))
}
Operator::I8x16MinS | Operator::I16x8MinS | Operator::I32x4MinS => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().smin(a, b))
}
Operator::I8x16MinU | Operator::I16x8MinU | Operator::I32x4MinU => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().umin(a, b))
}
Operator::I8x16MaxS | Operator::I16x8MaxS | Operator::I32x4MaxS => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().smax(a, b))
}
Operator::I8x16MaxU | Operator::I16x8MaxU | Operator::I32x4MaxU => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().umax(a, b))
}
Operator::I8x16AvgrU | Operator::I16x8AvgrU => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().avg_round(a, b))
}
Operator::I8x16Neg | Operator::I16x8Neg | Operator::I32x4Neg | Operator::I64x2Neg => {
let a = pop1_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().ineg(a))
}
Operator::I8x16Abs | Operator::I16x8Abs | Operator::I32x4Abs | Operator::I64x2Abs => {
let a = pop1_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().iabs(a))
}
Operator::I16x8Mul | Operator::I32x4Mul | Operator::I64x2Mul => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().imul(a, b))
}
Operator::V128Or => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().bor(a, b))
}
Operator::V128Xor => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().bxor(a, b))
}
Operator::V128And => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().band(a, b))
}
Operator::V128AndNot => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().band_not(a, b))
}
Operator::V128Not => {
let a = state.pop1();
state.push1(builder.ins().bnot(a));
}
Operator::I8x16Shl | Operator::I16x8Shl | Operator::I32x4Shl | Operator::I64x2Shl => {
let (a, b) = state.pop2();
let bitcast_a = optionally_bitcast_vector(a, type_of(op), builder);
state.push1(builder.ins().ishl(bitcast_a, b))
}
Operator::I8x16ShrU | Operator::I16x8ShrU | Operator::I32x4ShrU | Operator::I64x2ShrU => {
let (a, b) = state.pop2();
let bitcast_a = optionally_bitcast_vector(a, type_of(op), builder);
state.push1(builder.ins().ushr(bitcast_a, b))
}
Operator::I8x16ShrS | Operator::I16x8ShrS | Operator::I32x4ShrS | Operator::I64x2ShrS => {
let (a, b) = state.pop2();
let bitcast_a = optionally_bitcast_vector(a, type_of(op), builder);
state.push1(builder.ins().sshr(bitcast_a, b))
}
Operator::V128Bitselect => {
let (a, b, c) = pop3_with_bitcast(state, I8X16, builder);
state.push1(builder.ins().bitselect(c, a, b))
}
Operator::V128AnyTrue => {
let a = pop1_with_bitcast(state, type_of(op), builder);
let bool_result = builder.ins().vany_true(a);
state.push1(builder.ins().uextend(I32, bool_result))
}
Operator::I8x16AllTrue
| Operator::I16x8AllTrue
| Operator::I32x4AllTrue
| Operator::I64x2AllTrue => {
let a = pop1_with_bitcast(state, type_of(op), builder);
let bool_result = builder.ins().vall_true(a);
state.push1(builder.ins().uextend(I32, bool_result))
}
Operator::I8x16Bitmask
| Operator::I16x8Bitmask
| Operator::I32x4Bitmask
| Operator::I64x2Bitmask => {
let a = pop1_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().vhigh_bits(I32, a));
}
Operator::I8x16Eq | Operator::I16x8Eq | Operator::I32x4Eq | Operator::I64x2Eq => {
translate_vector_icmp(IntCC::Equal, type_of(op), builder, state)
}
Operator::I8x16Ne | Operator::I16x8Ne | Operator::I32x4Ne | Operator::I64x2Ne => {
translate_vector_icmp(IntCC::NotEqual, type_of(op), builder, state)
}
Operator::I8x16GtS | Operator::I16x8GtS | Operator::I32x4GtS | Operator::I64x2GtS => {
translate_vector_icmp(IntCC::SignedGreaterThan, type_of(op), builder, state)
}
Operator::I8x16LtS | Operator::I16x8LtS | Operator::I32x4LtS | Operator::I64x2LtS => {
translate_vector_icmp(IntCC::SignedLessThan, type_of(op), builder, state)
}
Operator::I8x16GtU | Operator::I16x8GtU | Operator::I32x4GtU => {
translate_vector_icmp(IntCC::UnsignedGreaterThan, type_of(op), builder, state)
}
Operator::I8x16LtU | Operator::I16x8LtU | Operator::I32x4LtU => {
translate_vector_icmp(IntCC::UnsignedLessThan, type_of(op), builder, state)
}
Operator::I8x16GeS | Operator::I16x8GeS | Operator::I32x4GeS | Operator::I64x2GeS => {
translate_vector_icmp(IntCC::SignedGreaterThanOrEqual, type_of(op), builder, state)
}
Operator::I8x16LeS | Operator::I16x8LeS | Operator::I32x4LeS | Operator::I64x2LeS => {
translate_vector_icmp(IntCC::SignedLessThanOrEqual, type_of(op), builder, state)
}
Operator::I8x16GeU | Operator::I16x8GeU | Operator::I32x4GeU => translate_vector_icmp(
IntCC::UnsignedGreaterThanOrEqual,
type_of(op),
builder,
state,
),
Operator::I8x16LeU | Operator::I16x8LeU | Operator::I32x4LeU => {
translate_vector_icmp(IntCC::UnsignedLessThanOrEqual, type_of(op), builder, state)
}
Operator::F32x4Eq | Operator::F64x2Eq => {
translate_vector_fcmp(FloatCC::Equal, type_of(op), builder, state)
}
Operator::F32x4Ne | Operator::F64x2Ne => {
translate_vector_fcmp(FloatCC::NotEqual, type_of(op), builder, state)
}
Operator::F32x4Lt | Operator::F64x2Lt => {
translate_vector_fcmp(FloatCC::LessThan, type_of(op), builder, state)
}
Operator::F32x4Gt | Operator::F64x2Gt => {
translate_vector_fcmp(FloatCC::GreaterThan, type_of(op), builder, state)
}
Operator::F32x4Le | Operator::F64x2Le => {
translate_vector_fcmp(FloatCC::LessThanOrEqual, type_of(op), builder, state)
}
Operator::F32x4Ge | Operator::F64x2Ge => {
translate_vector_fcmp(FloatCC::GreaterThanOrEqual, type_of(op), builder, state)
}
Operator::F32x4Add | Operator::F64x2Add => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().fadd(a, b))
}
Operator::F32x4Sub | Operator::F64x2Sub => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().fsub(a, b))
}
Operator::F32x4Mul | Operator::F64x2Mul => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().fmul(a, b))
}
Operator::F32x4Div | Operator::F64x2Div => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().fdiv(a, b))
}
Operator::F32x4Max | Operator::F64x2Max => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().fmax(a, b))
}
Operator::F32x4Min | Operator::F64x2Min => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().fmin(a, b))
}
Operator::F32x4PMax | Operator::F64x2PMax => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().fmax_pseudo(a, b))
}
Operator::F32x4PMin | Operator::F64x2PMin => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().fmin_pseudo(a, b))
}
Operator::F32x4Sqrt | Operator::F64x2Sqrt => {
let a = pop1_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().sqrt(a))
}
Operator::F32x4Neg | Operator::F64x2Neg => {
let a = pop1_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().fneg(a))
}
Operator::F32x4Abs | Operator::F64x2Abs => {
let a = pop1_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().fabs(a))
}
Operator::F32x4ConvertI32x4S => {
let a = pop1_with_bitcast(state, I32X4, builder);
state.push1(builder.ins().fcvt_from_sint(F32X4, a))
}
Operator::F32x4ConvertI32x4U => {
let a = pop1_with_bitcast(state, I32X4, builder);
state.push1(builder.ins().fcvt_from_uint(F32X4, a))
}
Operator::F64x2ConvertLowI32x4S => {
let a = pop1_with_bitcast(state, I32X4, builder);
state.push1(builder.ins().fcvt_low_from_sint(F64X2, a));
}
Operator::F64x2ConvertLowI32x4U => {
let a = pop1_with_bitcast(state, I32X4, builder);
let widened_a = builder.ins().uwiden_low(a);
state.push1(builder.ins().fcvt_from_uint(F64X2, widened_a));
}
Operator::F64x2PromoteLowF32x4 => {
let a = pop1_with_bitcast(state, F32X4, builder);
state.push1(builder.ins().fvpromote_low(a));
}
Operator::F32x4DemoteF64x2Zero => {
let a = pop1_with_bitcast(state, F64X2, builder);
state.push1(builder.ins().fvdemote(a));
}
Operator::I32x4TruncSatF32x4S => {
let a = pop1_with_bitcast(state, F32X4, builder);
state.push1(builder.ins().fcvt_to_sint_sat(I32X4, a))
}
Operator::I32x4TruncSatF64x2SZero => {
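            // Convert both f64 lanes to i64, then narrow against a zero vector so that the
            // upper two i32 lanes of the result are zero.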
let a = pop1_with_bitcast(state, F64X2, builder);
let converted_a = builder.ins().fcvt_to_sint_sat(I64X2, a);
let handle = builder.func.dfg.constants.insert(vec![0u8; 16].into());
let zero = builder.ins().vconst(I64X2, handle);
state.push1(builder.ins().snarrow(converted_a, zero));
}
Operator::I32x4RelaxedTruncF32x4U | Operator::I32x4TruncSatF32x4U => {
let a = pop1_with_bitcast(state, F32X4, builder);
state.push1(builder.ins().fcvt_to_uint_sat(I32X4, a))
}
Operator::I32x4RelaxedTruncF64x2UZero | Operator::I32x4TruncSatF64x2UZero => {
let a = pop1_with_bitcast(state, F64X2, builder);
let converted_a = builder.ins().fcvt_to_uint_sat(I64X2, a);
let handle = builder.func.dfg.constants.insert(vec![0u8; 16].into());
let zero = builder.ins().vconst(I64X2, handle);
state.push1(builder.ins().uunarrow(converted_a, zero));
}
Operator::I8x16NarrowI16x8S => {
let (a, b) = pop2_with_bitcast(state, I16X8, builder);
state.push1(builder.ins().snarrow(a, b))
}
Operator::I16x8NarrowI32x4S => {
let (a, b) = pop2_with_bitcast(state, I32X4, builder);
state.push1(builder.ins().snarrow(a, b))
}
Operator::I8x16NarrowI16x8U => {
let (a, b) = pop2_with_bitcast(state, I16X8, builder);
state.push1(builder.ins().unarrow(a, b))
}
Operator::I16x8NarrowI32x4U => {
let (a, b) = pop2_with_bitcast(state, I32X4, builder);
state.push1(builder.ins().unarrow(a, b))
}
Operator::I16x8ExtendLowI8x16S => {
let a = pop1_with_bitcast(state, I8X16, builder);
state.push1(builder.ins().swiden_low(a))
}
Operator::I16x8ExtendHighI8x16S => {
let a = pop1_with_bitcast(state, I8X16, builder);
state.push1(builder.ins().swiden_high(a))
}
Operator::I16x8ExtendLowI8x16U => {
let a = pop1_with_bitcast(state, I8X16, builder);
state.push1(builder.ins().uwiden_low(a))
}
Operator::I16x8ExtendHighI8x16U => {
let a = pop1_with_bitcast(state, I8X16, builder);
state.push1(builder.ins().uwiden_high(a))
}
Operator::I32x4ExtendLowI16x8S => {
let a = pop1_with_bitcast(state, I16X8, builder);
state.push1(builder.ins().swiden_low(a))
}
Operator::I32x4ExtendHighI16x8S => {
let a = pop1_with_bitcast(state, I16X8, builder);
state.push1(builder.ins().swiden_high(a))
}
Operator::I32x4ExtendLowI16x8U => {
let a = pop1_with_bitcast(state, I16X8, builder);
state.push1(builder.ins().uwiden_low(a))
}
Operator::I32x4ExtendHighI16x8U => {
let a = pop1_with_bitcast(state, I16X8, builder);
state.push1(builder.ins().uwiden_high(a))
}
Operator::I64x2ExtendLowI32x4S => {
let a = pop1_with_bitcast(state, I32X4, builder);
state.push1(builder.ins().swiden_low(a))
}
Operator::I64x2ExtendHighI32x4S => {
let a = pop1_with_bitcast(state, I32X4, builder);
state.push1(builder.ins().swiden_high(a))
}
Operator::I64x2ExtendLowI32x4U => {
let a = pop1_with_bitcast(state, I32X4, builder);
state.push1(builder.ins().uwiden_low(a))
}
Operator::I64x2ExtendHighI32x4U => {
let a = pop1_with_bitcast(state, I32X4, builder);
state.push1(builder.ins().uwiden_high(a))
}
Operator::I16x8ExtAddPairwiseI8x16S => {
let a = pop1_with_bitcast(state, I8X16, builder);
let widen_low = builder.ins().swiden_low(a);
let widen_high = builder.ins().swiden_high(a);
state.push1(builder.ins().iadd_pairwise(widen_low, widen_high));
}
Operator::I32x4ExtAddPairwiseI16x8S => {
let a = pop1_with_bitcast(state, I16X8, builder);
let widen_low = builder.ins().swiden_low(a);
let widen_high = builder.ins().swiden_high(a);
state.push1(builder.ins().iadd_pairwise(widen_low, widen_high));
}
Operator::I16x8ExtAddPairwiseI8x16U => {
let a = pop1_with_bitcast(state, I8X16, builder);
let widen_low = builder.ins().uwiden_low(a);
let widen_high = builder.ins().uwiden_high(a);
state.push1(builder.ins().iadd_pairwise(widen_low, widen_high));
}
Operator::I32x4ExtAddPairwiseI16x8U => {
let a = pop1_with_bitcast(state, I16X8, builder);
let widen_low = builder.ins().uwiden_low(a);
let widen_high = builder.ins().uwiden_high(a);
state.push1(builder.ins().iadd_pairwise(widen_low, widen_high));
}
Operator::F32x4Ceil | Operator::F64x2Ceil => {
let arg = pop1_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().ceil(arg));
}
Operator::F32x4Floor | Operator::F64x2Floor => {
let arg = pop1_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().floor(arg));
}
Operator::F32x4Trunc | Operator::F64x2Trunc => {
let arg = pop1_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().trunc(arg));
}
Operator::F32x4Nearest | Operator::F64x2Nearest => {
let arg = pop1_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().nearest(arg));
}
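// i32x4.dot_i16x8_s: sign-extend the low and high halves of both operands,
// multiply them, and add adjacent pairs of the products.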
Operator::I32x4DotI16x8S => {
let (a, b) = pop2_with_bitcast(state, I16X8, builder);
let alow = builder.ins().swiden_low(a);
let blow = builder.ins().swiden_low(b);
let low = builder.ins().imul(alow, blow);
let ahigh = builder.ins().swiden_high(a);
let bhigh = builder.ins().swiden_high(b);
let high = builder.ins().imul(ahigh, bhigh);
state.push1(builder.ins().iadd_pairwise(low, high));
}
Operator::I8x16Popcnt => {
let arg = pop1_with_bitcast(state, type_of(op), builder);
state.push1(builder.ins().popcnt(arg));
}
Operator::I16x8Q15MulrSatS => {
let (a, b) = pop2_with_bitcast(state, I16X8, builder);
state.push1(builder.ins().sqmul_round_sat(a, b))
}
Operator::I16x8ExtMulLowI8x16S => {
let (a, b) = pop2_with_bitcast(state, I8X16, builder);
let a_low = builder.ins().swiden_low(a);
let b_low = builder.ins().swiden_low(b);
state.push1(builder.ins().imul(a_low, b_low));
}
Operator::I16x8ExtMulHighI8x16S => {
let (a, b) = pop2_with_bitcast(state, I8X16, builder);
let a_high = builder.ins().swiden_high(a);
let b_high = builder.ins().swiden_high(b);
state.push1(builder.ins().imul(a_high, b_high));
}
Operator::I16x8ExtMulLowI8x16U => {
let (a, b) = pop2_with_bitcast(state, I8X16, builder);
let a_low = builder.ins().uwiden_low(a);
let b_low = builder.ins().uwiden_low(b);
state.push1(builder.ins().imul(a_low, b_low));
}
Operator::I16x8ExtMulHighI8x16U => {
let (a, b) = pop2_with_bitcast(state, I8X16, builder);
let a_high = builder.ins().uwiden_high(a);
let b_high = builder.ins().uwiden_high(b);
state.push1(builder.ins().imul(a_high, b_high));
}
Operator::I32x4ExtMulLowI16x8S => {
let (a, b) = pop2_with_bitcast(state, I16X8, builder);
let a_low = builder.ins().swiden_low(a);
let b_low = builder.ins().swiden_low(b);
state.push1(builder.ins().imul(a_low, b_low));
}
Operator::I32x4ExtMulHighI16x8S => {
let (a, b) = pop2_with_bitcast(state, I16X8, builder);
let a_high = builder.ins().swiden_high(a);
let b_high = builder.ins().swiden_high(b);
state.push1(builder.ins().imul(a_high, b_high));
}
Operator::I32x4ExtMulLowI16x8U => {
let (a, b) = pop2_with_bitcast(state, I16X8, builder);
let a_low = builder.ins().uwiden_low(a);
let b_low = builder.ins().uwiden_low(b);
state.push1(builder.ins().imul(a_low, b_low));
}
Operator::I32x4ExtMulHighI16x8U => {
let (a, b) = pop2_with_bitcast(state, I16X8, builder);
let a_high = builder.ins().uwiden_high(a);
let b_high = builder.ins().uwiden_high(b);
state.push1(builder.ins().imul(a_high, b_high));
}
Operator::I64x2ExtMulLowI32x4S => {
let (a, b) = pop2_with_bitcast(state, I32X4, builder);
let a_low = builder.ins().swiden_low(a);
let b_low = builder.ins().swiden_low(b);
state.push1(builder.ins().imul(a_low, b_low));
}
Operator::I64x2ExtMulHighI32x4S => {
let (a, b) = pop2_with_bitcast(state, I32X4, builder);
let a_high = builder.ins().swiden_high(a);
let b_high = builder.ins().swiden_high(b);
state.push1(builder.ins().imul(a_high, b_high));
}
Operator::I64x2ExtMulLowI32x4U => {
let (a, b) = pop2_with_bitcast(state, I32X4, builder);
let a_low = builder.ins().uwiden_low(a);
let b_low = builder.ins().uwiden_low(b);
state.push1(builder.ins().imul(a_low, b_low));
}
Operator::I64x2ExtMulHighI32x4U => {
let (a, b) = pop2_with_bitcast(state, I32X4, builder);
let a_high = builder.ins().uwiden_high(a);
let b_high = builder.ins().uwiden_high(b);
state.push1(builder.ins().imul(a_high, b_high));
}
Operator::ReturnCall { .. } | Operator::ReturnCallIndirect { .. } => {
return Err(wasm_unsupported!("proposed tail-call operator {:?}", op));
}
Operator::MemoryDiscard { .. } => {
return Err(wasm_unsupported!(
"proposed memory-control operator {:?}",
op
));
}
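// Relaxed SIMD operators: when deterministic semantics are requested, or the
// target has no cheaper lowering, these are translated exactly like their
// non-relaxed counterparts; otherwise they may use target-specific
// instructions such as `x86_pshufb` or `x86_blendv`.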
Operator::F32x4RelaxedMax | Operator::F64x2RelaxedMax => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(
if environ.relaxed_simd_deterministic() || !environ.is_x86() {
builder.ins().fmax(a, b)
} else {
builder.ins().fmax_pseudo(a, b)
},
)
}
Operator::F32x4RelaxedMin | Operator::F64x2RelaxedMin => {
let (a, b) = pop2_with_bitcast(state, type_of(op), builder);
state.push1(
if environ.relaxed_simd_deterministic() || !environ.is_x86() {
builder.ins().fmin(a, b)
} else {
builder.ins().fmin_pseudo(a, b)
},
);
}
Operator::I8x16RelaxedSwizzle => {
let (a, b) = pop2_with_bitcast(state, I8X16, builder);
state.push1(
if environ.relaxed_simd_deterministic() || !environ.is_x86() {
builder.ins().swizzle(a, b)
} else {
builder.ins().x86_pshufb(a, b)
},
);
}
Operator::F32x4RelaxedMadd | Operator::F64x2RelaxedMadd => {
let (a, b, c) = pop3_with_bitcast(state, type_of(op), builder);
state.push1(
if environ.relaxed_simd_deterministic() || environ.has_native_fma() {
builder.ins().fma(a, b, c)
} else {
let mul = builder.ins().fmul(a, b);
builder.ins().fadd(mul, c)
},
);
}
Operator::F32x4RelaxedNmadd | Operator::F64x2RelaxedNmadd => {
let (a, b, c) = pop3_with_bitcast(state, type_of(op), builder);
let a = builder.ins().fneg(a);
state.push1(
if environ.relaxed_simd_deterministic() || environ.has_native_fma() {
builder.ins().fma(a, b, c)
} else {
let mul = builder.ins().fmul(a, b);
builder.ins().fadd(mul, c)
},
);
}
Operator::I8x16RelaxedLaneselect
| Operator::I16x8RelaxedLaneselect
| Operator::I32x4RelaxedLaneselect
| Operator::I64x2RelaxedLaneselect => {
let ty = type_of(op);
let (a, b, c) = pop3_with_bitcast(state, ty, builder);
state.push1(
if environ.relaxed_simd_deterministic() || !environ.is_x86() || ty == I16X8 {
builder.ins().bitselect(c, a, b)
} else {
builder.ins().x86_blendv(c, a, b)
},
);
}
Operator::I32x4RelaxedTruncF32x4S => {
let a = pop1_with_bitcast(state, F32X4, builder);
state.push1(
if environ.relaxed_simd_deterministic() || !environ.is_x86() {
builder.ins().fcvt_to_sint_sat(I32X4, a)
} else {
builder.ins().x86_cvtt2dq(I32X4, a)
},
)
}
Operator::I32x4RelaxedTruncF64x2SZero => {
let a = pop1_with_bitcast(state, F64X2, builder);
let converted_a = if environ.relaxed_simd_deterministic() || !environ.is_x86() {
builder.ins().fcvt_to_sint_sat(I64X2, a)
} else {
builder.ins().x86_cvtt2dq(I64X2, a)
};
let handle = builder.func.dfg.constants.insert(vec![0u8; 16].into());
let zero = builder.ins().vconst(I64X2, handle);
state.push1(builder.ins().snarrow(converted_a, zero));
}
Operator::I16x8RelaxedQ15mulrS => {
let (a, b) = pop2_with_bitcast(state, I16X8, builder);
state.push1(
if environ.relaxed_simd_deterministic() || !environ.is_x86() {
builder.ins().sqmul_round_sat(a, b)
} else {
builder.ins().x86_pmulhrsw(a, b)
},
);
}
Operator::I16x8RelaxedDotI8x16I7x16S => {
let (a, b) = pop2_with_bitcast(state, I8X16, builder);
state.push1(
if environ.relaxed_simd_deterministic() || !environ.is_x86() {
let alo = builder.ins().swiden_low(a);
let blo = builder.ins().swiden_low(b);
let lo = builder.ins().imul(alo, blo);
let ahi = builder.ins().swiden_high(a);
let bhi = builder.ins().swiden_high(b);
let hi = builder.ins().imul(ahi, bhi);
builder.ins().iadd_pairwise(lo, hi)
} else {
builder.ins().x86_pmaddubsw(a, b)
},
);
}
Operator::I32x4RelaxedDotI8x16I7x16AddS => {
let c = pop1_with_bitcast(state, I32X4, builder);
let (a, b) = pop2_with_bitcast(state, I8X16, builder);
let dot = if environ.relaxed_simd_deterministic() || !environ.is_x86() {
let alo = builder.ins().swiden_low(a);
let blo = builder.ins().swiden_low(b);
let lo = builder.ins().imul(alo, blo);
let ahi = builder.ins().swiden_high(a);
let bhi = builder.ins().swiden_high(b);
let hi = builder.ins().imul(ahi, bhi);
builder.ins().iadd_pairwise(lo, hi)
} else {
builder.ins().x86_pmaddubsw(a, b)
};
let dotlo = builder.ins().swiden_low(dot);
let dothi = builder.ins().swiden_high(dot);
let dot32 = builder.ins().iadd_pairwise(dotlo, dothi);
state.push1(builder.ins().iadd(dot32, c));
}
Operator::CallRef { .. }
| Operator::ReturnCallRef { .. }
| Operator::BrOnNull { .. }
| Operator::BrOnNonNull { .. }
| Operator::RefAsNonNull => {
return Err(wasm_unsupported!(
"proposed function-references operator {:?}",
op
));
}
};
Ok(())
}
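/// Translate a Wasm operator that occurs in unreachable code. Most operators
/// are simply dropped, but the structured control-flow operators (`block`,
/// `loop`, `if`, `else`, `end`) still have to maintain the control stack and
/// may make the code reachable again.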
#[cfg_attr(feature = "cargo-clippy", allow(clippy::unneeded_field_pattern))]
fn translate_unreachable_operator<FE: FuncEnvironment + ?Sized>(
validator: &FuncValidator<impl WasmModuleResources>,
op: &Operator,
builder: &mut FunctionBuilder,
state: &mut FuncTranslationState,
environ: &mut FE,
) -> WasmResult<()> {
debug_assert!(!state.reachable);
match *op {
Operator::If { blockty } => {
state.push_if(
ir::Block::reserved_value(),
ElseData::NoElse {
branch_inst: ir::Inst::reserved_value(),
placeholder: ir::Block::reserved_value(),
},
0,
0,
blockty,
);
}
Operator::Loop { blockty: _ } | Operator::Block { blockty: _ } => {
state.push_block(ir::Block::reserved_value(), 0, 0);
}
Operator::Else => {
let i = state.control_stack.len() - 1;
match state.control_stack[i] {
ControlStackFrame::If {
ref else_data,
head_is_reachable,
ref mut consequent_ends_reachable,
blocktype,
..
} => {
debug_assert!(consequent_ends_reachable.is_none());
*consequent_ends_reachable = Some(state.reachable);
if head_is_reachable {
state.reachable = true;
let else_block = match *else_data {
ElseData::NoElse {
branch_inst,
placeholder,
} => {
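// The `if` was translated with a branch to a placeholder block; create
// the real `else` block now and retarget that branch to it.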
let (params, _results) =
blocktype_params_results(validator, blocktype)?;
let else_block = block_with_params(builder, params, environ)?;
let frame = state.control_stack.last().unwrap();
frame.truncate_value_stack_to_else_params(&mut state.stack);
builder.change_jump_destination(
branch_inst,
placeholder,
else_block,
);
builder.seal_block(else_block);
else_block
}
ElseData::WithElse { else_block } => {
let frame = state.control_stack.last().unwrap();
frame.truncate_value_stack_to_else_params(&mut state.stack);
else_block
}
};
builder.switch_to_block(else_block);
}
}
_ => unreachable!(),
}
}
Operator::End => {
let stack = &mut state.stack;
let control_stack = &mut state.control_stack;
let frame = control_stack.pop().unwrap();
frame.truncate_value_stack_to_original_size(stack);
let reachable_anyway = match frame {
ControlStackFrame::Loop { header, .. } => {
builder.seal_block(header);
false
}
ControlStackFrame::If {
head_is_reachable,
consequent_ends_reachable: None,
..
} => head_is_reachable,
ControlStackFrame::If {
head_is_reachable,
consequent_ends_reachable: Some(consequent_ends_reachable),
..
} => head_is_reachable && consequent_ends_reachable,
_ => false,
};
if frame.exit_is_branched_to() || reachable_anyway {
builder.switch_to_block(frame.following_code());
builder.seal_block(frame.following_code());
stack.extend_from_slice(builder.block_params(frame.following_code()));
state.reachable = true;
}
}
_ => {
// All other operators are simply skipped while the code is unreachable;
// they cannot affect the control stack.
}
}
Ok(())
}
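/// Compute the address of a linear-memory access, including its bounds check.
/// Offsets that do not fit in 32 bits are first added to the index with an
/// overflow trap. Returns `Reachability::Unreachable` when
/// `bounds_check_and_compute_addr` determines that the access is statically
/// out of bounds.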
fn prepare_addr<FE>(
memarg: &MemArg,
access_size: u8,
builder: &mut FunctionBuilder,
state: &mut FuncTranslationState,
environ: &mut FE,
) -> WasmResult<Reachability<(MemFlags, Value)>>
where
FE: FuncEnvironment + ?Sized,
{
let index = state.pop1();
let heap = state.get_heap(builder.func, memarg.memory, environ)?;
let heap = environ.heaps()[heap].clone();
let addr = match u32::try_from(memarg.offset) {
Ok(offset) => bounds_checks::bounds_check_and_compute_addr(
builder,
environ,
&heap,
index,
offset,
access_size,
)?,
Err(_) => {
let offset = builder.ins().iconst(heap.index_type, memarg.offset as i64);
let adjusted_index =
builder
.ins()
.uadd_overflow_trap(index, offset, ir::TrapCode::HeapOutOfBounds);
bounds_checks::bounds_check_and_compute_addr(
builder,
environ,
&heap,
adjusted_index,
0,
access_size,
)?
}
};
let addr = match addr {
Reachability::Unreachable => return Ok(Reachability::Unreachable),
Reachability::Reachable(a) => a,
};
let mut flags = MemFlags::new();
flags.set_endianness(ir::Endianness::Little);
flags.set_heap();
Ok(Reachability::Reachable((flags, addr)))
}
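/// Emit the alignment check required for an atomic access: compute the
/// effective address (index plus static offset) and trap with
/// `HeapMisaligned` if it is not a multiple of the access size. The index is
/// left on the value stack for the subsequent bounds check.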
fn align_atomic_addr(
memarg: &MemArg,
loaded_bytes: u8,
builder: &mut FunctionBuilder,
state: &mut FuncTranslationState,
) {
if loaded_bytes > 1 {
// Peek at the index operand without consuming it; `prepare_addr` pops it
// for real afterwards.
let addr = state.pop1();
state.push1(addr);
let effective_addr = if memarg.offset == 0 {
addr
} else {
builder
.ins()
.iadd_imm(addr, i64::from(memarg.offset as i32))
};
debug_assert!(loaded_bytes.is_power_of_two());
let misalignment = builder
.ins()
.band_imm(effective_addr, i64::from(loaded_bytes - 1));
let f = builder.ins().icmp_imm(IntCC::NotEqual, misalignment, 0);
builder.ins().trapnz(f, ir::TrapCode::HeapMisaligned);
}
}
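/// Like `prepare_addr`, but first emits the alignment check that atomic
/// accesses require.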
fn prepare_atomic_addr<FE: FuncEnvironment + ?Sized>(
memarg: &MemArg,
loaded_bytes: u8,
builder: &mut FunctionBuilder,
state: &mut FuncTranslationState,
environ: &mut FE,
) -> WasmResult<Reachability<(MemFlags, Value)>> {
align_atomic_addr(memarg, loaded_bytes, builder, state);
prepare_addr(memarg, loaded_bytes, builder, state, environ)
}
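/// The result of translating code that may turn out to be statically
/// unreachable, e.g. a memory access whose bounds check always fails. Callers
/// typically unwrap it with `unwrap_or_return_unreachable_state!` and mark
/// the translation state unreachable.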
#[derive(PartialEq, Eq)]
#[must_use]
pub enum Reachability<T> {
Reachable(T),
Unreachable,
}
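/// Translate a Wasm load (or one of its sized/extending variants) into a
/// Cranelift load of `result_ty` using the given `opcode`.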
fn translate_load<FE: FuncEnvironment + ?Sized>(
memarg: &MemArg,
opcode: ir::Opcode,
result_ty: Type,
builder: &mut FunctionBuilder,
state: &mut FuncTranslationState,
environ: &mut FE,
) -> WasmResult<Reachability<()>> {
let (flags, base) = match prepare_addr(
memarg,
mem_op_size(opcode, result_ty),
builder,
state,
environ,
)? {
Reachability::Unreachable => return Ok(Reachability::Unreachable),
Reachability::Reachable((f, b)) => (f, b),
};
let (load, dfg) = builder
.ins()
.Load(opcode, result_ty, flags, Offset32::new(0), base);
state.push1(dfg.first_result(load));
Ok(Reachability::Reachable(()))
}
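/// Translate a Wasm store (or one of its sized variants) into a Cranelift
/// store using the given `opcode`.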
fn translate_store<FE: FuncEnvironment + ?Sized>(
memarg: &MemArg,
opcode: ir::Opcode,
builder: &mut FunctionBuilder,
state: &mut FuncTranslationState,
environ: &mut FE,
) -> WasmResult<()> {
let val = state.pop1();
let val_ty = builder.func.dfg.value_type(val);
let (flags, base) = unwrap_or_return_unreachable_state!(
state,
prepare_addr(memarg, mem_op_size(opcode, val_ty), builder, state, environ)?
);
builder
.ins()
.Store(opcode, val_ty, flags, Offset32::new(0), val, base);
Ok(())
}
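/// The number of bytes accessed by a memory opcode; full-width `Load`/`Store`
/// use the size of the value type.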
fn mem_op_size(opcode: ir::Opcode, ty: Type) -> u8 {
match opcode {
ir::Opcode::Istore8 | ir::Opcode::Sload8 | ir::Opcode::Uload8 => 1,
ir::Opcode::Istore16 | ir::Opcode::Sload16 | ir::Opcode::Uload16 => 2,
ir::Opcode::Istore32 | ir::Opcode::Sload32 | ir::Opcode::Uload32 => 4,
ir::Opcode::Store | ir::Opcode::Load => u8::try_from(ty.bytes()).unwrap(),
_ => panic!("unknown size of mem op for {:?}", opcode),
}
}
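/// Translate a scalar integer comparison: the boolean result is zero-extended
/// to `i32`, as Wasm expects.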
fn translate_icmp(cc: IntCC, builder: &mut FunctionBuilder, state: &mut FuncTranslationState) {
let (arg0, arg1) = state.pop2();
let val = builder.ins().icmp(cc, arg0, arg1);
state.push1(builder.ins().uextend(I32, val));
}
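/// Translate an atomic read-modify-write operator. The Wasm-level operand and
/// result use `widened_ty` (`i32` or `i64`), while the memory access itself
/// uses `access_ty`; the operand is narrowed before the access and the result
/// is zero-extended afterwards when the two differ.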
fn translate_atomic_rmw<FE: FuncEnvironment + ?Sized>(
widened_ty: Type,
access_ty: Type,
op: AtomicRmwOp,
memarg: &MemArg,
builder: &mut FunctionBuilder,
state: &mut FuncTranslationState,
environ: &mut FE,
) -> WasmResult<()> {
let mut arg2 = state.pop1();
let arg2_ty = builder.func.dfg.value_type(arg2);
match access_ty {
I8 | I16 | I32 | I64 => {}
_ => {
return Err(wasm_unsupported!(
"atomic_rmw: unsupported access type {:?}",
access_ty
))
}
};
let w_ty_ok = match widened_ty {
I32 | I64 => true,
_ => false,
};
assert!(w_ty_ok && widened_ty.bytes() >= access_ty.bytes());
assert!(arg2_ty.bytes() >= access_ty.bytes());
if arg2_ty.bytes() > access_ty.bytes() {
arg2 = builder.ins().ireduce(access_ty, arg2);
}
let (flags, addr) = unwrap_or_return_unreachable_state!(
state,
prepare_atomic_addr(
memarg,
u8::try_from(access_ty.bytes()).unwrap(),
builder,
state,
environ,
)?
);
let mut res = builder.ins().atomic_rmw(access_ty, flags, op, addr, arg2);
if access_ty != widened_ty {
res = builder.ins().uextend(widened_ty, res);
}
state.push1(res);
Ok(())
}
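/// Translate an atomic compare-and-swap, narrowing the expected and
/// replacement values to `access_ty` and zero-extending the result back to
/// `widened_ty` when the two differ.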
fn translate_atomic_cas<FE: FuncEnvironment + ?Sized>(
widened_ty: Type,
access_ty: Type,
memarg: &MemArg,
builder: &mut FunctionBuilder,
state: &mut FuncTranslationState,
environ: &mut FE,
) -> WasmResult<()> {
let (mut expected, mut replacement) = state.pop2();
let expected_ty = builder.func.dfg.value_type(expected);
let replacement_ty = builder.func.dfg.value_type(replacement);
match access_ty {
I8 | I16 | I32 | I64 => {}
_ => {
return Err(wasm_unsupported!(
"atomic_cas: unsupported access type {:?}",
access_ty
))
}
};
let w_ty_ok = match widened_ty {
I32 | I64 => true,
_ => false,
};
assert!(w_ty_ok && widened_ty.bytes() >= access_ty.bytes());
assert!(expected_ty.bytes() >= access_ty.bytes());
if expected_ty.bytes() > access_ty.bytes() {
expected = builder.ins().ireduce(access_ty, expected);
}
assert!(replacement_ty.bytes() >= access_ty.bytes());
if replacement_ty.bytes() > access_ty.bytes() {
replacement = builder.ins().ireduce(access_ty, replacement);
}
let (flags, addr) = unwrap_or_return_unreachable_state!(
state,
prepare_atomic_addr(
memarg,
u8::try_from(access_ty.bytes()).unwrap(),
builder,
state,
environ,
)?
);
let mut res = builder.ins().atomic_cas(flags, addr, expected, replacement);
if access_ty != widened_ty {
res = builder.ins().uextend(widened_ty, res);
}
state.push1(res);
Ok(())
}
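/// Translate an atomic load, zero-extending the loaded `access_ty` value to
/// `widened_ty` when the two differ.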
fn translate_atomic_load<FE: FuncEnvironment + ?Sized>(
widened_ty: Type,
access_ty: Type,
memarg: &MemArg,
builder: &mut FunctionBuilder,
state: &mut FuncTranslationState,
environ: &mut FE,
) -> WasmResult<()> {
match access_ty {
I8 | I16 | I32 | I64 => {}
_ => {
return Err(wasm_unsupported!(
"atomic_load: unsupported access type {:?}",
access_ty
))
}
};
let w_ty_ok = match widened_ty {
I32 | I64 => true,
_ => false,
};
assert!(w_ty_ok && widened_ty.bytes() >= access_ty.bytes());
let (flags, addr) = unwrap_or_return_unreachable_state!(
state,
prepare_atomic_addr(
memarg,
u8::try_from(access_ty.bytes()).unwrap(),
builder,
state,
environ,
)?
);
let mut res = builder.ins().atomic_load(access_ty, flags, addr);
if access_ty != widened_ty {
res = builder.ins().uextend(widened_ty, res);
}
state.push1(res);
Ok(())
}
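/// Translate an atomic store, narrowing the stored value to `access_ty` when
/// its type is wider.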
fn translate_atomic_store<FE: FuncEnvironment + ?Sized>(
access_ty: Type,
memarg: &MemArg,
builder: &mut FunctionBuilder,
state: &mut FuncTranslationState,
environ: &mut FE,
) -> WasmResult<()> {
let mut data = state.pop1();
let data_ty = builder.func.dfg.value_type(data);
match access_ty {
I8 | I16 | I32 | I64 => {}
_ => {
return Err(wasm_unsupported!(
"atomic_store: unsupported access type {:?}",
access_ty
))
}
};
let d_ty_ok = match data_ty {
I32 | I64 => true,
_ => false,
};
assert!(d_ty_ok && data_ty.bytes() >= access_ty.bytes());
if data_ty.bytes() > access_ty.bytes() {
data = builder.ins().ireduce(access_ty, data);
}
let (flags, addr) = unwrap_or_return_unreachable_state!(
state,
prepare_atomic_addr(
memarg,
u8::try_from(access_ty.bytes()).unwrap(),
builder,
state,
environ,
)?
);
builder.ins().atomic_store(flags, data, addr);
Ok(())
}
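/// Vector integer comparison: both operands are bitcast to `needed_type`
/// first, since vector values may be carried on the stack as `i8x16`.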
fn translate_vector_icmp(
cc: IntCC,
needed_type: Type,
builder: &mut FunctionBuilder,
state: &mut FuncTranslationState,
) {
let (a, b) = state.pop2();
let bitcast_a = optionally_bitcast_vector(a, needed_type, builder);
let bitcast_b = optionally_bitcast_vector(b, needed_type, builder);
state.push1(builder.ins().icmp(cc, bitcast_a, bitcast_b))
}
fn translate_fcmp(cc: FloatCC, builder: &mut FunctionBuilder, state: &mut FuncTranslationState) {
let (arg0, arg1) = state.pop2();
let val = builder.ins().fcmp(cc, arg0, arg1);
state.push1(builder.ins().uextend(I32, val));
}
fn translate_vector_fcmp(
cc: FloatCC,
needed_type: Type,
builder: &mut FunctionBuilder,
state: &mut FuncTranslationState,
) {
let (a, b) = state.pop2();
let bitcast_a = optionally_bitcast_vector(a, needed_type, builder);
let bitcast_b = optionally_bitcast_vector(b, needed_type, builder);
state.push1(builder.ins().fcmp(cc, bitcast_a, bitcast_b))
}
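/// Translate `br_if`: branch to the target frame's destination when the
/// popped condition is non-zero, otherwise fall through to a fresh block that
/// becomes the new insertion point.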
fn translate_br_if(
relative_depth: u32,
builder: &mut FunctionBuilder,
state: &mut FuncTranslationState,
) {
let val = state.pop1();
let (br_destination, inputs) = translate_br_if_args(relative_depth, state);
let next_block = builder.create_block();
canonicalise_brif(builder, val, br_destination, inputs, next_block, &[]);
builder.seal_block(next_block);
builder.switch_to_block(next_block);
}
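/// Determine the destination block of a branch to `relative_depth` and the
/// stack values to pass to it: loops receive their parameters, other frames
/// their return values.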
fn translate_br_if_args(
relative_depth: u32,
state: &mut FuncTranslationState,
) -> (ir::Block, &mut [ir::Value]) {
let i = state.control_stack.len() - 1 - (relative_depth as usize);
let (return_count, br_destination) = {
let frame = &mut state.control_stack[i];
frame.set_branched_to_exit();
let return_count = if frame.is_loop() {
frame.num_param_values()
} else {
frame.num_return_values()
};
(return_count, frame.br_destination())
};
let inputs = state.peekn_mut(return_count);
(br_destination, inputs)
}
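/// Determine the CLIF vector type that a SIMD operator produces or operates
/// on. Only SIMD operators are mapped; any other operator panics.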
fn type_of(operator: &Operator) -> Type {
match operator {
Operator::V128Load { .. }
| Operator::V128Store { .. }
| Operator::V128Const { .. }
| Operator::V128Not
| Operator::V128And
| Operator::V128AndNot
| Operator::V128Or
| Operator::V128Xor
| Operator::V128AnyTrue
| Operator::V128Bitselect => I8X16,
Operator::I8x16Shuffle { .. }
| Operator::I8x16Splat
| Operator::V128Load8Splat { .. }
| Operator::V128Load8Lane { .. }
| Operator::V128Store8Lane { .. }
| Operator::I8x16ExtractLaneS { .. }
| Operator::I8x16ExtractLaneU { .. }
| Operator::I8x16ReplaceLane { .. }
| Operator::I8x16Eq
| Operator::I8x16Ne
| Operator::I8x16LtS
| Operator::I8x16LtU
| Operator::I8x16GtS
| Operator::I8x16GtU
| Operator::I8x16LeS
| Operator::I8x16LeU
| Operator::I8x16GeS
| Operator::I8x16GeU
| Operator::I8x16Neg
| Operator::I8x16Abs
| Operator::I8x16AllTrue
| Operator::I8x16Shl
| Operator::I8x16ShrS
| Operator::I8x16ShrU
| Operator::I8x16Add
| Operator::I8x16AddSatS
| Operator::I8x16AddSatU
| Operator::I8x16Sub
| Operator::I8x16SubSatS
| Operator::I8x16SubSatU
| Operator::I8x16MinS
| Operator::I8x16MinU
| Operator::I8x16MaxS
| Operator::I8x16MaxU
| Operator::I8x16AvgrU
| Operator::I8x16Bitmask
| Operator::I8x16Popcnt
| Operator::I8x16RelaxedLaneselect => I8X16,
Operator::I16x8Splat
| Operator::V128Load16Splat { .. }
| Operator::V128Load16Lane { .. }
| Operator::V128Store16Lane { .. }
| Operator::I16x8ExtractLaneS { .. }
| Operator::I16x8ExtractLaneU { .. }
| Operator::I16x8ReplaceLane { .. }
| Operator::I16x8Eq
| Operator::I16x8Ne
| Operator::I16x8LtS
| Operator::I16x8LtU
| Operator::I16x8GtS
| Operator::I16x8GtU
| Operator::I16x8LeS
| Operator::I16x8LeU
| Operator::I16x8GeS
| Operator::I16x8GeU
| Operator::I16x8Neg
| Operator::I16x8Abs
| Operator::I16x8AllTrue
| Operator::I16x8Shl
| Operator::I16x8ShrS
| Operator::I16x8ShrU
| Operator::I16x8Add
| Operator::I16x8AddSatS
| Operator::I16x8AddSatU
| Operator::I16x8Sub
| Operator::I16x8SubSatS
| Operator::I16x8SubSatU
| Operator::I16x8MinS
| Operator::I16x8MinU
| Operator::I16x8MaxS
| Operator::I16x8MaxU
| Operator::I16x8AvgrU
| Operator::I16x8Mul
| Operator::I16x8Bitmask
| Operator::I16x8RelaxedLaneselect => I16X8,
Operator::I32x4Splat
| Operator::V128Load32Splat { .. }
| Operator::V128Load32Lane { .. }
| Operator::V128Store32Lane { .. }
| Operator::I32x4ExtractLane { .. }
| Operator::I32x4ReplaceLane { .. }
| Operator::I32x4Eq
| Operator::I32x4Ne
| Operator::I32x4LtS
| Operator::I32x4LtU
| Operator::I32x4GtS
| Operator::I32x4GtU
| Operator::I32x4LeS
| Operator::I32x4LeU
| Operator::I32x4GeS
| Operator::I32x4GeU
| Operator::I32x4Neg
| Operator::I32x4Abs
| Operator::I32x4AllTrue
| Operator::I32x4Shl
| Operator::I32x4ShrS
| Operator::I32x4ShrU
| Operator::I32x4Add
| Operator::I32x4Sub
| Operator::I32x4Mul
| Operator::I32x4MinS
| Operator::I32x4MinU
| Operator::I32x4MaxS
| Operator::I32x4MaxU
| Operator::I32x4Bitmask
| Operator::I32x4TruncSatF32x4S
| Operator::I32x4TruncSatF32x4U
| Operator::I32x4RelaxedLaneselect
| Operator::V128Load32Zero { .. } => I32X4,
Operator::I64x2Splat
| Operator::V128Load64Splat { .. }
| Operator::V128Load64Lane { .. }
| Operator::V128Store64Lane { .. }
| Operator::I64x2ExtractLane { .. }
| Operator::I64x2ReplaceLane { .. }
| Operator::I64x2Eq
| Operator::I64x2Ne
| Operator::I64x2LtS
| Operator::I64x2GtS
| Operator::I64x2LeS
| Operator::I64x2GeS
| Operator::I64x2Neg
| Operator::I64x2Abs
| Operator::I64x2AllTrue
| Operator::I64x2Shl
| Operator::I64x2ShrS
| Operator::I64x2ShrU
| Operator::I64x2Add
| Operator::I64x2Sub
| Operator::I64x2Mul
| Operator::I64x2Bitmask
| Operator::I64x2RelaxedLaneselect
| Operator::V128Load64Zero { .. } => I64X2,
Operator::F32x4Splat
| Operator::F32x4ExtractLane { .. }
| Operator::F32x4ReplaceLane { .. }
| Operator::F32x4Eq
| Operator::F32x4Ne
| Operator::F32x4Lt
| Operator::F32x4Gt
| Operator::F32x4Le
| Operator::F32x4Ge
| Operator::F32x4Abs
| Operator::F32x4Neg
| Operator::F32x4Sqrt
| Operator::F32x4Add
| Operator::F32x4Sub
| Operator::F32x4Mul
| Operator::F32x4Div
| Operator::F32x4Min
| Operator::F32x4Max
| Operator::F32x4PMin
| Operator::F32x4PMax
| Operator::F32x4ConvertI32x4S
| Operator::F32x4ConvertI32x4U
| Operator::F32x4Ceil
| Operator::F32x4Floor
| Operator::F32x4Trunc
| Operator::F32x4Nearest
| Operator::F32x4RelaxedMax
| Operator::F32x4RelaxedMin
| Operator::F32x4RelaxedMadd
| Operator::F32x4RelaxedNmadd => F32X4,
Operator::F64x2Splat
| Operator::F64x2ExtractLane { .. }
| Operator::F64x2ReplaceLane { .. }
| Operator::F64x2Eq
| Operator::F64x2Ne
| Operator::F64x2Lt
| Operator::F64x2Gt
| Operator::F64x2Le
| Operator::F64x2Ge
| Operator::F64x2Abs
| Operator::F64x2Neg
| Operator::F64x2Sqrt
| Operator::F64x2Add
| Operator::F64x2Sub
| Operator::F64x2Mul
| Operator::F64x2Div
| Operator::F64x2Min
| Operator::F64x2Max
| Operator::F64x2PMin
| Operator::F64x2PMax
| Operator::F64x2Ceil
| Operator::F64x2Floor
| Operator::F64x2Trunc
| Operator::F64x2Nearest
| Operator::F64x2RelaxedMax
| Operator::F64x2RelaxedMin
| Operator::F64x2RelaxedMadd
| Operator::F64x2RelaxedNmadd => F64X2,
_ => unimplemented!(
"Currently only SIMD instructions are mapped to their return type; the \
following instruction is not mapped: {:?}",
operator
),
}
}
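/// Bitcast `value` to `needed_type` if it currently has a different type;
/// vector values may be carried on the Wasm value stack as `i8x16` and need
/// re-typing before a lane-aware instruction can use them.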
fn optionally_bitcast_vector(
value: Value,
needed_type: Type,
builder: &mut FunctionBuilder,
) -> Value {
if builder.func.dfg.value_type(value) != needed_type {
let mut flags = MemFlags::new();
flags.set_endianness(ir::Endianness::Little);
builder.ins().bitcast(needed_type, flags, value)
} else {
value
}
}
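/// True for 128-bit vector types other than the canonical `i8x16`
/// representation used for block and branch arguments.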
#[inline(always)]
fn is_non_canonical_v128(ty: ir::Type) -> bool {
match ty {
I64X2 | I32X4 | I16X8 | F32X4 | F64X2 => true,
_ => false,
}
}
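/// Bitcast to `i8x16` any values in `values` that have a non-canonical vector
/// type, returning the original slice untouched when no cast is needed.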
fn canonicalise_v128_values<'a>(
tmp_canonicalised: &'a mut SmallVec<[ir::Value; 16]>,
builder: &mut FunctionBuilder,
values: &'a [ir::Value],
) -> &'a [ir::Value] {
debug_assert!(tmp_canonicalised.is_empty());
let any_non_canonical = values
.iter()
.any(|v| is_non_canonical_v128(builder.func.dfg.value_type(*v)));
if !any_non_canonical {
return values;
}
for v in values {
tmp_canonicalised.push(if is_non_canonical_v128(builder.func.dfg.value_type(*v)) {
let mut flags = MemFlags::new();
flags.set_endianness(ir::Endianness::Little);
builder.ins().bitcast(I8X16, flags, *v)
} else {
*v
});
}
tmp_canonicalised.as_slice()
}
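/// Emit a jump whose block arguments have been canonicalised to `i8x16` where
/// necessary.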
fn canonicalise_then_jump(
builder: &mut FunctionBuilder,
destination: ir::Block,
params: &[ir::Value],
) -> ir::Inst {
let mut tmp_canonicalised = SmallVec::<[ir::Value; 16]>::new();
let canonicalised = canonicalise_v128_values(&mut tmp_canonicalised, builder, params);
builder.ins().jump(destination, canonicalised)
}
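/// Emit a conditional branch after canonicalising both argument lists to
/// `i8x16` where necessary.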
fn canonicalise_brif(
builder: &mut FunctionBuilder,
cond: ir::Value,
block_then: ir::Block,
params_then: &[ir::Value],
block_else: ir::Block,
params_else: &[ir::Value],
) -> ir::Inst {
let mut tmp_canonicalised_then = SmallVec::<[ir::Value; 16]>::new();
let canonicalised_then =
canonicalise_v128_values(&mut tmp_canonicalised_then, builder, params_then);
let mut tmp_canonicalised_else = SmallVec::<[ir::Value; 16]>::new();
let canonicalised_else =
canonicalise_v128_values(&mut tmp_canonicalised_else, builder, params_else);
builder.ins().brif(
cond,
block_then,
canonicalised_then,
block_else,
canonicalised_else,
)
}
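/// Pop one value and bitcast it to `needed_type` if its current type differs.
/// For illustration, this mirrors the `i8x16.popcnt` arm of
/// `translate_operator` above:
///
/// ```ignore
/// let arg = pop1_with_bitcast(state, I8X16, builder);
/// state.push1(builder.ins().popcnt(arg));
/// ```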
fn pop1_with_bitcast(
state: &mut FuncTranslationState,
needed_type: Type,
builder: &mut FunctionBuilder,
) -> Value {
optionally_bitcast_vector(state.pop1(), needed_type, builder)
}
fn pop2_with_bitcast(
state: &mut FuncTranslationState,
needed_type: Type,
builder: &mut FunctionBuilder,
) -> (Value, Value) {
let (a, b) = state.pop2();
let bitcast_a = optionally_bitcast_vector(a, needed_type, builder);
let bitcast_b = optionally_bitcast_vector(b, needed_type, builder);
(bitcast_a, bitcast_b)
}
fn pop3_with_bitcast(
state: &mut FuncTranslationState,
needed_type: Type,
builder: &mut FunctionBuilder,
) -> (Value, Value, Value) {
let (a, b, c) = state.pop3();
let bitcast_a = optionally_bitcast_vector(a, needed_type, builder);
let bitcast_b = optionally_bitcast_vector(b, needed_type, builder);
let bitcast_c = optionally_bitcast_vector(c, needed_type, builder);
(bitcast_a, bitcast_b, bitcast_c)
}
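/// Collect the (expected type, argument) pairs whose vector argument type does
/// not match the corresponding signature parameter, so that callers can
/// bitcast them in place. `param_predicate` selects which parameters count as
/// Wasm-level values.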
fn bitcast_arguments<'a>(
builder: &FunctionBuilder,
arguments: &'a mut [Value],
params: &[ir::AbiParam],
param_predicate: impl Fn(usize) -> bool,
) -> Vec<(Type, &'a mut Value)> {
let filtered_param_types = params
.iter()
.enumerate()
.filter(|(i, _)| param_predicate(*i))
.map(|(_, param)| param.value_type);
let pairs = filtered_param_types.zip_eq(arguments.iter_mut());
pairs
.filter(|(param_type, _)| param_type.is_vector())
.filter(|(param_type, arg)| {
let arg_type = builder.func.dfg.value_type(**arg);
assert!(
arg_type.is_vector(),
"unexpected type mismatch: expected {}, argument {} was actually of type {}",
param_type,
*arg,
arg_type
);
arg_type != *param_type
})
.collect()
}
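/// Bitcast vector return values to the types required by the function
/// signature before returning.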
pub fn bitcast_wasm_returns<FE: FuncEnvironment + ?Sized>(
environ: &mut FE,
arguments: &mut [Value],
builder: &mut FunctionBuilder,
) {
let changes = bitcast_arguments(builder, arguments, &builder.func.signature.returns, |i| {
environ.is_wasm_return(&builder.func.signature, i)
});
for (t, arg) in changes {
let mut flags = MemFlags::new();
flags.set_endianness(ir::Endianness::Little);
*arg = builder.ins().bitcast(t, flags, *arg);
}
}
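/// Bitcast vector call arguments to the parameter types of the callee
/// signature before a call.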
fn bitcast_wasm_params<FE: FuncEnvironment + ?Sized>(
environ: &mut FE,
callee_signature: ir::SigRef,
arguments: &mut [Value],
builder: &mut FunctionBuilder,
) {
let callee_signature = &builder.func.dfg.signatures[callee_signature];
let changes = bitcast_arguments(builder, arguments, &callee_signature.params, |i| {
environ.is_wasm_parameter(&callee_signature, i)
});
for (t, arg) in changes {
let mut flags = MemFlags::new();
flags.set_endianness(ir::Endianness::Little);
*arg = builder.ins().bitcast(t, flags, *arg);
}
}