use crate::abi::MemoryMap;
use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicBool, AtomicU32, AtomicU64};
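// Defines three mirror layouts of the same table of native entry points: `AddressTableRaw`
// (actual function pointers), `AddressTablePacked` (unaligned `u64`s) and `AddressTable`
// (plain `u64`s), plus conversions between them.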
macro_rules! define_address_table {
    ($($name:ident: $type:ty,)+) => {
        #[repr(C)]
        pub struct AddressTableRaw {
            $(pub $name: $type),+
        }

        #[derive(Copy, Clone)]
        #[repr(packed)]
        pub struct AddressTablePacked {
            $(pub $name: u64),+
        }

        #[derive(Copy, Clone)]
        pub struct AddressTable {
            $(pub $name: u64),+
        }

        impl AddressTable {
            #[inline]
            pub fn from_raw(table: AddressTableRaw) -> Self {
                Self {
                    $(
                        $name: table.$name as u64
                    ),+
                }
            }

            pub const fn from_packed(table: &AddressTablePacked) -> Self {
                Self {
                    $(
                        $name: table.$name
                    ),+
                }
            }
        }

        static_assert!(core::mem::size_of::<AddressTableRaw>() == core::mem::size_of::<AddressTablePacked>());
        static_assert!(core::mem::size_of::<AddressTableRaw>() == core::mem::size_of::<AddressTable>());
    }
}
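// The low-level routines which the generated native code jumps to: triggering a hostcall,
// trapping, returning to the host, tracing, and growing the heap via `sbrk`.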
define_address_table! {
    syscall_hostcall: unsafe extern "C" fn(u32),
    syscall_trap: unsafe extern "C" fn() -> !,
    syscall_return: unsafe extern "C" fn() -> !,
    syscall_trace: unsafe extern "C" fn(u32, u64),
    syscall_sbrk: unsafe extern "C" fn(u64) -> u32,
}
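// A hedged sketch (not code from this module) of how host-side code might build the table once
// at startup from its raw entry points; the `syscall_*` bindings below are hypothetical locals:
//
//     let table = AddressTable::from_raw(AddressTableRaw {
//         syscall_hostcall,
//         syscall_trap,
//         syscall_return,
//         syscall_trace,
//         syscall_sbrk,
//     });
//
/// The address where the compiled native code is mapped inside of the sandbox.
/// It lies above the 32-bit guest address space, so it is not directly reachable by the guest program.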
pub const VM_ADDR_NATIVE_CODE: u64 = 0x100000000;
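/// The address where the indirect jump table is mapped inside of the sandbox.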
pub const VM_ADDR_JUMP_TABLE: u64 = 0x800000000;
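/// The address of the special jump table entry corresponding to `crate::abi::VM_ADDR_RETURN_TO_HOST`,
/// used to return control back to the host.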
pub const VM_ADDR_JUMP_TABLE_RETURN_TO_HOST: u64 = VM_ADDR_JUMP_TABLE + ((crate::abi::VM_ADDR_RETURN_TO_HOST as u64) << 3);
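/// A special hostcall number used to abort the execution.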
pub const HOSTCALL_ABORT_EXECUTION: u32 = !0;
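/// A special hostcall number used to request more heap space (`sbrk`).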
pub const HOSTCALL_SBRK: u32 = !0 - 1;
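/// A sentinel value signifying that no instruction number is currently available.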
pub const SANDBOX_EMPTY_NTH_INSTRUCTION: u32 = !0;
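/// A sentinel value signifying that no native program counter is currently available.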
pub const SANDBOX_EMPTY_NATIVE_PROGRAM_COUNTER: u64 = 0;
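/// The address at which the shared `VmCtx` is mapped inside of the sandbox.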
pub const VM_ADDR_VMCTX: u64 = 0x400000000;
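/// The address of the signal stack inside of the sandbox.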
pub const VM_ADDR_SIGSTACK: u64 = 0x500000000;
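/// The lowest address of the native stack inside of the sandbox.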
pub const VM_ADDR_NATIVE_STACK_LOW: u64 = 0x600000000;
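/// The size of the native stack.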
pub const VM_ADDR_NATIVE_STACK_SIZE: u64 = 0x4000;
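/// The address one past the end of the native stack.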
pub const VM_ADDR_NATIVE_STACK_HIGH: u64 = VM_ADDR_NATIVE_STACK_LOW + VM_ADDR_NATIVE_STACK_SIZE;
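/// The maximum number of native code bytes that a single guest instruction is allowed to compile into.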
pub const VM_COMPILER_MAXIMUM_INSTRUCTION_LENGTH: u32 = 53;
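/// The maximum number of native code bytes reserved for the compiler's epilogue.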
pub const VM_COMPILER_MAXIMUM_EPILOGUE_LENGTH: u32 = 1024 * 1024;
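/// The maximum number of bytes the jump table can actually occupy.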
pub const VM_SANDBOX_MAXIMUM_JUMP_TABLE_SIZE: u64 = (crate::abi::VM_MAXIMUM_INSTRUCTION_COUNT as u64 + 1)
    * core::mem::size_of::<u64>() as u64
    * crate::abi::VM_CODE_ADDRESS_ALIGNMENT as u64;
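/// The maximum number of bytes of virtual address space reserved for the jump table.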
pub const VM_SANDBOX_MAXIMUM_JUMP_TABLE_VIRTUAL_SIZE: u64 = 0x100000000 * core::mem::size_of::<u64>() as u64;
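/// The maximum size, in bytes, of the compiled native code.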
pub const VM_SANDBOX_MAXIMUM_NATIVE_CODE_SIZE: u32 = 512 * 1024 * 1024 - 1;
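/// Describes how a sandbox instance's memory should be set up: the guest memory map, the sizes of
/// the backing data file descriptors, the code and the jump table, and the address used to return
/// to the host (`sysreturn_address`).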
#[derive(Clone)]
#[repr(C)]
pub struct SandboxMemoryConfig {
    pub memory_map: MemoryMap,
    pub ro_data_fd_size: u32,
    pub rw_data_fd_size: u32,
    pub code_size: u32,
    pub jump_table_size: u32,
    pub sysreturn_address: u64,
}
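/// An RPC flag signalling that the sandbox's memory should be reconfigured.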
pub const VM_RPC_FLAG_RECONFIGURE: u32 = 1 << 0;
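/// The addresses and lengths of the process's original stack, vDSO and vvar mappings,
/// presumably recorded while the sandbox is being initialized.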
#[repr(C)]
pub struct VmInit {
    pub stack_address: AtomicU64,
    pub stack_length: AtomicU64,
    pub vdso_address: AtomicU64,
    pub vdso_length: AtomicU64,
    pub vvar_address: AtomicU64,
    pub vvar_length: AtomicU64,
}
const MESSAGE_BUFFER_SIZE: usize = 512;
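/// Aligns its contents to a 64-byte cache line to avoid false sharing between adjacent fields.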
#[repr(align(64))]
pub struct CacheAligned<T>(pub T);
impl<T> core::ops::Deref for CacheAligned<T> {
    type Target = T;

    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T> core::ops::DerefMut for CacheAligned<T> {
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
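/// Heap state shared with the guest program: the current top of the heap and the current heap threshold.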
#[repr(C)]
pub struct VmCtxHeapInfo {
    pub heap_top: UnsafeCell<u64>,
    pub heap_threshold: UnsafeCell<u64>,
}
const REG_COUNT: usize = crate::program::Reg::ALL.len();
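/// The part of the VM context accessed by the low-level syscall/hostcall entry points;
/// `#[repr(C)]` keeps its layout predictable across that boundary.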
#[repr(C)]
pub struct VmCtxSyscall {
    /// The gas counter.
    pub gas: UnsafeCell<i64>,
    /// The hostcall number currently being triggered.
    pub hostcall: UnsafeCell<u32>,
    /// The guest's register values.
    pub regs: UnsafeCell<[u32; REG_COUNT]>,
    /// The number of the guest instruction currently being executed.
    pub nth_instruction: UnsafeCell<u32>,
    /// The native instruction pointer.
    pub rip: UnsafeCell<u64>,
}
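/// Internal performance counters for the syscall wait loop.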
#[repr(C)]
pub struct VmCtxCounters {
    pub syscall_wait_loop_start: UnsafeCell<u64>,
    pub syscall_futex_wait: UnsafeCell<u64>,
}
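/// The VM context shared between the host and the sandbox, expected to live at `VM_ADDR_VMCTX`
/// inside of the sandbox's address space.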
#[allow(clippy::partial_pub_fields)]
#[repr(C)]
pub struct VmCtx {
    /// State accessed by the syscall entry points; use the accessor methods below.
    syscall_ffi: CacheAligned<VmCtxSyscall>,
    /// The current heap state.
    pub heap_info: VmCtxHeapInfo,
    /// The futex word used to synchronize the sandbox with the host.
    pub futex: CacheAligned<AtomicU32>,
    /// The address associated with the current RPC request.
    pub rpc_address: UnsafeCell<u64>,
    /// Flags for the current RPC request (see `VM_RPC_FLAG_RECONFIGURE`).
    pub rpc_flags: UnsafeCell<u32>,
    /// The `sbrk` argument for the current RPC request.
    pub rpc_sbrk: UnsafeCell<u32>,
    /// The sandbox's current memory configuration.
    pub memory_config: UnsafeCell<SandboxMemoryConfig>,
    /// Set when the sandbox's memory has been modified.
    pub is_memory_dirty: AtomicBool,
    /// Internal performance counters.
    pub counters: CacheAligned<VmCtxCounters>,
    /// Information gathered during initialization.
    pub init: VmInit,
    /// The length of the message currently in `message_buffer`.
    pub message_length: UnsafeCell<u32>,
    /// A buffer for diagnostic messages produced by the sandbox.
    pub message_buffer: UnsafeCell<[u8; MESSAGE_BUFFER_SIZE]>,
}
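// The whole context must fit into a single 4 KiB page.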
static_assert!(core::mem::size_of::<VmCtx>() <= 4096);
/// Futex state: the sandbox is busy; the host should wait until the state changes.
pub const VMCTX_FUTEX_BUSY: u32 = 0;
/// Futex state: the sandbox has finished its initialization.
pub const VMCTX_FUTEX_INIT: u32 = 1;
/// Futex state: the sandbox is idle and waiting for the host.
pub const VMCTX_FUTEX_IDLE: u32 = 2;
/// Futex state: the sandbox has triggered a hostcall and is waiting for the host to handle it.
pub const VMCTX_FUTEX_HOSTCALL: u32 = 3;
/// Futex state: the sandbox has triggered a trap.
pub const VMCTX_FUTEX_TRAP: u32 = 4;
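// A hedged sketch (not actual host code from this crate) of how a host might poll these futex
// states; a real implementation would presumably issue a `FUTEX_WAIT` instead of spinning:
//
//     use core::sync::atomic::Ordering;
//
//     fn wait_for_sandbox(vmctx: &VmCtx) -> u32 {
//         loop {
//             let state = vmctx.futex.load(Ordering::Acquire);
//             if state != VMCTX_FUTEX_BUSY {
//                 // VMCTX_FUTEX_IDLE, VMCTX_FUTEX_HOSTCALL or VMCTX_FUTEX_TRAP.
//                 return state;
//             }
//             core::hint::spin_loop();
//         }
//     }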
impl VmCtx {
    /// Creates a zeroed VM context.
    pub const fn zeroed() -> Self {
        VmCtx {
            futex: CacheAligned(AtomicU32::new(VMCTX_FUTEX_BUSY)),
            rpc_address: UnsafeCell::new(0),
            rpc_flags: UnsafeCell::new(0),
            rpc_sbrk: UnsafeCell::new(0),
            memory_config: UnsafeCell::new(SandboxMemoryConfig {
                memory_map: MemoryMap::empty(),
                ro_data_fd_size: 0,
                rw_data_fd_size: 0,
                code_size: 0,
                jump_table_size: 0,
                sysreturn_address: 0,
            }),
            is_memory_dirty: AtomicBool::new(false),
            syscall_ffi: CacheAligned(VmCtxSyscall {
                gas: UnsafeCell::new(0),
                hostcall: UnsafeCell::new(0),
                regs: UnsafeCell::new([0; REG_COUNT]),
                rip: UnsafeCell::new(0),
                nth_instruction: UnsafeCell::new(0),
            }),
            heap_info: VmCtxHeapInfo {
                heap_top: UnsafeCell::new(0),
                heap_threshold: UnsafeCell::new(0),
            },
            counters: CacheAligned(VmCtxCounters {
                syscall_wait_loop_start: UnsafeCell::new(0),
                syscall_futex_wait: UnsafeCell::new(0),
            }),
            init: VmInit {
                stack_address: AtomicU64::new(0),
                stack_length: AtomicU64::new(0),
                vdso_address: AtomicU64::new(0),
                vdso_length: AtomicU64::new(0),
                vvar_address: AtomicU64::new(0),
                vvar_length: AtomicU64::new(0),
            },
            message_length: UnsafeCell::new(0),
            message_buffer: UnsafeCell::new([0; MESSAGE_BUFFER_SIZE]),
        }
    }
    /// Creates a fresh VM context.
    pub const fn new() -> Self {
        let mut vmctx = Self::zeroed();
        vmctx.syscall_ffi.0.nth_instruction = UnsafeCell::new(SANDBOX_EMPTY_NTH_INSTRUCTION);
        vmctx
    }

    #[inline(always)]
    pub const fn gas(&self) -> &UnsafeCell<i64> {
        &self.syscall_ffi.0.gas
    }

    #[inline(always)]
    pub const fn heap_info(&self) -> &VmCtxHeapInfo {
        &self.heap_info
    }

    #[inline(always)]
    pub const fn hostcall(&self) -> &UnsafeCell<u32> {
        &self.syscall_ffi.0.hostcall
    }

    #[inline(always)]
    pub const fn regs(&self) -> &UnsafeCell<[u32; REG_COUNT]> {
        &self.syscall_ffi.0.regs
    }

    #[inline(always)]
    pub const fn rip(&self) -> &UnsafeCell<u64> {
        &self.syscall_ffi.0.rip
    }

    #[inline(always)]
    pub const fn nth_instruction(&self) -> &UnsafeCell<u32> {
        &self.syscall_ffi.0.nth_instruction
    }
}
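// Compile-time sanity checks on the sandbox's address space layout and size limits.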
static_assert!(VM_ADDR_JUMP_TABLE_RETURN_TO_HOST > VM_ADDR_JUMP_TABLE);
static_assert!(VM_ADDR_JUMP_TABLE_RETURN_TO_HOST % 0x4000 == 0);
static_assert!(VM_SANDBOX_MAXIMUM_JUMP_TABLE_SIZE <= VM_SANDBOX_MAXIMUM_JUMP_TABLE_VIRTUAL_SIZE);
static_assert!(VM_ADDR_JUMP_TABLE + VM_SANDBOX_MAXIMUM_JUMP_TABLE_SIZE < VM_ADDR_JUMP_TABLE_RETURN_TO_HOST);
static_assert!(VM_ADDR_JUMP_TABLE_RETURN_TO_HOST < VM_ADDR_JUMP_TABLE + VM_SANDBOX_MAXIMUM_JUMP_TABLE_VIRTUAL_SIZE);
static_assert!(VM_ADDR_JUMP_TABLE.count_ones() == 1);
static_assert!((1 << VM_ADDR_JUMP_TABLE.trailing_zeros()) == VM_ADDR_JUMP_TABLE);
static_assert!(
    VM_SANDBOX_MAXIMUM_NATIVE_CODE_SIZE
        >= crate::abi::VM_MAXIMUM_INSTRUCTION_COUNT * VM_COMPILER_MAXIMUM_INSTRUCTION_LENGTH + VM_COMPILER_MAXIMUM_EPILOGUE_LENGTH
);
static_assert!(VM_ADDR_NATIVE_CODE > 0xffffffff);
static_assert!(VM_ADDR_VMCTX > 0xffffffff);
static_assert!(VM_ADDR_NATIVE_STACK_LOW > 0xffffffff);