#![allow(clippy::manual_range_contains)]

use polkavm_common::{
    abi::MemoryMap,
    error::{ExecutionError, Trap},
    program::Reg,
    utils::{align_to_next_page_usize, byte_slice_init, Access, AsUninitSliceMut, Gas},
    zygote::{
        AddressTable,
        AddressTableRaw,
        CacheAligned,
        SandboxMemoryConfig,
        VM_ADDR_JUMP_TABLE,
        VM_ADDR_JUMP_TABLE_RETURN_TO_HOST,
        VM_SANDBOX_MAXIMUM_NATIVE_CODE_SIZE,
        VM_SANDBOX_MAXIMUM_JUMP_TABLE_VIRTUAL_SIZE,
    },
    VM_RPC_FLAG_CLEAR_PROGRAM_AFTER_EXECUTION,
    VM_RPC_FLAG_RESET_MEMORY_AFTER_EXECUTION,
    VM_RPC_FLAG_RESET_MEMORY_BEFORE_EXECUTION,
};

use super::ExecuteArgs;

use core::ops::Range;
use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicUsize, Ordering};
use core::mem::MaybeUninit;
use std::borrow::Cow;
use std::sync::{Arc, Mutex};

use super::{SandboxKind, SandboxInit, SandboxVec, get_native_page_size};
use crate::api::{BackendAccess, CompiledModuleKind, MemoryAccessError, Module, HostcallHandler};
use crate::compiler::CompiledModule;

#[cfg(target_os = "linux")]
#[allow(non_camel_case_types)]
mod sys {
    pub use polkavm_linux_raw::{c_void, c_int, size_t, siginfo_t, SIG_IGN, SIG_DFL, ucontext as ucontext_t};
    pub const SIGSEGV: c_int = polkavm_linux_raw::SIGSEGV as c_int;
    pub const SIGILL: c_int = polkavm_linux_raw::SIGILL as c_int;
    pub const PROT_READ: c_int = polkavm_linux_raw::PROT_READ as c_int;
    pub const PROT_WRITE: c_int = polkavm_linux_raw::PROT_WRITE as c_int;
    pub const PROT_EXEC: c_int = polkavm_linux_raw::PROT_EXEC as c_int;
    pub const MAP_ANONYMOUS: c_int = polkavm_linux_raw::MAP_ANONYMOUS as c_int;
    pub const MAP_PRIVATE: c_int = polkavm_linux_raw::MAP_PRIVATE as c_int;
    pub const MAP_FIXED: c_int = polkavm_linux_raw::MAP_FIXED as c_int;
    pub const MAP_FAILED: *mut c_void = !0 as *mut c_void;
    pub const SA_SIGINFO: c_int = polkavm_linux_raw::SA_SIGINFO as c_int;
    pub const SA_NODEFER: c_int = polkavm_linux_raw::SA_NODEFER as c_int;

    pub type sighandler_t = size_t;

    #[repr(C)]
    pub struct sigset_t {
        #[cfg(target_pointer_width = "32")]
        __val: [u32; 32],
        #[cfg(target_pointer_width = "64")]
        __val: [u64; 16],
    }

    #[repr(C)]
    pub struct sigaction {
        pub sa_sigaction: sighandler_t,
        pub sa_mask: sigset_t,
        pub sa_flags: c_int,
        pub sa_restorer: Option<extern "C" fn()>,
    }

    extern "C" {
        pub fn mmap(
            addr: *mut c_void,
            len: size_t,
            prot: c_int,
            flags: c_int,
            fd: c_int,
            offset: i64
        ) -> *mut c_void;

        pub fn munmap(
            addr: *mut c_void,
            len: size_t
        ) -> c_int;

        pub fn mprotect(
            addr: *mut c_void,
            len: size_t,
            prot: c_int
        ) -> c_int;

        pub fn sigaction(
            signum: c_int,
            act: *const sigaction,
            oldact: *mut sigaction
        ) -> c_int;

        pub fn sigemptyset(set: *mut sigset_t) -> c_int;
    }
}

#[cfg(not(target_os = "linux"))]
use libc as sys;

use sys::{c_int, size_t, PROT_READ, PROT_WRITE, PROT_EXEC, MAP_ANONYMOUS, MAP_PRIVATE, MAP_FIXED};
use core::ffi::c_void;

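// The `VmCtx` of a given sandbox lives directly below its guest memory, at this
// fixed offset from the guest memory base. A `static_assert` further down
// guarantees that the structure actually fits into those 4096 bytes.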
pub(crate) const GUEST_MEMORY_TO_VMCTX_OFFSET: isize = -4096;

fn get_guest_memory_offset() -> usize {
    get_native_page_size()
}

#[derive(Debug)]
pub struct Error(std::io::Error);

impl core::fmt::Display for Error {
    fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result {
        self.0.fmt(fmt)
    }
}

impl From<&'static str> for Error {
    fn from(value: &'static str) -> Self {
        Self(std::io::Error::new(std::io::ErrorKind::Other, value))
    }
}

impl From<std::io::Error> for Error {
    fn from(error: std::io::Error) -> Self {
        Self(error)
    }
}

pub struct Mmap {
    pointer: *mut c_void,
    length: usize,
}

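// SAFETY: An `Mmap` exclusively owns its mapping, which is not tied to the thread that created it.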
unsafe impl Send for Mmap {}

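// SAFETY: Every method which mutates the mapping takes `&mut self`, so sharing `&Mmap` between threads is safe.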
unsafe impl Sync for Mmap {}

impl Mmap {
    unsafe fn raw_mmap(
        address: *mut c_void,
        length: usize,
        protection: c_int,
        flags: c_int,
    ) -> Result<Self, Error> {
        let pointer = {
            let pointer = sys::mmap(address, length, protection, flags, -1, 0);
            if pointer == sys::MAP_FAILED {
                return Err(Error(std::io::Error::last_os_error()));
            }
            pointer
        };

        Ok(Self { pointer, length })
    }

    fn mmap_within(&mut self, offset: usize, length: usize, protection: c_int) -> Result<(), Error> {
        if !offset.checked_add(length).map_or(false, |end| end <= self.length) {
            return Err("out of bounds mmap".into());
        }

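        // SAFETY: The range is fully contained within the original mapping,
        // so remapping it with `MAP_FIXED` only replaces pages which we own.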
        unsafe {
            let pointer = self.pointer.cast::<u8>().add(offset).cast();
            core::mem::forget(Self::raw_mmap(pointer, length, protection, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE)?);
        }

        Ok(())
    }

    fn unmap_inplace(&mut self) -> Result<(), Error> {
        if self.length > 0 {
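            // SAFETY: The pointer and length describe a live mapping owned by this `Mmap`.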
            unsafe {
                if sys::munmap(self.pointer, self.length) < 0 {
                    return Err(Error(std::io::Error::last_os_error()));
                }
            }

            self.length = 0;
            self.pointer = core::ptr::NonNull::<u8>::dangling().as_ptr().cast::<c_void>();
        }

        Ok(())
    }

    pub fn unmap(mut self) -> Result<(), Error> {
        self.unmap_inplace()
    }

    pub fn reserve_address_space(
        length: size_t
    ) -> Result<Self, Error> {
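        // SAFETY: A null address hint with no protection bits merely reserves a fresh range of address space.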
        unsafe {
            Mmap::raw_mmap(core::ptr::null_mut(), length, 0, MAP_ANONYMOUS | MAP_PRIVATE)
        }
    }

    pub fn mprotect(&mut self, offset: usize, length: usize, protection: c_int) -> Result<(), Error> {
        if !offset.checked_add(length).map_or(false, |end| end <= self.length) {
            return Err("out of bounds mprotect".into());
        }

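        // SAFETY: The range was verified to be within the bounds of the mapping.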
        unsafe {
            if sys::mprotect(self.pointer.add(offset), length, protection) < 0 {
                return Err(Error(std::io::Error::last_os_error()));
            }
        }

        Ok(())
    }

    pub fn modify_and_protect(&mut self, offset: usize, length: usize, protection: c_int, callback: impl FnOnce(&mut [u8])) -> Result<(), Error> {
        self.mprotect(offset, length, PROT_READ | PROT_WRITE)?;
        callback(&mut self.as_slice_mut()[offset..offset + length]);
        if protection != PROT_READ | PROT_WRITE {
            self.mprotect(offset, length, protection)?;
        }
        Ok(())
    }

    pub fn as_ptr(&self) -> *const c_void {
        self.pointer
    }

    pub fn as_mut_ptr(&self) -> *mut c_void {
        self.pointer
    }

    pub fn as_slice(&self) -> &[u8] {
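        // SAFETY: The pointer is valid for `length` bytes for as long as `self` is alive.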
        unsafe { core::slice::from_raw_parts(self.as_ptr().cast::<u8>(), self.length) }
    }

    pub fn as_slice_mut(&mut self) -> &mut [u8] {
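        // SAFETY: The pointer is valid for `length` bytes, and `&mut self` guarantees exclusive access.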
        unsafe { core::slice::from_raw_parts_mut(self.as_mut_ptr().cast::<u8>(), self.length) }
    }

    pub fn len(&self) -> usize {
        self.length
    }
}

impl Default for Mmap {
    fn default() -> Self {
        Self {
            pointer: core::ptr::NonNull::<u8>::dangling().as_ptr().cast::<c_void>(),
            length: 0,
        }
    }
}

impl Drop for Mmap {
    fn drop(&mut self) {
        let _ = self.unmap_inplace();
    }
}

static mut OLD_SIGSEGV: MaybeUninit<sys::sigaction> = MaybeUninit::uninit();
static mut OLD_SIGILL: MaybeUninit<sys::sigaction> = MaybeUninit::uninit();

#[cfg(any(target_os = "macos", target_os = "freebsd"))]
static mut OLD_SIGBUS: MaybeUninit<sys::sigaction> = MaybeUninit::uninit();

unsafe extern "C" fn signal_handler(signal: c_int, info: &sys::siginfo_t, context: &sys::ucontext_t) {
    let old = match signal {
        sys::SIGSEGV => &OLD_SIGSEGV,
        sys::SIGILL => &OLD_SIGILL,
        #[cfg(any(target_os = "macos", target_os = "freebsd"))]
        sys::SIGBUS => &OLD_SIGBUS,
        _ => unreachable!("received unknown signal")
    };

    let vmctx = THREAD_VMCTX.with(|thread_ctx| *thread_ctx.get());
    if !vmctx.is_null() {
        let rip;
        #[cfg(target_os = "linux")]
        {
            rip = context.uc_mcontext.rip;
        }
        #[cfg(target_os = "macos")]
        {
            rip = (*context.uc_mcontext).__ss.__rip;
        }
        #[cfg(target_os = "freebsd")]
        {
            rip = context.uc_mcontext.mc_rip as u64;
        }

        let vmctx = &mut *vmctx;
        if vmctx.program_range.contains(&rip) {
            vmctx.native_program_counter = Some(rip);

            log::trace!("Trap triggered at 0x{rip:x}");
            trigger_trap(vmctx);
        }
    }

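    // The signal wasn't triggered by the guest program, so chain to whichever handler was registered before ours.
    // SAFETY: The old handler was initialized when our own handler was registered.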
    let old = &*old.as_ptr();
    if old.sa_sigaction == sys::SIG_IGN || old.sa_sigaction == sys::SIG_DFL {
        sys::sigaction(signal, old, core::ptr::null_mut());
        return;
    }

    if old.sa_flags & sys::SA_SIGINFO != 0 {
        let old_handler = core::mem::transmute::<usize, extern "C" fn(c_int, &sys::siginfo_t, &sys::ucontext_t)>(old.sa_sigaction);
        old_handler(signal, info, context);
    } else {
        let old_handler = core::mem::transmute::<usize, extern "C" fn(c_int)>(old.sa_sigaction);
        old_handler(signal);
    }
}

#[allow(clippy::fn_to_numeric_cast_any)]
unsafe fn register_signal_handler_for_signal(signal: c_int, old_sa: &mut MaybeUninit<sys::sigaction>) -> Result<(), Error> {
    let mut sa: sys::sigaction = core::mem::zeroed();
    let old_sa = old_sa.write(core::mem::zeroed());

    sa.sa_flags = sys::SA_SIGINFO | sys::SA_NODEFER;
    sa.sa_sigaction = signal_handler as usize;
    sys::sigemptyset(&mut sa.sa_mask);
    if sys::sigaction(signal, &sa, old_sa) < 0 {
        return Err(Error(std::io::Error::last_os_error()));
    }

    Ok(())
}

unsafe fn register_signal_handlers() -> Result<(), Error> {
    register_signal_handler_for_signal(sys::SIGSEGV, &mut OLD_SIGSEGV)?;
    register_signal_handler_for_signal(sys::SIGILL, &mut OLD_SIGILL)?;
    #[cfg(any(target_os = "macos", target_os = "freebsd"))]
    register_signal_handler_for_signal(sys::SIGBUS, &mut OLD_SIGBUS)?;
    Ok(())
}

fn register_signal_handlers_if_necessary() -> Result<(), Error> {
    const STATE_UNINITIALIZED: usize = 0;
    const STATE_INITIALIZING: usize = 1;
    const STATE_FINISHED: usize = 2;
    const STATE_ERROR: usize = 3;

    static FLAG: AtomicUsize = AtomicUsize::new(STATE_UNINITIALIZED);
    if FLAG.load(Ordering::Relaxed) == STATE_FINISHED {
        return Ok(());
    }

    match FLAG.compare_exchange(STATE_UNINITIALIZED, STATE_INITIALIZING, Ordering::Acquire, Ordering::Relaxed) {
        Ok(_) => {
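            // SAFETY: We won the race to initialize, so no other thread is concurrently registering the handlers.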
            let result = unsafe { register_signal_handlers() };
            if let Err(error) = result {
                FLAG.store(STATE_ERROR, Ordering::Release);
                Err(error)
            } else {
                FLAG.store(STATE_FINISHED, Ordering::Release);
                Ok(())
            }
        },
        Err(_) => {
            loop {
                match FLAG.load(Ordering::Relaxed) {
                    STATE_INITIALIZING => continue,
                    STATE_FINISHED => return Ok(()),
                    _ => return Err("failed to set up signal handlers".into())
                }
            }
        }
    }
}

thread_local! {
    static THREAD_VMCTX: UnsafeCell<*mut VmCtx> = const { UnsafeCell::new(core::ptr::null_mut()) };
}

unsafe fn sysreturn(vmctx: &mut VmCtx) -> ! {
    debug_assert_ne!(vmctx.return_address, 0);
    debug_assert_ne!(vmctx.return_stack_pointer, 0);

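    // SAFETY: The return address and stack pointer were stashed in the `VmCtx` by the
    // entry trampoline in `execute_impl`, so restoring them jumps straight back into the host.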
    unsafe {
        core::arch::asm!(r#"
            // Restore the stack pointer to its original value.
            mov rsp, [{vmctx} + 8]

            // Jump back
            jmp [{vmctx}]
            "#,
            vmctx = in(reg) vmctx,
            options(noreturn)
        );
    }
}

#[repr(C)]
enum TrapKind {
    None,
    Trap,
    Error,
}

unsafe fn trigger_trap(vmctx: &mut VmCtx) -> ! {
    vmctx.trap_kind = TrapKind::Trap;
    sysreturn(vmctx);
}

unsafe fn trigger_error(vmctx: &mut VmCtx) -> ! {
    vmctx.trap_kind = TrapKind::Error;
    sysreturn(vmctx);
}

const REG_COUNT: usize = polkavm_common::program::Reg::ALL.len();

#[repr(C)]
struct HeapInfo {
    heap_top: u64,
    heap_threshold: u64,
}

#[repr(C)]
struct VmCtx {
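    // NOTE: The inline assembly in this file accesses `return_address` at offset 0
    // and `return_stack_pointer` at offset 8, so these two fields must stay first.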
    return_address: usize,
    return_stack_pointer: usize,

    gas: i64,

    heap_info: HeapInfo,
    memory_map: MemoryMap,

    program_range: Range<u64>,
    trap_kind: TrapKind,

    regs: CacheAligned<[u32; REG_COUNT]>,
    hostcall_handler: Option<HostcallHandler<'static>>,
    sandbox: *mut Sandbox,
    instruction_number: Option<u32>,
    native_program_counter: Option<u64>,
}

impl VmCtx {
    pub fn new() -> Self {
        VmCtx {
            return_address: 0,
            return_stack_pointer: 0,
            trap_kind: TrapKind::None,
            program_range: 0..0,
            heap_info: HeapInfo {
                heap_top: 0,
                heap_threshold: 0,
            },
            memory_map: MemoryMap::empty(),
            gas: 0,
            regs: CacheAligned([0; REG_COUNT]),
            hostcall_handler: None,
            sandbox: core::ptr::null_mut(),
            instruction_number: None,
            native_program_counter: None,
        }
    }

    #[inline(always)]
    pub const fn regs(&self) -> &[u32; REG_COUNT] {
        &self.regs.0
    }
}

polkavm_common::static_assert!(core::mem::size_of::<VmCtx>() <= 4096);

#[derive(Default)]
pub struct SandboxConfig {
}

impl super::SandboxConfig for SandboxConfig {
    fn enable_logger(&mut self, _value: bool) {
    }
}

unsafe fn vmctx_ptr(memory: &Mmap) -> *const VmCtx {
    memory.as_ptr().cast::<u8>().offset(get_guest_memory_offset() as isize + GUEST_MEMORY_TO_VMCTX_OFFSET).cast()
}

#[allow(clippy::needless_pass_by_ref_mut)]
unsafe fn vmctx_mut_ptr(memory: &mut Mmap) -> *mut VmCtx {
    memory.as_mut_ptr().cast::<u8>().offset(get_guest_memory_offset() as isize + GUEST_MEMORY_TO_VMCTX_OFFSET).cast()
}

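/// Returns the `VmCtx` of the sandbox currently executing on this thread.
///
/// Must only be called while guest code is running on this thread, i.e. while
/// `THREAD_VMCTX` points to a live `VmCtx`.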
unsafe fn conjure_vmctx<'a>() -> &'a mut VmCtx {
    &mut *THREAD_VMCTX.with(|thread_ctx| *thread_ctx.get())
}

unsafe extern "C" fn syscall_hostcall(hostcall: u32) {
    let vmctx = unsafe { conjure_vmctx() };

    let Some(hostcall_handler) = vmctx.hostcall_handler.as_mut().take() else {
        trigger_error(vmctx);
    };

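    // SAFETY: `vmctx.sandbox` is set to a valid pointer for the whole duration of `execute_impl`.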
    let sandbox = unsafe {
        &mut *vmctx.sandbox
    };

    match hostcall_handler(hostcall, super::Sandbox::access(sandbox).into()) {
        Ok(()) => {}
        Err(_) => trigger_trap(vmctx)
    }
}

unsafe extern "C" fn syscall_trace(instruction_number: u32, rip: u64) {
    let vmctx = unsafe { conjure_vmctx() };

    vmctx.instruction_number = Some(instruction_number);
    vmctx.native_program_counter = Some(rip);

    let Some(hostcall_handler) = vmctx.hostcall_handler.as_mut().take() else {
        return;
    };

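    // SAFETY: Same as in `syscall_hostcall`: the sandbox pointer is valid while guest code runs.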
    let sandbox = unsafe {
        &mut *vmctx.sandbox
    };

    match hostcall_handler(polkavm_common::HOSTCALL_TRACE, super::Sandbox::access(sandbox).into()) {
        Ok(()) => {}
        Err(_) => trigger_trap(vmctx)
    }
}

unsafe extern "C" fn syscall_trap() -> ! {
    let vmctx = unsafe { conjure_vmctx() };

    trigger_trap(vmctx);
}

unsafe extern "C" fn syscall_return() -> ! {
    let vmctx = unsafe { conjure_vmctx() };

    sysreturn(vmctx);
}

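/// Grows the guest's heap so that it ends at `pending_heap_top`, mapping in new pages as needed.
///
/// Returns `Ok(None)` if the request would exceed the maximum heap size, and `Err(())` on failure.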
unsafe fn sbrk(vmctx: &mut VmCtx, pending_heap_top: u64) -> Result<Option<u32>, ()> {
    if pending_heap_top > u64::from(vmctx.memory_map.heap_base() + vmctx.memory_map.max_heap_size()) {
        return Ok(None);
    }

    let Some(start) = align_to_next_page_usize(vmctx.memory_map.page_size() as usize, vmctx.heap_info.heap_top as usize) else { return Err(()); };
    let Some(end) = align_to_next_page_usize(vmctx.memory_map.page_size() as usize, pending_heap_top as usize) else { return Err(()); };

    let size = end - start;
    if size > 0 {
        let guest_memory_base = (vmctx as *mut VmCtx).cast::<u8>().offset(-GUEST_MEMORY_TO_VMCTX_OFFSET);
        let pointer = sys::mmap(
            guest_memory_base.add(start).cast::<core::ffi::c_void>(),
            end - start,
            sys::PROT_READ | sys::PROT_WRITE,
            sys::MAP_FIXED | sys::MAP_PRIVATE | sys::MAP_ANONYMOUS,
            -1,
            0,
        );

        if pointer == sys::MAP_FAILED {
            log::error!("sbrk mmap failed!");
            return Err(());
        }
    }

    vmctx.heap_info.heap_top = pending_heap_top;
    vmctx.heap_info.heap_threshold = end as u64;

    Ok(Some(pending_heap_top as u32))
}

unsafe extern "C" fn syscall_sbrk(pending_heap_top: u64) -> u32 {
    let vmctx = unsafe { conjure_vmctx() };

    match sbrk(vmctx, pending_heap_top) {
        Ok(Some(new_heap_top)) => new_heap_top,
        Ok(None) => 0,
        Err(()) => {
            trigger_error(vmctx);
        }
    }
}

#[derive(Clone)]
pub struct SandboxProgram(Arc<SandboxProgramInner>);

struct SandboxProgramInner {
    memory_config: SandboxMemoryConfig,
    ro_data: Vec<u8>,
    rw_data: Vec<u8>,

    code_memory: Mmap,
    code_length: usize,
}

impl super::SandboxProgram for SandboxProgram {
    fn machine_code(&self) -> Cow<[u8]> {
        Cow::Borrowed(&self.0.code_memory.as_slice()[..self.0.code_length])
    }
}

enum Poison {
    None,
    Executing,
    Poisoned,
}

pub struct Sandbox {
    poison: Poison,
    program: Option<SandboxProgram>,
    memory: Mmap,
    guest_memory_offset: usize,
    module: Option<Module>,
}

impl Drop for Sandbox {
    fn drop(&mut self) {
    }
}

impl Sandbox {
    #[inline]
    fn vmctx(&self) -> &VmCtx {
        unsafe {
            &*vmctx_ptr(&self.memory)
        }
    }

    #[inline]
    fn vmctx_mut(&mut self) -> &mut VmCtx {
        unsafe {
            &mut *vmctx_mut_ptr(&mut self.memory)
        }
    }

    fn clear_program(&mut self) -> Result<(), ExecutionError<Error>> {
        let length = self.memory.len() - self.guest_memory_offset;
        let program = self.program.take();

        self.memory.mmap_within(
            self.guest_memory_offset,
            length,
            0
        )?;

        if let Some(program) = program {
            if let Some(program) = Arc::into_inner(program.0) {
                program.code_memory.unmap()?;
            }
        }

        self.vmctx_mut().heap_info.heap_top = 0;
        self.vmctx_mut().heap_info.heap_threshold = 0;
        self.vmctx_mut().memory_map = MemoryMap::empty();

        Ok(())
    }

    fn force_reset_memory(&mut self) -> Result<(), Error> {
        let Some(ref program) = self.program else { return Ok(()) };
        let program = &program.0;
        let memory_map = program.memory_config.memory_map.clone();

        log::trace!("Resetting memory");
        log::trace!(" Read-write data: 0x{:x}..0x{:x}", memory_map.rw_data_address(), memory_map.rw_data_range().end);

        let rw_data_size = memory_map.rw_data_size() as usize;
        if rw_data_size > 0 {
            let offset = self.guest_memory_offset + memory_map.rw_data_address() as usize;
            assert!(program.rw_data.len() <= rw_data_size);

            let copy_range = offset..offset + program.rw_data.len();
            self.memory.as_slice_mut()[copy_range.clone()].copy_from_slice(&program.rw_data);
            log::trace!(" ...copy: 0x{:x}..0x{:x}", copy_range.start - self.guest_memory_offset, copy_range.end - self.guest_memory_offset);

            let native_page_size = get_native_page_size();
            let offset_to_next_native_page = align_to_next_page_usize(native_page_size, offset + program.rw_data.len()).unwrap();

            let fill_range = offset + program.rw_data.len()..offset_to_next_native_page;
            self.memory.as_slice_mut()[fill_range.clone()].fill(0);
            log::trace!(" ...fill: 0x{:x}..0x{:x}", fill_range.start - self.guest_memory_offset, fill_range.end - self.guest_memory_offset);

            let bss_size = memory_map.rw_data_size() as usize - (offset_to_next_native_page - offset);
            if bss_size > 0 {
                log::trace!(" ...mmap: 0x{:x}..0x{:x}", offset_to_next_native_page - self.guest_memory_offset, offset_to_next_native_page + bss_size - self.guest_memory_offset);
                self.memory.mmap_within(
                    offset_to_next_native_page,
                    bss_size,
                    PROT_READ | PROT_WRITE
                )?;
            }
        }

        let stack_size = memory_map.stack_size() as usize;
        if stack_size > 0 {
            self.memory.mmap_within(
                self.guest_memory_offset + memory_map.stack_address_low() as usize,
                stack_size,
                PROT_READ | PROT_WRITE
            )?;
        }

        let initial_heap_threshold = u64::from(memory_map.rw_data_range().end);
        let heap_top = self.vmctx().heap_info.heap_top;
        log::trace!(" Heap: 0x{:x}..0x{:x}", memory_map.heap_base(), heap_top);
        if heap_top > initial_heap_threshold {
            log::trace!(" ...mmap: 0x{:x}..0x{:x}", initial_heap_threshold, heap_top);
            self.memory.mmap_within(
                self.guest_memory_offset + initial_heap_threshold as usize,
                heap_top as usize - initial_heap_threshold as usize,
                0
            )?;
        }

        self.vmctx_mut().heap_info.heap_top = u64::from(program.memory_config.memory_map.heap_base());
        self.vmctx_mut().heap_info.heap_threshold = initial_heap_threshold;

        Ok(())
    }

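    /// Checks that `address..address + length` lies entirely within a single accessible
    /// guest memory region, probing from the highest region (the stack) downwards.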
    fn bound_check_access(&self, address: u32, length: u32) -> Result<(), ()> {
        let memory_map = self.vmctx().memory_map.clone();

        let (start, region_length) = if address >= memory_map.stack_address_low() {
            (memory_map.stack_address_low(), memory_map.stack_size())
        } else if address >= memory_map.rw_data_address() {
            let heap_threshold = self.vmctx().heap_info.heap_threshold as u32;
            if heap_threshold == 0 {
                (memory_map.rw_data_address(), memory_map.rw_data_size())
            } else {
                (memory_map.rw_data_address(), heap_threshold - memory_map.rw_data_address())
            }
        } else if address >= memory_map.ro_data_address() {
            (memory_map.ro_data_address(), memory_map.ro_data_size())
        } else {
            return Err(());
        };

        let Some(address_end) = address.checked_add(length) else { return Err(()) };
        if address_end <= (start + region_length) {
            Ok(())
        } else {
            Err(())
        }
    }

    fn get_memory_slice(&self, address: u32, length: u32) -> Option<&[u8]> {
        self.bound_check_access(address, length).ok()?;
        let range = self.guest_memory_offset + address as usize..self.guest_memory_offset + address as usize + length as usize;
        Some(&self.memory.as_slice()[range])
    }

    fn get_memory_slice_mut(&mut self, address: u32, length: u32) -> Option<&mut [u8]> {
        self.bound_check_access(address, length).ok()?;
        let range = self.guest_memory_offset + address as usize..self.guest_memory_offset + address as usize + length as usize;
        Some(&mut self.memory.as_slice_mut()[range])
    }

    fn execute_impl(&mut self, mut args: ExecuteArgs) -> Result<(), ExecutionError<Error>> {
        if let Some(module) = args.module {
            let compiled_module = <Self as crate::sandbox::Sandbox>::as_compiled_module(module);
            let program = &compiled_module.sandbox_program.0;

            log::trace!("Reconfiguring sandbox...");
            self.clear_program()?;

            let new = &program.memory_config;
            if new.memory_map.ro_data_size() > 0 {
                let offset = self.guest_memory_offset + new.memory_map.ro_data_address() as usize;
                let length = new.memory_map.ro_data_size() as usize;
                assert!(program.ro_data.len() <= length);

                self.memory.modify_and_protect(offset, length, PROT_READ, |slice| {
                    slice[..program.ro_data.len()].copy_from_slice(&program.ro_data);
                })?;

                let memory_address = self.memory.as_ptr() as usize + offset;
                log::trace!(
                    " New rodata range: 0x{:x}-0x{:x} (0x{:x}-0x{:x}) (0x{:x})",
                    memory_address,
                    memory_address + length,
                    new.memory_map.ro_data_address(),
                    new.memory_map.ro_data_address() + new.memory_map.ro_data_size(),
                    new.memory_map.ro_data_size()
                );
            }

            if new.memory_map.rw_data_size() > 0 {
                let offset = self.guest_memory_offset + new.memory_map.rw_data_address() as usize;
                let length = new.memory_map.rw_data_size() as usize;
                assert!(program.rw_data.len() <= length);

                self.memory.modify_and_protect(offset, length, PROT_READ | PROT_WRITE, |slice| {
                    slice[..program.rw_data.len()].copy_from_slice(&program.rw_data);
                })?;

                let memory_address = self.memory.as_ptr() as usize + offset;
                log::trace!(
                    " New rwdata range: 0x{:x}-0x{:x} (0x{:x}-0x{:x}) (0x{:x})",
                    memory_address,
                    memory_address + length,
                    new.memory_map.rw_data_address(),
                    new.memory_map.rw_data_address() + new.memory_map.rw_data_size(),
                    new.memory_map.rw_data_size()
                );
            }

            if new.memory_map.stack_size() > 0 {
                let offset = self.guest_memory_offset + new.memory_map.stack_address_low() as usize;
                let length = new.memory_map.stack_size() as usize;

                self.memory.mprotect(offset, length, PROT_READ | PROT_WRITE)?;

                let memory_address = self.memory.as_ptr() as usize + offset;
                log::trace!(
                    " New stack range: 0x{:x}-0x{:x} (0x{:x}-0x{:x}) (0x{:x})",
                    memory_address,
                    memory_address + length,
                    new.memory_map.stack_address_low(),
                    new.memory_map.stack_address_low() + new.memory_map.stack_size(),
                    new.memory_map.stack_size()
                );
            }

            self.vmctx_mut().heap_info.heap_top = u64::from(program.memory_config.memory_map.heap_base());
            self.vmctx_mut().heap_info.heap_threshold = u64::from(new.memory_map.rw_data_range().end);
            self.vmctx_mut().memory_map = new.memory_map.clone();

            self.program = Some(SandboxProgram(Arc::clone(program)));
            self.module = Some(module.clone());
        }

        if let Some(regs) = args.regs {
            self.vmctx_mut().regs.copy_from_slice(regs);
        }

        if let Some(gas) = crate::sandbox::get_gas(&args, self.module.as_ref().and_then(|module| module.gas_metering())) {
            self.vmctx_mut().gas = gas;
        }

        if args.flags & VM_RPC_FLAG_RESET_MEMORY_BEFORE_EXECUTION != 0 {
            self.force_reset_memory()?;
        }

        if args.sbrk > 0 {
            let new_heap_top = self.vmctx().heap_info.heap_top + u64::from(args.sbrk);

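            // SAFETY: The memory map was set up when the program was loaded, so growing the heap is safe here.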
            match unsafe { sbrk(self.vmctx_mut(), new_heap_top) } {
                Ok(Some(_)) => {},
                Ok(None) => return Err(ExecutionError::Error("initial sbrk failed: cannot grow the heap over the maximum".into())),
                Err(()) => return Err(ExecutionError::Error("initial sbrk failed".into()))
            }
        }

        let mut trap_kind = TrapKind::None;
        if let Some(entry_point) = args.entry_point {
            let entry_point = <Self as crate::sandbox::Sandbox>::as_compiled_module(self.module.as_ref().unwrap()).export_trampolines[entry_point] as usize;

            {
                let Some(program) = self.program.as_ref() else {
                    return Err(ExecutionError::Trap(Trap::default()));
                };

                let code = &program.0.code_memory;
                let address = code.as_ptr() as u64;
                self.vmctx_mut().program_range = address..address + code.len() as u64;
            }
            log::trace!("Jumping to: 0x{:x}", entry_point);

            let hostcall_handler: Option<HostcallHandler> = match args.hostcall_handler {
                Some(ref mut hostcall_handler) => Some(&mut *hostcall_handler),
                None => None,
            };

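            // SAFETY: The handler is only used for the duration of the call below and is
            // cleared from the `VmCtx` before we return, so extending its lifetime to
            // 'static can never result in a dangling reference.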
            let hostcall_handler: Option<HostcallHandler<'static>> = unsafe { core::mem::transmute(hostcall_handler) };
            self.vmctx_mut().hostcall_handler = hostcall_handler;
            self.vmctx_mut().sandbox = self;
            self.vmctx_mut().trap_kind = TrapKind::None;

            #[allow(clippy::undocumented_unsafe_blocks)]
            unsafe {
                let vmctx = vmctx_mut_ptr(&mut self.memory);
                THREAD_VMCTX.with(|thread_ctx| core::ptr::write(thread_ctx.get(), vmctx));

                let guest_memory = self.memory.as_ptr().cast::<u8>().add(self.guest_memory_offset);

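                // Enter the guest: r14 carries the `VmCtx` pointer and r15 the guest memory base.
                // The trampoline below stashes the host's return address and stack pointer into
                // the first two fields of the `VmCtx`, so that `sysreturn` can restore them and
                // jump back here once the guest finishes or traps.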
                core::arch::asm!(r#"
                    push rbp
                    push rbx

                    // Fill in the return address.
                    lea rbx, [rip+1f]
                    mov [r14], rbx

                    // Fill in the return stack pointer.
                    mov [r14 + 8], rsp

                    // Align the stack.
                    sub rsp, 8

                    // Call into the guest program.
                    jmp {entry_point}

                    // We will jump here on exit.
                    1:

                    pop rbx
                    pop rbp
                    "#,
                    entry_point = in(reg) entry_point,
                    clobber_abi("C"),
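                    // The guest can clobber essentially every register, including
                    // the callee-saved ones, so mark them all as dead on exit.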
                    lateout("rax") _,
                    lateout("rcx") _,
                    lateout("rdx") _,
                    lateout("rsi") _,
                    lateout("rdi") _,
                    lateout("r8") _,
                    lateout("r9") _,
                    lateout("r10") _,
                    lateout("r11") _,
                    lateout("r12") _,
                    lateout("r13") _,
                    inlateout("r14") vmctx => _,
                    in("r15") guest_memory,
                );

                THREAD_VMCTX.with(|thread_ctx| core::ptr::write(thread_ctx.get(), core::ptr::null_mut()));
            }

            trap_kind = core::mem::replace(&mut self.vmctx_mut().trap_kind, TrapKind::None);
            self.vmctx_mut().sandbox = core::ptr::null_mut();
            self.vmctx_mut().hostcall_handler = None;
            self.vmctx_mut().return_address = 0;
            self.vmctx_mut().return_stack_pointer = 0;
            self.vmctx_mut().program_range = 0..0;
        }

        if args.flags & VM_RPC_FLAG_CLEAR_PROGRAM_AFTER_EXECUTION != 0 {
            self.clear_program()?;
        } else if args.flags & VM_RPC_FLAG_RESET_MEMORY_AFTER_EXECUTION != 0 {
            self.force_reset_memory()?;
        }

        match trap_kind {
            TrapKind::None => Ok(()),
            TrapKind::Trap => Err(ExecutionError::Trap(Trap::default())),
            TrapKind::Error => Err(ExecutionError::Error("fatal error".into())),
        }
    }
}

impl super::SandboxAddressSpace for Mmap {
    fn native_code_address(&self) -> u64 {
        self.as_ptr() as u64
    }
}

impl super::Sandbox for Sandbox {
    const KIND: SandboxKind = SandboxKind::Generic;

    type Access<'r> = SandboxAccess<'r>;
    type Config = SandboxConfig;
    type Error = Error;
    type Program = SandboxProgram;
    type AddressSpace = Mmap;

    fn as_sandbox_vec(vec: &SandboxVec) -> &Mutex<Vec<Self>> {
        #[allow(clippy::match_wildcard_for_single_variants)]
        match vec {
            SandboxVec::Generic(ref vec) => vec,
            _ => unreachable!(),
        }
    }

    fn as_compiled_module(module: &Module) -> &CompiledModule<Self> {
        match module.compiled_module() {
            CompiledModuleKind::Generic(ref module) => module,
            _ => unreachable!(),
        }
    }

    fn reserve_address_space() -> Result<Self::AddressSpace, Self::Error> {
        Mmap::reserve_address_space(VM_SANDBOX_MAXIMUM_NATIVE_CODE_SIZE as usize + VM_SANDBOX_MAXIMUM_JUMP_TABLE_VIRTUAL_SIZE as usize)
    }

    fn prepare_program(init: SandboxInit, mut map: Self::AddressSpace) -> Result<Self::Program, Self::Error> {
        let native_page_size = get_native_page_size();
        let cfg = init.memory_config(native_page_size)?;

        let jump_table_offset = cfg.code_size as usize;
        let sysreturn_offset = jump_table_offset + (VM_ADDR_JUMP_TABLE_RETURN_TO_HOST - VM_ADDR_JUMP_TABLE) as usize;

        map.modify_and_protect(0, cfg.code_size as usize, PROT_EXEC, |slice| {
            slice[..init.code.len()].copy_from_slice(init.code);
        })?;

        map.modify_and_protect(jump_table_offset, cfg.jump_table_size as usize, PROT_READ, |slice| {
            slice[..init.jump_table.len()].copy_from_slice(init.jump_table);
        })?;

        map.modify_and_protect(sysreturn_offset, native_page_size, PROT_READ, |slice| {
            slice[..8].copy_from_slice(&init.sysreturn_address.to_le_bytes());
        })?;

        log::trace!(
            "New code range: 0x{:x}-0x{:x} (0x{:x})",
            map.as_ptr() as u64,
            map.as_ptr() as u64 + u64::from(cfg.code_size),
            cfg.code_size
        );

        log::trace!(
            "New jump table range: 0x{:x}-0x{:x} (0x{:x})",
            map.as_ptr() as u64 + jump_table_offset as u64,
            map.as_ptr() as u64 + jump_table_offset as u64 + u64::from(cfg.jump_table_size),
            cfg.jump_table_size
        );

        log::trace!(
            "New sysreturn address: 0x{:x} (set at 0x{:x})",
            init.sysreturn_address,
            map.as_ptr() as u64 + sysreturn_offset as u64
        );

        Ok(SandboxProgram(Arc::new(SandboxProgramInner {
            memory_config: cfg,
            ro_data: init.guest_init.ro_data.to_vec(),
            rw_data: init.guest_init.rw_data.to_vec(),
            code_memory: map,
            code_length: init.code.len(),
        })))
    }

    fn spawn(_config: &SandboxConfig) -> Result<Self, Error> {
        register_signal_handlers_if_necessary()?;

        let guest_memory_offset = get_guest_memory_offset();
        let mut memory = Mmap::reserve_address_space(guest_memory_offset + 0x100000000)?;

        polkavm_common::static_assert!(GUEST_MEMORY_TO_VMCTX_OFFSET < 0);
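        // Make the pages in front of the guest's memory (which hold the `VmCtx`) accessible;
        // the guest memory itself stays inaccessible until a program is loaded.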
        memory.mprotect(0, guest_memory_offset, PROT_READ | PROT_WRITE)?;

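        // SAFETY: The `VmCtx` area was just made readable and writable, and is suitably aligned for a `VmCtx`.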
        unsafe {
            core::ptr::write(vmctx_mut_ptr(&mut memory), VmCtx::new());
        }

        Ok(Sandbox {
            poison: Poison::None,
            program: None,
            memory,
            guest_memory_offset,
            module: None,
        })
    }

    fn execute(&mut self, args: ExecuteArgs) -> Result<(), ExecutionError<Self::Error>> {
        if !matches!(self.poison, Poison::None) {
            return Err(ExecutionError::Error("sandbox has been poisoned".into()));
        }

        self.poison = Poison::Executing;
        match self.execute_impl(args) {
            result @ Err(ExecutionError::Error(_)) => {
                self.poison = Poison::Poisoned;
                result
            }
            result @ (Ok(()) | Err(ExecutionError::Trap(_) | ExecutionError::OutOfGas)) => {
                self.poison = Poison::None;
                result
            }
        }
    }

    #[inline]
    fn access(&mut self) -> SandboxAccess {
        SandboxAccess { sandbox: self }
    }

    fn pid(&self) -> Option<u32> {
        None
    }

    fn address_table() -> AddressTable {
        AddressTable::from_raw(AddressTableRaw {
            syscall_hostcall,
            syscall_trap,
            syscall_return,
            syscall_trace,
            syscall_sbrk,
        })
    }

    fn vmctx_regs_offset() -> usize {
        get_field_offset!(VmCtx::new(), |base| base.regs())
    }

    fn vmctx_gas_offset() -> usize {
        get_field_offset!(VmCtx::new(), |base| &base.gas)
    }

    fn vmctx_heap_info_offset() -> usize {
        get_field_offset!(VmCtx::new(), |base| &base.heap_info)
    }

    fn gas_remaining_impl(&self) -> Result<Option<Gas>, super::OutOfGas> {
        let Some(module) = self.module.as_ref() else { return Ok(None) };
        if module.gas_metering().is_none() { return Ok(None) };
        let raw_gas = self.vmctx().gas;
        Gas::from_i64(raw_gas).ok_or(super::OutOfGas).map(Some)
    }

    fn sync(&mut self) -> Result<(), Self::Error> {
        Ok(())
    }
}

pub struct SandboxAccess<'a> {
    sandbox: &'a mut Sandbox,
}

impl<'a> From<SandboxAccess<'a>> for BackendAccess<'a> {
    fn from(access: SandboxAccess<'a>) -> Self {
        BackendAccess::CompiledGeneric(access)
    }
}

impl<'a> Access<'a> for SandboxAccess<'a> {
    type Error = MemoryAccessError<&'static str>;

    fn get_reg(&self, reg: Reg) -> u32 {
        assert!(!matches!(self.sandbox.poison, Poison::Poisoned), "sandbox has been poisoned");
        self.sandbox.vmctx().regs[reg as usize]
    }

    fn set_reg(&mut self, reg: Reg, value: u32) {
        assert!(!matches!(self.sandbox.poison, Poison::Poisoned), "sandbox has been poisoned");
        self.sandbox.vmctx_mut().regs[reg as usize] = value;
    }

    fn read_memory_into_slice<'slice, T>(&self, address: u32, buffer: &'slice mut T) -> Result<&'slice mut [u8], Self::Error>
    where
        T: ?Sized + AsUninitSliceMut,
    {
        let buffer = buffer.as_uninit_slice_mut();
        log::trace!(
            "Reading memory: 0x{:x}-0x{:x} ({} bytes)",
            address,
            address as usize + buffer.len(),
            buffer.len()
        );

        if matches!(self.sandbox.poison, Poison::Poisoned) {
            return Err(MemoryAccessError {
                address,
                length: buffer.len() as u64,
                error: "read failed: sandbox has been poisoned",
            });
        }

        let Some(slice) = self.sandbox.get_memory_slice(address, buffer.len() as u32) else {
            return Err(MemoryAccessError {
                address,
                length: buffer.len() as u64,
                error: "out of range read",
            });
        };

        Ok(byte_slice_init(buffer, slice))
    }

    fn write_memory(&mut self, address: u32, data: &[u8]) -> Result<(), Self::Error> {
        log::trace!(
            "Writing memory: 0x{:x}-0x{:x} ({} bytes)",
            address,
            address as usize + data.len(),
            data.len()
        );

        if matches!(self.sandbox.poison, Poison::Poisoned) {
            return Err(MemoryAccessError {
                address,
                length: data.len() as u64,
                error: "write failed: sandbox has been poisoned",
            });
        }

        let Some(slice) = self.sandbox.get_memory_slice_mut(address, data.len() as u32) else {
            return Err(MemoryAccessError {
                address,
                length: data.len() as u64,
                error: "out of range write",
            });
        };

        slice.copy_from_slice(data);
        Ok(())
    }

    fn sbrk(&mut self, size: u32) -> Option<u32> {
        let new_heap_top = self.sandbox.vmctx().heap_info.heap_top + u64::from(size);

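        // SAFETY: The sandbox is alive and its memory map is set up, so growing the heap through its `VmCtx` is safe.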
        match unsafe { sbrk(self.sandbox.vmctx_mut(), new_heap_top) } {
            Ok(result) => result,
            Err(()) => panic!("sbrk failed")
        }
    }

    fn heap_size(&self) -> u32 {
        let Some(program) = self.sandbox.program.as_ref() else { return 0 };
        let heap_base = program.0.memory_config.memory_map.heap_base();
        let heap_top = self.sandbox.vmctx().heap_info.heap_top;
        (heap_top - u64::from(heap_base)) as u32
    }

    fn program_counter(&self) -> Option<u32> {
        self.sandbox.vmctx().instruction_number
    }

    fn native_program_counter(&self) -> Option<u64> {
        self.sandbox.vmctx().native_program_counter
    }

    fn gas_remaining(&self) -> Option<Gas> {
        use super::Sandbox;
        self.sandbox.gas_remaining_impl().ok().unwrap_or(Some(Gas::MIN))
    }

    fn consume_gas(&mut self, gas: u64) {
        if self.sandbox.module.as_ref().and_then(|module| module.gas_metering()).is_none() {
            return;
        }

        let gas_remaining = &mut self.sandbox.vmctx_mut().gas;
        *gas_remaining = gas_remaining.checked_sub_unsigned(gas).unwrap_or(-1);
    }
}