// polkavm_common/abi.rs

//! Everything in this module affects the ABI of the guest programs, either by affecting
//! their observable behavior (no matter how obscure), or changing which programs are accepted by the VM.

use crate::utils::{align_to_next_page_u32, align_to_next_page_u64};
use core::ops::Range;

/// The size of the guest's address space: 4 GiB (a full 32-bit address space).
const ADDRESS_SPACE_SIZE: u64 = 0x100000000_u64;

/// The minimum page size of the VM.
pub const VM_MIN_PAGE_SIZE: u32 = 0x1000;

/// The maximum page size of the VM.
pub const VM_MAX_PAGE_SIZE: u32 = 0x10000;

static_assert!(VM_MIN_PAGE_SIZE <= VM_MAX_PAGE_SIZE);

/// The address at which the program's stack starts inside of the VM.
///
/// This is directly accessible by the program running inside of the VM.
// Sits exactly `VM_MAX_PAGE_SIZE` below the top of the 4 GiB address space,
// leaving a `VM_MAX_PAGE_SIZE`-sized region above the stack.
pub const VM_ADDR_USER_STACK_HIGH: u32 = (ADDRESS_SPACE_SIZE - VM_MAX_PAGE_SIZE as u64) as u32;

/// The address which, when jumped to, will return to the host.
///
/// There isn't actually anything there; it's just a virtual address.
pub const VM_ADDR_RETURN_TO_HOST: u32 = 0xffff0000;
// Must satisfy the code address alignment (see `VM_CODE_ADDRESS_ALIGNMENT`)
// so it is a valid jump target.
static_assert!(VM_ADDR_RETURN_TO_HOST & 0b11 == 0);

/// The maximum number of VM instructions a program can be composed of.
pub const VM_MAXIMUM_INSTRUCTION_COUNT: u32 = 2 * 1024 * 1024;

/// The maximum number of functions the program can import.
pub const VM_MAXIMUM_IMPORT_COUNT: u32 = 1024;

/// The maximum number of functions the program can export.
pub const VM_MAXIMUM_EXPORT_COUNT: u32 = 1024;

/// The minimum required alignment of runtime code pointers.
// TODO: Support the C extension in the linker and lower this to 2.
pub const VM_CODE_ADDRESS_ALIGNMENT: u32 = 4;
40
/// The memory map of a given guest program.
#[derive(Clone)]
#[repr(C)] // NOTE: Used on the host <-> zygote boundary.
pub struct MemoryMap {
    /// The page size of the program; sizes below are multiples of it.
    page_size: u32,
    /// Size of the read-only data section, rounded up to `page_size`.
    ro_data_size: u32,
    /// Size of the read-write data section, rounded up to `page_size`.
    rw_data_size: u32,
    /// Size of the stack, rounded up to `page_size`.
    stack_size: u32,
    /// The address at which the heap starts (just past the original,
    /// unaligned end of the read-write data).
    heap_base: u32,
    /// How many bytes the heap can grow beyond `heap_base`.
    max_heap_size: u32,
}
52
impl MemoryMap {
    /// Creates an empty memory map.
    #[inline]
    pub const fn empty() -> Self {
        Self {
            page_size: 0,
            ro_data_size: 0,
            rw_data_size: 0,
            stack_size: 0,
            heap_base: 0,
            max_heap_size: 0,
        }
    }

    /// Calculates the memory map from the given parameters.
    ///
    /// The guest address space is laid out, from low to high addresses, as:
    /// a `VM_MAX_PAGE_SIZE`-sized gap, read-only data, a gap, read-write data,
    /// a gap, free space for the heap up to the stack, the stack (ending at
    /// `VM_ADDR_USER_STACK_HIGH`), and a final gap up to 4 GiB. The gaps are
    /// presumably guard regions — the actual mapping happens elsewhere.
    ///
    /// Returns an error when `page_size` is invalid or when the sections do
    /// not fit into the address space.
    pub fn new(page_size: u32, ro_data_size: u32, rw_data_size: u32, stack_size: u32) -> Result<Self, &'static str> {
        if page_size < VM_MIN_PAGE_SIZE {
            return Err("invalid page size: page size is too small");
        }

        if page_size > VM_MAX_PAGE_SIZE {
            return Err("invalid page size: page size is too big");
        }

        if !page_size.is_power_of_two() {
            return Err("invalid page size: page size is not a power of two");
        }

        // Each section reserves address space rounded up to `VM_MAX_PAGE_SIZE`
        // (computed in u64 so overflow is caught), which makes the layout
        // independent of the actual `page_size`. The section's accessible size
        // is only rounded up to the configured `page_size`.
        let Some(ro_data_address_space) = align_to_next_page_u64(u64::from(VM_MAX_PAGE_SIZE), u64::from(ro_data_size)) else {
            return Err("the size of read-only data is too big");
        };

        let Some(ro_data_size) = align_to_next_page_u32(page_size, ro_data_size) else {
            return Err("the size of read-only data is too big");
        };

        let Some(rw_data_address_space) = align_to_next_page_u64(u64::from(VM_MAX_PAGE_SIZE), u64::from(rw_data_size)) else {
            return Err("the size of read-write data is too big");
        };

        // Keep the original, unaligned size around: the heap starts right
        // past the initialized read-write data, not past its aligned end.
        let original_rw_data_size = rw_data_size;
        let Some(rw_data_size) = align_to_next_page_u32(page_size, rw_data_size) else {
            return Err("the size of read-write data is too big");
        };

        let Some(stack_address_space) = align_to_next_page_u64(u64::from(VM_MAX_PAGE_SIZE), u64::from(stack_size)) else {
            return Err("the size of the stack is too big");
        };

        let Some(stack_size) = align_to_next_page_u32(page_size, stack_size) else {
            return Err("the size of the stack is too big");
        };

        // Accumulate the low end of the layout: gap, read-only data, gap.
        let mut address_low: u64 = 0;

        address_low += u64::from(VM_MAX_PAGE_SIZE);
        address_low += ro_data_address_space;
        address_low += u64::from(VM_MAX_PAGE_SIZE);

        // The heap begins immediately after the initialized read-write data;
        // `heap_slack` is the already-reserved tail of the read-write region
        // (up to its `VM_MAX_PAGE_SIZE`-aligned end) the heap grows into for free.
        let heap_base = address_low + u64::from(original_rw_data_size);
        address_low += rw_data_address_space;
        let heap_slack = address_low - heap_base;
        address_low += u64::from(VM_MAX_PAGE_SIZE);

        // The stack occupies the top of the usable space, growing down from
        // `VM_ADDR_USER_STACK_HIGH`.
        let mut address_high: u64 = u64::from(VM_ADDR_USER_STACK_HIGH);
        address_high -= stack_address_space;

        if address_low > address_high {
            return Err("maximum memory size exceeded");
        }

        // The heap gets the slack plus all of the free space between the
        // read-write section's trailing gap and the bottom of the stack.
        let max_heap_size = address_high - address_low + heap_slack;

        Ok(Self {
            page_size,
            ro_data_size,
            rw_data_size,
            stack_size,
            heap_base: heap_base as u32,
            max_heap_size: max_heap_size as u32,
        })
    }

    /// The page size of the program.
    #[inline]
    pub fn page_size(&self) -> u32 {
        self.page_size
    }

    /// The address at which the program's heap starts.
    #[inline]
    pub fn heap_base(&self) -> u32 {
        self.heap_base
    }

    /// The maximum size of the program's heap.
    #[inline]
    pub fn max_heap_size(&self) -> u32 {
        self.max_heap_size
    }

    /// The address at where the program's read-only data starts inside of the VM.
    // Fixed: read-only data always starts right after the first
    // `VM_MAX_PAGE_SIZE`-sized region at address zero.
    #[inline]
    pub fn ro_data_address(&self) -> u32 {
        VM_MAX_PAGE_SIZE
    }

    /// The size of the program's read-only data.
    #[inline]
    pub fn ro_data_size(&self) -> u32 {
        self.ro_data_size
    }

    /// The range of addresses where the program's read-only data is inside of the VM.
    #[inline]
    pub fn ro_data_range(&self) -> Range<u32> {
        self.ro_data_address()..self.ro_data_address() + self.ro_data_size()
    }

    /// The address at where the program's read-write data starts inside of the VM.
    #[inline]
    pub fn rw_data_address(&self) -> u32 {
        // End of read-only data rounded up to `VM_MAX_PAGE_SIZE`, plus one
        // more `VM_MAX_PAGE_SIZE`-sized gap; mirrors the layout in `new`.
        match align_to_next_page_u32(VM_MAX_PAGE_SIZE, self.ro_data_address() + self.ro_data_size) {
            Some(offset) => offset + VM_MAX_PAGE_SIZE,
            None => unreachable!(),
        }
    }

    /// The size of the program's read-write data.
    #[inline]
    pub fn rw_data_size(&self) -> u32 {
        self.rw_data_size
    }

    /// The range of addresses where the program's read-write data is inside of the VM.
    #[inline]
    pub fn rw_data_range(&self) -> Range<u32> {
        self.rw_data_address()..self.rw_data_address() + self.rw_data_size()
    }

    /// The address at where the program's stack starts inside of the VM.
    #[inline]
    pub fn stack_address_low(&self) -> u32 {
        self.stack_address_high() - self.stack_size
    }

    /// The address at where the program's stack ends inside of the VM.
    #[inline]
    pub fn stack_address_high(&self) -> u32 {
        VM_ADDR_USER_STACK_HIGH
    }

    /// The size of the program's stack.
    #[inline]
    pub fn stack_size(&self) -> u32 {
        self.stack_size
    }

    /// The range of addresses where the program's stack is inside of the VM.
    #[inline]
    pub fn stack_range(&self) -> Range<u32> {
        self.stack_address_low()..self.stack_address_high()
    }
}
217
#[test]
fn test_memory_map() {
    // Minimal non-empty sections: everything rounds up to a single 0x4000 page.
    {
        let m = MemoryMap::new(0x4000, 1, 1, 1).unwrap();
        assert_eq!(m.ro_data_address(), 0x10000);
        assert_eq!(m.ro_data_size(), 0x4000);
        assert_eq!(m.rw_data_address(), 0x30000);
        assert_eq!(m.rw_data_size(), 0x4000);
        assert_eq!(m.stack_size(), 0x4000);
        assert_eq!(m.stack_address_high(), 0xffff0000);
        assert_eq!(m.stack_address_low(), 0xfffec000);

        // Heap begins right past the single initialized read-write byte.
        assert_eq!(m.heap_base(), 0x30001);
        assert_eq!(
            u64::from(m.max_heap_size()),
            ADDRESS_SPACE_SIZE - u64::from(VM_MAX_PAGE_SIZE) * 3 - u64::from(m.heap_base())
        );
    }

    // The biggest size any single section can have: the whole address space
    // minus the four inter-section gaps.
    let max_size = (ADDRESS_SPACE_SIZE - u64::from(VM_MAX_PAGE_SIZE) * 4) as u32;

    // Read-only data takes the whole address space.
    {
        let m = MemoryMap::new(0x4000, max_size, 0, 0).unwrap();
        assert_eq!(m.ro_data_address(), VM_MAX_PAGE_SIZE);
        assert_eq!(m.ro_data_size(), max_size);
        assert_eq!(m.rw_data_address(), m.ro_data_address() + VM_MAX_PAGE_SIZE + max_size);
        assert_eq!(m.rw_data_size(), 0);
        assert_eq!(m.stack_address_high(), VM_ADDR_USER_STACK_HIGH);
        assert_eq!(m.stack_address_low(), VM_ADDR_USER_STACK_HIGH);
        assert_eq!(m.stack_size(), 0);

        assert_eq!(m.heap_base(), m.rw_data_address());
        assert_eq!(m.max_heap_size(), 0);
    }

    // One byte over the limit in any section must be rejected.
    assert!(MemoryMap::new(0x4000, max_size + 1, 0, 0).is_err());
    assert!(MemoryMap::new(0x4000, max_size, 1, 0).is_err());
    assert!(MemoryMap::new(0x4000, max_size, 0, 1).is_err());

    // Read-write data takes the whole address space.
    {
        let m = MemoryMap::new(0x4000, 0, max_size, 0).unwrap();
        assert_eq!(m.ro_data_address(), VM_MAX_PAGE_SIZE);
        assert_eq!(m.ro_data_size(), 0);
        assert_eq!(m.rw_data_address(), VM_MAX_PAGE_SIZE * 2);
        assert_eq!(m.rw_data_size(), max_size);
        assert_eq!(m.stack_address_high(), VM_ADDR_USER_STACK_HIGH);
        assert_eq!(m.stack_address_low(), VM_ADDR_USER_STACK_HIGH);
        assert_eq!(m.stack_size(), 0);

        assert_eq!(m.heap_base(), m.rw_data_address() + m.rw_data_size());
        assert_eq!(m.max_heap_size(), 0);
    }

    // Stack takes the whole address space.
    {
        let m = MemoryMap::new(0x4000, 0, 0, max_size).unwrap();
        assert_eq!(m.ro_data_address(), VM_MAX_PAGE_SIZE);
        assert_eq!(m.ro_data_size(), 0);
        assert_eq!(m.rw_data_address(), VM_MAX_PAGE_SIZE * 2);
        assert_eq!(m.rw_data_size(), 0);
        assert_eq!(m.stack_address_high(), VM_ADDR_USER_STACK_HIGH);
        assert_eq!(m.stack_address_low(), VM_ADDR_USER_STACK_HIGH - max_size);
        assert_eq!(m.stack_size(), max_size);

        assert_eq!(m.heap_base(), m.rw_data_address());
        assert_eq!(m.max_heap_size(), 0);
    }
}