use crate::utils::{align_to_next_page_u32, align_to_next_page_u64};
use core::ops::Range;

const ADDRESS_SPACE_SIZE: u64 = 0x100000000_u64;

/// The minimum page size of the VM.
pub const VM_MIN_PAGE_SIZE: u32 = 0x1000;

/// The maximum page size of the VM.
pub const VM_MAX_PAGE_SIZE: u32 = 0x10000;

static_assert!(VM_MIN_PAGE_SIZE <= VM_MAX_PAGE_SIZE);

/// The address at which the guest program's stack starts; the stack grows downwards from here.
pub const VM_ADDR_USER_STACK_HIGH: u32 = (ADDRESS_SPACE_SIZE - VM_MAX_PAGE_SIZE as u64) as u32;

/// The special address which, when jumped to, returns control back to the host.
pub const VM_ADDR_RETURN_TO_HOST: u32 = 0xffff0000;
static_assert!(VM_ADDR_RETURN_TO_HOST & 0b11 == 0);

/// The maximum number of instructions a program can have.
pub const VM_MAXIMUM_INSTRUCTION_COUNT: u32 = 2 * 1024 * 1024;

/// The maximum number of imports a program can have.
pub const VM_MAXIMUM_IMPORT_COUNT: u32 = 1024;

/// The maximum number of exports a program can have.
pub const VM_MAXIMUM_EXPORT_COUNT: u32 = 1024;

/// The required alignment of code addresses.
pub const VM_CODE_ADDRESS_ALIGNMENT: u32 = 4;

/// The memory map of a guest program.
#[derive(Clone)]
#[repr(C)]
pub struct MemoryMap {
    page_size: u32,
    ro_data_size: u32,
    rw_data_size: u32,
    stack_size: u32,
    heap_base: u32,
    max_heap_size: u32,
}

impl MemoryMap {
    /// Creates an empty memory map with every field set to zero.
    #[inline]
    pub const fn empty() -> Self {
        Self {
            page_size: 0,
            ro_data_size: 0,
            rw_data_size: 0,
            stack_size: 0,
            heap_base: 0,
            max_heap_size: 0,
        }
    }

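    /// Creates a new memory map for a guest program with the given page size and
    /// read-only data, read-write data, and stack sizes (all in bytes).
    ///
    /// The section sizes are rounded up to `page_size`, and the sections are laid out
    /// bottom-up in the 32-bit address space: the first `VM_MAX_PAGE_SIZE` bytes are left
    /// unused, followed by the read-only data, the read-write data (out of which the heap
    /// grows), and finally the stack, which ends at `VM_ADDR_USER_STACK_HIGH`; each section
    /// is separated from its neighbours by a gap of at least `VM_MAX_PAGE_SIZE` bytes.
    /// Returns an error if the page size is invalid or the sections don't fit into the
    /// address space.
    ///
    /// The example below reuses the values from `test_memory_map` at the bottom of this file:
    ///
    /// ```ignore
    /// let map = MemoryMap::new(0x4000, 1, 1, 1).unwrap();
    /// assert_eq!(map.ro_data_address(), 0x10000);
    /// assert_eq!(map.rw_data_address(), 0x30000);
    /// assert_eq!(map.heap_base(), 0x30001);
    /// ```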
    pub fn new(page_size: u32, ro_data_size: u32, rw_data_size: u32, stack_size: u32) -> Result<Self, &'static str> {
        if page_size < VM_MIN_PAGE_SIZE {
            return Err("invalid page size: page size is too small");
        }

        if page_size > VM_MAX_PAGE_SIZE {
            return Err("invalid page size: page size is too big");
        }

        if !page_size.is_power_of_two() {
            return Err("invalid page size: page size is not a power of two");
        }

        let Some(ro_data_address_space) = align_to_next_page_u64(u64::from(VM_MAX_PAGE_SIZE), u64::from(ro_data_size)) else {
            return Err("the size of read-only data is too big");
        };

        let Some(ro_data_size) = align_to_next_page_u32(page_size, ro_data_size) else {
            return Err("the size of read-only data is too big");
        };

        let Some(rw_data_address_space) = align_to_next_page_u64(u64::from(VM_MAX_PAGE_SIZE), u64::from(rw_data_size)) else {
            return Err("the size of read-write data is too big");
        };

        let original_rw_data_size = rw_data_size;
        let Some(rw_data_size) = align_to_next_page_u32(page_size, rw_data_size) else {
            return Err("the size of read-write data is too big");
        };

        let Some(stack_address_space) = align_to_next_page_u64(u64::from(VM_MAX_PAGE_SIZE), u64::from(stack_size)) else {
            return Err("the size of the stack is too big");
        };

        let Some(stack_size) = align_to_next_page_u32(page_size, stack_size) else {
            return Err("the size of the stack is too big");
        };

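        // Lay out the address space from the bottom up; each section's reserved
        // address space is rounded up to VM_MAX_PAGE_SIZE and separated from its
        // neighbours by a VM_MAX_PAGE_SIZE-sized gap.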
        let mut address_low: u64 = 0;

        address_low += u64::from(VM_MAX_PAGE_SIZE);
        address_low += ro_data_address_space;
        address_low += u64::from(VM_MAX_PAGE_SIZE);

        // The heap starts right at the end of the read-write data as originally given,
        // before it was rounded up to the page size.
        let heap_base = address_low + u64::from(original_rw_data_size);
        address_low += rw_data_address_space;
        // Space between the heap's base and the end of the read-write region's reserved
        // address space; it is already available to the heap.
        let heap_slack = address_low - heap_base;
        address_low += u64::from(VM_MAX_PAGE_SIZE);

        let mut address_high: u64 = u64::from(VM_ADDR_USER_STACK_HIGH);
        address_high -= stack_address_space;

        if address_low > address_high {
            return Err("maximum memory size exceeded");
        }

        // The heap can grow from `heap_base` up to a `VM_MAX_PAGE_SIZE`-sized gap
        // below the stack's reserved address space.
        let max_heap_size = address_high - address_low + heap_slack;

        Ok(Self {
            page_size,
            ro_data_size,
            rw_data_size,
            stack_size,
            heap_base: heap_base as u32,
            max_heap_size: max_heap_size as u32,
        })
    }

    /// The page size of the program.
    #[inline]
    pub fn page_size(&self) -> u32 {
        self.page_size
    }

    /// The address at which the program's heap starts.
    #[inline]
    pub fn heap_base(&self) -> u32 {
        self.heap_base
    }

    /// The maximum size to which the program's heap can grow.
    #[inline]
    pub fn max_heap_size(&self) -> u32 {
        self.max_heap_size
    }

    /// The address at which the program's read-only data starts.
    #[inline]
    pub fn ro_data_address(&self) -> u32 {
        VM_MAX_PAGE_SIZE
    }

    /// The size of the program's read-only data.
    #[inline]
    pub fn ro_data_size(&self) -> u32 {
        self.ro_data_size
    }

    /// The address range of the program's read-only data.
    #[inline]
    pub fn ro_data_range(&self) -> Range<u32> {
        self.ro_data_address()..self.ro_data_address() + self.ro_data_size()
    }

    /// The address at which the program's read-write data starts.
    #[inline]
    pub fn rw_data_address(&self) -> u32 {
        // End of the read-only section rounded up to VM_MAX_PAGE_SIZE,
        // plus a VM_MAX_PAGE_SIZE-sized gap.
        match align_to_next_page_u32(VM_MAX_PAGE_SIZE, self.ro_data_address() + self.ro_data_size) {
            Some(offset) => offset + VM_MAX_PAGE_SIZE,
            None => unreachable!(),
        }
    }

    /// The size of the program's read-write data.
    #[inline]
    pub fn rw_data_size(&self) -> u32 {
        self.rw_data_size
    }

    /// The address range of the program's read-write data.
    #[inline]
    pub fn rw_data_range(&self) -> Range<u32> {
        self.rw_data_address()..self.rw_data_address() + self.rw_data_size()
    }

    /// The lowest address of the program's stack.
    #[inline]
    pub fn stack_address_low(&self) -> u32 {
        self.stack_address_high() - self.stack_size
    }

    /// The highest address of the program's stack.
    #[inline]
    pub fn stack_address_high(&self) -> u32 {
        VM_ADDR_USER_STACK_HIGH
    }

    /// The size of the program's stack.
    #[inline]
    pub fn stack_size(&self) -> u32 {
        self.stack_size
    }

    /// The address range of the program's stack.
    #[inline]
    pub fn stack_range(&self) -> Range<u32> {
        self.stack_address_low()..self.stack_address_high()
    }
}

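// Sanity-checks the layout computed by `MemoryMap::new` for a typical case and for
// maximally-sized read-only data, read-write data, and stack sections.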
#[test]
fn test_memory_map() {
    {
        let map = MemoryMap::new(0x4000, 1, 1, 1).unwrap();
        assert_eq!(map.ro_data_address(), 0x10000);
        assert_eq!(map.ro_data_size(), 0x4000);
        assert_eq!(map.rw_data_address(), 0x30000);
        assert_eq!(map.rw_data_size(), 0x4000);
        assert_eq!(map.stack_size(), 0x4000);
        assert_eq!(map.stack_address_high(), 0xffff0000);
        assert_eq!(map.stack_address_low(), 0xfffec000);

        assert_eq!(map.heap_base(), 0x30001);
        assert_eq!(
            u64::from(map.max_heap_size()),
            ADDRESS_SPACE_SIZE - u64::from(VM_MAX_PAGE_SIZE) * 3 - u64::from(map.heap_base())
        );
    }

    let max_size = (ADDRESS_SPACE_SIZE - u64::from(VM_MAX_PAGE_SIZE) * 4) as u32;

    {
        let map = MemoryMap::new(0x4000, max_size, 0, 0).unwrap();
        assert_eq!(map.ro_data_address(), 0x10000);
        assert_eq!(map.ro_data_size(), max_size);
        assert_eq!(map.rw_data_address(), map.ro_data_address() + VM_MAX_PAGE_SIZE + max_size);
        assert_eq!(map.rw_data_size(), 0);
        assert_eq!(map.stack_address_high(), VM_ADDR_USER_STACK_HIGH);
        assert_eq!(map.stack_address_low(), VM_ADDR_USER_STACK_HIGH);
        assert_eq!(map.stack_size(), 0);

        assert_eq!(map.heap_base(), map.rw_data_address());
        assert_eq!(map.max_heap_size(), 0);
    }

    assert!(MemoryMap::new(0x4000, max_size + 1, 0, 0).is_err());
    assert!(MemoryMap::new(0x4000, max_size, 1, 0).is_err());
    assert!(MemoryMap::new(0x4000, max_size, 0, 1).is_err());

    {
        let map = MemoryMap::new(0x4000, 0, max_size, 0).unwrap();
        assert_eq!(map.ro_data_address(), VM_MAX_PAGE_SIZE);
        assert_eq!(map.ro_data_size(), 0);
        assert_eq!(map.rw_data_address(), VM_MAX_PAGE_SIZE * 2);
        assert_eq!(map.rw_data_size(), max_size);
        assert_eq!(map.stack_address_high(), VM_ADDR_USER_STACK_HIGH);
        assert_eq!(map.stack_address_low(), VM_ADDR_USER_STACK_HIGH);
        assert_eq!(map.stack_size(), 0);

        assert_eq!(map.heap_base(), map.rw_data_address() + map.rw_data_size());
        assert_eq!(map.max_heap_size(), 0);
    }

    {
        let map = MemoryMap::new(0x4000, 0, 0, max_size).unwrap();
        assert_eq!(map.ro_data_address(), VM_MAX_PAGE_SIZE);
        assert_eq!(map.ro_data_size(), 0);
        assert_eq!(map.rw_data_address(), VM_MAX_PAGE_SIZE * 2);
        assert_eq!(map.rw_data_size(), 0);
        assert_eq!(map.stack_address_high(), VM_ADDR_USER_STACK_HIGH);
        assert_eq!(map.stack_address_low(), VM_ADDR_USER_STACK_HIGH - max_size);
        assert_eq!(map.stack_size(), max_size);

        assert_eq!(map.heap_base(), map.rw_data_address());
        assert_eq!(map.max_heap_size(), 0);
    }
}