// wasmtime_runtime/instance/allocator.rs

use crate::imports::Imports;
2use crate::instance::{Instance, InstanceHandle, RuntimeMemoryCreator};
3use crate::memory::{DefaultMemoryCreator, Memory};
4use crate::table::Table;
5use crate::{CompiledModuleId, ModuleRuntimeInfo, Store};
6use anyhow::{anyhow, bail, Result};
7use std::alloc;
8use std::any::Any;
9use std::convert::TryFrom;
10use std::ptr;
11use std::sync::Arc;
12use wasmtime_environ::{
13 DefinedMemoryIndex, DefinedTableIndex, HostPtr, InitMemory, MemoryInitialization,
14 MemoryInitializer, Module, PrimaryMap, TableInitialization, TableInitializer, Trap, VMOffsets,
15 WasmType, WASM_PAGE_SIZE,
16};
17
18#[cfg(feature = "pooling-allocator")]
19mod pooling;
20
21#[cfg(feature = "pooling-allocator")]
22pub use self::pooling::{InstanceLimits, PoolingInstanceAllocator, PoolingInstanceAllocatorConfig};
23
/// Everything an [`InstanceAllocator`] needs in order to allocate and
/// initialize a new instance of a compiled module.
pub struct InstanceAllocationRequest<'a> {
    /// Runtime metadata for the compiled module (module layout, memory
    /// images, ...) — queried by allocators via `runtime_info.module()`.
    pub runtime_info: &'a Arc<dyn ModuleRuntimeInfo>,

    /// The resolved imports the new instance links against.
    pub imports: Imports<'a>,

    /// Arbitrary host-provided state stored alongside the instance.
    pub host_state: Box<dyn Any + Send + Sync>,

    /// Possibly-empty pointer to the owning store; must be non-empty when the
    /// module defines memories or tables (see `OnDemandInstanceAllocator`).
    pub store: StorePtr,
}
55
56pub struct StorePtr(Option<*mut dyn Store>);
62impl StorePtr {
63 pub fn empty() -> Self {
65 Self(None)
66 }
67 pub fn new(ptr: *mut dyn Store) -> Self {
69 Self(Some(ptr))
70 }
71 pub fn as_raw(&self) -> Option<*mut dyn Store> {
73 self.0.clone()
74 }
75 pub(crate) unsafe fn get(&mut self) -> Option<&mut dyn Store> {
78 match self.0 {
79 Some(ptr) => Some(&mut *ptr),
80 None => None,
81 }
82 }
83}
84
/// Trait implemented by instance allocation strategies (on-demand, pooling).
///
/// `unsafe` to implement: implementors must uphold the allocation/deallocation
/// pairing that the default `allocate`/`deallocate` methods rely on —
/// resources handed out by `allocate_*` must remain valid until the matching
/// `deallocate_*` call.
pub unsafe trait InstanceAllocator {
    /// Optional pre-allocation validation hook; the default accepts any
    /// module. Pooling allocators override this to enforce limits.
    fn validate(&self, module: &Module, offsets: &VMOffsets<HostPtr>) -> Result<()> {
        // Consume the arguments so the default impl compiles without
        // unused-parameter warnings.
        drop((module, offsets));
        Ok(())
    }

    /// Allocates a fresh instance: reserves an index, then memories, then
    /// tables, rolling everything back if any step fails.
    fn allocate(&self, mut req: InstanceAllocationRequest) -> Result<InstanceHandle> {
        let index = self.allocate_index(&req)?;
        let module = req.runtime_info.module();
        // Capacity is the number of *defined* (non-imported) entities.
        let mut memories =
            PrimaryMap::with_capacity(module.memory_plans.len() - module.num_imported_memories);
        let mut tables =
            PrimaryMap::with_capacity(module.table_plans.len() - module.num_imported_tables);

        let result = self
            .allocate_memories(index, &mut req, &mut memories)
            .and_then(|()| self.allocate_tables(index, &mut req, &mut tables));
        if let Err(e) = result {
            // Roll back partial allocations before releasing the index so the
            // allocator never leaks memories/tables for a dead instance.
            self.deallocate_memories(index, &mut memories);
            self.deallocate_tables(index, &mut tables);
            self.deallocate_index(index);
            return Err(e);
        }

        unsafe { Ok(Instance::new(req, index, memories, tables)) }
    }

    /// Deallocates an instance previously produced by `allocate`, releasing
    /// memories, tables, the instance's heap allocation, and finally its index.
    fn deallocate(&self, handle: &mut InstanceHandle) {
        let index = handle.instance().index;
        self.deallocate_memories(index, &mut handle.instance_mut().memories);
        self.deallocate_tables(index, &mut handle.instance_mut().tables);
        unsafe {
            // Compute the layout *before* dropping the instance, then run the
            // destructor in place and free the raw allocation. The handle's
            // pointer is nulled so later use is a loud crash, not a UAF read.
            let layout = Instance::alloc_layout(handle.instance().offsets());
            ptr::drop_in_place(handle.instance);
            alloc::dealloc(handle.instance.cast(), layout);
            handle.instance = std::ptr::null_mut();
        }
        self.deallocate_index(index);
    }

    /// Reserves an allocator-specific slot index for a new instance.
    fn allocate_index(&self, req: &InstanceAllocationRequest) -> Result<usize>;

    /// Releases an index previously returned by `allocate_index`.
    fn deallocate_index(&self, index: usize);

    /// Allocates all defined memories for the instance at `index` into `mems`.
    fn allocate_memories(
        &self,
        index: usize,
        req: &mut InstanceAllocationRequest,
        mems: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
    ) -> Result<()>;

    /// Deallocates memories produced by `allocate_memories` (possibly a
    /// partial set, when allocation failed midway).
    fn deallocate_memories(&self, index: usize, mems: &mut PrimaryMap<DefinedMemoryIndex, Memory>);

    /// Allocates all defined tables for the instance at `index` into `tables`.
    fn allocate_tables(
        &self,
        index: usize,
        req: &mut InstanceAllocationRequest,
        tables: &mut PrimaryMap<DefinedTableIndex, Table>,
    ) -> Result<()>;

    /// Deallocates tables produced by `allocate_tables` (possibly partial).
    fn deallocate_tables(&self, index: usize, tables: &mut PrimaryMap<DefinedTableIndex, Table>);

    /// Allocates a stack for running async wasm.
    #[cfg(feature = "async")]
    fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack>;

    /// # Safety
    ///
    /// `stack` must have come from `allocate_fiber_stack` on this allocator
    /// and must no longer be in use.
    #[cfg(feature = "async")]
    unsafe fn deallocate_fiber_stack(&self, stack: &wasmtime_fiber::FiberStack);

    /// Drops any allocator-internal state cached for `module`.
    fn purge_module(&self, module: CompiledModuleId);
}
202
203fn get_table_init_start(init: &TableInitializer, instance: &mut Instance) -> Result<u32> {
204 match init.base {
205 Some(base) => {
206 let val = unsafe { *(*instance.defined_or_imported_global_ptr(base)).as_u32() };
207
208 init.offset
209 .checked_add(val)
210 .ok_or_else(|| anyhow!("element segment global base overflows"))
211 }
212 None => Ok(init.offset),
213 }
214}
215
216fn check_table_init_bounds(instance: &mut Instance, module: &Module) -> Result<()> {
217 match &module.table_initialization {
218 TableInitialization::FuncTable { segments, .. }
219 | TableInitialization::Segments { segments } => {
220 for segment in segments {
221 let table = unsafe { &*instance.get_table(segment.table_index) };
222 let start = get_table_init_start(segment, instance)?;
223 let start = usize::try_from(start).unwrap();
224 let end = start.checked_add(segment.elements.len());
225
226 match end {
227 Some(end) if end <= table.size() as usize => {
228 }
230 _ => {
231 bail!("table out of bounds: elements segment does not fit")
232 }
233 }
234 }
235 }
236 }
237
238 Ok(())
239}
240
241fn initialize_tables(instance: &mut Instance, module: &Module) -> Result<()> {
242 match &module.table_initialization {
250 TableInitialization::FuncTable { segments, .. }
251 | TableInitialization::Segments { segments } => {
252 for segment in segments {
253 let start = get_table_init_start(segment, instance)?;
254 instance.table_init_segment(
255 segment.table_index,
256 &segment.elements,
257 start,
258 0,
259 segment.elements.len() as u32,
260 )?;
261 }
262 }
263 }
264
265 Ok(())
266}
267
268fn get_memory_init_start(init: &MemoryInitializer, instance: &mut Instance) -> Result<u64> {
269 match init.base {
270 Some(base) => {
271 let mem64 = instance.module().memory_plans[init.memory_index]
272 .memory
273 .memory64;
274 let val = unsafe {
275 let global = instance.defined_or_imported_global_ptr(base);
276 if mem64 {
277 *(*global).as_u64()
278 } else {
279 u64::from(*(*global).as_u32())
280 }
281 };
282
283 init.offset
284 .checked_add(val)
285 .ok_or_else(|| anyhow!("data segment global base overflows"))
286 }
287 None => Ok(init.offset),
288 }
289}
290
291fn check_memory_init_bounds(
292 instance: &mut Instance,
293 initializers: &[MemoryInitializer],
294) -> Result<()> {
295 for init in initializers {
296 let memory = instance.get_memory(init.memory_index);
297 let start = get_memory_init_start(init, instance)?;
298 let end = usize::try_from(start)
299 .ok()
300 .and_then(|start| start.checked_add(init.data.len()));
301
302 match end {
303 Some(end) if end <= memory.current_length() => {
304 }
306 _ => {
307 bail!("memory out of bounds: data segment does not fit")
308 }
309 }
310 }
311
312 Ok(())
313}
314
/// Copies every data segment of `module` into the instance's memories by
/// driving `MemoryInitialization::init_memory` in runtime mode.
fn initialize_memories(instance: &mut Instance, module: &Module) -> Result<()> {
    // Callback: current size of a memory in wasm pages.
    let memory_size_in_pages = &|instance: &mut Instance, memory| {
        (instance.get_memory(memory).current_length() as u64) / u64::from(WASM_PAGE_SIZE)
    };

    // Callback: read a global as u64, zero-extending 32-bit globals.
    let get_global_as_u64 = &mut |instance: &mut Instance, global| unsafe {
        let def = instance.defined_or_imported_global_ptr(global);
        if module.globals[global].wasm_ty == WasmType::I64 {
            *(*def).as_u64()
        } else {
            u64::from(*(*def).as_u32())
        }
    };

    // `init_memory` returns false when any segment is out of bounds.
    let ok = module.memory_initialization.init_memory(
        instance,
        InitMemory::Runtime {
            memory_size_in_pages,
            get_global_as_u64,
        },
        |instance, memory_index, init| {
            // Defined memories that don't need initialization (e.g. already
            // backed by a memory image) are skipped; imported memories have no
            // defined index and always get the copy below.
            if let Some(memory_index) = module.defined_memory_index(memory_index) {
                if !instance.memories[memory_index].needs_init() {
                    return true;
                }
            }
            let memory = instance.get_memory(memory_index);

            unsafe {
                // `init.offset` was bounds-checked by `init_memory` before
                // this callback runs, so the raw copy stays in bounds.
                let src = instance.wasm_data(init.data.clone());
                let dst = memory.base.add(usize::try_from(init.offset).unwrap());
                ptr::copy_nonoverlapping(src.as_ptr(), dst, src.len())
            }
            true
        },
    );
    if !ok {
        return Err(Trap::MemoryOutOfBounds.into());
    }

    Ok(())
}
374
375fn check_init_bounds(instance: &mut Instance, module: &Module) -> Result<()> {
376 check_table_init_bounds(instance, module)?;
377
378 match &module.memory_initialization {
379 MemoryInitialization::Segmented(initializers) => {
380 check_memory_init_bounds(instance, initializers)?;
381 }
382 MemoryInitialization::Static { .. } => {}
384 }
385
386 Ok(())
387}
388
389pub(super) fn initialize_instance(
390 instance: &mut Instance,
391 module: &Module,
392 is_bulk_memory: bool,
393) -> Result<()> {
394 if !is_bulk_memory {
399 check_init_bounds(instance, module)?;
400 }
401
402 initialize_tables(instance, module)?;
404
405 initialize_memories(instance, &module)?;
407
408 Ok(())
409}
410
/// An allocator that creates each instance's memories and tables directly
/// from the system on demand, with no pooling or index reuse.
#[derive(Clone)]
pub struct OnDemandInstanceAllocator {
    // Custom memory creator; `None` falls back to `DefaultMemoryCreator`
    // (see `allocate_memories`).
    mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
    // Size of async fiber stacks; 0 means fiber stacks are unsupported
    // (see `allocate_fiber_stack`).
    #[cfg(feature = "async")]
    stack_size: usize,
}
418
419impl OnDemandInstanceAllocator {
420 pub fn new(mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>, stack_size: usize) -> Self {
422 drop(stack_size); Self {
424 mem_creator,
425 #[cfg(feature = "async")]
426 stack_size,
427 }
428 }
429}
430
431impl Default for OnDemandInstanceAllocator {
432 fn default() -> Self {
433 Self {
434 mem_creator: None,
435 #[cfg(feature = "async")]
436 stack_size: 0,
437 }
438 }
439}
440
441unsafe impl InstanceAllocator for OnDemandInstanceAllocator {
442 fn allocate_index(&self, _req: &InstanceAllocationRequest) -> Result<usize> {
443 Ok(0)
444 }
445
446 fn deallocate_index(&self, index: usize) {
447 assert_eq!(index, 0);
448 }
449
450 fn allocate_memories(
451 &self,
452 _index: usize,
453 req: &mut InstanceAllocationRequest,
454 memories: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
455 ) -> Result<()> {
456 let module = req.runtime_info.module();
457 let creator = self
458 .mem_creator
459 .as_deref()
460 .unwrap_or_else(|| &DefaultMemoryCreator);
461 let num_imports = module.num_imported_memories;
462 for (memory_idx, plan) in module.memory_plans.iter().skip(num_imports) {
463 let defined_memory_idx = module
464 .defined_memory_index(memory_idx)
465 .expect("Skipped imports, should never be None");
466 let image = req.runtime_info.memory_image(defined_memory_idx)?;
467
468 memories.push(Memory::new_dynamic(
469 plan,
470 creator,
471 unsafe {
472 req.store
473 .get()
474 .expect("if module has memory plans, store is not empty")
475 },
476 image,
477 )?);
478 }
479 Ok(())
480 }
481
482 fn deallocate_memories(
483 &self,
484 _index: usize,
485 _mems: &mut PrimaryMap<DefinedMemoryIndex, Memory>,
486 ) {
487 }
489
490 fn allocate_tables(
491 &self,
492 _index: usize,
493 req: &mut InstanceAllocationRequest,
494 tables: &mut PrimaryMap<DefinedTableIndex, Table>,
495 ) -> Result<()> {
496 let module = req.runtime_info.module();
497 let num_imports = module.num_imported_tables;
498 for (_, table) in module.table_plans.iter().skip(num_imports) {
499 tables.push(Table::new_dynamic(table, unsafe {
500 req.store
501 .get()
502 .expect("if module has table plans, store is not empty")
503 })?);
504 }
505 Ok(())
506 }
507
508 fn deallocate_tables(&self, _index: usize, _tables: &mut PrimaryMap<DefinedTableIndex, Table>) {
509 }
511
512 #[cfg(feature = "async")]
513 fn allocate_fiber_stack(&self) -> Result<wasmtime_fiber::FiberStack> {
514 if self.stack_size == 0 {
515 bail!("fiber stacks are not supported by the allocator")
516 }
517
518 let stack = wasmtime_fiber::FiberStack::new(self.stack_size)?;
519 Ok(stack)
520 }
521
522 #[cfg(feature = "async")]
523 unsafe fn deallocate_fiber_stack(&self, _stack: &wasmtime_fiber::FiberStack) {
524 }
526
527 fn purge_module(&self, _: CompiledModuleId) {}
528}