wasmtime_runtime/memory.rs
//! Memory management for linear memories.
//!
//! `RuntimeLinearMemory` is to WebAssembly linear memories what `Table` is to WebAssembly tables.

use crate::mmap::Mmap;
use crate::parking_spot::ParkingSpot;
use crate::vmcontext::VMMemoryDefinition;
use crate::{MemoryImage, MemoryImageSlot, Store, WaitResult};
use anyhow::Error;
use anyhow::{bail, format_err, Result};
use std::convert::TryFrom;
use std::ops::Range;
use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};
use std::sync::{Arc, RwLock};
use std::time::Instant;
use wasmtime_environ::{MemoryPlan, MemoryStyle, Trap, WASM32_MAX_PAGES, WASM64_MAX_PAGES};

const WASM_PAGE_SIZE: usize = wasmtime_environ::WASM_PAGE_SIZE as usize;
const WASM_PAGE_SIZE_U64: u64 = wasmtime_environ::WASM_PAGE_SIZE as u64;

/// A memory allocator
pub trait RuntimeMemoryCreator: Send + Sync {
    /// Create new RuntimeLinearMemory
    fn new_memory(
        &self,
        plan: &MemoryPlan,
        minimum: usize,
        maximum: Option<usize>,
        // Optionally, a memory image for CoW backing.
        memory_image: Option<&Arc<MemoryImage>>,
    ) -> Result<Box<dyn RuntimeLinearMemory>>;
}

/// A default memory allocator used by Wasmtime
pub struct DefaultMemoryCreator;

impl RuntimeMemoryCreator for DefaultMemoryCreator {
    /// Create new MmapMemory
    fn new_memory(
        &self,
        plan: &MemoryPlan,
        minimum: usize,
        maximum: Option<usize>,
        memory_image: Option<&Arc<MemoryImage>>,
    ) -> Result<Box<dyn RuntimeLinearMemory>> {
        Ok(Box::new(MmapMemory::new(
            plan,
            minimum,
            maximum,
            memory_image,
        )?))
    }
}

/// A linear memory
pub trait RuntimeLinearMemory: Send + Sync {
    /// Returns the number of allocated bytes.
    fn byte_size(&self) -> usize;

    /// Returns the maximum number of bytes the memory can grow to.
    /// Returns `None` if the memory is unbounded.
    fn maximum_byte_size(&self) -> Option<usize>;

    /// Grows a memory by `delta_pages`.
    ///
    /// This performs the necessary checks on the growth before delegating to
    /// the underlying `grow_to` implementation. A default implementation of
    /// this method is provided here since it is assumed to be the same for
    /// most kinds of memory; one exception is shared memory, which must
    /// perform all the steps of the default implementation *plus* the
    /// required locking.
    ///
    /// The `store` is used only for error reporting.
    fn grow(
        &mut self,
        delta_pages: u64,
        mut store: Option<&mut dyn Store>,
    ) -> Result<Option<(usize, usize)>, Error> {
        let old_byte_size = self.byte_size();

        // Wasm spec: when growing by 0 pages, always return the current size.
        if delta_pages == 0 {
            return Ok(Some((old_byte_size, old_byte_size)));
        }

        // The largest wasm-page-aligned region of memory it is possible to
        // represent in a `usize`. This will be impossible for the system to
        // actually allocate.
        let absolute_max = 0usize.wrapping_sub(WASM_PAGE_SIZE);

        // Calculate the byte size of the new allocation. Let it overflow up to
        // `usize::MAX`, then clamp it down to `absolute_max`.
        let new_byte_size = usize::try_from(delta_pages)
            .unwrap_or(usize::MAX)
            .saturating_mul(WASM_PAGE_SIZE)
            .saturating_add(old_byte_size);
        let new_byte_size = if new_byte_size > absolute_max {
            absolute_max
        } else {
            new_byte_size
        };
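        // For example (illustrative numbers, assuming a 64-bit host): growing
        // a 64 KiB memory by `u64::MAX` pages saturates the multiplication and
        // addition above to `usize::MAX`, which is then clamped down to
        // `absolute_max`, so the limiter below sees an enormous-but-sane
        // request rather than a wrapped-around small value.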

        let maximum = self.maximum_byte_size();
        // Store limiter gets first chance to reject memory_growing.
        if let Some(store) = &mut store {
            if !store.memory_growing(old_byte_size, new_byte_size, maximum)? {
                return Ok(None);
            }
        }

        // Never exceed maximum, even if limiter permitted it.
        if let Some(max) = maximum {
            if new_byte_size > max {
                if let Some(store) = store {
                    // FIXME: shared memories may not have an associated store
                    // to report the growth failure to but the error should not
                    // be dropped
                    // (https://github.com/bytecodealliance/wasmtime/issues/4240).
                    store.memory_grow_failed(&format_err!("Memory maximum size exceeded"));
                }
                return Ok(None);
            }
        }

        match self.grow_to(new_byte_size) {
            Ok(_) => Ok(Some((old_byte_size, new_byte_size))),
            Err(e) => {
                // FIXME: shared memories may not have an associated store to
                // report the growth failure to but the error should not be
                // dropped
                // (https://github.com/bytecodealliance/wasmtime/issues/4240).
                if let Some(store) = store {
                    store.memory_grow_failed(&e);
                }
                Ok(None)
            }
        }
    }

    /// Grow memory to the specified size, in bytes.
    ///
    /// Returns an error if the memory can't be grown to the specified size.
    fn grow_to(&mut self, size: usize) -> Result<()>;

    /// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm
    /// code.
    fn vmmemory(&mut self) -> VMMemoryDefinition;

    /// Does this memory need initialization? It may not if it already
    /// has initial contents courtesy of the `MemoryImage` passed to
    /// `RuntimeMemoryCreator::new_memory()`.
    fn needs_init(&self) -> bool;

    /// Used for optional dynamic downcasting.
    fn as_any_mut(&mut self) -> &mut dyn std::any::Any;

    /// Returns the range of addresses that may be reached by WebAssembly.
    ///
    /// This starts at the base of linear memory and ends at the end of the
    /// guard pages, if any.
    fn wasm_accessible(&self) -> Range<usize>;
}
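
// As an illustration of how the pieces of the trait above fit together, a
// host could in principle back a linear memory with a plain `Vec<u8>`. This
// is only a hypothetical sketch (no `VecMemory` type exists in this crate)
// and it omits the guard-page and CoW machinery that `MmapMemory` below
// provides:
//
//     struct VecMemory {
//         storage: Vec<u8>,
//         maximum: Option<usize>,
//     }
//
//     impl RuntimeLinearMemory for VecMemory {
//         fn byte_size(&self) -> usize {
//             self.storage.len()
//         }
//         fn maximum_byte_size(&self) -> Option<usize> {
//             self.maximum
//         }
//         fn grow_to(&mut self, size: usize) -> Result<()> {
//             // May move the allocation, just like a "dynamic" MmapMemory.
//             self.storage.resize(size, 0);
//             Ok(())
//         }
//         fn vmmemory(&mut self) -> VMMemoryDefinition {
//             VMMemoryDefinition {
//                 base: self.storage.as_mut_ptr(),
//                 current_length: self.storage.len().into(),
//             }
//         }
//         fn needs_init(&self) -> bool {
//             true
//         }
//         fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
//             self
//         }
//         fn wasm_accessible(&self) -> Range<usize> {
//             let base = self.storage.as_ptr() as usize;
//             base..base + self.storage.len()
//         }
//     }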

/// A linear memory instance.
#[derive(Debug)]
pub struct MmapMemory {
    // The underlying allocation.
    mmap: Mmap,

    // The number of bytes that are accessible in `mmap` and available for
    // reading and writing.
    //
    // This region starts at `pre_guard_size` offset from the base of `mmap`.
    accessible: usize,

    // The optional maximum accessible size, in bytes, for this linear memory.
    //
    // Note that this maximum does not factor in guard pages, so this isn't the
    // maximum size of the linear address space reservation for this memory.
    maximum: Option<usize>,

    // The amount of extra bytes to reserve whenever memory grows. This is
    // specified so that the cost of repeated growth is amortized.
    extra_to_reserve_on_growth: usize,

    // Size in bytes of extra guard pages before the start and after the end to
    // optimize loads and stores with constant offsets.
    pre_guard_size: usize,
    offset_guard_size: usize,

    // An optional CoW mapping that provides the initial content of this
    // MmapMemory, if mapped.
    memory_image: Option<MemoryImageSlot>,
}

impl MmapMemory {
    /// Create a new linear memory instance with specified minimum and maximum
    /// number of wasm pages.
    pub fn new(
        plan: &MemoryPlan,
        minimum: usize,
        mut maximum: Option<usize>,
        memory_image: Option<&Arc<MemoryImage>>,
    ) -> Result<Self> {
        // It's a programmer error for these two configuration values to exceed
        // the host's available address space, so panic if such a configuration
        // is found (mostly an issue for hypothetical 32-bit hosts).
        let offset_guard_bytes = usize::try_from(plan.offset_guard_size).unwrap();
        let pre_guard_bytes = usize::try_from(plan.pre_guard_size).unwrap();

        let (alloc_bytes, extra_to_reserve_on_growth) = match plan.style {
            // Dynamic memories start with the minimum size plus the `reserve`
            // amount specified to grow into.
            MemoryStyle::Dynamic { reserve } => (minimum, usize::try_from(reserve).unwrap()),

            // Static memories will never move in memory and consequently get
            // their entire allocation up-front with no extra room to grow into.
            // Note that the `maximum` is adjusted here to be the smaller of the
            // two: the `maximum` given or the `bound` specified for this
            // memory.
            MemoryStyle::Static { bound } => {
                assert!(bound >= plan.memory.minimum);
                let bound_bytes =
                    usize::try_from(bound.checked_mul(WASM_PAGE_SIZE_U64).unwrap()).unwrap();
                maximum = Some(bound_bytes.min(maximum.unwrap_or(usize::MAX)));
                (bound_bytes, 0)
            }
        };

        let request_bytes = pre_guard_bytes
            .checked_add(alloc_bytes)
            .and_then(|i| i.checked_add(extra_to_reserve_on_growth))
            .and_then(|i| i.checked_add(offset_guard_bytes))
            .ok_or_else(|| format_err!("cannot allocate {} with guard regions", minimum))?;
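        // The reservation computed above is, from low to high addresses:
        //
        //   [ pre_guard_bytes | alloc_bytes | extra_to_reserve_on_growth | offset_guard_bytes ]
        //
        // Initially only `minimum` bytes starting at offset `pre_guard_bytes`
        // are made accessible; the rest of the reservation stays inaccessible
        // and is either made accessible by later growth or serves as guard
        // space that faults on out-of-bounds accesses.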
        let mut mmap = Mmap::accessible_reserved(0, request_bytes)?;

        if minimum > 0 {
            mmap.make_accessible(pre_guard_bytes, minimum)?;
        }

        // If a memory image was specified, try to create the MemoryImageSlot on
        // top of our mmap.
        let memory_image = match memory_image {
            Some(image) => {
                let base = unsafe { mmap.as_mut_ptr().add(pre_guard_bytes) };
                let mut slot = MemoryImageSlot::create(
                    base.cast(),
                    minimum,
                    alloc_bytes + extra_to_reserve_on_growth,
                );
                slot.instantiate(minimum, Some(image), &plan)?;
                // On drop, we will unmap our mmap'd range that this slot was
                // mapped on top of, so there is no need for the slot to wipe
                // it with an anonymous mapping first.
                slot.no_clear_on_drop();
                Some(slot)
            }
            None => None,
        };

        Ok(Self {
            mmap,
            accessible: minimum,
            maximum,
            pre_guard_size: pre_guard_bytes,
            offset_guard_size: offset_guard_bytes,
            extra_to_reserve_on_growth,
            memory_image,
        })
    }
}

impl RuntimeLinearMemory for MmapMemory {
    fn byte_size(&self) -> usize {
        self.accessible
    }

    fn maximum_byte_size(&self) -> Option<usize> {
        self.maximum
    }

    fn grow_to(&mut self, new_size: usize) -> Result<()> {
        if new_size > self.mmap.len() - self.offset_guard_size - self.pre_guard_size {
            // If the new size of this heap exceeds the current size of the
            // allocation we have, then this must be a dynamic heap. Use
            // `new_size` to calculate a new size of an allocation, allocate it,
            // and then copy over the memory from before.
            let request_bytes = self
                .pre_guard_size
                .checked_add(new_size)
                .and_then(|s| s.checked_add(self.extra_to_reserve_on_growth))
                .and_then(|s| s.checked_add(self.offset_guard_size))
                .ok_or_else(|| format_err!("overflow calculating size of memory allocation"))?;

            let mut new_mmap = Mmap::accessible_reserved(0, request_bytes)?;
            new_mmap.make_accessible(self.pre_guard_size, new_size)?;

            new_mmap.as_mut_slice()[self.pre_guard_size..][..self.accessible]
                .copy_from_slice(&self.mmap.as_slice()[self.pre_guard_size..][..self.accessible]);

            // Now drop the MemoryImageSlot, if any. We've lost the CoW
            // advantages by explicitly copying all data, but we have
            // preserved all of its content; so we no longer need the
            // mapping. We need to do this before we (implicitly) drop the
            // `mmap` field by overwriting it below.
            drop(self.memory_image.take());

            self.mmap = new_mmap;
        } else if let Some(image) = self.memory_image.as_mut() {
            // MemoryImageSlot has its own growth mechanisms; defer to its
            // implementation.
            image.set_heap_limit(new_size)?;
        } else {
            // If the new size of this heap fits within the existing allocation
            // then all we need to do is to make the new pages accessible. This
            // can happen either for "static" heaps which always hit this case,
            // or "dynamic" heaps which have some space reserved after the
            // initial allocation to grow into before the heap is moved in
            // memory.
            assert!(new_size > self.accessible);
            self.mmap.make_accessible(
                self.pre_guard_size + self.accessible,
                new_size - self.accessible,
            )?;
        }

        self.accessible = new_size;

        Ok(())
    }

    fn vmmemory(&mut self) -> VMMemoryDefinition {
        VMMemoryDefinition {
            base: unsafe { self.mmap.as_mut_ptr().add(self.pre_guard_size) },
            current_length: self.accessible.into(),
        }
    }

    fn needs_init(&self) -> bool {
        // If we're using a CoW mapping, then no initialization
        // is needed.
        self.memory_image.is_none()
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }

    fn wasm_accessible(&self) -> Range<usize> {
        let base = self.mmap.as_mut_ptr() as usize + self.pre_guard_size;
        let end = base + (self.mmap.len() - self.pre_guard_size);
        base..end
    }
}

/// A "static" memory where the lifetime of the backing memory is managed
/// elsewhere. Currently used with the pooling allocator.
struct StaticMemory {
    /// The memory in the host for this wasm memory. The length of this
    /// slice is the maximum size of the memory that can be grown to.
    base: &'static mut [u8],

    /// The current size, in bytes, of this memory.
    size: usize,

    /// The size, in bytes, of the virtual address allocation starting at `base`
    /// and going to the end of the guard pages at the end of the linear memory.
    memory_and_guard_size: usize,

    /// The image management, if any, for this memory. Owned here and
    /// returned to the pooling allocator when termination occurs.
    memory_image: MemoryImageSlot,
}

impl StaticMemory {
    fn new(
        base: &'static mut [u8],
        initial_size: usize,
        maximum_size: Option<usize>,
        memory_image: MemoryImageSlot,
        memory_and_guard_size: usize,
    ) -> Result<Self> {
        if base.len() < initial_size {
            bail!(
                "initial memory size of {} exceeds the pooling allocator's \
                 configured maximum memory size of {} bytes",
                initial_size,
                base.len(),
            );
        }

        // Only use the part of the slice that is necessary.
        let base = match maximum_size {
            Some(max) if max < base.len() => &mut base[..max],
            _ => base,
        };

        Ok(Self {
            base,
            size: initial_size,
            memory_image,
            memory_and_guard_size,
        })
    }
}

impl RuntimeLinearMemory for StaticMemory {
    fn byte_size(&self) -> usize {
        self.size
    }

    fn maximum_byte_size(&self) -> Option<usize> {
        Some(self.base.len())
    }

    fn grow_to(&mut self, new_byte_size: usize) -> Result<()> {
        // Never exceed the static memory size; this check should have been made
        // prior to arriving here.
        assert!(new_byte_size <= self.base.len());

        self.memory_image.set_heap_limit(new_byte_size)?;

        // Update our accounting of the available size.
        self.size = new_byte_size;
        Ok(())
    }

    fn vmmemory(&mut self) -> VMMemoryDefinition {
        VMMemoryDefinition {
            base: self.base.as_mut_ptr().cast(),
            current_length: self.size.into(),
        }
    }

    fn needs_init(&self) -> bool {
        !self.memory_image.has_image()
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }

    fn wasm_accessible(&self) -> Range<usize> {
        let base = self.base.as_ptr() as usize;
        let end = base + self.memory_and_guard_size;
        base..end
    }
}

/// For shared memory (and only for shared memory), this lock-guarded version
/// restricts access when growing the memory or checking its size. This is to
/// conform with the [thread proposal]: "When `IsSharedArrayBuffer(...)` is
/// true, the return value should be the result of an atomic read-modify-write
/// of the new size to the internal `length` slot."
///
/// [thread proposal]:
///     https://github.com/WebAssembly/threads/blob/master/proposals/threads/Overview.md#webassemblymemoryprototypegrow
#[derive(Clone)]
pub struct SharedMemory(Arc<SharedMemoryInner>);

struct SharedMemoryInner {
    memory: RwLock<Box<dyn RuntimeLinearMemory>>,
    spot: ParkingSpot,
    ty: wasmtime_environ::Memory,
    def: LongTermVMMemoryDefinition,
}

impl SharedMemory {
    /// Construct a new [`SharedMemory`].
    pub fn new(plan: MemoryPlan) -> Result<Self> {
        let (minimum_bytes, maximum_bytes) = Memory::limit_new(&plan, None)?;
        let mmap_memory = MmapMemory::new(&plan, minimum_bytes, maximum_bytes, None)?;
        Self::wrap(&plan, Box::new(mmap_memory), plan.memory)
    }

    /// Wrap an existing linear memory with the locking provided by a
    /// [`SharedMemory`].
    pub fn wrap(
        plan: &MemoryPlan,
        mut memory: Box<dyn RuntimeLinearMemory>,
        ty: wasmtime_environ::Memory,
    ) -> Result<Self> {
        if !ty.shared {
            bail!("shared memory must have a `shared` memory type");
        }
        if !matches!(plan.style, MemoryStyle::Static { .. }) {
            bail!("shared memory can only be built from a static memory allocation")
        }
        assert!(
            memory.as_any_mut().type_id() != std::any::TypeId::of::<SharedMemory>(),
            "cannot re-wrap a shared memory"
        );
        Ok(Self(Arc::new(SharedMemoryInner {
            ty,
            spot: ParkingSpot::default(),
            def: LongTermVMMemoryDefinition(memory.vmmemory()),
            memory: RwLock::new(memory),
        })))
    }

    /// Return the memory type for this [`SharedMemory`].
    pub fn ty(&self) -> wasmtime_environ::Memory {
        self.0.ty
    }

    /// Convert this shared memory into a [`Memory`].
    pub fn as_memory(self) -> Memory {
        Memory(Box::new(self))
    }

    /// Return a pointer to the shared memory's [VMMemoryDefinition].
    pub fn vmmemory_ptr(&self) -> *const VMMemoryDefinition {
        &self.0.def.0
    }

    /// Same as `RuntimeLinearMemory::grow`, except with `&self`.
    pub fn grow(
        &self,
        delta_pages: u64,
        store: Option<&mut dyn Store>,
    ) -> Result<Option<(usize, usize)>, Error> {
        let mut memory = self.0.memory.write().unwrap();
        let result = memory.grow(delta_pages, store)?;
        if let Some((_old_size_in_bytes, new_size_in_bytes)) = result {
            // Store the new size to the `VMMemoryDefinition` for JIT-generated
            // code (and runtime functions) to access. No other code can be
            // growing this memory due to the write lock, but code in other
            // threads could have access to this shared memory and we want them
            // to see the most consistent version of the `current_length`; a
            // weaker consistency is possible if we accept them seeing an older,
            // smaller memory size (assumption: memory only grows) but presently
            // we are aiming for accuracy.
            //
            // Note that it could be possible to access a memory address that is
            // now-valid due to changes to the page flags in `grow` above but
            // beyond the `memory.size` that we are about to assign to. In these
            // and similar cases, discussion in the thread proposal concluded
            // that: "multiple accesses in one thread racing with another
            // thread's `memory.grow` that are in-bounds only after the grow
            // commits may independently succeed or trap" (see
            // https://github.com/WebAssembly/threads/issues/26#issuecomment-433930711).
            // In other words, some non-determinism is acceptable when using
            // `memory.size` on work being done by `memory.grow`.
            self.0
                .def
                .0
                .current_length
                .store(new_size_in_bytes, Ordering::SeqCst);
        }
        Ok(result)
    }

    /// Implementation of `memory.atomic.notify` for this shared memory.
    pub fn atomic_notify(&self, addr_index: u64, count: u32) -> Result<u32, Trap> {
        validate_atomic_addr(&self.0.def.0, addr_index, 4, 4)?;
        Ok(self.0.spot.unpark(addr_index, count))
    }

    /// Implementation of `memory.atomic.wait32` for this shared memory.
    pub fn atomic_wait32(
        &self,
        addr_index: u64,
        expected: u32,
        timeout: Option<Instant>,
    ) -> Result<WaitResult, Trap> {
        let addr = validate_atomic_addr(&self.0.def.0, addr_index, 4, 4)?;
        // SAFETY: `addr_index` was validated by `validate_atomic_addr` above.
        assert!(std::mem::size_of::<AtomicU32>() == 4);
        assert!(std::mem::align_of::<AtomicU32>() <= 4);
        let atomic = unsafe { &*(addr as *const AtomicU32) };

        // We want the sequential consistency of `SeqCst` to ensure that the
        // `load` sees the value that the `notify` will/would see. All WASM
        // atomic operations are also `SeqCst`.
        let validate = || atomic.load(Ordering::SeqCst) == expected;

        Ok(self.0.spot.park(addr_index, validate, timeout))
    }

    /// Implementation of `memory.atomic.wait64` for this shared memory.
    pub fn atomic_wait64(
        &self,
        addr_index: u64,
        expected: u64,
        timeout: Option<Instant>,
    ) -> Result<WaitResult, Trap> {
        let addr = validate_atomic_addr(&self.0.def.0, addr_index, 8, 8)?;
        // SAFETY: `addr_index` was validated by `validate_atomic_addr` above.
        assert!(std::mem::size_of::<AtomicU64>() == 8);
        assert!(std::mem::align_of::<AtomicU64>() <= 8);
        let atomic = unsafe { &*(addr as *const AtomicU64) };

        // We want the sequential consistency of `SeqCst` to ensure that the
        // `load` sees the value that the `notify` will/would see. All WASM
        // atomic operations are also `SeqCst`.
        let validate = || atomic.load(Ordering::SeqCst) == expected;

        Ok(self.0.spot.park(addr_index, validate, timeout))
    }
}
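
// A hypothetical sketch of how an embedder might drive the wait/notify pair
// above from two threads that share the same `SharedMemory` (here `shared` is
// an assumed variable of type `SharedMemory`; storing the new value into the
// memory itself happens through wasm or a raw pointer and is not shown):
//
//     // Thread A: block until woken or until the 32-bit word at index 0
//     // no longer holds the expected value 0.
//     let wait_result = shared.atomic_wait32(0, 0, None)?;
//
//     // Thread B: wake at most one waiter parked on index 0.
//     let woken = shared.atomic_notify(0, 1)?;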

/// Shared memory needs some representation of a `VMMemoryDefinition` for
/// JIT-generated code to access. This structure owns the base pointer and
/// length to the actual memory and we share this definition across threads by:
/// - never changing the base pointer; according to the specification, shared
///   memory must be created with a known maximum size so it can be allocated
///   once and never moved
/// - carefully changing the length, using atomic accesses in both the runtime
///   and JIT-generated code.
struct LongTermVMMemoryDefinition(VMMemoryDefinition);
unsafe impl Send for LongTermVMMemoryDefinition {}
unsafe impl Sync for LongTermVMMemoryDefinition {}

/// Proxy all calls through the [`RwLock`].
impl RuntimeLinearMemory for SharedMemory {
    fn byte_size(&self) -> usize {
        self.0.memory.read().unwrap().byte_size()
    }

    fn maximum_byte_size(&self) -> Option<usize> {
        self.0.memory.read().unwrap().maximum_byte_size()
    }

    fn grow(
        &mut self,
        delta_pages: u64,
        store: Option<&mut dyn Store>,
    ) -> Result<Option<(usize, usize)>, Error> {
        SharedMemory::grow(self, delta_pages, store)
    }

    fn grow_to(&mut self, size: usize) -> Result<()> {
        self.0.memory.write().unwrap().grow_to(size)
    }

    fn vmmemory(&mut self) -> VMMemoryDefinition {
        // `vmmemory()` is used for writing the `VMMemoryDefinition` of a memory
        // into its `VMContext`; this should never be possible for a shared
        // memory because the only `VMMemoryDefinition` for it should be stored
        // in its own `def` field.
        unreachable!()
    }

    fn needs_init(&self) -> bool {
        self.0.memory.read().unwrap().needs_init()
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }

    fn wasm_accessible(&self) -> Range<usize> {
        self.0.memory.read().unwrap().wasm_accessible()
    }
}

/// Representation of a runtime wasm linear memory.
pub struct Memory(Box<dyn RuntimeLinearMemory>);

impl Memory {
    /// Create a new dynamic (movable) memory instance for the specified plan.
    pub fn new_dynamic(
        plan: &MemoryPlan,
        creator: &dyn RuntimeMemoryCreator,
        store: &mut dyn Store,
        memory_image: Option<&Arc<MemoryImage>>,
    ) -> Result<Self> {
        let (minimum, maximum) = Self::limit_new(plan, Some(store))?;
        let allocation = creator.new_memory(plan, minimum, maximum, memory_image)?;
        let allocation = if plan.memory.shared {
            Box::new(SharedMemory::wrap(plan, allocation, plan.memory)?)
        } else {
            allocation
        };
        Ok(Memory(allocation))
    }

    /// Create a new static (immovable) memory instance for the specified plan.
    pub fn new_static(
        plan: &MemoryPlan,
        base: &'static mut [u8],
        memory_image: MemoryImageSlot,
        memory_and_guard_size: usize,
        store: &mut dyn Store,
    ) -> Result<Self> {
        let (minimum, maximum) = Self::limit_new(plan, Some(store))?;
        let pooled_memory =
            StaticMemory::new(base, minimum, maximum, memory_image, memory_and_guard_size)?;
        let allocation = Box::new(pooled_memory);
        let allocation: Box<dyn RuntimeLinearMemory> = if plan.memory.shared {
            // FIXME: since the pooling allocator owns the memory allocation
            // (which is torn down with the instance), the current shared memory
            // implementation will cause problems; see
            // https://github.com/bytecodealliance/wasmtime/issues/4244.
            todo!("using shared memory with the pooling allocator is a work in progress");
        } else {
            allocation
        };
        Ok(Memory(allocation))
    }

    /// Calls the `store`'s limiter to optionally prevent a memory from being allocated.
    ///
    /// Returns the minimum size and optional maximum size of the memory, in
    /// bytes.
    fn limit_new(
        plan: &MemoryPlan,
        store: Option<&mut dyn Store>,
    ) -> Result<(usize, Option<usize>)> {
        // Sanity-check what should already be true from wasm module validation.
        let absolute_max = if plan.memory.memory64 {
            WASM64_MAX_PAGES
        } else {
            WASM32_MAX_PAGES
        };
        assert!(plan.memory.minimum <= absolute_max);
        assert!(plan.memory.maximum.is_none() || plan.memory.maximum.unwrap() <= absolute_max);

        // This is the absolute possible maximum that the module can try to
        // allocate, which is our entire address space minus a wasm page. That
        // shouldn't ever actually work in terms of an allocation because
        // presumably the kernel wants *something* for itself, but it is passed
        // to the `store`'s limiter as a requested size to approximate the
        // scale of the request that the wasm module is making. This is
        // necessary because the limiter works on `usize` bytes whereas we're
        // working with possibly-overflowing `u64` calculations here. To
        // actually faithfully represent the byte requests of modules we'd have
        // to represent things as `u128`, but that's kinda overkill for this
        // purpose.
        let absolute_max = 0usize.wrapping_sub(WASM_PAGE_SIZE);

        // If the minimum memory size overflows the size of our own address
        // space, then we can't satisfy this request, but defer the error to
        // later so the `store` can be informed that an effective oom is
        // happening.
        let minimum = plan
            .memory
            .minimum
            .checked_mul(WASM_PAGE_SIZE_U64)
            .and_then(|m| usize::try_from(m).ok());

        // The plan stores the maximum size in units of wasm pages, but we
        // use units of bytes. Unlike for the `minimum` size we silently clamp
        // the effective maximum size to `absolute_max` above if the maximum is
        // too large. This should be ok since as a wasm runtime we get to
        // arbitrarily decide the actual maximum size of memory, regardless of
        // what's actually listed on the memory itself.
        let mut maximum = plan.memory.maximum.map(|max| {
            usize::try_from(max)
                .ok()
                .and_then(|m| m.checked_mul(WASM_PAGE_SIZE))
                .unwrap_or(absolute_max)
        });

        // If this is a 32-bit memory and no maximum is otherwise listed then we
        // still need to specify a maximum size of 4GB. If the host platform is
        // 32-bit then there's no need to limit the maximum this way since no
        // allocation of 4GB can succeed, but for 64-bit platforms this is
        // required to limit memories to 4GB.
        if !plan.memory.memory64 && maximum.is_none() {
            maximum = usize::try_from(1u64 << 32).ok();
        }
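        // As a concrete illustration of the arithmetic above (illustrative
        // numbers only): a 32-bit memory declared as `(memory 1)` with no
        // declared maximum, instantiated on a 64-bit host, ends up here with
        // `minimum = Some(65_536)` bytes and `maximum = Some(1 << 32)` bytes
        // (4 GiB) from the clamp just above.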

        // Inform the store's limiter what's about to happen. This will let the
        // limiter reject anything if necessary, and this also guarantees that
        // we should call the limiter for all requested memories, even if our
        // `minimum` calculation overflowed. This means that the `minimum` we
        // inform the limiter of is lossy and may not be 100% accurate, but for
        // now the expected uses of the limiter mean that's ok.
        if let Some(store) = store {
            // We ignore the store limits for shared memories since they are
            // technically not created within a store (though, trickily, they
            // may be associated with one in order to get a `vmctx`).
            if !plan.memory.shared {
                if !store.memory_growing(0, minimum.unwrap_or(absolute_max), maximum)? {
                    bail!(
                        "memory minimum size of {} pages exceeds memory limits",
                        plan.memory.minimum
                    );
                }
            }
        }

        // At this point we need to actually handle overflows, so bail out with
        // an error if the `minimum` calculation above overflowed.
        let minimum = minimum.ok_or_else(|| {
            format_err!(
                "memory minimum size of {} pages exceeds memory limits",
                plan.memory.minimum
            )
        })?;
        Ok((minimum, maximum))
    }

    /// Returns the size, in bytes, of this memory.
    pub fn byte_size(&self) -> usize {
        self.0.byte_size()
    }

    /// Returns the maximum size, in bytes, this memory can grow to at runtime.
    ///
    /// Returns `None` if the memory is unbounded.
    ///
    /// The runtime maximum may not be equal to the maximum from the linear memory's
    /// Wasm type when it is being constrained by an instance allocator.
    pub fn maximum_byte_size(&self) -> Option<usize> {
        self.0.maximum_byte_size()
    }

    /// Returns whether or not this memory needs initialization. It
    /// may not if it already has initial content thanks to a CoW
    /// mechanism.
    pub(crate) fn needs_init(&self) -> bool {
        self.0.needs_init()
    }

    /// Grow memory by the specified amount of wasm pages.
    ///
    /// Returns `None` if memory can't be grown by the specified amount
    /// of wasm pages. Returns `Some` with the old size of memory, in bytes, on
    /// successful growth.
    ///
    /// # Safety
    ///
    /// Resizing the memory can reallocate the memory buffer for dynamic memories.
    /// An instance's `VMContext` may have pointers to the memory's base and will
    /// need to be fixed up after growing the memory.
    ///
    /// Generally, prefer using `InstanceHandle::memory_grow`, which encapsulates
    /// this unsafety.
    ///
    /// Ensure that the provided Store is not used to get access to any Memory
    /// which lives inside it.
    pub unsafe fn grow(
        &mut self,
        delta_pages: u64,
        store: Option<&mut dyn Store>,
    ) -> Result<Option<usize>, Error> {
        self.0
            .grow(delta_pages, store)
            .map(|opt| opt.map(|(old, _new)| old))
    }

    /// Return a `VMMemoryDefinition` for exposing the memory to compiled wasm code.
    pub fn vmmemory(&mut self) -> VMMemoryDefinition {
        self.0.vmmemory()
    }

    /// Consume the memory, returning its [`MemoryImageSlot`] if any is present.
    /// The image should only be present for a subset of memories created with
    /// [`Memory::new_static()`].
    #[cfg(feature = "pooling-allocator")]
    pub fn unwrap_static_image(mut self) -> MemoryImageSlot {
        let mem = self.0.as_any_mut().downcast_mut::<StaticMemory>().unwrap();
        std::mem::replace(&mut mem.memory_image, MemoryImageSlot::dummy())
    }
    /// If this [`Memory`] is backed by a [`SharedMemory`], return a mutable
    /// reference to that shared memory.
    pub fn as_shared_memory(&mut self) -> Option<&mut SharedMemory> {
        let as_any = self.0.as_any_mut();
        if let Some(m) = as_any.downcast_mut::<SharedMemory>() {
            Some(m)
        } else {
            None
        }
    }

    /// Implementation of `memory.atomic.notify` for all memories.
    pub fn atomic_notify(&mut self, addr: u64, count: u32) -> Result<u32, Trap> {
        match self.0.as_any_mut().downcast_mut::<SharedMemory>() {
            Some(m) => m.atomic_notify(addr, count),
            None => {
                validate_atomic_addr(&self.vmmemory(), addr, 4, 4)?;
                Ok(0)
            }
        }
    }

    /// Implementation of `memory.atomic.wait32` for all memories.
    pub fn atomic_wait32(
        &mut self,
        addr: u64,
        expected: u32,
        deadline: Option<Instant>,
    ) -> Result<WaitResult, Trap> {
        match self.0.as_any_mut().downcast_mut::<SharedMemory>() {
            Some(m) => m.atomic_wait32(addr, expected, deadline),
            None => {
                validate_atomic_addr(&self.vmmemory(), addr, 4, 4)?;
                Err(Trap::AtomicWaitNonSharedMemory)
            }
        }
    }

    /// Implementation of `memory.atomic.wait64` for all memories.
    pub fn atomic_wait64(
        &mut self,
        addr: u64,
        expected: u64,
        deadline: Option<Instant>,
    ) -> Result<WaitResult, Trap> {
        match self.0.as_any_mut().downcast_mut::<SharedMemory>() {
            Some(m) => m.atomic_wait64(addr, expected, deadline),
            None => {
                validate_atomic_addr(&self.vmmemory(), addr, 8, 8)?;
                Err(Trap::AtomicWaitNonSharedMemory)
            }
        }
    }

    /// Returns the range of bytes that WebAssembly should be able to address in
    /// this linear memory. Note that this includes guard pages which wasm can
    /// hit.
    pub fn wasm_accessible(&self) -> Range<usize> {
        self.0.wasm_accessible()
    }
}

/// In the configurations where bounds checks were elided in JIT code (because
/// we are using static memories with virtual memory guard pages) this manual
/// check is here so we don't segfault from Rust. For other configurations,
/// these checks are required anyway.
fn validate_atomic_addr(
    def: &VMMemoryDefinition,
    addr: u64,
    access_size: u64,
    access_alignment: u64,
) -> Result<*mut u8, Trap> {
    debug_assert!(access_alignment.is_power_of_two());
    if !(addr % access_alignment == 0) {
        return Err(Trap::HeapMisaligned);
    }
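    // For instance, a 4-byte `memory.atomic.wait32` at address 5 fails the
    // alignment check above (5 % 4 != 0) and traps with `HeapMisaligned`
    // before the bounds check below is even attempted.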

    let length = u64::try_from(def.current_length()).unwrap();
    if !(addr.saturating_add(access_size) < length) {
        return Err(Trap::MemoryOutOfBounds);
    }

    Ok(def.base.wrapping_add(addr as usize))
}