sc_allocator/freeing_bump.rs

// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! This module implements a freeing-bump allocator.
//!
//! The heap is a contiguous linear memory and chunks are allocated using a bump allocator.
//!
//! ```ignore
//! +-------------+-------------------------------------------------+
//! | <allocated> | <unallocated>                                   |
//! +-------------+-------------------------------------------------+
//!               ^
//!               |_ bumper
//! ```
//!
//! Only power-of-two allocation sizes are supported. If an incoming request has a size that is not
//! a power of two, it is rounded up to the next power of two. The exponent of that power of two is
//! referred to as the **order** of the allocation.
//!
//! Each allocation has a header immediately preceding it. The header is always 8 bytes and can be
//! of two types: free and occupied.
//!
//! To implement freeing, we maintain a linked list for each order. Since the maximum supported
//! allocation size is capped, the number of orders, and therefore of linked lists, is bounded as
//! well. Currently, the maximum size of an allocation is 32 MiB.
//!
//! When the allocator serves an allocation request it first checks the linked list for the
//! respective order. If it doesn't have any free chunks, the allocator requests memory from the
//! bump allocator. In any case the order is stored in the header of the allocation.
//!
//! Upon deallocation we get the order of the allocation from its header and then add that
//! allocation to the linked list for the respective order.
//!
//! # Caveats
//!
//! This is a fast allocator but it is also dumb. There are specifically two main shortcomings
//! that the user should keep in mind:
//!
//! - Once the bump allocator space is exhausted, there is no way to reclaim the memory. This means
//!   that it's possible to end up in a situation where there are no live allocations yet a new
//!   allocation will fail.
//!   Let's look at an example. Given a heap of 32 MiB, the user makes a 32 MiB allocation that we
//!   call `X`. Now the heap is full. Then the user deallocates `X`. Since all the space in the bump
//!   allocator was consumed by the 32 MiB allocation, allocations of all sizes except 32 MiB will
//!   fail.
//!
//! - Sizes of allocations are rounded up to the nearest order. For example, an allocation of
//!   2.00001 MiB will be put into the 4 MiB bucket. In general, any allocation of size `(N, 2N]`
//!   takes up `2N`, so assuming a uniform distribution of allocation sizes, the average amount in
//!   use of a `2N` slot on the heap will be `(3N + ε) / 2`. The average utilization is thus around
//!   75% (`(3N + ε) / 2 / 2N`), meaning that around 25% of the space in an allocation will be
//!   wasted. This is more pronounced (in terms of absolute heap amounts) with larger allocation
//!   sizes.
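//!
//! # Example
//!
//! A minimal usage sketch. The identifiers `mem` and `heap_base` are only illustrative: `mem` is
//! assumed to be some value implementing this crate's `Memory` trait, and `heap_base` the offset
//! at which the heap starts:
//!
//! ```ignore
//! // Create the allocator; the heap base is rounded up to the 8-byte alignment.
//! let mut allocator = FreeingBumpHeapAllocator::new(heap_base);
//!
//! // Sizes are rounded up to the next power of two: a 100 byte request
//! // occupies a 128 byte chunk (order 4) plus an 8 byte header.
//! let ptr = allocator.allocate(&mut mem, 100)?;
//!
//! // Deallocation pushes the chunk onto the free list of its order so that a
//! // later allocation of the same order can reuse it.
//! allocator.deallocate(&mut mem, ptr)?;
//! ```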

use crate::{Error, Memory, MAX_WASM_PAGES, PAGE_SIZE};
pub use sp_core::MAX_POSSIBLE_ALLOCATION;
use sp_wasm_interface::{Pointer, WordSize};
use std::{
	cmp::{max, min},
	mem,
	ops::{Index, IndexMut, Range},
};

/// The minimal alignment guaranteed by this allocator.
///
/// The alignment of 8 is chosen because it is the maximum size of a primitive type supported by the
/// target version of wasm32: i64's natural alignment is 8.
const ALIGNMENT: u32 = 8;

// Each pointer is prefixed with 8 bytes, which identify the list index
// to which it belongs.
const HEADER_SIZE: u32 = 8;

/// Create an allocator error.
fn error(msg: &'static str) -> Error {
	Error::Other(msg)
}

const LOG_TARGET: &str = "wasm-heap";

// The minimum possible allocation size is chosen to be 8 bytes because it makes it easier to
// provide the guaranteed alignment of 8.
//
// The maximum possible allocation size is set in the primitives to 32 MiB.
//
// N_ORDERS represents the number of orders supported.
//
// This number corresponds to the number of powers of two between the minimum and the maximum
// possible allocation, i.e. 2^3..=2^25 (both ends inclusive, hence 23).
const N_ORDERS: usize = 23;
const MIN_POSSIBLE_ALLOCATION: u32 = 8; // 2^3 bytes, 8 bytes

/// The exponent of the power-of-two block size, adjusted so that the minimum possible allocation
/// has order 0.
///
/// This way, if `MIN_POSSIBLE_ALLOCATION == 8`, we would get:
///
/// power_of_two_size | order
/// 8                 | 0
/// 16                | 1
/// 32                | 2
/// 64                | 3
/// ...
/// 16777216          | 21
/// 33554432          | 22
///
/// and so on.
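///
/// For illustration (a sketch rather than a doctest, since `Order` is private to this module):
///
/// ```ignore
/// // A request of 9 bytes is rounded up to 16 bytes, i.e. order 1.
/// assert_eq!(Order::from_size(9).unwrap(), Order(1));
/// assert_eq!(Order::from_size(9).unwrap().size(), 16);
/// ```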
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct Order(u32);

impl Order {
	/// Create `Order` object from a raw order.
	///
	/// Returns `Err` if it is greater than the maximum supported order.
	fn from_raw(order: u32) -> Result<Self, Error> {
		if order < N_ORDERS as u32 {
			Ok(Self(order))
		} else {
			Err(error("invalid order"))
		}
	}

	/// Compute the order from the given size.
	///
	/// The size is clamped, so that the following holds:
	///
	/// `MIN_POSSIBLE_ALLOCATION <= size <= MAX_POSSIBLE_ALLOCATION`
	fn from_size(size: u32) -> Result<Self, Error> {
		let clamped_size = if size > MAX_POSSIBLE_ALLOCATION {
			log::warn!(target: LOG_TARGET, "going to fail due to allocating {:?}", size);
			return Err(Error::RequestedAllocationTooLarge)
		} else if size < MIN_POSSIBLE_ALLOCATION {
			MIN_POSSIBLE_ALLOCATION
		} else {
			size
		};

		// Round the clamped size to the next power of two.
		//
		// It returns the unchanged value if the value is already a power of two.
		let power_of_two_size = clamped_size.next_power_of_two();

		// Compute the number of trailing zeroes to get the order. We adjust it by the number of
		// trailing zeroes in the minimum possible allocation.
		let order = power_of_two_size.trailing_zeros() - MIN_POSSIBLE_ALLOCATION.trailing_zeros();

		Ok(Self(order))
	}

	/// Returns the corresponding size in bytes for this order.
	///
	/// Note that it is always a power of two.
	fn size(&self) -> u32 {
		MIN_POSSIBLE_ALLOCATION << self.0
	}

	/// Extract the order as `u32`.
	fn into_raw(self) -> u32 {
		self.0
	}
}

/// A special magic value for a pointer in a link that denotes the end of the linked list.
const NIL_MARKER: u32 = u32::MAX;

/// A link between headers in the free list.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Link {
	/// Nil, denotes that there is no next element.
	Nil,
	/// Link to the next element represented as a pointer to the header.
	Ptr(u32),
}

impl Link {
	/// Creates a link from raw value.
	fn from_raw(raw: u32) -> Self {
		if raw != NIL_MARKER {
			Self::Ptr(raw)
		} else {
			Self::Nil
		}
	}

	/// Converts this link into a raw u32.
	fn into_raw(self) -> u32 {
		match self {
			Self::Nil => NIL_MARKER,
			Self::Ptr(ptr) => ptr,
		}
	}
}

/// A header of an allocation.
///
/// The header is encoded in memory as follows.
///
/// ## Free header
///
/// ```ignore
/// 64             32                  0
/// +--------------+-------------------+
/// |            0 | next element link |
/// +--------------+-------------------+
/// ```
///
/// ## Occupied header
///
/// ```ignore
/// 64             32                  0
/// +--------------+-------------------+
/// |            1 |             order |
/// +--------------+-------------------+
/// ```
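///
/// For example, the raw 64-bit values read and written by `read_from`/`write_into` look like this
/// (shown purely for illustration):
///
/// ```ignore
/// // Occupied allocation of order 2 (a 32 byte chunk): occupied bit set, order in the low 32 bits.
/// 0x0000_0001_0000_0002
/// // Free chunk with no next element: the link is `NIL_MARKER` (`u32::MAX`).
/// 0x0000_0000_FFFF_FFFF
/// ```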
#[derive(Clone, Debug, PartialEq, Eq)]
enum Header {
	/// A free header contains a link to the next element to form a free linked list.
	Free(Link),
	/// An occupied header has an attached order to know in which free list we should put the
	/// allocation upon deallocation.
	Occupied(Order),
}

impl Header {
	/// Reads a header from memory.
	///
	/// Returns an error if the `header_ptr` is out of bounds of the linear memory or if the read
	/// header is corrupted (e.g. the order is incorrect).
	fn read_from(memory: &impl Memory, header_ptr: u32) -> Result<Self, Error> {
		let raw_header = memory.read_le_u64(header_ptr)?;

		// Check if the header represents an occupied or free allocation and extract the header data
		// by trimming (and discarding) the high bits.
		let occupied = raw_header & 0x00000001_00000000 != 0;
		let header_data = raw_header as u32;

		Ok(if occupied {
			Self::Occupied(Order::from_raw(header_data)?)
		} else {
			Self::Free(Link::from_raw(header_data))
		})
	}

	/// Write out this header to memory.
	///
	/// Returns an error if the `header_ptr` is out of bounds of the linear memory.
	fn write_into(&self, memory: &mut impl Memory, header_ptr: u32) -> Result<(), Error> {
		let (header_data, occupied_mask) = match *self {
			Self::Occupied(order) => (order.into_raw(), 0x00000001_00000000),
			Self::Free(link) => (link.into_raw(), 0x00000000_00000000),
		};
		let raw_header = header_data as u64 | occupied_mask;
		memory.write_le_u64(header_ptr, raw_header)?;
		Ok(())
	}

	/// Returns the order of the allocation if this is an occupied header.
	fn into_occupied(self) -> Option<Order> {
		match self {
			Self::Occupied(order) => Some(order),
			_ => None,
		}
	}

	/// Returns the link to the next element in the free list if this is a free header.
	fn into_free(self) -> Option<Link> {
		match self {
			Self::Free(link) => Some(link),
			_ => None,
		}
	}
}

/// This struct represents a collection of intrusive linked lists for each order.
struct FreeLists {
	heads: [Link; N_ORDERS],
}

impl FreeLists {
	/// Creates the empty free lists.
	fn new() -> Self {
		Self { heads: [Link::Nil; N_ORDERS] }
	}

	/// Replaces a given link for the specified order and returns the old one.
	fn replace(&mut self, order: Order, new: Link) -> Link {
		let prev = self[order];
		self[order] = new;
		prev
	}
}

impl Index<Order> for FreeLists {
	type Output = Link;
	fn index(&self, index: Order) -> &Link {
		&self.heads[index.0 as usize]
	}
}

impl IndexMut<Order> for FreeLists {
	fn index_mut(&mut self, index: Order) -> &mut Link {
		&mut self.heads[index.0 as usize]
	}
}

/// Memory allocation stats gathered during the lifetime of the allocator.
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
pub struct AllocationStats {
	/// The current number of bytes allocated.
	///
	/// This represents how many bytes are allocated *right now*.
	pub bytes_allocated: u32,

	/// The peak number of bytes ever allocated.
	///
	/// This is the maximum the `bytes_allocated` ever reached.
	pub bytes_allocated_peak: u32,

	/// The sum of every allocation ever made.
	///
	/// This increases every time a new allocation is made.
	pub bytes_allocated_sum: u128,

	/// The amount of address space (in bytes) used by the allocator.
	///
	/// This is calculated as the difference between the allocator's bumper
	/// and the heap base.
	///
	/// Currently the bumper's only ever incremented, so this is simultaneously
	/// the current value as well as the peak value.
	pub address_space_used: u32,
}

/// Convert the given `size` in bytes into the number of pages.
///
/// The returned number of pages is ensured to be big enough to hold memory with the given `size`.
///
/// Returns `None` if the number of pages does not fit into a `u32`.
fn pages_from_size(size: u64) -> Option<u32> {
	u32::try_from(size.div_ceil(PAGE_SIZE as u64)).ok()
}

/// An implementation of a freeing-bump allocator.
///
/// Refer to the module-level documentation for further details.
pub struct FreeingBumpHeapAllocator {
	original_heap_base: u32,
	bumper: u32,
	free_lists: FreeLists,
	poisoned: bool,
	last_observed_memory_size: u64,
	stats: AllocationStats,
}

impl Drop for FreeingBumpHeapAllocator {
	fn drop(&mut self) {
		log::debug!(target: LOG_TARGET, "allocator dropped: {:?}", self.stats)
	}
}

impl FreeingBumpHeapAllocator {
	/// Creates a new allocation heap which follows a freeing-bump strategy.
	///
	/// # Arguments
	///
	/// - `heap_base` - the offset from the beginning of the linear memory where the heap starts.
	pub fn new(heap_base: u32) -> Self {
		let aligned_heap_base = heap_base.div_ceil(ALIGNMENT) * ALIGNMENT;

		FreeingBumpHeapAllocator {
			original_heap_base: aligned_heap_base,
			bumper: aligned_heap_base,
			free_lists: FreeLists::new(),
			poisoned: false,
			last_observed_memory_size: 0,
			stats: AllocationStats::default(),
		}
	}

	/// Gets the requested number of bytes to allocate and returns a pointer.
	/// The maximum size which can be allocated at once is 32 MiB.
	/// There is no minimum size, but whatever size is passed into
	/// this function is rounded to the next power of two. If the requested
	/// size is below 8 bytes it will be rounded up to 8 bytes.
	///
	/// The identity or the type of the passed memory object does not matter. However, the size of
	/// memory cannot shrink compared to the memory passed in previous invocations.
	///
	/// NOTE: Once the allocator has returned an error all subsequent requests will return an error.
	///
	/// # Arguments
	///
	/// - `mem` - a slice representing the linear memory on which this allocator operates.
	/// - `size` - size in bytes of the allocation request
	pub fn allocate(
		&mut self,
		mem: &mut impl Memory,
		size: WordSize,
	) -> Result<Pointer<u8>, Error> {
		if self.poisoned {
			return Err(error("the allocator has been poisoned"))
		}

		let bomb = PoisonBomb { poisoned: &mut self.poisoned };

		Self::observe_memory_size(&mut self.last_observed_memory_size, mem)?;
		let order = Order::from_size(size)?;

		let header_ptr: u32 = match self.free_lists[order] {
			Link::Ptr(header_ptr) => {
				if (u64::from(header_ptr) + u64::from(order.size()) + u64::from(HEADER_SIZE)) >
					mem.size()
				{
					return Err(error("Invalid header pointer detected"))
				}

				// Remove this header from the free list.
				let next_free = Header::read_from(mem, header_ptr)?
					.into_free()
					.ok_or_else(|| error("free list points to an occupied header"))?;
				self.free_lists[order] = next_free;

				header_ptr
			},
			Link::Nil => {
				// Corresponding free list is empty. Allocate a new item.
				Self::bump(&mut self.bumper, order.size() + HEADER_SIZE, mem)?
			},
		};

		// Write the order in the occupied header.
		Header::Occupied(order).write_into(mem, header_ptr)?;

		self.stats.bytes_allocated += order.size() + HEADER_SIZE;
		self.stats.bytes_allocated_sum += u128::from(order.size() + HEADER_SIZE);
		self.stats.bytes_allocated_peak =
			max(self.stats.bytes_allocated_peak, self.stats.bytes_allocated);
		self.stats.address_space_used = self.bumper - self.original_heap_base;

		log::trace!(target: LOG_TARGET, "after allocation: {:?}", self.stats);

		bomb.disarm();
		Ok(Pointer::new(header_ptr + HEADER_SIZE))
	}

	/// Deallocates the space which was allocated for a pointer.
	///
	/// The identity or the type of the passed memory object does not matter. However, the size of
	/// memory cannot shrink compared to the memory passed in previous invocations.
	///
	/// NOTE: Once the allocator has returned an error all subsequent requests will return an error.
	///
	/// # Arguments
	///
	/// - `mem` - a slice representing the linear memory on which this allocator operates.
	/// - `ptr` - pointer to the allocated chunk
	pub fn deallocate(&mut self, mem: &mut impl Memory, ptr: Pointer<u8>) -> Result<(), Error> {
		if self.poisoned {
			return Err(error("the allocator has been poisoned"))
		}

		let bomb = PoisonBomb { poisoned: &mut self.poisoned };

		Self::observe_memory_size(&mut self.last_observed_memory_size, mem)?;

		let header_ptr = u32::from(ptr)
			.checked_sub(HEADER_SIZE)
			.ok_or_else(|| error("Invalid pointer for deallocation"))?;

		let order = Header::read_from(mem, header_ptr)?
			.into_occupied()
			.ok_or_else(|| error("the allocation points to an empty header"))?;

		// Update the just freed header and knit it back to the free list.
		let prev_head = self.free_lists.replace(order, Link::Ptr(header_ptr));
		Header::Free(prev_head).write_into(mem, header_ptr)?;

		self.stats.bytes_allocated = self
			.stats
			.bytes_allocated
			.checked_sub(order.size() + HEADER_SIZE)
			.ok_or_else(|| error("underflow of the currently allocated bytes count"))?;

		log::trace!(target: LOG_TARGET, "after deallocation: {:?}", self.stats);

		bomb.disarm();
		Ok(())
	}

	/// Returns the allocation stats for this allocator.
	pub fn stats(&self) -> AllocationStats {
		self.stats.clone()
	}

	/// Increases the `bumper` by `size`.
	///
	/// Returns the `bumper` from before the increase. Returns an `Error::AllocatorOutOfSpace` if
	/// the operation would exhaust the heap.
	fn bump(bumper: &mut u32, size: u32, memory: &mut impl Memory) -> Result<u32, Error> {
		let required_size = u64::from(*bumper) + u64::from(size);

		if required_size > memory.size() {
			let required_pages =
				pages_from_size(required_size).ok_or_else(|| Error::AllocatorOutOfSpace)?;

			let current_pages = memory.pages();
			let max_pages = memory.max_pages().unwrap_or(MAX_WASM_PAGES);
			debug_assert!(
				current_pages < required_pages,
				"current pages {current_pages} < required pages {required_pages}"
			);

			if current_pages >= max_pages {
				log::debug!(
					target: LOG_TARGET,
					"Wasm pages ({current_pages}) are already at the maximum.",
				);

				return Err(Error::AllocatorOutOfSpace)
			} else if required_pages > max_pages {
				log::debug!(
					target: LOG_TARGET,
					"Failed to grow memory from {current_pages} pages to at least {required_pages} \
					pages due to the maximum limit of {max_pages} pages",
				);
				return Err(Error::AllocatorOutOfSpace)
			}

			// Ideally we want to double our current number of pages,
			// as long as it's less than the absolute maximum we can have.
			let next_pages = min(current_pages * 2, max_pages);
			// ...but if even more pages are required then try to allocate that many.
			let next_pages = max(next_pages, required_pages);

			if memory.grow(next_pages - current_pages).is_err() {
				log::error!(
					target: LOG_TARGET,
					"Failed to grow memory from {current_pages} pages to {next_pages} pages",
				);

				return Err(Error::AllocatorOutOfSpace)
			}

			debug_assert_eq!(memory.pages(), next_pages, "Number of pages should have increased!");
		}

		let res = *bumper;
		*bumper += size;
		Ok(res)
	}

	fn observe_memory_size(
		last_observed_memory_size: &mut u64,
		mem: &mut impl Memory,
	) -> Result<(), Error> {
		if mem.size() < *last_observed_memory_size {
			return Err(Error::MemoryShrinked)
		}
		*last_observed_memory_size = mem.size();
		Ok(())
	}
}

/// A trait for abstraction of accesses to a wasm linear memory. Used to read or modify the
/// allocation prefixes.
///
/// A wasm linear memory behaves similarly to a vector. The address space doesn't have holes and is
/// accessible up to the reported size.
///
/// The linear memory can grow in size with the wasm page granularity (64KiB), but it cannot shrink.
trait MemoryExt: Memory {
	/// Read a u64 from the heap in LE form. Returns an error if any of the bytes read are out of
	/// bounds.
	fn read_le_u64(&self, ptr: u32) -> Result<u64, Error> {
		self.with_access(|memory| {
			let range =
				heap_range(ptr, 8, memory.len()).ok_or_else(|| error("read out of heap bounds"))?;
			let bytes = memory[range]
				.try_into()
				.expect("[u8] slice of length 8 must be convertible to [u8; 8]");
			Ok(u64::from_le_bytes(bytes))
		})
	}

	/// Write a u64 to the heap in LE form. Returns an error if any of the bytes written are out of
	/// bounds.
	fn write_le_u64(&mut self, ptr: u32, val: u64) -> Result<(), Error> {
		self.with_access_mut(|memory| {
			let range = heap_range(ptr, 8, memory.len())
				.ok_or_else(|| error("write out of heap bounds"))?;
			let bytes = val.to_le_bytes();
			memory[range].copy_from_slice(&bytes[..]);
			Ok(())
		})
	}

	/// Returns the full size of the memory in bytes.
	fn size(&self) -> u64 {
		debug_assert!(self.pages() <= MAX_WASM_PAGES);

		self.pages() as u64 * PAGE_SIZE as u64
	}
}

impl<T: Memory> MemoryExt for T {}

fn heap_range(offset: u32, length: u32, heap_len: usize) -> Option<Range<usize>> {
	let start = offset as usize;
	let end = offset.checked_add(length)? as usize;
	if end <= heap_len {
		Some(start..end)
	} else {
		None
	}
}

/// A guard that will raise the poisoned flag on drop unless disarmed.
struct PoisonBomb<'a> {
	poisoned: &'a mut bool,
}

impl<'a> PoisonBomb<'a> {
	fn disarm(self) {
		mem::forget(self)
	}
}

impl<'a> Drop for PoisonBomb<'a> {
	fn drop(&mut self) {
		*self.poisoned = true;
	}
}

#[cfg(test)]
mod tests {
	use super::*;

	/// Makes a pointer out of the given address.
	fn to_pointer(address: u32) -> Pointer<u8> {
		Pointer::new(address)
	}

	#[derive(Debug)]
	struct MemoryInstance {
		data: Vec<u8>,
		max_wasm_pages: u32,
	}

	impl MemoryInstance {
		fn with_pages(pages: u32) -> Self {
			Self { data: vec![0; (pages * PAGE_SIZE) as usize], max_wasm_pages: MAX_WASM_PAGES }
		}

		fn set_max_wasm_pages(&mut self, max_pages: u32) {
			self.max_wasm_pages = max_pages;
		}
	}

	impl Memory for MemoryInstance {
		fn with_access<R>(&self, run: impl FnOnce(&[u8]) -> R) -> R {
			run(&self.data)
		}

		fn with_access_mut<R>(&mut self, run: impl FnOnce(&mut [u8]) -> R) -> R {
			run(&mut self.data)
		}

		fn pages(&self) -> u32 {
			pages_from_size(self.data.len() as u64).unwrap()
		}

		fn max_pages(&self) -> Option<u32> {
			Some(self.max_wasm_pages)
		}

		fn grow(&mut self, pages: u32) -> Result<(), ()> {
			if self.pages() + pages > self.max_wasm_pages {
				Err(())
			} else {
				self.data.resize(((self.pages() + pages) * PAGE_SIZE) as usize, 0);
				Ok(())
			}
		}
	}

	#[test]
	fn test_pages_from_size() {
		assert_eq!(pages_from_size(0).unwrap(), 0);
		assert_eq!(pages_from_size(1).unwrap(), 1);
		assert_eq!(pages_from_size(65536).unwrap(), 1);
		assert_eq!(pages_from_size(65536 + 1).unwrap(), 2);
		assert_eq!(pages_from_size(2 * 65536).unwrap(), 2);
		assert_eq!(pages_from_size(2 * 65536 + 1).unwrap(), 3);
	}

	#[test]
	fn should_allocate_properly() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		// when
		let ptr = heap.allocate(&mut mem, 1).unwrap();

		// then
		// returned pointer must start right after `HEADER_SIZE`
		assert_eq!(ptr, to_pointer(HEADER_SIZE));
	}

	#[test]
	fn should_always_align_pointers_to_multiples_of_8() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(13);

		// when
		let ptr = heap.allocate(&mut mem, 1).unwrap();

		// then
		// the pointer must start at the next multiple of 8 from 13
		// + the prefix of 8 bytes.
		assert_eq!(ptr, to_pointer(24));
	}

	#[test]
	fn should_increment_pointers_properly() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		// when
		let ptr1 = heap.allocate(&mut mem, 1).unwrap();
		let ptr2 = heap.allocate(&mut mem, 9).unwrap();
		let ptr3 = heap.allocate(&mut mem, 1).unwrap();

		// then
		// a prefix of 8 bytes is prepended to each pointer
		assert_eq!(ptr1, to_pointer(HEADER_SIZE));

		// the prefix of 8 bytes + the content of ptr1 padded to the lowest possible
		// item size of 8 bytes + the prefix of ptr1
		assert_eq!(ptr2, to_pointer(24));

		// ptr2 + its content of 16 bytes + the prefix of 8 bytes
		assert_eq!(ptr3, to_pointer(24 + 16 + HEADER_SIZE));
	}

	#[test]
	fn should_free_properly() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);
		let ptr1 = heap.allocate(&mut mem, 1).unwrap();
		// the prefix of 8 bytes is prepended to the pointer
		assert_eq!(ptr1, to_pointer(HEADER_SIZE));

		let ptr2 = heap.allocate(&mut mem, 1).unwrap();
		// the prefix of 8 bytes + the content of ptr1 is prepended to the pointer
		assert_eq!(ptr2, to_pointer(24));

		// when
		heap.deallocate(&mut mem, ptr2).unwrap();

		// then
		// the heads table should contain a pointer to the
		// prefix of ptr2 in the leftmost entry
		assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr2) - HEADER_SIZE));
	}

	#[test]
	fn should_deallocate_and_reallocate_properly() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let padded_offset = 16;
		let mut heap = FreeingBumpHeapAllocator::new(13);

		let ptr1 = heap.allocate(&mut mem, 1).unwrap();
		// the prefix of 8 bytes is prepended to the pointer
		assert_eq!(ptr1, to_pointer(padded_offset + HEADER_SIZE));

		let ptr2 = heap.allocate(&mut mem, 9).unwrap();
		// the padded_offset + the previously allocated ptr (8 bytes prefix +
		// 8 bytes content) + the prefix of 8 bytes which is prepended to the
		// current pointer
		assert_eq!(ptr2, to_pointer(padded_offset + 16 + HEADER_SIZE));

		// when
		heap.deallocate(&mut mem, ptr2).unwrap();
		let ptr3 = heap.allocate(&mut mem, 9).unwrap();

		// then
		// should have re-allocated
		assert_eq!(ptr3, to_pointer(padded_offset + 16 + HEADER_SIZE));
		assert_eq!(heap.free_lists.heads, [Link::Nil; N_ORDERS]);
	}

	#[test]
	fn should_build_linked_list_of_free_areas_properly() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		let ptr1 = heap.allocate(&mut mem, 8).unwrap();
		let ptr2 = heap.allocate(&mut mem, 8).unwrap();
		let ptr3 = heap.allocate(&mut mem, 8).unwrap();

		// when
		heap.deallocate(&mut mem, ptr1).unwrap();
		heap.deallocate(&mut mem, ptr2).unwrap();
		heap.deallocate(&mut mem, ptr3).unwrap();

		// then
		assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr3) - HEADER_SIZE));

		let ptr4 = heap.allocate(&mut mem, 8).unwrap();
		assert_eq!(ptr4, ptr3);

		assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr2) - HEADER_SIZE));
	}

	#[test]
	fn should_not_allocate_if_too_large() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		mem.set_max_wasm_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(13);

		// when
		let ptr = heap.allocate(&mut mem, PAGE_SIZE - 13);

		// then
		assert_eq!(Error::AllocatorOutOfSpace, ptr.unwrap_err());
	}

	#[test]
	fn should_not_allocate_if_full() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		mem.set_max_wasm_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);
		let ptr1 = heap.allocate(&mut mem, (PAGE_SIZE / 2) - HEADER_SIZE).unwrap();
		assert_eq!(ptr1, to_pointer(HEADER_SIZE));

		// when
		let ptr2 = heap.allocate(&mut mem, PAGE_SIZE / 2);

		// then
		// there is no room for another half page incl. its 8 byte prefix
		match ptr2.unwrap_err() {
			Error::AllocatorOutOfSpace => {},
			e => panic!("Expected allocator out of space error, got: {:?}", e),
		}
	}

	#[test]
	fn should_allocate_max_possible_allocation_size() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		// when
		let ptr = heap.allocate(&mut mem, MAX_POSSIBLE_ALLOCATION).unwrap();

		// then
		assert_eq!(ptr, to_pointer(HEADER_SIZE));
	}

	#[test]
	fn should_not_allocate_if_requested_size_too_large() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		// when
		let ptr = heap.allocate(&mut mem, MAX_POSSIBLE_ALLOCATION + 1);

		// then
		assert_eq!(Error::RequestedAllocationTooLarge, ptr.unwrap_err());
	}

	#[test]
	fn should_return_error_when_bumper_greater_than_heap_size() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		mem.set_max_wasm_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		let mut ptrs = Vec::new();
		for _ in 0..(PAGE_SIZE as usize / 40) {
			ptrs.push(heap.allocate(&mut mem, 32).expect("Allocate 32 byte"));
		}

		assert_eq!(heap.stats.bytes_allocated, PAGE_SIZE - 16);
		assert_eq!(heap.bumper, PAGE_SIZE - 16);

		ptrs.into_iter()
			.for_each(|ptr| heap.deallocate(&mut mem, ptr).expect("Deallocate 32 byte"));

		assert_eq!(heap.stats.bytes_allocated, 0);
		assert_eq!(heap.stats.bytes_allocated_peak, PAGE_SIZE - 16);
		assert_eq!(heap.bumper, PAGE_SIZE - 16);

		// Allocate another 8 byte to use the full heap.
		heap.allocate(&mut mem, 8).expect("Allocate 8 byte");

		// when
		// the `bumper` value is equal to `size` here and any
		// further allocation which would increment the bumper must fail.
		// we try to allocate 8 bytes here, which will increment the
		// bumper since no 8 byte item has been freed before.
		assert_eq!(heap.bumper as u64, mem.size());
		let ptr = heap.allocate(&mut mem, 8);

		// then
		assert_eq!(Error::AllocatorOutOfSpace, ptr.unwrap_err());
	}

	#[test]
	fn should_include_prefixes_in_total_heap_size() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(1);

		// when
		// an item size of 16 must be used then
		heap.allocate(&mut mem, 9).unwrap();

		// then
		assert_eq!(heap.stats.bytes_allocated, HEADER_SIZE + 16);
	}

	#[test]
	fn should_calculate_total_heap_size_to_zero() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(13);

		// when
		let ptr = heap.allocate(&mut mem, 42).unwrap();
		assert_eq!(ptr, to_pointer(16 + HEADER_SIZE));
		heap.deallocate(&mut mem, ptr).unwrap();

		// then
		assert_eq!(heap.stats.bytes_allocated, 0);
	}

	#[test]
	fn should_calculate_total_size_of_zero() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(19);

		// when
		for _ in 1..10 {
			let ptr = heap.allocate(&mut mem, 42).unwrap();
			heap.deallocate(&mut mem, ptr).unwrap();
		}

		// then
		assert_eq!(heap.stats.bytes_allocated, 0);
	}

	#[test]
	fn should_read_and_write_u64_correctly() {
		// given
		let mut mem = MemoryInstance::with_pages(1);

		// when
		mem.write_le_u64(40, 4480113).unwrap();

		// then
		let value = MemoryExt::read_le_u64(&mut mem, 40).unwrap();
		assert_eq!(value, 4480113);
	}

	#[test]
	fn should_get_item_size_from_order() {
		// given
		let raw_order = 0;

		// when
		let item_size = Order::from_raw(raw_order).unwrap().size();

		// then
		assert_eq!(item_size, 8);
	}

	#[test]
	fn should_get_max_item_size_from_index() {
		// given
		let raw_order = 22;

		// when
		let item_size = Order::from_raw(raw_order).unwrap().size();

		// then
		assert_eq!(item_size as u32, MAX_POSSIBLE_ALLOCATION);
	}

	#[test]
	fn deallocate_needs_to_maintain_linked_list() {
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		// Allocate and free some pointers
		let ptrs = (0..4).map(|_| heap.allocate(&mut mem, 8).unwrap()).collect::<Vec<_>>();
		ptrs.iter().rev().for_each(|ptr| heap.deallocate(&mut mem, *ptr).unwrap());

		// Second time we should be able to allocate all of them again and get the same pointers!
		let new_ptrs = (0..4).map(|_| heap.allocate(&mut mem, 8).unwrap()).collect::<Vec<_>>();
		assert_eq!(ptrs, new_ptrs);
	}

	#[test]
	fn header_read_write() {
		let roundtrip = |header: Header| {
			let mut memory = MemoryInstance::with_pages(1);
			header.write_into(&mut memory, 0).unwrap();

			let read_header = Header::read_from(&memory, 0).unwrap();
			assert_eq!(header, read_header);
		};

		roundtrip(Header::Occupied(Order(0)));
		roundtrip(Header::Occupied(Order(1)));
		roundtrip(Header::Free(Link::Nil));
		roundtrip(Header::Free(Link::Ptr(0)));
		roundtrip(Header::Free(Link::Ptr(4)));
	}

	#[test]
	fn poison_oom() {
		// given
		let mut mem = MemoryInstance::with_pages(1);
		mem.set_max_wasm_pages(1);

		let mut heap = FreeingBumpHeapAllocator::new(0);

		// when
		let alloc_ptr = heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap();
		assert_eq!(Error::AllocatorOutOfSpace, heap.allocate(&mut mem, PAGE_SIZE).unwrap_err());

		// then
		assert!(heap.poisoned);
		assert!(heap.deallocate(&mut mem, alloc_ptr).is_err());
	}

	#[test]
	fn test_n_orders() {
		// Test that N_ORDERS is consistent with min and max possible allocation.
		assert_eq!(
			MIN_POSSIBLE_ALLOCATION * 2u32.pow(N_ORDERS as u32 - 1),
			MAX_POSSIBLE_ALLOCATION
		);
	}

	#[test]
	fn accepts_growing_memory() {
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap();
		heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap();

		mem.grow(1).unwrap();

		heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap();
	}

	#[test]
	fn doesnt_accept_shrinking_memory() {
		let mut mem = MemoryInstance::with_pages(2);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap();

		mem.data.truncate(PAGE_SIZE as usize);

		match heap.allocate(&mut mem, PAGE_SIZE / 2).unwrap_err() {
			Error::MemoryShrinked => (),
			_ => panic!(),
		}
	}

	#[test]
	fn should_grow_memory_when_running_out_of_memory() {
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		assert_eq!(1, mem.pages());

		heap.allocate(&mut mem, PAGE_SIZE * 2).unwrap();

		assert_eq!(3, mem.pages());
	}

	#[test]
	fn modifying_the_header_leads_to_an_error() {
		let mut mem = MemoryInstance::with_pages(1);
		let mut heap = FreeingBumpHeapAllocator::new(0);

		let ptr = heap.allocate(&mut mem, 5).unwrap();

		heap.deallocate(&mut mem, ptr).unwrap();

		Header::Free(Link::Ptr(u32::MAX - 1))
			.write_into(&mut mem, u32::from(ptr) - HEADER_SIZE)
			.unwrap();

		heap.allocate(&mut mem, 5).unwrap();
		assert!(heap
			.allocate(&mut mem, 5)
			.unwrap_err()
			.to_string()
			.contains("Invalid header pointer"));
	}
}