wasmtime/runtime/
store.rs

1//! Wasmtime's "store" type
2//!
3//! This module, and its submodules, contain the `Store` type and various types
4//! used to interact with it. At first glance this is a pretty confusing module
5//! where you need to know the difference between:
6//!
7//! * `Store<T>`
8//! * `StoreContext<T>`
9//! * `StoreContextMut<T>`
10//! * `AsContext`
11//! * `AsContextMut`
12//! * `StoreInner<T>`
13//! * `StoreOpaque`
14//! * `StoreData`
15//!
16//! There's... quite a lot going on here, and it's easy to be confused. This
17//! comment is ideally going to serve the purpose of clarifying what all these
18//! types are for and why they're motivated.
19//!
20//! First it's important to know what's "internal" and what's "external". Almost
21//! everything above is defined as `pub`, but only some of the items are
//! reexported to the outside world to be usable outside this crate. Otherwise all
23//! items are `pub` within this `store` module, and the `store` module is
24//! private to the `wasmtime` crate. Notably `Store<T>`, `StoreContext<T>`,
25//! `StoreContextMut<T>`, `AsContext`, and `AsContextMut` are all public
26//! interfaces to the `wasmtime` crate. You can think of these as:
27//!
28//! * `Store<T>` - an owned reference to a store, the "root of everything"
29//! * `StoreContext<T>` - basically `&StoreInner<T>`
30//! * `StoreContextMut<T>` - more-or-less `&mut StoreInner<T>` with caveats.
31//!   Explained later.
32//! * `AsContext` - similar to `AsRef`, but produces `StoreContext<T>`
33//! * `AsContextMut` - similar to `AsMut`, but produces `StoreContextMut<T>`
34//!
35//! Next comes the internal structure of the `Store<T>` itself. This looks like:
36//!
37//! * `Store<T>` - this type is just a pointer large. It's primarily just
38//!   intended to be consumed by the outside world. Note that the "just a
39//!   pointer large" is a load-bearing implementation detail in Wasmtime. This
40//!   enables it to store a pointer to its own trait object which doesn't need
41//!   to change over time.
42//!
43//! * `StoreInner<T>` - the first layer of the contents of a `Store<T>`, what's
44//!   stored inside the `Box`. This is the general Rust pattern when one struct
45//!   is a layer over another. The surprising part, though, is that this is
46//!   further subdivided. This structure only contains things which actually
47//!   need `T` itself. The downside of this structure is that it's always
48//!   generic and means that code is monomorphized into consumer crates. We
49//!   strive to have things be as monomorphic as possible in `wasmtime` so this
50//!   type is not heavily used.
51//!
52//! * `StoreOpaque` - this is the primary contents of the `StoreInner<T>` type.
//!   Stored inline in the outer type, the "opaque" here means that it's a
54//!   "store" but it doesn't have access to the `T`. This is the primary
55//!   "internal" reference that Wasmtime uses since `T` is rarely needed by the
56//!   internals of Wasmtime.
57//!
58//! * `StoreData` - this is a final helper struct stored within `StoreOpaque`.
59//!   All references of Wasm items into a `Store` are actually indices into a
60//!   table in this structure, and the `StoreData` being separate makes it a bit
61//!   easier to manage/define/work with. There's no real fundamental reason this
62//!   is split out, although sometimes it's useful to have separate borrows into
63//!   these tables than the `StoreOpaque`.
64//!
65//! A major caveat with these representations is that the internal `&mut
66//! StoreInner<T>` is never handed out publicly to consumers of this crate, only
67//! through a wrapper of `StoreContextMut<'_, T>`. The reason for this is that
68//! we want to provide mutable, but not destructive, access to the contents of a
69//! `Store`. For example if a `StoreInner<T>` were replaced with some other
70//! `StoreInner<T>` then that would drop live instances, possibly those
71//! currently executing beneath the current stack frame. This would not be a
72//! safe operation.
73//!
74//! This means, though, that the `wasmtime` crate, which liberally uses `&mut
75//! StoreOpaque` internally, has to be careful to never actually destroy the
76//! contents of `StoreOpaque`. This is an invariant that we, as the authors of
77//! `wasmtime`, must uphold for the public interface to be safe.
78
79use crate::RootSet;
80#[cfg(feature = "component-model-async")]
81use crate::component::ComponentStoreData;
82#[cfg(feature = "async")]
83use crate::fiber;
84use crate::module::RegisteredModuleId;
85use crate::prelude::*;
86#[cfg(feature = "gc")]
87use crate::runtime::vm::GcRootsList;
88#[cfg(feature = "stack-switching")]
89use crate::runtime::vm::VMContRef;
90use crate::runtime::vm::mpk::ProtectionKey;
91use crate::runtime::vm::{
92    self, GcStore, Imports, InstanceAllocationRequest, InstanceAllocator, InstanceHandle,
93    Interpreter, InterpreterRef, ModuleRuntimeInfo, OnDemandInstanceAllocator, SendSyncPtr,
94    SignalHandler, StoreBox, StorePtr, Unwind, VMContext, VMFuncRef, VMGcRef, VMStoreContext,
95};
96use crate::trampoline::VMHostGlobalContext;
97use crate::{Engine, Module, Trap, Val, ValRaw, module::ModuleRegistry};
98use crate::{Global, Instance, Memory, Table, Uninhabited};
99use alloc::sync::Arc;
100use core::fmt;
101use core::marker;
102use core::mem::{self, ManuallyDrop};
103use core::num::NonZeroU64;
104use core::ops::{Deref, DerefMut};
105use core::pin::Pin;
106use core::ptr::NonNull;
107use wasmtime_environ::{DefinedGlobalIndex, DefinedTableIndex, EntityRef, PrimaryMap, TripleExt};
108
109mod context;
110pub use self::context::*;
111mod data;
112pub use self::data::*;
113mod func_refs;
114use func_refs::FuncRefs;
115#[cfg(feature = "async")]
116mod token;
117#[cfg(feature = "async")]
118pub(crate) use token::StoreToken;
119#[cfg(feature = "async")]
120mod async_;
121#[cfg(all(feature = "async", feature = "call-hook"))]
122pub use self::async_::CallHookHandler;
123#[cfg(feature = "gc")]
124mod gc;
125
126/// A [`Store`] is a collection of WebAssembly instances and host-defined state.
127///
128/// All WebAssembly instances and items will be attached to and refer to a
129/// [`Store`]. For example instances, functions, globals, and tables are all
130/// attached to a [`Store`]. Instances are created by instantiating a
131/// [`Module`](crate::Module) within a [`Store`].
132///
133/// A [`Store`] is intended to be a short-lived object in a program. No form
134/// of GC is implemented at this time so once an instance is created within a
135/// [`Store`] it will not be deallocated until the [`Store`] itself is dropped.
136/// This makes [`Store`] unsuitable for creating an unbounded number of
137/// instances in it because [`Store`] will never release this memory. It's
138/// recommended to have a [`Store`] correspond roughly to the lifetime of a
139/// "main instance" that an embedding is interested in executing.
140///
141/// ## Type parameter `T`
142///
143/// Each [`Store`] has a type parameter `T` associated with it. This `T`
144/// represents state defined by the host. This state will be accessible through
145/// the [`Caller`](crate::Caller) type that host-defined functions get access
146/// to. This `T` is suitable for storing `Store`-specific information which
147/// imported functions may want access to.
148///
149/// The data `T` can be accessed through methods like [`Store::data`] and
150/// [`Store::data_mut`].
151///
152/// ## Stores, contexts, oh my
153///
154/// Most methods in Wasmtime take something of the form
155/// [`AsContext`](crate::AsContext) or [`AsContextMut`](crate::AsContextMut) as
156/// the first argument. These two traits allow ergonomically passing in the
157/// context you currently have to any method. The primary two sources of
158/// contexts are:
159///
160/// * `Store<T>`
161/// * `Caller<'_, T>`
162///
163/// corresponding to what you create and what you have access to in a host
164/// function. You can also explicitly acquire a [`StoreContext`] or
165/// [`StoreContextMut`] and pass that around as well.
166///
167/// Note that all methods on [`Store`] are mirrored onto [`StoreContext`],
168/// [`StoreContextMut`], and [`Caller`](crate::Caller). This way no matter what
169/// form of context you have you can call various methods, create objects, etc.
170///
171/// ## Stores and `Default`
172///
173/// You can create a store with default configuration settings using
174/// `Store::default()`. This will create a brand new [`Engine`] with default
175/// configuration (see [`Config`](crate::Config) for more information).
176///
177/// ## Cross-store usage of items
178///
179/// In `wasmtime` wasm items such as [`Global`] and [`Memory`] "belong" to a
180/// [`Store`]. The store they belong to is the one they were created with
181/// (passed in as a parameter) or instantiated with. This store is the only
182/// store that can be used to interact with wasm items after they're created.
183///
184/// The `wasmtime` crate will panic if the [`Store`] argument passed in to these
185/// operations is incorrect. In other words it's considered a programmer error
186/// rather than a recoverable error for the wrong [`Store`] to be used when
187/// calling APIs.
pub struct Store<T: 'static> {
    // The single `Box` holds both the host-provided `T` and all of Wasmtime's
    // internal store state, which keeps `Store<T>` itself just one pointer in
    // size (see the module docs above).
    //
    // for comments about `ManuallyDrop`, see `Store::into_data`
    inner: ManuallyDrop<Box<StoreInner<T>>>,
}
192
#[derive(Copy, Clone, Debug)]
/// Passed to the argument of [`Store::call_hook`] to indicate a state transition in
/// the WebAssembly VM.
///
/// Use [`CallHook::entering_host`] / [`CallHook::exiting_host`] to classify a
/// transition by which side of the host/wasm boundary is being entered.
pub enum CallHook {
    /// Indicates the VM is calling a WebAssembly function, from the host.
    CallingWasm,
    /// Indicates the VM is returning from a WebAssembly function, to the host.
    ReturningFromWasm,
    /// Indicates the VM is calling a host function, from WebAssembly.
    CallingHost,
    /// Indicates the VM is returning from a host function, to WebAssembly.
    ReturningFromHost,
}
206
207impl CallHook {
208    /// Indicates the VM is entering host code (exiting WebAssembly code)
209    pub fn entering_host(&self) -> bool {
210        match self {
211            CallHook::ReturningFromWasm | CallHook::CallingHost => true,
212            _ => false,
213        }
214    }
215    /// Indicates the VM is exiting host code (entering WebAssembly code)
216    pub fn exiting_host(&self) -> bool {
217        match self {
218            CallHook::ReturningFromHost | CallHook::CallingWasm => true,
219            _ => false,
220        }
221    }
222}
223
/// Internal contents of a `Store<T>` that live on the heap.
///
/// The members of this struct are those that need to be generic over `T`, the
/// store's internal type storage. Otherwise all things that don't rely on `T`
/// should go into `StoreOpaque`.
pub struct StoreInner<T: 'static> {
    /// Generic metadata about the store that doesn't need access to `T`.
    inner: StoreOpaque,

    /// Closure which projects the embedder's resource limiter out of `T`, if
    /// one was configured (see `Store::limiter`).
    limiter: Option<ResourceLimiterInner<T>>,
    /// Hook invoked on host<->wasm call transitions, if configured.
    call_hook: Option<CallHookInner<T>>,
    /// Callback invoked when the engine's epoch reaches this store's deadline;
    /// only available on targets with 64-bit atomics.
    #[cfg(target_has_atomic = "64")]
    epoch_deadline_behavior:
        Option<Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>>,
    // for comments about `ManuallyDrop`, see `Store::into_data`
    data: ManuallyDrop<T>,
}
241
/// Storage for a configured resource limiter: either a synchronous or (with
/// the `async` feature) an asynchronous limiter. Each variant holds a closure
/// projecting the limiter out of the store's `T` data.
enum ResourceLimiterInner<T> {
    Sync(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync>),
    #[cfg(feature = "async")]
    Async(Box<dyn FnMut(&mut T) -> &mut (dyn crate::ResourceLimiterAsync) + Send + Sync>),
}
247
/// Storage for a configured call hook, either synchronous or asynchronous.
enum CallHookInner<T: 'static> {
    /// A synchronous call hook invoked with the store context and transition.
    #[cfg(feature = "call-hook")]
    Sync(Box<dyn FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync>),
    /// An asynchronous call hook handler.
    #[cfg(all(feature = "async", feature = "call-hook"))]
    Async(Box<dyn CallHookHandler<T> + Send + Sync>),
    /// Keeps the `T` parameter "used" even when the features above are
    /// disabled; this variant can never be constructed since it contains
    /// `Uninhabited`.
    #[allow(dead_code)]
    ForceTypeParameterToBeUsed {
        uninhabited: Uninhabited,
        _marker: marker::PhantomData<T>,
    },
}
259
/// What to do after returning from a callback when the engine epoch reaches
/// the deadline for a Store during execution of a function using that store.
// More variants may be added in the future without a semver-breaking change.
#[non_exhaustive]
pub enum UpdateDeadline {
    /// Extend the deadline by the specified number of ticks.
    Continue(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    #[cfg(feature = "async")]
    Yield(u64),
    /// Extend the deadline by the specified number of ticks after yielding to
    /// the async executor loop. This can only be used with an async [`Store`]
    /// configured via [`Config::async_support`](crate::Config::async_support).
    ///
    /// The yield will be performed by the future provided; when using `tokio`
    /// it is recommended to provide [`tokio::task::yield_now`](https://docs.rs/tokio/latest/tokio/task/fn.yield_now.html)
    /// here.
    #[cfg(feature = "async")]
    YieldCustom(
        u64,
        ::core::pin::Pin<Box<dyn ::core::future::Future<Output = ()> + Send>>,
    ),
}
284
// Forward methods on `StoreOpaque` to also being on `StoreInner<T>`: a
// `&StoreInner<T>` (or `&mut`) coerces to `&StoreOpaque` via auto-deref so
// monomorphic internals can be reached without spelling out `.inner`.
impl<T> Deref for StoreInner<T> {
    type Target = StoreOpaque;
    fn deref(&self) -> &Self::Target {
        &self.inner
    }
}

impl<T> DerefMut for StoreInner<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.inner
    }
}
298
/// Monomorphic storage for a `Store<T>`.
///
/// This structure contains the bulk of the metadata about a `Store`. This is
/// used internally in Wasmtime when dependence on the `T` of `Store<T>` isn't
/// necessary, allowing code to be monomorphic and compiled into the `wasmtime`
/// crate itself.
pub struct StoreOpaque {
    // This `StoreOpaque` structure has references to itself. These aren't
    // immediately evident, however, so we need to tell the compiler that it
    // contains self-references. This notably suppresses `noalias` annotations
    // when this shows up in compiled code because types of this structure do
    // indeed alias itself. An example of this is `default_callee` holds a
    // `*mut dyn Store` to the address of this `StoreOpaque` itself, indeed
    // aliasing!
    //
    // It's somewhat unclear to me at this time if this is 100% sufficient to
    // get all the right codegen in all the right places. For example does
    // `Store` need to internally contain a `Pin<Box<StoreInner<T>>>`? Do the
    // contexts need to contain `Pin<&mut StoreInner<T>>`? I'm not familiar
    // enough with `Pin` to understand if it's appropriate here (we do, for
    // example want to allow movement in and out of `data: T`, just not movement
    // of most of the other members). It's also not clear if using `Pin` in a
    // few places buys us much other than a bunch of `unsafe` that we already
    // sort of hand-wave away.
    //
    // In any case this seems like a good mid-ground for now where we're at
    // least telling the compiler something about all the aliasing happening
    // within a `Store`.
    _marker: marker::PhantomPinned,

    /// The engine this store was created from (see `Store::new`).
    engine: Engine,
    /// Low-level VM state for this store; see `VMStoreContext`.
    vm_store_context: VMStoreContext,

    // Contains all continuations ever allocated throughout the lifetime of this
    // store.
    #[cfg(feature = "stack-switching")]
    continuations: Vec<Box<VMContRef>>,

    /// All instances allocated within this store, keyed by `InstanceId`.
    instances: PrimaryMap<InstanceId, StoreInstance>,

    #[cfg(feature = "component-model")]
    num_component_instances: usize,
    /// A custom signal handler registered for this store, if any.
    signal_handler: Option<SignalHandler>,
    modules: ModuleRegistry,
    func_refs: FuncRefs,
    host_globals: PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>>,
    // GC-related fields.
    //
    // `gc_store` is `None` until a GC heap is allocated for this store (it
    // starts out `None` in `Store::new`).
    gc_store: Option<GcStore>,
    gc_roots: RootSet,
    #[cfg(feature = "gc")]
    gc_roots_list: GcRootsList,
    // Types for which the embedder has created an allocator for.
    #[cfg(feature = "gc")]
    gc_host_alloc_types: crate::hash_set::HashSet<crate::type_registry::RegisteredType>,

    // Numbers of resources instantiated in this store, and their limits
    instance_count: usize,
    instance_limit: usize,
    memory_count: usize,
    memory_limit: usize,
    table_count: usize,
    table_limit: usize,
    #[cfg(feature = "async")]
    async_state: fiber::AsyncState,

    // If fuel_yield_interval is enabled, then we store the remaining fuel (that isn't in
    // runtime_limits) here. The total amount of fuel is the runtime limits and reserve added
    // together. Then when we run out of gas, we inject the yield amount from the reserve
    // until the reserve is empty.
    fuel_reserve: u64,
    fuel_yield_interval: Option<NonZeroU64>,
    /// Indexed data within this `Store`, used to store information about
    /// globals, functions, memories, etc.
    store_data: StoreData,
    /// Self-referential pointer back to the `StoreInner<T>` containing this
    /// `StoreOpaque`, initialized in `Store::new` once the store is boxed
    /// (see the `_marker` comment above).
    traitobj: StorePtr,
    /// The vmctx of the "default callee" dummy instance allocated in
    /// `Store::new`, used so `callee: *mut VMContext` is never null.
    default_caller_vmctx: SendSyncPtr<VMContext>,

    /// Used to optimized wasm->host calls when the host function is defined with
    /// `Func::new` to avoid allocating a new vector each time a function is
    /// called.
    hostcall_val_storage: Vec<Val>,
    /// Same as `hostcall_val_storage`, but for the direction of the host
    /// calling wasm.
    wasm_val_raw_storage: Vec<ValRaw>,

    /// Keep track of what protection key is being used during allocation so
    /// that the right memory pages can be enabled when entering WebAssembly
    /// guest code.
    pkey: Option<ProtectionKey>,

    /// Runtime state for components used in the handling of resources, borrow,
    /// and calls. These also interact with the `ResourceAny` type and its
    /// internal representation.
    #[cfg(feature = "component-model")]
    component_host_table: vm::component::ResourceTable,
    #[cfg(feature = "component-model")]
    component_calls: vm::component::CallContexts,
    #[cfg(feature = "component-model")]
    host_resource_data: crate::component::HostResourceData,

    /// State related to the executor of wasm code.
    ///
    /// For example if Pulley is enabled and configured then this will store a
    /// Pulley interpreter.
    executor: Executor,
}
405
/// Executor state within `StoreOpaque`.
///
/// Effectively stores Pulley interpreter state and handles conditional support
/// for Cranelift at compile time.
pub(crate) enum Executor {
    /// Execute wasm with the Pulley interpreter.
    Interpreter(Interpreter),
    /// Execute natively-compiled wasm on the host; only available when a host
    /// compiler backend is compiled in.
    #[cfg(has_host_compiler_backend)]
    Native,
}
415
impl Executor {
    /// Selects the executor for `engine`: the Pulley interpreter when the
    /// engine targets Pulley (which is always the case when no host compiler
    /// backend is available), and native execution otherwise.
    pub(crate) fn new(engine: &Engine) -> Self {
        // With a host compiler backend available, choose at runtime based on
        // the engine's configured target.
        #[cfg(has_host_compiler_backend)]
        if cfg!(feature = "pulley") && engine.target().is_pulley() {
            Executor::Interpreter(Interpreter::new(engine))
        } else {
            Executor::Native
        }
        // Without a host backend the interpreter is the only option, and the
        // target must be Pulley.
        #[cfg(not(has_host_compiler_backend))]
        {
            debug_assert!(engine.target().is_pulley());
            Executor::Interpreter(Interpreter::new(engine))
        }
    }
}
431
/// A borrowed reference to `Executor` above.
pub(crate) enum ExecutorRef<'a> {
    /// Borrowed Pulley interpreter state.
    Interpreter(InterpreterRef<'a>),
    /// Native execution; no borrowed state is needed.
    #[cfg(has_host_compiler_backend)]
    Native,
}
438
/// An RAII type to automatically mark a region of code as unsafe for GC.
#[doc(hidden)]
pub struct AutoAssertNoGc<'a> {
    store: &'a mut StoreOpaque,
    /// Whether a no-GC scope was actually entered on the store's GC heap and
    /// must therefore be exited on drop.
    entered: bool,
}
445
446impl<'a> AutoAssertNoGc<'a> {
447    #[inline]
448    pub fn new(store: &'a mut StoreOpaque) -> Self {
449        let entered = if !cfg!(feature = "gc") {
450            false
451        } else if let Some(gc_store) = store.gc_store.as_mut() {
452            gc_store.gc_heap.enter_no_gc_scope();
453            true
454        } else {
455            false
456        };
457
458        AutoAssertNoGc { store, entered }
459    }
460
461    /// Creates an `AutoAssertNoGc` value which is forcibly "not entered" and
462    /// disables checks for no GC happening for the duration of this value.
463    ///
464    /// This is used when it is statically otherwise known that a GC doesn't
465    /// happen for the various types involved.
466    ///
467    /// # Unsafety
468    ///
469    /// This method is `unsafe` as it does not provide the same safety
470    /// guarantees as `AutoAssertNoGc::new`. It must be guaranteed by the
471    /// caller that a GC doesn't happen.
472    #[inline]
473    pub unsafe fn disabled(store: &'a mut StoreOpaque) -> Self {
474        if cfg!(debug_assertions) {
475            AutoAssertNoGc::new(store)
476        } else {
477            AutoAssertNoGc {
478                store,
479                entered: false,
480            }
481        }
482    }
483}
484
// Allow an `AutoAssertNoGc` to be used anywhere a `&StoreOpaque` (or
// `&mut StoreOpaque`) is expected via auto-deref.
impl core::ops::Deref for AutoAssertNoGc<'_> {
    type Target = StoreOpaque;

    #[inline]
    fn deref(&self) -> &Self::Target {
        &*self.store
    }
}

impl core::ops::DerefMut for AutoAssertNoGc<'_> {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut *self.store
    }
}
500
impl Drop for AutoAssertNoGc<'_> {
    #[inline]
    fn drop(&mut self) {
        // Only exit the no-GC scope if `new` actually entered one.
        if self.entered {
            self.store.unwrap_gc_store_mut().gc_heap.exit_no_gc_scope();
        }
    }
}
509
/// Used to associate instances with the store.
///
/// This is needed to track if the instance was allocated explicitly with the on-demand
/// instance allocator.
struct StoreInstance {
    /// The handle to the allocated instance itself.
    handle: InstanceHandle,
    /// Whether this is a real instance or an internal dummy; see
    /// `StoreInstanceKind`.
    kind: StoreInstanceKind,
}
518
/// Classifies how a `StoreInstance` was created, which determines how it is
/// eventually deallocated.
enum StoreInstanceKind {
    /// An actual, non-dummy instance.
    Real {
        /// The id of this instance's module inside our owning store's
        /// `ModuleRegistry`.
        module_id: RegisteredModuleId,
    },

    /// This is a dummy instance that is just an implementation detail for
    /// something else. For example, host-created memories internally create a
    /// dummy instance.
    ///
    /// Regardless of the configured instance allocator for the engine, dummy
    /// instances always use the on-demand allocator to deallocate the instance.
    Dummy,
}
535
536impl<T> Store<T> {
    /// Creates a new [`Store`] to be associated with the given [`Engine`] and
    /// `data` provided.
    ///
    /// The created [`Store`] will place no additional limits on the size of
    /// linear memories or tables at runtime. Linear memories and tables will
    /// be allowed to grow to any upper limit specified in their definitions.
    /// The store will limit the number of instances, linear memories, and
    /// tables created to 10,000. This can be overridden with the
    /// [`Store::limiter`] configuration method.
    pub fn new(engine: &Engine, data: T) -> Self {
        let store_data = StoreData::new();
        log::trace!("creating new store {:?}", store_data.id());

        // Reserve a protection key from the engine's allocator, if the
        // platform/configuration provides one.
        let pkey = engine.allocator().next_available_pkey();

        let inner = StoreOpaque {
            _marker: marker::PhantomPinned,
            engine: engine.clone(),
            vm_store_context: Default::default(),
            #[cfg(feature = "stack-switching")]
            continuations: Vec::new(),
            instances: PrimaryMap::new(),
            #[cfg(feature = "component-model")]
            num_component_instances: 0,
            signal_handler: None,
            gc_store: None,
            gc_roots: RootSet::default(),
            #[cfg(feature = "gc")]
            gc_roots_list: GcRootsList::default(),
            #[cfg(feature = "gc")]
            gc_host_alloc_types: Default::default(),
            modules: ModuleRegistry::default(),
            func_refs: FuncRefs::default(),
            host_globals: PrimaryMap::new(),
            instance_count: 0,
            instance_limit: crate::DEFAULT_INSTANCE_LIMIT,
            memory_count: 0,
            memory_limit: crate::DEFAULT_MEMORY_LIMIT,
            table_count: 0,
            table_limit: crate::DEFAULT_TABLE_LIMIT,
            #[cfg(feature = "async")]
            async_state: Default::default(),
            fuel_reserve: 0,
            fuel_yield_interval: None,
            store_data,
            // Placeholder values; both are filled in below once the store has
            // been boxed and the default callee has been allocated.
            traitobj: StorePtr::empty(),
            default_caller_vmctx: SendSyncPtr::new(NonNull::dangling()),
            hostcall_val_storage: Vec::new(),
            wasm_val_raw_storage: Vec::new(),
            pkey,
            #[cfg(feature = "component-model")]
            component_host_table: Default::default(),
            #[cfg(feature = "component-model")]
            component_calls: Default::default(),
            #[cfg(feature = "component-model")]
            host_resource_data: Default::default(),
            executor: Executor::new(engine),
        };
        let mut inner = Box::new(StoreInner {
            inner,
            limiter: None,
            call_hook: None,
            #[cfg(target_has_atomic = "64")]
            epoch_deadline_behavior: None,
            data: ManuallyDrop::new(data),
        });

        // Now that the `StoreInner` lives at a stable heap address, record the
        // self-referential pointer described in the `StoreOpaque` docs.
        inner.traitobj = StorePtr::new(NonNull::from(&mut *inner));

        // Wasmtime uses the callee argument to host functions to learn about
        // the original pointer to the `Store` itself, allowing it to
        // reconstruct a `StoreContextMut<T>`. When we initially call a `Func`,
        // however, there's no "callee" to provide. To fix this we allocate a
        // single "default callee" for the entire `Store`. This is then used as
        // part of `Func::call` to guarantee that the `callee: *mut VMContext`
        // is never null.
        let module = Arc::new(wasmtime_environ::Module::default());
        let shim = ModuleRuntimeInfo::bare(module);
        let allocator = OnDemandInstanceAllocator::default();

        allocator
            .validate_module(shim.env_module(), shim.offsets())
            .unwrap();

        // Allocate the dummy "default callee" instance described above and
        // record its vmctx.
        unsafe {
            let id = inner
                .allocate_instance(
                    AllocateInstanceKind::Dummy {
                        allocator: &allocator,
                    },
                    &shim,
                    Default::default(),
                )
                .expect("failed to allocate default callee");
            let default_caller_vmctx = inner.instance(id).vmctx();
            inner.default_caller_vmctx = default_caller_vmctx.into();
        }

        Self {
            inner: ManuallyDrop::new(inner),
        }
    }
639
    /// Access the underlying data owned by this `Store`.
    ///
    /// See also [`Store::data_mut`] for mutable access.
    #[inline]
    pub fn data(&self) -> &T {
        self.inner.data()
    }
645
    /// Access the underlying data owned by this `Store`.
    ///
    /// See also [`Store::data`] for shared access.
    #[inline]
    pub fn data_mut(&mut self) -> &mut T {
        self.inner.data_mut()
    }
651
    /// Performs cleanup which must happen before this store's contents are
    /// torn down, e.g. from [`Store::into_data`].
    fn run_manual_drop_routines(&mut self) {
        // We need to drop the fibers of each component instance before
        // attempting to drop the instances themselves since the fibers may need
        // to be resumed and allowed to exit cleanly before we yank the state
        // out from under them.
        #[cfg(feature = "component-model-async")]
        ComponentStoreData::drop_fibers(&mut self.inner);

        // Ensure all fiber stacks, even cached ones, are all flushed out to the
        // instance allocator.
        self.inner.flush_fiber_stack();
    }
664
    /// Consumes this [`Store`], destroying it, and returns the underlying data.
    pub fn into_data(mut self) -> T {
        self.run_manual_drop_routines();

        // This is an unsafe operation because we want to avoid having a runtime
        // check or boolean for whether the data is actually contained within a
        // `Store`. The data itself is stored as `ManuallyDrop` since we're
        // manually managing the memory here, and there's also a `ManuallyDrop`
        // around the `Box<StoreInner<T>>`. The way this works though is a bit
        // tricky, so here's how things get dropped appropriately:
        //
        // * When a `Store<T>` is normally dropped, the custom destructor for
        //   `Store<T>` will drop `T`, then the `self.inner` field. The
        //   rustc-glue destructor runs for `Box<StoreInner<T>>` which drops
        //   `StoreInner<T>`. This cleans up all internal fields and doesn't
        //   touch `T` because it's wrapped in `ManuallyDrop`.
        //
        // * When calling this method we skip the top-level destructor for
        //   `Store<T>` with `mem::forget`. This skips both the destructor for
        //   `T` and the destructor for `StoreInner<T>`. We do, however, run the
        //   destructor for `Box<StoreInner<T>>` which, like above, will skip
        //   the destructor for `T` since it's `ManuallyDrop`.
        //
        // In both cases all the other fields of `StoreInner<T>` should all get
        // dropped, and the manual management of destructors is basically
        // between this method and `Drop for Store<T>`. Note that this also
        // means that `Drop for StoreInner<T>` cannot access `self.data`, so
        // there is a comment indicating this as well.
        //
        // SAFETY: `self.inner` and `inner.data` are only moved out here, and
        // the `mem::forget(self)` between the two `take`s prevents
        // `Drop for Store<T>` from observing the moved-out contents.
        unsafe {
            let mut inner = ManuallyDrop::take(&mut self.inner);
            core::mem::forget(self);
            ManuallyDrop::take(&mut inner.data)
        }
    }
699
700    /// Configures the [`ResourceLimiter`] used to limit resource creation
701    /// within this [`Store`].
702    ///
703    /// Whenever resources such as linear memory, tables, or instances are
704    /// allocated the `limiter` specified here is invoked with the store's data
705    /// `T` and the returned [`ResourceLimiter`] is used to limit the operation
706    /// being allocated. The returned [`ResourceLimiter`] is intended to live
707    /// within the `T` itself, for example by storing a
708    /// [`StoreLimits`](crate::StoreLimits).
709    ///
710    /// Note that this limiter is only used to limit the creation/growth of
711    /// resources in the future, this does not retroactively attempt to apply
712    /// limits to the [`Store`].
713    ///
714    /// # Examples
715    ///
716    /// ```
717    /// use wasmtime::*;
718    ///
719    /// struct MyApplicationState {
720    ///     my_state: u32,
721    ///     limits: StoreLimits,
722    /// }
723    ///
724    /// let engine = Engine::default();
725    /// let my_state = MyApplicationState {
726    ///     my_state: 42,
727    ///     limits: StoreLimitsBuilder::new()
728    ///         .memory_size(1 << 20 /* 1 MB */)
729    ///         .instances(2)
730    ///         .build(),
731    /// };
732    /// let mut store = Store::new(&engine, my_state);
733    /// store.limiter(|state| &mut state.limits);
734    ///
735    /// // Creation of smaller memories is allowed
736    /// Memory::new(&mut store, MemoryType::new(1, None)).unwrap();
737    ///
738    /// // Creation of a larger memory, however, will exceed the 1MB limit we've
739    /// // configured
740    /// assert!(Memory::new(&mut store, MemoryType::new(1000, None)).is_err());
741    ///
742    /// // The number of instances in this store is limited to 2, so the third
743    /// // instance here should fail.
744    /// let module = Module::new(&engine, "(module)").unwrap();
745    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
746    /// assert!(Instance::new(&mut store, &module, &[]).is_ok());
747    /// assert!(Instance::new(&mut store, &module, &[]).is_err());
748    /// ```
749    ///
750    /// [`ResourceLimiter`]: crate::ResourceLimiter
751    pub fn limiter(
752        &mut self,
753        mut limiter: impl FnMut(&mut T) -> &mut (dyn crate::ResourceLimiter) + Send + Sync + 'static,
754    ) {
755        // Apply the limits on instances, tables, and memory given by the limiter:
756        let inner = &mut self.inner;
757        let (instance_limit, table_limit, memory_limit) = {
758            let l = limiter(&mut inner.data);
759            (l.instances(), l.tables(), l.memories())
760        };
761        let innermost = &mut inner.inner;
762        innermost.instance_limit = instance_limit;
763        innermost.table_limit = table_limit;
764        innermost.memory_limit = memory_limit;
765
766        // Save the limiter accessor function:
767        inner.limiter = Some(ResourceLimiterInner::Sync(Box::new(limiter)));
768    }
769
    /// Configure a function that runs on calls and returns between WebAssembly
    /// and host code.
    ///
    /// The function is passed a [`CallHook`] argument, which indicates which
    /// state transition the VM is making.
    ///
    /// This function may return a [`Trap`]. If a trap is returned when an
    /// import was called, it is immediately raised as-if the host import had
    /// returned the trap. If a trap is returned after wasm returns to the host
    /// then the wasm function's result is ignored and this trap is returned
    /// instead.
    ///
    /// After this function returns a trap, it may be called for subsequent returns
    /// to host or wasm code as the trap propagates to the root call.
    #[cfg(feature = "call-hook")]
    pub fn call_hook(
        &mut self,
        hook: impl FnMut(StoreContextMut<'_, T>, CallHook) -> Result<()> + Send + Sync + 'static,
    ) {
        // Only one hook is stored at a time, so this replaces any
        // previously-configured hook. It is invoked from
        // `StoreInner::call_hook` on each wasm<->host transition.
        self.inner.call_hook = Some(CallHookInner::Sync(Box::new(hook)));
    }
791
    /// Returns the [`Engine`] that this store is associated with.
    pub fn engine(&self) -> &Engine {
        // Cheap accessor: the engine is stored directly within the store.
        self.inner.engine()
    }

    /// Perform garbage collection.
    ///
    /// Note that it is not required to actively call this function. GC will
    /// automatically happen according to various internal heuristics. This is
    /// provided if fine-grained control over the GC is desired.
    ///
    /// If you are calling this method after an attempted allocation failed, you
    /// may pass in the [`GcHeapOutOfMemory`][crate::GcHeapOutOfMemory] error.
    /// When you do so, this method will attempt to create enough space in the
    /// GC heap for that allocation, so that it will succeed on the next
    /// attempt.
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
        // Synchronous collection is only valid for non-async stores; async
        // stores are expected to use the async GC entry point instead.
        assert!(!self.inner.async_support());
        self.inner.gc(why);
    }

    /// Returns the amount fuel in this [`Store`]. When fuel is enabled, it must
    /// be configured via [`Store::set_fuel`].
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled
    /// via [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn get_fuel(&self) -> Result<u64> {
        self.inner.get_fuel()
    }
826
    /// Set the fuel to this [`Store`] for wasm to consume while executing.
    ///
    /// For this method to work fuel consumption must be enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel). By default a
    /// [`Store`] starts with 0 fuel for wasm to execute with (meaning it will
    /// immediately trap). This function must be called for the store to have
    /// some fuel to allow WebAssembly to execute.
    ///
    /// Most WebAssembly instructions consume 1 unit of fuel. Some
    /// instructions, such as `nop`, `drop`, `block`, and `loop`, consume 0
    /// units, as any execution cost associated with them involves other
    /// instructions which do consume fuel.
    ///
    /// Note that when fuel is entirely consumed it will cause wasm to trap.
    ///
    /// # Errors
    ///
    /// This function will return an error if fuel consumption is not enabled via
    /// [`Config::consume_fuel`](crate::Config::consume_fuel).
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        // Delegates to the inner store's fuel bookkeeping.
        self.inner.set_fuel(fuel)
    }

    /// Configures a [`Store`] to yield execution of async WebAssembly code
    /// periodically.
    ///
    /// When a [`Store`] is configured to consume fuel with
    /// [`Config::consume_fuel`](crate::Config::consume_fuel) this method will
    /// configure WebAssembly to be suspended and control will be yielded back to the
    /// caller every `interval` units of fuel consumed. This is only suitable with use of
    /// a store associated with an [async config](crate::Config::async_support) because
    /// only then are futures used and yields are possible.
    ///
    /// The purpose of this behavior is to ensure that futures which represent
    /// execution of WebAssembly do not execute too long inside their
    /// `Future::poll` method. This allows for some form of cooperative
    /// multitasking where WebAssembly will voluntarily yield control
    /// periodically (based on fuel consumption) back to the running thread.
    ///
    /// Note that futures returned by this crate will automatically flag
    /// themselves to get re-polled if a yield happens. This means that
    /// WebAssembly will continue to execute, just after giving the host an
    /// opportunity to do something else.
    ///
    /// The `interval` parameter indicates how much fuel should be
    /// consumed between yields of an async future. When fuel runs out wasm will trap.
    ///
    /// # Error
    ///
    /// This method will error if it is not called on a store associated with an [async
    /// config](crate::Config::async_support).
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        // `None` disables periodic yielding; `Some(n)` yields every `n` units.
        self.inner.fuel_async_yield_interval(interval)
    }
881
    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// When the Wasm guest code is compiled with epoch-interruption
    /// instrumentation
    /// ([`Config::epoch_interruption()`](crate::Config::epoch_interruption)),
    /// and when the `Engine`'s epoch is incremented
    /// ([`Engine::increment_epoch()`](crate::Engine::increment_epoch))
    /// past a deadline, execution can be configured to either trap or
    /// yield and then continue.
    ///
    /// This deadline is always set relative to the current epoch:
    /// `ticks_beyond_current` ticks in the future. The deadline can
    /// be set explicitly via this method, or refilled automatically
    /// on a yield if configured via
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update). After
    /// this method is invoked, the deadline is reached when
    /// [`Engine::increment_epoch()`] has been invoked at least
    /// `ticks_beyond_current` times.
    ///
    /// By default a store will trap immediately with an epoch deadline of 0
    /// (which has always "elapsed"). This method is required to be configured
    /// for stores with epochs enabled to some future epoch deadline.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        // The deadline is stored relative to the engine's current epoch.
        self.inner.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion,
    /// with the store configured in this way, execution will
    /// terminate with a trap as soon as an epoch check in the
    /// instrumented code is reached.
    ///
    /// This behavior is the default if the store is not otherwise
    /// configured via
    /// [`epoch_deadline_trap()`](Store::epoch_deadline_trap),
    /// [`epoch_deadline_callback()`](Store::epoch_deadline_callback) or
    /// [`epoch_deadline_async_yield_and_update()`](Store::epoch_deadline_async_yield_and_update).
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// Note that when this is used it's required to call
    /// [`Store::set_epoch_deadline`] or otherwise wasm will always immediately
    /// trap.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        // Selects the trapping behavior (the default) for deadline expiry.
        self.inner.epoch_deadline_trap();
    }
943
    /// Configures epoch-deadline expiration to invoke a custom callback
    /// function.
    ///
    /// When epoch-interruption-instrumented code is executed on this
    /// store and the epoch deadline is reached before completion, the
    /// provided callback function is invoked.
    ///
    /// This callback should either return an [`UpdateDeadline`], or
    /// return an error, which will terminate execution with a trap.
    ///
    /// The [`UpdateDeadline`] is a positive number of ticks to
    /// add to the epoch deadline, as well as indicating what
    /// to do after the callback returns. If the [`Store`] is
    /// configured with async support, then the callback may return
    /// [`UpdateDeadline::Yield`] or [`UpdateDeadline::YieldCustom`]
    /// to yield to the async executor before updating the epoch deadline.
    /// Alternatively, the callback may return [`UpdateDeadline::Continue`] to
    /// update the epoch deadline immediately.
    ///
    /// This setting is intended to allow for coarse-grained
    /// interruption, but not a deterministic deadline of a fixed,
    /// finite interval. For deterministic interruption, see the
    /// "fuel" mechanism instead.
    ///
    /// See documentation on
    /// [`Config::epoch_interruption()`](crate::Config::epoch_interruption)
    /// for an introduction to epoch-based interruption.
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_callback(
        &mut self,
        callback: impl FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync + 'static,
    ) {
        // The callback is boxed and kept in the store until the deadline hits.
        self.inner.epoch_deadline_callback(Box::new(callback));
    }
978}
979
impl<'a, T> StoreContext<'a, T> {
    // All methods below simply forward to the wrapped store (`self.0`); see
    // the corresponding `Store` methods for the full documentation.

    // Crate-internal: whether this store was configured with async support.
    pub(crate) fn async_support(&self) -> bool {
        self.0.async_support()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    ///
    /// Note that the returned borrow lives for `'a` — the lifetime of this
    /// context — rather than being tied to the `&self` borrow.
    pub fn data(&self) -> &'a T {
        self.0.data()
    }

    /// Returns the remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`].
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }
}
1004
impl<'a, T> StoreContextMut<'a, T> {
    // All methods below simply forward to the wrapped store (`self.0`); see
    // the corresponding `Store` methods for the full documentation.

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data`].
    pub fn data(&self) -> &T {
        self.0.data()
    }

    /// Access the underlying data owned by this `Store`.
    ///
    /// Same as [`Store::data_mut`].
    pub fn data_mut(&mut self) -> &mut T {
        self.0.data_mut()
    }

    /// Returns the underlying [`Engine`] this store is connected to.
    pub fn engine(&self) -> &Engine {
        self.0.engine()
    }

    /// Perform garbage collection of `ExternRef`s.
    ///
    /// Same as [`Store::gc`].
    ///
    /// This method is only available when the `gc` Cargo feature is enabled.
    #[cfg(feature = "gc")]
    pub fn gc(&mut self, why: Option<&crate::GcHeapOutOfMemory<()>>) {
        self.0.gc(why);
    }

    /// Returns remaining fuel in this store.
    ///
    /// For more information see [`Store::get_fuel`]
    pub fn get_fuel(&self) -> Result<u64> {
        self.0.get_fuel()
    }

    /// Set the amount of fuel in this store.
    ///
    /// For more information see [`Store::set_fuel`]
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        self.0.set_fuel(fuel)
    }

    /// Configures this `Store` to periodically yield while executing futures.
    ///
    /// For more information see [`Store::fuel_async_yield_interval`]
    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
        self.0.fuel_async_yield_interval(interval)
    }

    /// Sets the epoch deadline to a certain number of ticks in the future.
    ///
    /// For more information see [`Store::set_epoch_deadline`].
    #[cfg(target_has_atomic = "64")]
    pub fn set_epoch_deadline(&mut self, ticks_beyond_current: u64) {
        self.0.set_epoch_deadline(ticks_beyond_current);
    }

    /// Configures epoch-deadline expiration to trap.
    ///
    /// For more information see [`Store::epoch_deadline_trap`].
    #[cfg(target_has_atomic = "64")]
    pub fn epoch_deadline_trap(&mut self) {
        self.0.epoch_deadline_trap();
    }
}
1072
impl<T> StoreInner<T> {
    /// Shared access to the store's user data `T`.
    #[inline]
    fn data(&self) -> &T {
        &self.data
    }

    /// Mutable access to the store's user data `T`.
    #[inline]
    fn data_mut(&mut self) -> &mut T {
        &mut self.data
    }

    /// Run the configured call hook (if any) for the transition `s`.
    ///
    /// Inlined so the common case — no protection key and no hook — reduces
    /// to a pair of cheap `None` checks at every wasm<->host boundary.
    #[inline]
    pub fn call_hook(&mut self, s: CallHook) -> Result<()> {
        if self.inner.pkey.is_none() && self.call_hook.is_none() {
            Ok(())
        } else {
            self.call_hook_slow_path(s)
        }
    }

    /// Out-of-line path for `call_hook`: switches the protection key (when one
    /// is configured) and invokes any user-configured hook.
    fn call_hook_slow_path(&mut self, s: CallHook) -> Result<()> {
        if let Some(pkey) = &self.inner.pkey {
            let allocator = self.engine().allocator();
            // Entering wasm restricts memory access to this store's pkey;
            // returning to the host lifts the restriction again.
            match s {
                CallHook::CallingWasm | CallHook::ReturningFromHost => {
                    allocator.restrict_to_pkey(*pkey)
                }
                CallHook::ReturningFromWasm | CallHook::CallingHost => allocator.allow_all_pkeys(),
            }
        }

        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        #[cfg_attr(not(feature = "call-hook"), allow(unreachable_patterns))]
        if let Some(mut call_hook) = self.call_hook.take() {
            let result = self.invoke_call_hook(&mut call_hook, s);
            // Restore the hook even when it returned an error so that later
            // transitions still observe it.
            self.call_hook = Some(call_hook);
            return result;
        }

        Ok(())
    }

    /// Dispatch to the sync or async flavor of a configured call hook.
    fn invoke_call_hook(&mut self, call_hook: &mut CallHookInner<T>, s: CallHook) -> Result<()> {
        match call_hook {
            #[cfg(feature = "call-hook")]
            CallHookInner::Sync(hook) => hook((&mut *self).as_context_mut(), s),

            #[cfg(all(feature = "async", feature = "call-hook"))]
            CallHookInner::Async(handler) => {
                // Async hooks must block on a future; bail when the current
                // context cannot block.
                if !self.can_block() {
                    bail!("couldn't grab async_cx for call hook")
                }
                return (&mut *self)
                    .as_context_mut()
                    .with_blocking(|store, cx| cx.block_on(handler.handle_call_event(store, s)))?;
            }

            CallHookInner::ForceTypeParameterToBeUsed { uninhabited, .. } => {
                // This variant is uninhabited: matching on it proves it can
                // never be constructed while keeping `T` used by the enum.
                let _ = s;
                match *uninhabited {}
            }
        }
    }

    /// No-op shim for builds without the `async` feature so callers can
    /// unconditionally ask for fiber stacks to be flushed.
    #[cfg(not(feature = "async"))]
    fn flush_fiber_stack(&mut self) {
        // noop shim so code can assume this always exists.
    }
}
1143
/// Compute the total fuel remaining: the active (injected) fuel plus the
/// reserve not yet handed to the VM.
///
/// `injected_fuel` is stored negated — the VM counts *up* toward zero while
/// executing (see `set_fuel` below) — so negating it recovers the active
/// amount remaining.
fn get_fuel(injected_fuel: i64, fuel_reserve: u64) -> u64 {
    // `saturating_neg` avoids the negation overflow (debug-build panic) that
    // a literal `-injected_fuel` would hit at `i64::MIN`. `set_fuel` never
    // injects more than `i64::MAX`, so saturation changes no reachable result.
    fuel_reserve.saturating_add_signed(injected_fuel.saturating_neg())
}
1147
1148// Add remaining fuel from the reserve into the active fuel if there is any left.
1149fn refuel(
1150    injected_fuel: &mut i64,
1151    fuel_reserve: &mut u64,
1152    yield_interval: Option<NonZeroU64>,
1153) -> bool {
1154    let fuel = get_fuel(*injected_fuel, *fuel_reserve);
1155    if fuel > 0 {
1156        set_fuel(injected_fuel, fuel_reserve, yield_interval, fuel);
1157        true
1158    } else {
1159        false
1160    }
1161}
1162
/// Split `new_fuel_amount` into an "active" chunk injected into the VM and a
/// "reserve" chunk banked for later refueling.
fn set_fuel(
    injected_fuel: &mut i64,
    fuel_reserve: &mut u64,
    yield_interval: Option<NonZeroU64>,
    new_fuel_amount: u64,
) {
    // The active chunk is capped both by the periodic-yield interval (when
    // one is configured) and by `i64` range, since the VM's fuel counter is
    // an `i64`.
    let interval_cap = yield_interval.map_or(u64::MAX, NonZeroU64::get);
    let active = new_fuel_amount.min(interval_cap).min(i64::MAX as u64);
    // Whatever wasn't injected is kept aside for future `refuel` calls.
    *fuel_reserve = new_fuel_amount - active;
    // The VM increments toward zero and halts once the counter is positive,
    // so the injected amount is stored negated.
    *injected_fuel = -(active as i64);
}
1182
1183#[doc(hidden)]
1184impl StoreOpaque {
1185    pub fn id(&self) -> StoreId {
1186        self.store_data.id()
1187    }
1188
1189    pub fn bump_resource_counts(&mut self, module: &Module) -> Result<()> {
1190        fn bump(slot: &mut usize, max: usize, amt: usize, desc: &str) -> Result<()> {
1191            let new = slot.saturating_add(amt);
1192            if new > max {
1193                bail!(
1194                    "resource limit exceeded: {} count too high at {}",
1195                    desc,
1196                    new
1197                );
1198            }
1199            *slot = new;
1200            Ok(())
1201        }
1202
1203        let module = module.env_module();
1204        let memories = module.num_defined_memories();
1205        let tables = module.num_defined_tables();
1206
1207        bump(&mut self.instance_count, self.instance_limit, 1, "instance")?;
1208        bump(
1209            &mut self.memory_count,
1210            self.memory_limit,
1211            memories,
1212            "memory",
1213        )?;
1214        bump(&mut self.table_count, self.table_limit, tables, "table")?;
1215
1216        Ok(())
1217    }
1218
    /// Whether this store supports async operation: requires both the `async`
    /// feature at compile time and `Config::async_support` at runtime.
    #[inline]
    pub fn async_support(&self) -> bool {
        cfg!(feature = "async") && self.engine().config().async_support
    }

    /// The [`Engine`] this store belongs to.
    #[inline]
    pub fn engine(&self) -> &Engine {
        &self.engine
    }

    /// Shared access to this store's `StoreData`.
    #[inline]
    pub fn store_data(&self) -> &StoreData {
        &self.store_data
    }

    /// Mutable access to this store's `StoreData`.
    #[inline]
    pub fn store_data_mut(&mut self) -> &mut StoreData {
        &mut self.store_data
    }

    /// Shared access to the registry of modules known to this store.
    #[inline]
    pub(crate) fn modules(&self) -> &ModuleRegistry {
        &self.modules
    }

    /// Mutable access to the module registry.
    #[inline]
    pub(crate) fn modules_mut(&mut self) -> &mut ModuleRegistry {
        &mut self.modules
    }

    /// Split borrow: mutable func-refs alongside a shared module registry,
    /// for callers that need both simultaneously.
    pub(crate) fn func_refs_and_modules(&mut self) -> (&mut FuncRefs, &ModuleRegistry) {
        (&mut self.func_refs, &self.modules)
    }

    /// Shared access to the globals created directly by the host, keyed by
    /// their defined-global index.
    pub(crate) fn host_globals(
        &self,
    ) -> &PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &self.host_globals
    }

    /// Mutable access to the host-created globals map.
    pub(crate) fn host_globals_mut(
        &mut self,
    ) -> &mut PrimaryMap<DefinedGlobalIndex, StoreBox<VMHostGlobalContext>> {
        &mut self.host_globals
    }
1264
1265    pub fn module_for_instance(&self, instance: StoreInstanceId) -> Option<&'_ Module> {
1266        instance.store_id().assert_belongs_to(self.id());
1267        match self.instances[instance.instance()].kind {
1268            StoreInstanceKind::Dummy => None,
1269            StoreInstanceKind::Real { module_id } => {
1270                let module = self
1271                    .modules()
1272                    .lookup_module_by_id(module_id)
1273                    .expect("should always have a registered module for real instances");
1274                Some(module)
1275            }
1276        }
1277    }
1278
    /// Accessor from `InstanceId` to `&vm::Instance`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// Indexing with an unvalidated id from another store is a logic error.
    #[inline]
    pub fn instance(&self, id: InstanceId) -> &vm::Instance {
        self.instances[id].handle.get()
    }

    /// Accessor from `InstanceId` to `Pin<&mut vm::Instance>`.
    ///
    /// Note that if you have a `StoreInstanceId` you should use
    /// `StoreInstanceId::get_mut` instead. This assumes that `id` has been
    /// validated to already belong to this store.
    ///
    /// Indexing with an unvalidated id from another store is a logic error.
    #[inline]
    pub fn instance_mut(&mut self, id: InstanceId) -> Pin<&mut vm::Instance> {
        self.instances[id].handle.get_mut()
    }
1298
1299    /// Get all instances (ignoring dummy instances) within this store.
1300    pub fn all_instances<'a>(&'a mut self) -> impl ExactSizeIterator<Item = Instance> + 'a {
1301        let instances = self
1302            .instances
1303            .iter()
1304            .filter_map(|(id, inst)| {
1305                if let StoreInstanceKind::Dummy = inst.kind {
1306                    None
1307                } else {
1308                    Some(id)
1309                }
1310            })
1311            .collect::<Vec<_>>();
1312        instances
1313            .into_iter()
1314            .map(|i| Instance::from_wasmtime(i, self))
1315    }
1316
    /// Get all memories (host- or Wasm-defined) within this store.
    pub fn all_memories<'a>(&'a mut self) -> impl Iterator<Item = Memory> + 'a {
        // NB: Host-created memories have dummy instances. Therefore, we can get
        // all memories in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined memories.
        let mems = self
            .instances
            .iter_mut()
            .flat_map(|(_, instance)| instance.handle.get().defined_memories())
            .collect::<Vec<_>>();
        mems.into_iter()
            // SAFETY: each `memory` was just harvested from an instance owned
            // by this very store, so it is paired here with its own store.
            .map(|memory| unsafe { Memory::from_wasmtime_memory(memory, self) })
    }
1330
    /// Iterate over all tables (host- or Wasm-defined) within this store.
    pub fn for_each_table(&mut self, mut f: impl FnMut(&mut Self, Table)) {
        // NB: Host-created tables have dummy instances. Therefore, we can get
        // all tables in the store by iterating over all instances (including
        // dummy instances) and getting each of their defined tables.
        for id in self.instances.keys() {
            let instance = StoreInstanceId::new(self.id(), id);
            for table in 0..self.instance(id).env_module().num_defined_tables() {
                let table = DefinedTableIndex::new(table);
                f(self, Table::from_raw(instance, table));
            }
        }
    }
1344
    /// Iterate over all globals (host- or Wasm-defined) within this store.
    ///
    /// `f` observes host-created globals first, then each instance's defined
    /// globals, in that fixed order.
    pub fn for_each_global(&mut self, mut f: impl FnMut(&mut Self, Global)) {
        // First enumerate all the host-created globals.
        for global in self.host_globals.keys() {
            let global = Global::new_host(self, global);
            f(self, global);
        }

        // Then enumerate all instances' defined globals.
        for id in self.instances.keys() {
            for index in 0..self.instance(id).env_module().num_defined_globals() {
                let index = DefinedGlobalIndex::new(index);
                let global = Global::new_instance(self, id, index);
                f(self, global);
            }
        }
    }
1362
    /// Set or clear this store's signal handler.
    #[cfg_attr(not(target_os = "linux"), allow(dead_code))] // not used on all platforms
    pub fn set_signal_handler(&mut self, handler: Option<SignalHandler>) {
        self.signal_handler = handler;
    }

    /// Shared access to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context(&self) -> &VMStoreContext {
        &self.vm_store_context
    }

    /// Mutable access to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context_mut(&mut self) -> &mut VMStoreContext {
        &mut self.vm_store_context
    }
1377
    /// Allocate and install this store's GC heap.
    ///
    /// Must only be called while no GC heap exists yet (asserted below). On
    /// success `self.gc_store` is populated and the heap's memory definition
    /// is published into `self.vm_store_context.gc_heap`.
    #[inline(never)]
    pub(crate) fn allocate_gc_heap(&mut self) -> Result<()> {
        log::trace!("allocating GC heap for store {:?}", self.id());

        // Sanity check: no heap installed yet, and the cached heap pointer is
        // still the dangling placeholder with a length of zero.
        assert!(self.gc_store.is_none());
        assert_eq!(
            self.vm_store_context.gc_heap.base.as_non_null(),
            NonNull::dangling(),
        );
        assert_eq!(self.vm_store_context.gc_heap.current_length(), 0);

        let vmstore = self.traitobj();
        let gc_store = allocate_gc_store(self.engine(), vmstore, self.get_pkey())?;
        self.vm_store_context.gc_heap = gc_store.vmmemory_definition();
        self.gc_store = Some(gc_store);
        return Ok(());

        // Real implementation, compiled when GC support is enabled.
        #[cfg(feature = "gc")]
        fn allocate_gc_store(
            engine: &Engine,
            vmstore: NonNull<dyn vm::VMStore>,
            pkey: Option<ProtectionKey>,
        ) -> Result<GcStore> {
            use wasmtime_environ::packed_option::ReservedValue;

            ensure!(
                engine.features().gc_types(),
                "cannot allocate a GC store when GC is disabled at configuration time"
            );

            // First, allocate the memory that will be our GC heap's storage.
            let mut request = InstanceAllocationRequest {
                id: InstanceId::reserved_value(),
                runtime_info: &ModuleRuntimeInfo::bare(Arc::new(
                    wasmtime_environ::Module::default(),
                )),
                imports: vm::Imports::default(),
                store: StorePtr::new(vmstore),
                wmemcheck: false,
                pkey,
                tunables: engine.tunables(),
            };
            let mem_ty = engine.tunables().gc_heap_memory_type();
            let tunables = engine.tunables();

            // SAFETY: We validated the GC heap's memory type during engine creation.
            let (mem_alloc_index, mem) = unsafe {
                engine
                    .allocator()
                    .allocate_memory(&mut request, &mem_ty, tunables, None)?
            };

            // Then, allocate the actual GC heap, passing in that memory
            // storage.
            let gc_runtime = engine
                .gc_runtime()
                .context("no GC runtime: GC disabled at compile time or configuration time")?;
            let (index, heap) =
                engine
                    .allocator()
                    .allocate_gc_heap(engine, &**gc_runtime, mem_alloc_index, mem)?;

            Ok(GcStore::new(index, heap))
        }

        // Stub used when the `gc` feature is compiled out; always errors.
        #[cfg(not(feature = "gc"))]
        fn allocate_gc_store(
            _engine: &Engine,
            _vmstore: NonNull<dyn vm::VMStore>,
            _pkey: Option<ProtectionKey>,
        ) -> Result<GcStore> {
            bail!("cannot allocate a GC store: the `gc` feature was disabled at compile time")
        }
    }
1452
    /// Shared access to this store's GC store, erroring if no GC heap has
    /// been initialized yet.
    #[inline]
    pub(crate) fn gc_store(&self) -> Result<&GcStore> {
        match &self.gc_store {
            Some(gc_store) => Ok(gc_store),
            None => bail!("GC heap not initialized yet"),
        }
    }

    /// Mutable access to this store's GC store, lazily allocating the GC heap
    /// on first use.
    #[inline]
    pub(crate) fn gc_store_mut(&mut self) -> Result<&mut GcStore> {
        if self.gc_store.is_none() {
            self.allocate_gc_heap()?;
        }
        Ok(self.unwrap_gc_store_mut())
    }

    /// If this store is configured with a GC heap, return a mutable reference
    /// to it. Otherwise, return `None`.
    #[inline]
    pub(crate) fn optional_gc_store_mut(&mut self) -> Option<&mut GcStore> {
        if cfg!(not(feature = "gc")) || !self.engine.features().gc_types() {
            // GC disabled at compile- or config-time: a heap can never have
            // been allocated.
            debug_assert!(self.gc_store.is_none());
            None
        } else {
            self.gc_store.as_mut()
        }
    }
1480
    /// Shared access to the GC store, panicking if the GC heap has not been
    /// allocated yet.
    #[inline]
    #[track_caller]
    #[cfg(feature = "gc")]
    pub(crate) fn unwrap_gc_store(&self) -> &GcStore {
        self.gc_store
            .as_ref()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }

    /// Mutable access to the GC store, panicking if the GC heap has not been
    /// allocated yet.
    #[inline]
    #[track_caller]
    pub(crate) fn unwrap_gc_store_mut(&mut self) -> &mut GcStore {
        self.gc_store
            .as_mut()
            .expect("attempted to access the store's GC heap before it has been allocated")
    }

    /// Shared access to this store's set of rooted GC references.
    #[inline]
    pub(crate) fn gc_roots(&self) -> &RootSet {
        &self.gc_roots
    }

    /// Mutable access to this store's set of rooted GC references.
    #[inline]
    #[cfg(feature = "gc")]
    pub(crate) fn gc_roots_mut(&mut self) -> &mut RootSet {
        &mut self.gc_roots
    }

    /// Pop LIFO-rooted GC references back down to the given scope marker.
    #[inline]
    pub(crate) fn exit_gc_lifo_scope(&mut self, scope: usize) {
        self.gc_roots.exit_lifo_scope(self.gc_store.as_mut(), scope);
    }
1513
    /// Synchronous garbage collection: trace all roots, then collect.
    #[cfg(feature = "gc")]
    fn do_gc(&mut self) {
        assert!(
            !self.async_support(),
            "must use `store.gc_async()` instead of `store.gc()` for async stores"
        );

        // If the GC heap hasn't been initialized, there is nothing to collect.
        if self.gc_store.is_none() {
            return;
        }

        log::trace!("============ Begin GC ===========");

        // Take the GC roots out of `self` so we can borrow it mutably but still
        // call mutable methods on `self`.
        let mut roots = core::mem::take(&mut self.gc_roots_list);

        self.trace_roots(&mut roots);
        // SAFETY: `roots` was freshly populated by `trace_roots` above and is
        // consumed immediately — NOTE(review): confirm the exact contract of
        // `GcRootsList::iter` against its declaration.
        self.unwrap_gc_store_mut().gc(unsafe { roots.iter() });

        // Restore the GC roots for the next GC.
        roots.clear();
        self.gc_roots_list = roots;

        log::trace!("============ End GC ===========");
    }
1541
    /// Populates `gc_roots_list` with every GC root known to this store:
    /// Wasm stack slots, suspended-continuation stacks (when enabled),
    /// vmctx-reachable globals/tables, and embedder-held roots.
    #[cfg(feature = "gc")]
    fn trace_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots");

        // We shouldn't have any leftover, stale GC roots.
        assert!(gc_roots_list.is_empty());

        self.trace_wasm_stack_roots(gc_roots_list);
        #[cfg(feature = "stack-switching")]
        self.trace_wasm_continuation_roots(gc_roots_list);
        self.trace_vmctx_roots(gc_roots_list);
        self.trace_user_roots(gc_roots_list);

        log::trace!("End trace GC roots")
    }
1557
    /// Records the live GC roots of a single Wasm stack frame into
    /// `gc_roots_list`.
    ///
    /// Frames with no stack map registered at `pc` are skipped — they hold
    /// no live GC references at that program point.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_frame(
        &self,
        gc_roots_list: &mut GcRootsList,
        frame: crate::runtime::vm::Frame,
    ) {
        use crate::runtime::vm::SendSyncPtr;
        use core::ptr::NonNull;

        let pc = frame.pc();
        debug_assert!(pc != 0, "we should always get a valid PC for Wasm frames");

        let fp = frame.fp() as *mut usize;
        debug_assert!(
            !fp.is_null(),
            "we should always get a valid frame pointer for Wasm frames"
        );

        let module_info = self
            .modules()
            .lookup_module_by_pc(pc)
            .expect("should have module info for Wasm frame");

        let stack_map = match module_info.lookup_stack_map(pc) {
            Some(sm) => sm,
            None => {
                log::trace!("No stack map for this Wasm frame");
                return;
            }
        };
        log::trace!(
            "We have a stack map that maps {} bytes in this Wasm frame",
            stack_map.frame_size()
        );

        // NOTE(review): the raw reads below assume the stack map's recorded
        // slot offsets are in-bounds for this frame — presumably guaranteed by
        // the compiler that emitted the map; confirm.
        let sp = unsafe { stack_map.sp(fp) };
        for stack_slot in unsafe { stack_map.live_gc_refs(sp) } {
            let raw: u32 = unsafe { core::ptr::read(stack_slot) };
            log::trace!("Stack slot @ {stack_slot:p} = {raw:#x}");

            // Only non-null GC refs become roots; null slots are skipped.
            let gc_ref = VMGcRef::from_raw_u32(raw);
            if gc_ref.is_some() {
                unsafe {
                    gc_roots_list
                        .add_wasm_stack_root(SendSyncPtr::new(NonNull::new(stack_slot).unwrap()));
                }
            }
        }
    }
1607
    /// Walks the Wasm frames currently on the stack and records each frame's
    /// live GC roots via `trace_wasm_stack_frame`.
    #[cfg(feature = "gc")]
    fn trace_wasm_stack_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::runtime::vm::Backtrace;
        log::trace!("Begin trace GC roots :: Wasm stack");

        Backtrace::trace(self, |frame| {
            self.trace_wasm_stack_frame(gc_roots_list, frame);
            core::ops::ControlFlow::Continue(())
        });

        log::trace!("End trace GC roots :: Wasm stack");
    }
1620
    /// Records GC roots found on the stacks of suspended continuations.
    ///
    /// Only `Suspended` continuations are traced here; the other states are
    /// either covered elsewhere or contain no GC values (see the per-state
    /// comments below).
    #[cfg(all(feature = "gc", feature = "stack-switching"))]
    fn trace_wasm_continuation_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        use crate::{runtime::vm::Backtrace, vm::VMStackState};
        log::trace!("Begin trace GC roots :: continuations");

        for continuation in &self.continuations {
            let state = continuation.common_stack_information.state;

            // FIXME(frank-emrich) In general, it is not enough to just trace
            // through the stacks of continuations; we also need to look through
            // their `cont.bind` arguments. However, we don't currently have
            // enough RTTI information to check if any of the values in the
            // buffers used by `cont.bind` are GC values. As a workaround, note
            // that we currently disallow cont.bind-ing GC values altogether.
            // This way, it is okay not to check them here.
            match state {
                VMStackState::Suspended => {
                    Backtrace::trace_suspended_continuation(self, continuation.deref(), |frame| {
                        self.trace_wasm_stack_frame(gc_roots_list, frame);
                        core::ops::ControlFlow::Continue(())
                    });
                }
                VMStackState::Running => {
                    // Handled by `trace_wasm_stack_roots`.
                }
                VMStackState::Parent => {
                    // We don't know whether our child is suspended or running, but in
                    // either case things should be handled correctly when traversing
                    // further along in the chain, nothing required at this point.
                }
                VMStackState::Fresh | VMStackState::Returned => {
                    // Fresh/Returned continuations have no gc values on their stack.
                }
            }
        }

        log::trace!("End trace GC roots :: continuations");
    }
1659
    /// Records GC roots reachable from instance vmctx state, i.e. globals
    /// and tables.
    #[cfg(feature = "gc")]
    fn trace_vmctx_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: vmctx");
        self.for_each_global(|store, global| global.trace_root(store, gc_roots_list));
        self.for_each_table(|store, table| table.trace_roots(store, gc_roots_list));
        log::trace!("End trace GC roots :: vmctx");
    }
1667
    /// Records GC roots held by the embedder through this store's `RootSet`.
    #[cfg(feature = "gc")]
    fn trace_user_roots(&mut self, gc_roots_list: &mut GcRootsList) {
        log::trace!("Begin trace GC roots :: user");
        self.gc_roots.trace_roots(gc_roots_list);
        log::trace!("End trace GC roots :: user");
    }
1674
    /// Insert a host-allocated GC type into this store.
    ///
    /// This makes it suitable for the embedder to allocate instances of this
    /// type in this store, and we don't have to worry about the type being
    /// reclaimed (since it is possible that none of the Wasm modules in this
    /// store are holding it alive).
    #[cfg(feature = "gc")]
    pub(crate) fn insert_gc_host_alloc_type(&mut self, ty: crate::type_registry::RegisteredType) {
        // The set keeps the registration alive for the store's lifetime.
        // NOTE(review): re-inserting an already-present type is presumably a
        // harmless no-op of the underlying collection — confirm.
        self.gc_host_alloc_types.insert(ty);
    }
1685
    /// Returns the amount of fuel remaining in this store.
    ///
    /// Errors if the engine was not configured to consume fuel.
    pub fn get_fuel(&self) -> Result<u64> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // SAFETY note(review): `fuel_consumed` is a cell shared with executing
        // wasm; this read presumably only happens while wasm is not
        // concurrently mutating it — confirm.
        let injected_fuel = unsafe { *self.vm_store_context.fuel_consumed.get() };
        Ok(get_fuel(injected_fuel, self.fuel_reserve))
    }
1694
    /// Attempts to move fuel from this store's reserve into the active
    /// counter; the free-function `refuel` returns whether any fuel was
    /// available to inject.
    fn refuel(&mut self) -> bool {
        // SAFETY note(review): `&mut self` gives exclusive access to the
        // store; presumably no wasm is concurrently touching `fuel_consumed`
        // here — confirm.
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        refuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
        )
    }
1703
    /// Sets the total amount of fuel available to this store.
    ///
    /// Errors if the engine was not configured to consume fuel.
    pub fn set_fuel(&mut self, fuel: u64) -> Result<()> {
        anyhow::ensure!(
            self.engine().tunables().consume_fuel,
            "fuel is not configured in this store"
        );
        // SAFETY note(review): as with `refuel`, `&mut self` implies exclusive
        // access to `fuel_consumed` — confirm.
        let injected_fuel = unsafe { &mut *self.vm_store_context.fuel_consumed.get() };
        set_fuel(
            injected_fuel,
            &mut self.fuel_reserve,
            self.fuel_yield_interval,
            fuel,
        );
        Ok(())
    }
1718
1719    pub fn fuel_async_yield_interval(&mut self, interval: Option<u64>) -> Result<()> {
1720        anyhow::ensure!(
1721            self.engine().tunables().consume_fuel,
1722            "fuel is not configured in this store"
1723        );
1724        anyhow::ensure!(
1725            self.engine().config().async_support,
1726            "async support is not configured in this store"
1727        );
1728        anyhow::ensure!(
1729            interval != Some(0),
1730            "fuel_async_yield_interval must not be 0"
1731        );
1732        self.fuel_yield_interval = interval.and_then(|i| NonZeroU64::new(i));
1733        // Reset the fuel active + reserve states by resetting the amount.
1734        self.set_fuel(self.get_fuel()?)
1735    }
1736
    /// Returns the custom signal handler configured for this store, if any,
    /// as a raw pointer.
    #[inline]
    pub fn signal_handler(&self) -> Option<*const SignalHandler> {
        let handler = self.signal_handler.as_ref()?;
        Some(handler)
    }
1742
    /// Returns a non-null pointer to this store's `VMStoreContext`.
    #[inline]
    pub fn vm_store_context_ptr(&self) -> NonNull<VMStoreContext> {
        NonNull::from(&self.vm_store_context)
    }
1747
    /// Returns the vmctx of this store's default caller instance.
    #[inline]
    pub fn default_caller(&self) -> NonNull<VMContext> {
        self.default_caller_vmctx.as_non_null()
    }
1752
    /// Returns the raw pointer to this store's own `dyn VMStore` trait object.
    #[inline]
    pub fn traitobj(&self) -> NonNull<dyn vm::VMStore> {
        // NOTE(review): the `unwrap` assumes the self-pointer has been
        // initialized by the time this is called — presumably established
        // during store construction; confirm.
        self.traitobj.as_raw().unwrap()
    }
1757
    /// Returns a mutable borrow of this store's `dyn VMStore` trait object.
    #[inline]
    pub fn traitobj_mut(&mut self) -> &mut dyn vm::VMStore {
        // SAFETY note(review): `&mut self` should make this the only active
        // borrow of the store, so materializing `&mut dyn VMStore` from the
        // raw self-pointer presumably cannot alias — confirm.
        unsafe { self.traitobj().as_mut() }
    }
1762
1763    /// Takes the cached `Vec<Val>` stored internally across hostcalls to get
1764    /// used as part of calling the host in a `Func::new` method invocation.
1765    #[inline]
1766    pub fn take_hostcall_val_storage(&mut self) -> Vec<Val> {
1767        mem::take(&mut self.hostcall_val_storage)
1768    }
1769
1770    /// Restores the vector previously taken by `take_hostcall_val_storage`
1771    /// above back into the store, allowing it to be used in the future for the
1772    /// next wasm->host call.
1773    #[inline]
1774    pub fn save_hostcall_val_storage(&mut self, storage: Vec<Val>) {
1775        if storage.capacity() > self.hostcall_val_storage.capacity() {
1776            self.hostcall_val_storage = storage;
1777        }
1778    }
1779
1780    /// Same as `take_hostcall_val_storage`, but for the direction of the host
1781    /// calling wasm.
1782    #[inline]
1783    pub fn take_wasm_val_raw_storage(&mut self) -> Vec<ValRaw> {
1784        mem::take(&mut self.wasm_val_raw_storage)
1785    }
1786
1787    /// Same as `save_hostcall_val_storage`, but for the direction of the host
1788    /// calling wasm.
1789    #[inline]
1790    pub fn save_wasm_val_raw_storage(&mut self, storage: Vec<ValRaw>) {
1791        if storage.capacity() > self.wasm_val_raw_storage.capacity() {
1792            self.wasm_val_raw_storage = storage;
1793        }
1794    }
1795
    /// Translates a WebAssembly fault at the native `pc` and native `addr` to a
    /// WebAssembly-relative fault.
    ///
    /// This function may abort the process if `addr` is not found to actually
    /// reside in any linear memory. In such a situation it means that the
    /// segfault was erroneously caught by Wasmtime and is possibly indicative
    /// of a code generator bug.
    ///
    /// This function returns `None` for dynamically-bounds-checked-memories
    /// with spectre mitigations enabled since the hardware fault address is
    /// always zero in these situations which means that the trapping context
    /// doesn't have enough information to report the fault address.
    pub(crate) fn wasm_fault(&self, pc: usize, addr: usize) -> Option<vm::WasmFault> {
        // There are a few instances where a "close to zero" pointer is loaded
        // and we expect that to happen:
        //
        // * Explicitly bounds-checked memories with spectre-guards enabled will
        //   cause out-of-bounds accesses to get routed to address 0, so allow
        //   wasm instructions to fault on the null address.
        // * `call_indirect` when invoking a null function pointer may load data
        //   from the a `VMFuncRef` whose address is null, meaning any field of
        //   `VMFuncRef` could be the address of the fault.
        //
        // In these situations where the address is so small it won't be in any
        // instance, so skip the checks below.
        if addr <= mem::size_of::<VMFuncRef>() {
            const _: () = {
                // static-assert that `VMFuncRef` isn't too big to ensure that
                // it lives solely within the first page as we currently only
                // have the guarantee that the first page of memory is unmapped,
                // no more.
                assert!(mem::size_of::<VMFuncRef>() <= 512);
            };
            return None;
        }

        // Search all known instances in this store for this address. Note that
        // this is probably not the speediest way to do this. Traps, however,
        // are generally not expected to be super fast and additionally stores
        // probably don't have all that many instances or memories.
        //
        // If this loop becomes hot in the future, however, it should be
        // possible to precompute maps about linear memories in a store and have
        // a quicker lookup.
        let mut fault = None;
        for (_, instance) in self.instances.iter() {
            if let Some(f) = instance.handle.get().wasm_fault(addr) {
                // At most one instance may claim the faulting address.
                assert!(fault.is_none());
                fault = Some(f);
            }
        }
        if fault.is_some() {
            return fault;
        }

        // No linear memory in this store claims `addr`: treat this as a
        // critical failure and abort the process (see the message below).
        cfg_if::cfg_if! {
            if #[cfg(feature = "std")] {
                // With the standard library a rich error can be printed here
                // to stderr and the native abort path is used.
                eprintln!(
                    "\
Wasmtime caught a segfault for a wasm program because the faulting instruction
is allowed to segfault due to how linear memories are implemented. The address
that was accessed, however, is not known to any linear memory in use within this
Store. This may be indicative of a critical bug in Wasmtime's code generation
because all addresses which are known to be reachable from wasm won't reach this
message.

    pc:      0x{pc:x}
    address: 0x{addr:x}

This is a possible security issue because WebAssembly has accessed something it
shouldn't have been able to. Other accesses may have succeeded and this one just
happened to be caught. The process will now be aborted to prevent this damage
from going any further and to alert what's going on. If this is a security
issue please reach out to the Wasmtime team via its security policy
at https://bytecodealliance.org/security.
"
                );
                std::process::abort();
            } else if #[cfg(panic = "abort")] {
                // Without the standard library but with `panic=abort` then
                // it's safe to panic as that's known to halt execution. For
                // now avoid the above error message as well since without
                // `std` it's probably best to be a bit more size-conscious.
                let _ = pc;
                panic!("invalid fault");
            } else {
                // Without `std` and with `panic = "unwind"` there's no
                // dedicated API to abort the process portably, so manufacture
                // this with a double-panic.
                let _ = pc;

                struct PanicAgainOnDrop;

                impl Drop for PanicAgainOnDrop {
                    fn drop(&mut self) {
                        panic!("panicking again to trigger a process abort");
                    }

                }

                let _bomb = PanicAgainOnDrop;

                panic!("invalid fault");
            }
        }
    }
1904
    /// Retrieve the store's protection key, if one is configured.
    #[inline]
    pub(crate) fn get_pkey(&self) -> Option<ProtectionKey> {
        self.pkey
    }
1910
    /// Returns simultaneous exclusive access to the component-model resource
    /// state: call contexts, the host resource table, and host resource data.
    #[inline]
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state(
        &mut self,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::ResourceTable,
        &mut crate::component::HostResourceData,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
        )
    }
1926
    /// Records that a component instance has been added to this store by
    /// bumping the instance counter.
    #[cfg(feature = "component-model")]
    pub(crate) fn push_component_instance(&mut self, instance: crate::component::Instance) {
        // We don't actually need the instance itself right now, but it seems
        // like something we will almost certainly eventually want to keep
        // around, so force callers to provide it.
        let _ = instance;

        self.num_component_instances += 1;
    }
1936
    /// Like `component_resource_state`, but additionally returns exclusive
    /// access to the given component instance's runtime state.
    #[cfg(feature = "component-model")]
    pub(crate) fn component_resource_state_with_instance(
        &mut self,
        instance: crate::component::Instance,
    ) -> (
        &mut vm::component::CallContexts,
        &mut vm::component::ResourceTable,
        &mut crate::component::HostResourceData,
        Pin<&mut vm::component::ComponentInstance>,
    ) {
        (
            &mut self.component_calls,
            &mut self.component_host_table,
            &mut self.host_resource_data,
            instance.id().from_data_get_mut(&mut self.store_data),
        )
    }
1954
    /// Exclusive access to this store's async fiber state.
    #[cfg(feature = "async")]
    pub(crate) fn fiber_async_state_mut(&mut self) -> &mut fiber::AsyncState {
        &mut self.async_state
    }
1959
    /// Whether this store has a protection key configured.
    #[cfg(feature = "async")]
    pub(crate) fn has_pkey(&self) -> bool {
        self.pkey.is_some()
    }
1964
    /// Returns a borrow of this store's executor: either an interpreter
    /// reference or native execution.
    pub(crate) fn executor(&mut self) -> ExecutorRef<'_> {
        match &mut self.executor {
            Executor::Interpreter(i) => ExecutorRef::Interpreter(i.as_interpreter_ref()),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => ExecutorRef::Native,
        }
    }
1972
    /// Swaps this store's executor state with the one provided.
    #[cfg(feature = "async")]
    pub(crate) fn swap_executor(&mut self, executor: &mut Executor) {
        mem::swap(&mut self.executor, executor);
    }
1977
    /// Returns the unwinding implementation matching this store's executor.
    pub(crate) fn unwinder(&self) -> &'static dyn Unwind {
        match &self.executor {
            Executor::Interpreter(i) => i.unwinder(),
            #[cfg(has_host_compiler_backend)]
            Executor::Native => &vm::UnwindHost,
        }
    }
1985
    /// Allocates a new continuation. Note that we currently don't support
    /// deallocating them. Instead, all continuations remain allocated
    /// throughout the store's lifetime.
    #[cfg(feature = "stack-switching")]
    pub fn allocate_continuation(&mut self) -> Result<*mut VMContRef> {
        // FIXME(frank-emrich) Do we need to pin this?
        let mut continuation = Box::new(VMContRef::empty());
        let stack_size = self.engine.config().async_stack_size;
        let stack = crate::vm::VMContinuationStack::new(stack_size)?;
        continuation.stack = stack;
        // The pointee is heap-allocated, so moving the `Box` into
        // `self.continuations` below does not invalidate this pointer.
        let ptr = continuation.deref_mut() as *mut VMContRef;
        self.continuations.push(continuation);
        Ok(ptr)
    }
2000
    /// Constructs and executes an `InstanceAllocationRequest` and pushes the
    /// returned instance into the store.
    ///
    /// This is a helper method for invoking
    /// `InstanceAllocator::allocate_module` with the appropriate parameters
    /// from this store's own configuration. The `kind` provided is used to
    /// distinguish between "real" modules and dummy ones that are synthesized
    /// for embedder-created memories, globals, tables, etc. The `kind` will
    /// also use a different instance allocator by default, the one passed in,
    /// rather than the engine's default allocator.
    ///
    /// This method will push the instance within `StoreOpaque` onto the
    /// `instances` array and return the `InstanceId` which can be used to look
    /// it up within the store.
    ///
    /// # Safety
    ///
    /// The request's associated module, memories, tables, and vmctx must have
    /// already have been validated by `validate_module` for the allocator
    /// configured. This is typically done during module construction for
    /// example.
    pub(crate) unsafe fn allocate_instance(
        &mut self,
        kind: AllocateInstanceKind<'_>,
        runtime_info: &ModuleRuntimeInfo,
        imports: Imports<'_>,
    ) -> Result<InstanceId> {
        // Predict the instance's id up front so the allocation request can
        // carry it; this prediction is verified by the `assert_eq!` below.
        let id = self.instances.next_key();

        let allocator = match kind {
            AllocateInstanceKind::Module(_) => self.engine().allocator(),
            AllocateInstanceKind::Dummy { allocator } => allocator,
        };
        let handle = allocator.allocate_module(InstanceAllocationRequest {
            id,
            runtime_info,
            imports,
            store: StorePtr::new(self.traitobj()),
            wmemcheck: self.engine().config().wmemcheck,
            pkey: self.get_pkey(),
            tunables: self.engine().tunables(),
        })?;

        let actual = match kind {
            AllocateInstanceKind::Module(module_id) => {
                log::trace!(
                    "Adding instance to store: store={:?}, module={module_id:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Real { module_id },
                })
            }
            AllocateInstanceKind::Dummy { .. } => {
                log::trace!(
                    "Adding dummy instance to store: store={:?}, instance={id:?}",
                    self.id()
                );
                self.instances.push(StoreInstance {
                    handle,
                    kind: StoreInstanceKind::Dummy,
                })
            }
        };

        // double-check we didn't accidentally allocate two instances and our
        // prediction of what the id would be is indeed the id it should be.
        assert_eq!(id, actual);

        Ok(id)
    }
2073
    /// Returns the `StoreInstanceId` that can be used to re-acquire access to
    /// `vmctx` from a store later on.
    ///
    /// # Safety
    ///
    /// This method is unsafe as it cannot validate that `vmctx` is a valid
    /// allocation that lives within this store.
    pub(crate) unsafe fn vmctx_id(&self, vmctx: NonNull<VMContext>) -> StoreInstanceId {
        // Recover the instance's own id from its vmctx, then pair it with
        // this store's id.
        let instance_id = vm::Instance::from_vmctx(vmctx, |i| i.id());
        StoreInstanceId::new(self.id(), instance_id)
    }
2085}
2086
/// Helper parameter to [`StoreOpaque::allocate_instance`].
pub(crate) enum AllocateInstanceKind<'a> {
    /// An embedder-provided module is being allocated meaning that the default
    /// engine's allocator will be used.
    Module(RegisteredModuleId),

    /// Add a dummy instance to the store.
    ///
    /// These are instances that are just implementation details of something
    /// else (e.g. host-created memories that are not actually defined in any
    /// Wasm module) and therefore shouldn't show up in things like core dumps.
    ///
    /// A custom, typically OnDemand-flavored, allocator is provided to execute
    /// the allocation.
    Dummy {
        /// The allocator to use for this dummy instance instead of the
        /// engine's default.
        allocator: &'a dyn InstanceAllocator,
    },
}
2105
// NOTE(review): `vm::VMStore` is an unsafe trait; the obligations this impl
// must uphold are documented at the trait's definition — confirm there.
unsafe impl<T> vm::VMStore for StoreInner<T> {
    #[cfg(feature = "component-model-async")]
    fn component_async_store(
        &mut self,
    ) -> &mut dyn crate::runtime::component::VMComponentAsyncStore {
        self
    }

    fn store_opaque(&self) -> &StoreOpaque {
        &self.inner
    }

    fn store_opaque_mut(&mut self) -> &mut StoreOpaque {
        &mut self.inner
    }

    fn memory_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, anyhow::Error> {
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).memory_growing(current, desired, maximum)
            }
            // Async limiters must be polled on a fiber via `block_on`; the
            // limiter is re-fetched through the store argument handed to the
            // closure rather than captured, since `self` is consumed by
            // `block_on`.
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(_)) => self.block_on(|store| {
                let limiter = match &mut store.0.limiter {
                    Some(ResourceLimiterInner::Async(limiter)) => limiter,
                    _ => unreachable!(),
                };
                limiter(&mut store.0.data).memory_growing(current, desired, maximum)
            })?,
            // No limiter configured: growth is always permitted.
            None => Ok(true),
        }
    }

    fn memory_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).memory_grow_failed(error)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => {
                limiter(&mut self.data).memory_grow_failed(error)
            }
            None => {
                log::debug!("ignoring memory growth failure error: {error:?}");
                Ok(())
            }
        }
    }

    fn table_growing(
        &mut self,
        current: usize,
        desired: usize,
        maximum: Option<usize>,
    ) -> Result<bool, anyhow::Error> {
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).table_growing(current, desired, maximum)
            }
            // Same fiber-based dispatch as `memory_growing` above.
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(_)) => self.block_on(|store| {
                let limiter = match &mut store.0.limiter {
                    Some(ResourceLimiterInner::Async(limiter)) => limiter,
                    _ => unreachable!(),
                };
                limiter(&mut store.0.data).table_growing(current, desired, maximum)
            })?,
            None => Ok(true),
        }
    }

    fn table_grow_failed(&mut self, error: anyhow::Error) -> Result<()> {
        match self.limiter {
            Some(ResourceLimiterInner::Sync(ref mut limiter)) => {
                limiter(&mut self.data).table_grow_failed(error)
            }
            #[cfg(feature = "async")]
            Some(ResourceLimiterInner::Async(ref mut limiter)) => {
                limiter(&mut self.data).table_grow_failed(error)
            }
            None => {
                log::debug!("ignoring table growth failure: {error:?}");
                Ok(())
            }
        }
    }

    // Called when wasm exhausts its injected fuel: either trap with
    // `OutOfFuel` or, when fuel remains and async yielding is configured,
    // yield back to the executor.
    fn out_of_gas(&mut self) -> Result<()> {
        if !self.refuel() {
            return Err(Trap::OutOfFuel.into());
        }
        #[cfg(feature = "async")]
        if self.fuel_yield_interval.is_some() {
            self.async_yield_impl()?;
        }
        Ok(())
    }

    // Called when the current epoch passes this store's deadline; with no
    // configured behavior this traps with `Trap::Interrupt`.
    #[cfg(target_has_atomic = "64")]
    fn new_epoch(&mut self) -> Result<u64, anyhow::Error> {
        // Temporarily take the configured behavior to avoid mutably borrowing
        // multiple times.
        let mut behavior = self.epoch_deadline_behavior.take();
        let delta_result = match &mut behavior {
            None => Err(Trap::Interrupt.into()),
            Some(callback) => callback((&mut *self).as_context_mut()).and_then(|update| {
                let delta = match update {
                    UpdateDeadline::Continue(delta) => delta,
                    #[cfg(feature = "async")]
                    UpdateDeadline::Yield(delta) => {
                        assert!(
                            self.async_support(),
                            "cannot use `UpdateDeadline::Yield` without enabling async support in the config"
                        );
                        // Do the async yield. May return a trap if future was
                        // canceled while we're yielded.
                        self.async_yield_impl()?;
                        delta
                    }
                    #[cfg(feature = "async")]
                    UpdateDeadline::YieldCustom(delta, future) => {
                        assert!(
                            self.async_support(),
                            "cannot use `UpdateDeadline::YieldCustom` without enabling async support in the config"
                        );

                        // When control returns, we have a `Result<()>` passed
                        // in from the host fiber. If this finished successfully then
                        // we were resumed normally via a `poll`, so keep going.  If
                        // the future was dropped while we were yielded, then we need
                        // to clean up this fiber. Do so by raising a trap which will
                        // abort all wasm and get caught on the other side to clean
                        // things up.
                        self.block_on(|_| future)?;
                        delta
                    }
                };

                // Set a new deadline and return the new epoch deadline so
                // the Wasm code doesn't have to reload it.
                self.set_epoch_deadline(delta);
                Ok(self.get_epoch_deadline())
            })
        };

        // Put back the original behavior which was replaced by `take`.
        self.epoch_deadline_behavior = behavior;
        delta_result
    }

    #[cfg(feature = "gc")]
    unsafe fn maybe_async_grow_or_collect_gc_heap(
        &mut self,
        root: Option<VMGcRef>,
        bytes_needed: Option<u64>,
    ) -> Result<Option<VMGcRef>> {
        self.inner.maybe_async_gc(root, bytes_needed)
    }

    // Without GC support there is nothing to grow or collect; the root is
    // passed through unchanged.
    #[cfg(not(feature = "gc"))]
    unsafe fn maybe_async_grow_or_collect_gc_heap(
        &mut self,
        root: Option<VMGcRef>,
        _bytes_needed: Option<u64>,
    ) -> Result<Option<VMGcRef>> {
        Ok(root)
    }

    #[cfg(feature = "component-model")]
    fn component_calls(&mut self) -> &mut vm::component::CallContexts {
        &mut self.component_calls
    }
}
2284
impl<T> StoreInner<T> {
    /// Sets this store's epoch deadline to `delta` ticks past the engine's
    /// current epoch.
    #[cfg(target_has_atomic = "64")]
    pub(crate) fn set_epoch_deadline(&mut self, delta: u64) {
        // Set a new deadline based on the "epoch deadline delta".
        //
        // Also, note that when this update is performed while Wasm is
        // on the stack, the Wasm will reload the new value once we
        // return into it.
        let current_epoch = self.engine().current_epoch();
        let epoch_deadline = self.vm_store_context.epoch_deadline.get_mut();
        *epoch_deadline = current_epoch + delta;
    }

    /// Clears the epoch-deadline behavior; `new_epoch` then returns a
    /// `Trap::Interrupt` when the deadline is reached.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_trap(&mut self) {
        self.epoch_deadline_behavior = None;
    }

    /// Installs `callback` to run when the epoch deadline is reached.
    #[cfg(target_has_atomic = "64")]
    fn epoch_deadline_callback(
        &mut self,
        callback: Box<dyn FnMut(StoreContextMut<T>) -> Result<UpdateDeadline> + Send + Sync>,
    ) {
        self.epoch_deadline_behavior = Some(callback);
    }

    /// Reads the currently configured epoch deadline.
    fn get_epoch_deadline(&mut self) -> u64 {
        *self.vm_store_context.epoch_deadline.get_mut()
    }
}
2315
2316impl<T: Default> Default for Store<T> {
2317    fn default() -> Store<T> {
2318        Store::new(&Engine::default(), T::default())
2319    }
2320}
2321
2322impl<T: fmt::Debug> fmt::Debug for Store<T> {
2323    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
2324        let inner = &**self.inner as *const StoreInner<T>;
2325        f.debug_struct("Store")
2326            .field("inner", &inner)
2327            .field("data", &self.inner.data)
2328            .finish()
2329    }
2330}
2331
impl<T> Drop for Store<T> {
    fn drop(&mut self) {
        // Run store-level teardown first, while both the user's `T` and the
        // inner store are still fully alive.
        self.run_manual_drop_routines();

        // for documentation on this `unsafe`, see `into_data`.
        //
        // Both fields are `ManuallyDrop`, so this destructor is the one
        // place their destructors actually run — and the order matters:
        // the user-provided `data` is dropped first, then the rest of the
        // store's innards.
        unsafe {
            ManuallyDrop::drop(&mut self.inner.data);
            ManuallyDrop::drop(&mut self.inner);
        }
    }
}
2343
impl Drop for StoreOpaque {
    fn drop(&mut self) {
        // NB it's important that this destructor does not access `self.data`.
        // That is deallocated by `Drop for Store<T>` above.

        unsafe {
            let allocator = self.engine.allocator();
            // `Dummy` instances are deallocated through this on-demand
            // allocator below rather than through the engine's configured
            // allocator (presumably matching how they were allocated).
            let ondemand = OnDemandInstanceAllocator::default();
            let store_id = self.id();

            // If this store created a GC heap, return it — and then the
            // linear memory backing it — to the allocator. Only compiled in
            // with the `gc` feature, where `gc_store` exists.
            #[cfg(feature = "gc")]
            if let Some(gc_store) = self.gc_store.take() {
                let gc_alloc_index = gc_store.allocation_index;
                log::trace!("store {store_id:?} is deallocating GC heap {gc_alloc_index:?}");
                debug_assert!(self.engine.features().gc_types());
                let (mem_alloc_index, mem) =
                    allocator.deallocate_gc_heap(gc_alloc_index, gc_store.gc_heap);
                allocator.deallocate_memory(None, mem_alloc_index, mem);
            }

            // Deallocate every instance still owned by this store, routing
            // each through the allocator that matches its kind.
            for (id, instance) in self.instances.iter_mut() {
                log::trace!("store {store_id:?} is deallocating {id:?}");
                if let StoreInstanceKind::Dummy = instance.kind {
                    ondemand.deallocate_module(&mut instance.handle);
                } else {
                    allocator.deallocate_module(&mut instance.handle);
                }
            }

            // Undo `num_component_instances` worth of component-instance
            // count increments on the allocator.
            #[cfg(feature = "component-model")]
            {
                for _ in 0..self.num_component_instances {
                    allocator.decrement_component_instance_count();
                }
            }
        }
    }
}
2382
#[cfg(test)]
mod tests {
    use super::{get_fuel, refuel, set_fuel};
    use std::num::NonZeroU64;

    /// Stand-in for a store's fuel state: a signed count-down of consumed
    /// fuel, an unsigned reserve, and an optional async yield interval,
    /// matching the arguments of the free functions under test.
    struct FuelTank {
        pub consumed_fuel: i64,
        pub reserve_fuel: u64,
        pub yield_interval: Option<NonZeroU64>,
    }

    impl FuelTank {
        fn new() -> Self {
            Self {
                consumed_fuel: 0,
                reserve_fuel: 0,
                yield_interval: None,
            }
        }

        fn get_fuel(&self) -> u64 {
            get_fuel(self.consumed_fuel, self.reserve_fuel)
        }

        fn refuel(&mut self) -> bool {
            refuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
            )
        }

        fn set_fuel(&mut self, fuel: u64) {
            set_fuel(
                &mut self.consumed_fuel,
                &mut self.reserve_fuel,
                self.yield_interval,
                fuel,
            );
        }
    }

    #[test]
    fn smoke() {
        let mut tank = FuelTank::new();

        // No yield interval: all fuel becomes a negative count-down in
        // `consumed_fuel`, nothing is banked.
        tank.set_fuel(10);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 0);

        // With an interval only one interval's worth is made active and the
        // remainder goes into the reserve.
        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(25);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, 15);
    }

    #[test]
    fn does_not_lose_precision() {
        // Round-tripping through set/get must be exact even for values that
        // don't fit in an `i64`.
        let mut tank = FuelTank::new();

        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);

        tank.set_fuel(i64::MAX as u64);
        assert_eq!(tank.get_fuel(), i64::MAX as u64);

        tank.set_fuel(i64::MAX as u64 + 1);
        assert_eq!(tank.get_fuel(), i64::MAX as u64 + 1);
    }

    #[test]
    fn yielding_does_not_lose_precision() {
        // Same exactness requirement, but with a yield interval splitting
        // the fuel between the active count-down and the reserve.
        let mut tank = FuelTank::new();

        tank.yield_interval = NonZeroU64::new(10);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -10);
        assert_eq!(tank.reserve_fuel, u64::MAX - 10);

        // An interval of `u64::MAX` clamps the active portion to `i64::MAX`.
        tank.yield_interval = NonZeroU64::new(u64::MAX);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));

        // Likewise for any interval past `i64::MAX`.
        tank.yield_interval = NonZeroU64::new((i64::MAX as u64) + 1);
        tank.set_fuel(u64::MAX);
        assert_eq!(tank.get_fuel(), u64::MAX);
        assert_eq!(tank.consumed_fuel, -i64::MAX);
        assert_eq!(tank.reserve_fuel, u64::MAX - (i64::MAX as u64));
    }

    #[test]
    fn refueling() {
        // Consumption can run past the active allotment, since some
        // instructions burn several units of fuel at once. Refueling must be
        // strict in its accounting and never hand out more fuel than the
        // tank actually holds.
        let mut tank = FuelTank::new();

        // Overconsumption (4 past zero) is paid for out of the reserve.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 42;
        tank.consumed_fuel = 4;
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 28);
        assert_eq!(tank.consumed_fuel, -10);

        // Total fuel is preserved across a refuel.
        tank.yield_interval = NonZeroU64::new(1);
        tank.reserve_fuel = 8;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 4);
        assert!(tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, -1);
        assert_eq!(tank.get_fuel(), 4);

        // When the debt exceeds the reserve, refueling fails and the state
        // is left untouched.
        tank.yield_interval = NonZeroU64::new(10);
        tank.reserve_fuel = 3;
        tank.consumed_fuel = 4;
        assert_eq!(tank.get_fuel(), 0);
        assert!(!tank.refuel());
        assert_eq!(tank.reserve_fuel, 3);
        assert_eq!(tank.consumed_fuel, 4);
        assert_eq!(tank.get_fuel(), 0);
    }
}