wasmtime/
config.rs

1use crate::memory::MemoryCreator;
2use crate::trampoline::MemoryCreatorProxy;
3use anyhow::{bail, Result};
4use serde::{Deserialize, Serialize};
5use std::collections::{HashMap, HashSet};
6use std::fmt;
7#[cfg(feature = "cache")]
8use std::path::Path;
9use std::str::FromStr;
10use std::sync::Arc;
11use target_lexicon::Architecture;
12use wasmparser::WasmFeatures;
13#[cfg(feature = "cache")]
14use wasmtime_cache::CacheConfig;
15use wasmtime_environ::Tunables;
16use wasmtime_jit::{JitDumpAgent, NullProfilerAgent, PerfMapAgent, ProfilingAgent, VTuneAgent};
17use wasmtime_runtime::{InstanceAllocator, OnDemandInstanceAllocator, RuntimeMemoryCreator};
18
19pub use wasmtime_environ::CacheStore;
20
/// Represents the module instance allocation strategy to use.
#[derive(Clone)]
pub enum InstanceAllocationStrategy {
    /// The on-demand instance allocation strategy.
    ///
    /// Resources related to a module instance are allocated at instantiation time and
    /// immediately deallocated when the `Store` referencing the instance is dropped.
    ///
    /// This is the default allocation strategy for Wasmtime.
    OnDemand,
    /// The pooling instance allocation strategy.
    ///
    /// A pool of resources is created in advance and module instantiation reuses resources
    /// from the pool. Resources are returned to the pool when the `Store` referencing the instance
    /// is dropped.
    ///
    /// Only available when the `pooling-allocator` crate feature is enabled.
    #[cfg(feature = "pooling-allocator")]
    Pooling(PoolingAllocationConfig),
}
39
40impl InstanceAllocationStrategy {
41    /// The default pooling instance allocation strategy.
42    #[cfg(feature = "pooling-allocator")]
43    pub fn pooling() -> Self {
44        Self::Pooling(Default::default())
45    }
46}
47
48impl Default for InstanceAllocationStrategy {
49    fn default() -> Self {
50        Self::OnDemand
51    }
52}
53
#[derive(Clone)]
/// Configure the strategy used for versioning in serializing and deserializing [`crate::Module`].
pub enum ModuleVersionStrategy {
    /// Use the wasmtime crate's Cargo package version.
    ///
    /// This is the default strategy.
    WasmtimeVersion,
    /// Use a custom version string. Must be at most 255 bytes.
    Custom(String),
    /// Emit no version string in serialization, and accept all version strings in deserialization.
    None,
}
64
65impl Default for ModuleVersionStrategy {
66    fn default() -> Self {
67        ModuleVersionStrategy::WasmtimeVersion
68    }
69}
70
/// Global configuration options used to create an [`Engine`](crate::Engine)
/// and customize its behavior.
///
/// This structure exposes a builder-like interface and is primarily consumed by
/// [`Engine::new()`](crate::Engine::new).
///
/// The validation of `Config` is deferred until the engine is being built, thus
/// a problematic config may cause `Engine::new` to fail.
#[derive(Clone)]
pub struct Config {
    // Compiler strategy/target/settings; only present when a compiler backend
    // is compiled in.
    #[cfg(compiler)]
    compiler_config: CompilerConfig,
    // Selected profiling strategy (defaults to `ProfilingStrategy::None`).
    profiling_strategy: ProfilingStrategy,

    // Knobs forwarded to compilation and the runtime.
    pub(crate) tunables: Tunables,
    #[cfg(feature = "cache")]
    pub(crate) cache_config: CacheConfig,
    // Custom memory creator, if any; `None` uses the runtime default.
    pub(crate) mem_creator: Option<Arc<dyn RuntimeMemoryCreator>>,
    pub(crate) allocation_strategy: InstanceAllocationStrategy,
    // Maximum stack space usable by executing wasm; defaults to 512 KiB
    // (see `Config::new`).
    pub(crate) max_wasm_stack: usize,
    // Set of enabled wasm proposals/features.
    pub(crate) features: WasmFeatures,
    pub(crate) wasm_backtrace: bool,
    // Whether the `WASMTIME_BACKTRACE_DETAILS` env var was consulted to decide
    // backtrace detail (set by `wasm_backtrace_details`).
    pub(crate) wasm_backtrace_details_env_used: bool,
    pub(crate) native_unwind_info: bool,
    #[cfg(feature = "async")]
    pub(crate) async_stack_size: usize,
    pub(crate) async_support: bool,
    pub(crate) module_version: ModuleVersionStrategy,
    pub(crate) parallel_compilation: bool,
    pub(crate) memory_init_cow: bool,
    pub(crate) memory_guaranteed_dense_image_size: u64,
    pub(crate) force_memory_init_memfd: bool,
}
104
/// User-provided configuration for the compiler.
#[cfg(compiler)]
#[derive(Debug, Clone)]
struct CompilerConfig {
    /// Which compilation strategy to use.
    strategy: Strategy,
    /// Target triple to compile for; `None` means the host triple is used.
    target: Option<target_lexicon::Triple>,
    /// Compiler settings explicitly configured as key/value pairs.
    settings: HashMap<String, String>,
    /// Boolean compiler flags that have been enabled.
    flags: HashSet<String>,
    /// Storage backend for the incremental-compilation cache, if enabled.
    // Note: no per-field `#[cfg(compiler)]` needed here — the entire struct is
    // already gated on `#[cfg(compiler)]` above, so the previous field-level
    // attribute was redundant and has been removed.
    cache_store: Option<Arc<dyn CacheStore>>,
}
116
#[cfg(compiler)]
impl CompilerConfig {
    /// Creates a configuration for the given compilation strategy with no
    /// target override, no settings, no flags, and no cache store.
    fn new(strategy: Strategy) -> Self {
        CompilerConfig {
            strategy,
            target: None,
            settings: HashMap::new(),
            flags: HashSet::new(),
            cache_store: None,
        }
    }

    /// Ensures that the key is not set or equals to the given value.
    /// If the key is not set, it will be set to the given value.
    ///
    /// # Returns
    ///
    /// Returns true if successfully set or already had the given setting
    /// value, or false if the setting was explicitly set to something
    /// else previously.
    fn ensure_setting_unset_or_given(&mut self, k: &str, v: &str) -> bool {
        match self.settings.get(k) {
            // Already configured: succeed only if it matches the requested value.
            Some(existing) => existing == v,
            // Not configured yet: record the requested value.
            None => {
                self.settings.insert(k.to_string(), v.to_string());
                true
            }
        }
    }
}
148
#[cfg(compiler)]
impl Default for CompilerConfig {
    /// Defaults to automatic compilation strategy selection.
    fn default() -> Self {
        CompilerConfig::new(Strategy::Auto)
    }
}
155
156impl Config {
157    /// Creates a new configuration object with the default configuration
158    /// specified.
159    pub fn new() -> Self {
160        let mut ret = Self {
161            tunables: Tunables::default(),
162            #[cfg(compiler)]
163            compiler_config: CompilerConfig::default(),
164            #[cfg(feature = "cache")]
165            cache_config: CacheConfig::new_cache_disabled(),
166            profiling_strategy: ProfilingStrategy::None,
167            mem_creator: None,
168            allocation_strategy: InstanceAllocationStrategy::OnDemand,
169            // 512k of stack -- note that this is chosen currently to not be too
170            // big, not be too small, and be a good default for most platforms.
171            // One platform of particular note is Windows where the stack size
172            // of the main thread seems to, by default, be smaller than that of
173            // Linux and macOS. This 512k value at least lets our current test
174            // suite pass on the main thread of Windows (using `--test-threads
175            // 1` forces this), or at least it passed when this change was
176            // committed.
177            max_wasm_stack: 512 * 1024,
178            wasm_backtrace: true,
179            wasm_backtrace_details_env_used: false,
180            native_unwind_info: true,
181            features: WasmFeatures::default(),
182            #[cfg(feature = "async")]
183            async_stack_size: 2 << 20,
184            async_support: false,
185            module_version: ModuleVersionStrategy::default(),
186            parallel_compilation: true,
187            memory_init_cow: true,
188            memory_guaranteed_dense_image_size: 16 << 20,
189            force_memory_init_memfd: false,
190        };
191        #[cfg(compiler)]
192        {
193            ret.cranelift_debug_verifier(false);
194            ret.cranelift_opt_level(OptLevel::Speed);
195        }
196
197        ret.wasm_reference_types(true);
198        ret.wasm_multi_value(true);
199        ret.wasm_bulk_memory(true);
200        ret.wasm_simd(true);
201        ret.wasm_backtrace_details(WasmBacktraceDetails::Environment);
202
203        // This is on-by-default in `wasmparser` since it's a stage 4+ proposal
204        // but it's not implemented in Wasmtime yet so disable it.
205        ret.features.tail_call = false;
206
207        ret
208    }
209
210    /// Sets the target triple for the [`Config`].
211    ///
212    /// By default, the host target triple is used for the [`Config`].
213    ///
214    /// This method can be used to change the target triple.
215    ///
216    /// Cranelift flags will not be inferred for the given target and any
217    /// existing target-specific Cranelift flags will be cleared.
218    ///
219    /// # Errors
220    ///
221    /// This method will error if the given target triple is not supported.
222    #[cfg(compiler)]
223    #[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
224    pub fn target(&mut self, target: &str) -> Result<&mut Self> {
225        self.compiler_config.target =
226            Some(target_lexicon::Triple::from_str(target).map_err(|e| anyhow::anyhow!(e))?);
227
228        Ok(self)
229    }
230
231    /// Enables the incremental compilation cache in Cranelift, using the provided `CacheStore`
232    /// backend for storage.
233    #[cfg(all(feature = "incremental-cache", feature = "cranelift"))]
234    pub fn enable_incremental_compilation(
235        &mut self,
236        cache_store: Arc<dyn CacheStore>,
237    ) -> Result<&mut Self> {
238        self.compiler_config.cache_store = Some(cache_store);
239        Ok(self)
240    }
241
    /// Whether or not to enable support for asynchronous functions in Wasmtime.
    ///
    /// When enabled, the config can optionally define host functions with `async`.
    /// Instances created and functions called with this `Config` *must* be called
    /// through their asynchronous APIs, however. For example using
    /// [`Func::call`](crate::Func::call) will panic when used with this config.
    ///
    /// # Asynchronous Wasm
    ///
    /// WebAssembly does not currently have a way to specify at the bytecode
    /// level what is and isn't async. Host-defined functions, however, may be
    /// defined as `async`. WebAssembly imports always appear synchronous, which
    /// gives rise to a bit of an impedance mismatch here. To solve this
    /// Wasmtime supports "asynchronous configs" which enables calling these
    /// asynchronous functions in a way that looks synchronous to the executing
    /// WebAssembly code.
    ///
    /// An asynchronous config must always invoke wasm code asynchronously,
    /// meaning we'll always represent its computation as a
    /// [`Future`](std::future::Future). The `poll` method of the futures
    /// returned by Wasmtime will perform the actual work of calling the
    /// WebAssembly. Wasmtime won't manage its own thread pools or similar,
    /// that's left up to the embedder.
    ///
    /// To implement futures in a way that WebAssembly sees asynchronous host
    /// functions as synchronous, all async Wasmtime futures will execute on a
    /// separately allocated native stack from the thread otherwise executing
    /// Wasmtime. This separate native stack can then be switched to and from.
    /// Using this whenever an `async` host function returns a future that
    /// resolves to `Pending` we switch away from the temporary stack back to
    /// the main stack and propagate the `Pending` status.
    ///
    /// In general it's encouraged that the integration with `async` and
    /// wasmtime is designed early on in your embedding of Wasmtime to ensure
    /// that it's planned that WebAssembly executes in the right context of your
    /// application.
    ///
    /// # Execution in `poll`
    ///
    /// The [`Future::poll`](std::future::Future::poll) method is the main
    /// driving force behind Rust's futures. That method's own documentation
    /// states "an implementation of `poll` should strive to return quickly, and
    /// should not block". This, however, can be at odds with executing
    /// WebAssembly code as part of the `poll` method itself. If your
    /// WebAssembly is untrusted then this could allow the `poll` method to take
    /// arbitrarily long in the worst case, likely blocking all other
    /// asynchronous tasks.
    ///
    /// To remedy this situation you have a few possible ways to solve this:
    ///
    /// * The most efficient solution is to enable
    ///   [`Config::epoch_interruption`] in conjunction with
    ///   [`crate::Store::epoch_deadline_async_yield_and_update`]. Coupled with
    ///   periodic calls to [`crate::Engine::increment_epoch`] this will cause
    ///   executing WebAssembly to periodically yield back according to the
    ///   epoch configuration settings. This enables `Future::poll` to take at
    ///   most a certain amount of time according to epoch configuration
    ///   settings and when increments happen. The benefit of this approach is
    ///   that the instrumentation in compiled code is quite lightweight, but a
    ///   downside can be that the scheduling is somewhat nondeterministic since
    ///   increments are usually timer-based which are not always deterministic.
    ///
    ///   Note that to prevent infinite execution of wasm it's recommended to
    ///   place a timeout on the entire future representing executing wasm code
    ///   and the periodic yields with epochs should ensure that when the
    ///   timeout is reached it's appropriately recognized.
    ///
    /// * Alternatively you can enable the
    ///   [`Config::consume_fuel`](crate::Config::consume_fuel) method as well
    ///   as [`crate::Store::out_of_fuel_async_yield`]. When doing so this will
    ///   configure Wasmtime futures to yield periodically while they're
    ///   executing WebAssembly code. After consuming the specified amount of
    ///   fuel wasm futures will return `Poll::Pending` from their `poll`
    ///   method, and will get automatically re-polled later. This enables the
    ///   `Future::poll` method to take roughly a fixed amount of time since
    ///   fuel is guaranteed to get consumed while wasm is executing. Unlike
    ///   epoch-based preemption this is deterministic since wasm always
    ///   consumes a fixed amount of fuel per-operation. The downside of this
    ///   approach, however, is that the compiled code instrumentation is
    ///   significantly more expensive than epoch checks.
    ///
    ///   Note that to prevent infinite execution of wasm it's recommended to
    ///   place a timeout on the entire future representing executing wasm code
    ///   and the periodic yields with fuel should ensure that when the
    ///   timeout is reached it's appropriately recognized.
    ///
    /// In all cases special care needs to be taken when integrating
    /// asynchronous wasm into your application. You should carefully plan where
    /// WebAssembly will execute and what compute resources will be allotted to
    /// it. If Wasmtime doesn't support exactly what you'd like just yet, please
    /// feel free to open an issue!
    #[cfg(feature = "async")]
    #[cfg_attr(nightlydoc, doc(cfg(feature = "async")))]
    pub fn async_support(&mut self, enable: bool) -> &mut Self {
        self.async_support = enable;
        self
    }
339
340    /// Configures whether DWARF debug information will be emitted during
341    /// compilation.
342    ///
343    /// By default this option is `false`.
344    pub fn debug_info(&mut self, enable: bool) -> &mut Self {
345        self.tunables.generate_native_debuginfo = enable;
346        self
347    }
348
    /// Configures whether [`WasmBacktrace`] will be present in the context of
    /// errors returned from Wasmtime.
    ///
    /// A backtrace may be collected whenever an error is returned from a host
    /// function call through to WebAssembly or when WebAssembly itself hits a
    /// trap condition, such as an out-of-bounds memory access. This flag
    /// indicates, in these conditions, whether the backtrace is collected or
    /// not.
    ///
    /// Currently wasm backtraces are implemented through frame pointer walking.
    /// This means that collecting a backtrace is expected to be a fast and
    /// relatively cheap operation. Additionally backtrace collection is
    /// suitable in concurrent environments since one thread capturing a
    /// backtrace won't block other threads.
    ///
    /// Collected backtraces are attached via [`anyhow::Error::context`] to
    /// errors returned from host functions. The [`WasmBacktrace`] type can be
    /// acquired via [`anyhow::Error::downcast_ref`] to inspect the backtrace.
    /// When this option is disabled then this context is never applied to
    /// errors coming out of wasm.
    ///
    /// This option is `true` by default.
    ///
    /// See [`Config::wasm_backtrace_details`] for configuring whether captured
    /// backtraces additionally include filename/line number information.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn wasm_backtrace(&mut self, enable: bool) -> &mut Self {
        self.wasm_backtrace = enable;
        self
    }
377
378    /// Configures whether backtraces in `Trap` will parse debug info in the wasm file to
379    /// have filename/line number information.
380    ///
381    /// When enabled this will causes modules to retain debugging information
382    /// found in wasm binaries. This debug information will be used when a trap
383    /// happens to symbolicate each stack frame and attempt to print a
384    /// filename/line number for each wasm frame in the stack trace.
385    ///
386    /// By default this option is `WasmBacktraceDetails::Environment`, meaning
387    /// that wasm will read `WASMTIME_BACKTRACE_DETAILS` to indicate whether details
388    /// should be parsed.
389    pub fn wasm_backtrace_details(&mut self, enable: WasmBacktraceDetails) -> &mut Self {
390        self.wasm_backtrace_details_env_used = false;
391        self.tunables.parse_wasm_debuginfo = match enable {
392            WasmBacktraceDetails::Enable => true,
393            WasmBacktraceDetails::Disable => false,
394            WasmBacktraceDetails::Environment => {
395                self.wasm_backtrace_details_env_used = true;
396                std::env::var("WASMTIME_BACKTRACE_DETAILS")
397                    .map(|s| s == "1")
398                    .unwrap_or(false)
399            }
400        };
401        self
402    }
403
    /// Configures whether to generate native unwind information
    /// (e.g. `.eh_frame` on Linux).
    ///
    /// This configuration option only exists to help third-party stack
    /// capturing mechanisms, such as the system's unwinder or the `backtrace`
    /// crate, determine how to unwind through Wasm frames. It does not affect
    /// whether Wasmtime can capture Wasm backtraces or not. The presence of
    /// [`WasmBacktrace`] is controlled by the [`Config::wasm_backtrace`]
    /// option.
    ///
    /// Note that native unwind information is always generated when targeting
    /// Windows, since the Windows ABI requires it.
    ///
    /// This option defaults to `true`.
    ///
    /// [`WasmBacktrace`]: crate::WasmBacktrace
    pub fn native_unwind_info(&mut self, enable: bool) -> &mut Self {
        // Per the note above, unwind info is emitted regardless of this flag
        // when targeting Windows.
        self.native_unwind_info = enable;
        self
    }
424
    /// Configures whether execution of WebAssembly will "consume fuel" to
    /// either halt or yield execution as desired.
    ///
    /// This can be used to deterministically prevent infinitely-executing
    /// WebAssembly code by instrumenting generated code to consume fuel as it
    /// executes. When fuel runs out the behavior is defined by configuration
    /// within a [`Store`], and by default a trap is raised.
    ///
    /// Note that a [`Store`] starts with no fuel, so if you enable this option
    /// you'll have to be sure to pour some fuel into [`Store`] before
    /// executing some code.
    ///
    /// By default this option is `false`.
    ///
    /// See [`Config::epoch_interruption`] for a lighter-weight, albeit
    /// non-deterministic, alternative for interrupting long-running code.
    ///
    /// [`Store`]: crate::Store
    pub fn consume_fuel(&mut self, enable: bool) -> &mut Self {
        self.tunables.consume_fuel = enable;
        self
    }
444
    /// Enables epoch-based interruption.
    ///
    /// When executing code in async mode, we sometimes want to
    /// implement a form of cooperative timeslicing: long-running Wasm
    /// guest code should periodically yield to the executor
    /// loop. This yielding could be implemented by using "fuel" (see
    /// [`consume_fuel`](Config::consume_fuel)). However, fuel
    /// instrumentation is somewhat expensive: it modifies the
    /// compiled form of the Wasm code so that it maintains a precise
    /// instruction count, frequently checking this count against the
    /// remaining fuel. If one does not need this precise count or
    /// deterministic interruptions, and only needs a periodic
    /// interrupt of some form, then it would be better to have a more
    /// lightweight mechanism.
    ///
    /// Epoch-based interruption is that mechanism. There is a global
    /// "epoch", which is a counter that divides time into arbitrary
    /// periods (or epochs). This counter lives on the
    /// [`Engine`](crate::Engine) and can be incremented by calling
    /// [`Engine::increment_epoch`](crate::Engine::increment_epoch).
    /// Epoch-based instrumentation works by setting a "deadline
    /// epoch". The compiled code knows the deadline, and at certain
    /// points, checks the current epoch against that deadline. It
    /// will yield if the deadline has been reached.
    ///
    /// The idea is that checking an infrequently-changing counter is
    /// cheaper than counting and frequently storing a precise metric
    /// (instructions executed) locally. The interruptions are not
    /// deterministic, but if the embedder increments the epoch in a
    /// periodic way (say, every regular timer tick by a thread or
    /// signal handler), then we can ensure that all async code will
    /// yield to the executor within a bounded time.
    ///
    /// The deadline check cannot be avoided by malicious wasm code. It is safe
    /// to use epoch deadlines to limit the execution time of untrusted
    /// code.
    ///
    /// The [`Store`](crate::Store) tracks the deadline, and controls
    /// what happens when the deadline is reached during
    /// execution. Several behaviors are possible:
    ///
    /// - Trap if code is executing when the epoch deadline is
    ///   met. See
    ///   [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap).
    ///
    /// - Call an arbitrary function. This function may choose to trap or
    ///   increment the epoch. See
    ///   [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback).
    ///
    /// - Yield to the executor loop, then resume when the future is
    ///   next polled. See
    ///   [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update).
    ///
    /// Trapping is the default. The yielding behavior may be used for
    /// the timeslicing behavior described above.
    ///
    /// This feature is available with or without async support.
    /// However, without async support, the timeslicing behavior is
    /// not available. This means epoch-based interruption can only
    /// serve as a simple external-interruption mechanism.
    ///
    /// An initial deadline must be set before executing code by calling
    /// [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline). If this
    /// deadline is not configured then wasm will immediately trap.
    ///
    /// ## When to use fuel vs. epochs
    ///
    /// In general, epoch-based interruption results in faster
    /// execution. This difference is sometimes significant: in some
    /// measurements, up to 2-3x. This is because epoch-based
    /// interruption does less work: it only watches for a global
    /// rarely-changing counter to increment, rather than keeping a
    /// local frequently-changing counter and comparing it to a
    /// deadline.
    ///
    /// Fuel, in contrast, should be used when *deterministic*
    /// yielding or trapping is needed. For example, if it is required
    /// that the same function call with the same starting state will
    /// always either complete or trap with an out-of-fuel error,
    /// deterministically, then fuel with a fixed bound should be
    /// used.
    ///
    /// # See Also
    ///
    /// - [`Engine::increment_epoch`](crate::Engine::increment_epoch)
    /// - [`Store::set_epoch_deadline`](crate::Store::set_epoch_deadline)
    /// - [`Store::epoch_deadline_trap`](crate::Store::epoch_deadline_trap)
    /// - [`Store::epoch_deadline_callback`](crate::Store::epoch_deadline_callback)
    /// - [`Store::epoch_deadline_async_yield_and_update`](crate::Store::epoch_deadline_async_yield_and_update)
    pub fn epoch_interruption(&mut self, enable: bool) -> &mut Self {
        self.tunables.epoch_interruption = enable;
        self
    }
538
    /// Configures the maximum amount of stack space available for
    /// executing WebAssembly code.
    ///
    /// WebAssembly has well-defined semantics on stack overflow. This is
    /// intended to be a knob which can help configure how much stack space
    /// wasm execution is allowed to consume. Note that the number here is not
    /// super-precise, but rather wasm will take at most "pretty close to this
    /// much" stack space.
    ///
    /// If a wasm call (or series of nested wasm calls) takes more stack space
    /// than the `size` specified then a stack overflow trap will be raised.
    ///
    /// Caveat: this knob only limits the stack space consumed by wasm code.
    /// More importantly, it does not ensure that this much stack space is
    /// available on the calling thread stack. Exhausting the thread stack
    /// typically leads to an **abort** of the process.
    ///
    /// Here are some examples of how that could happen:
    ///
    /// - Let's assume this option is set to 2 MiB and then a thread that has
    ///   a stack with 512 KiB left.
    ///
    ///   If wasm code consumes more than 512 KiB then the process will be aborted.
    ///
    /// - Assuming the same conditions, but this time wasm code does not consume
    ///   any stack but calls into a host function. The host function consumes
    ///   more than 512 KiB of stack space. The process will be aborted.
    ///
    /// There's another gotcha related to recursive calling into wasm: the stack
    /// space consumed by a host function is counted towards this limit. The
    /// host functions are not prevented from consuming more than this limit.
    /// However, if a host function used more than this limit and then called
    /// back into wasm, the execution will trap immediately because of
    /// stack overflow.
    ///
    /// When the `async` feature is enabled, this value cannot exceed the
    /// `async_stack_size` option. Be careful not to set this value too close
    /// to `async_stack_size` as doing so may limit how much stack space
    /// is available for host functions.
    ///
    /// By default this option is 512 KiB.
    ///
    /// # Errors
    ///
    /// The `Engine::new` method will fail if the `size` specified here is
    /// either 0 or larger than the [`Config::async_stack_size`] configuration.
    pub fn max_wasm_stack(&mut self, size: usize) -> &mut Self {
        self.max_wasm_stack = size;
        self
    }
589
    /// Configures the size of the stacks used for asynchronous execution.
    ///
    /// This setting configures the size of the stacks that are allocated for
    /// asynchronous execution. The value cannot be less than `max_wasm_stack`.
    ///
    /// The amount of stack space guaranteed for host functions is
    /// `async_stack_size - max_wasm_stack`, so take care not to set these two values
    /// close to one another; doing so may cause host functions to overflow the
    /// stack and abort the process.
    ///
    /// By default this option is 2 MiB.
    ///
    /// # Errors
    ///
    /// The `Engine::new` method will fail if the value for this option is
    /// smaller than the [`Config::max_wasm_stack`] option.
    #[cfg(feature = "async")]
    #[cfg_attr(nightlydoc, doc(cfg(feature = "async")))]
    pub fn async_stack_size(&mut self, size: usize) -> &mut Self {
        // The relationship with `max_wasm_stack` is validated at `Engine::new`
        // time (see `# Errors` above), not here.
        self.async_stack_size = size;
        self
    }
612
    /// Configures whether the WebAssembly threads proposal will be enabled for
    /// compilation.
    ///
    /// The [WebAssembly threads proposal][threads] is not currently fully
    /// standardized and is undergoing development. Additionally the support in
    /// wasmtime itself is still being worked on. Support for this feature can
    /// be enabled through this method for appropriate wasm modules.
    ///
    /// This feature gates items such as shared memories and atomic
    /// instructions. Note that the threads feature depends on the
    /// bulk memory feature, which is enabled by default.
    ///
    /// This is `false` by default.
    ///
    /// > **Note**: Wasmtime does not implement everything for the wasm threads
    /// > spec at this time, so bugs, panics, and possibly segfaults should be
    /// > expected. This should not be enabled in a production setting right
    /// > now.
    ///
    /// # Errors
    ///
    /// The validation of this feature is deferred until the engine is being built,
    /// and thus may cause `Engine::new` to fail if the `bulk_memory` feature is disabled.
    ///
    /// [threads]: https://github.com/webassembly/threads
    pub fn wasm_threads(&mut self, enable: bool) -> &mut Self {
        self.features.threads = enable;
        self
    }
642
    /// Configures whether the [WebAssembly reference types proposal][proposal]
    /// will be enabled for compilation.
    ///
    /// This feature gates items such as the `externref` and `funcref` types as
    /// well as allowing a module to define multiple tables.
    ///
    /// Note that the reference types proposal depends on the bulk memory proposal.
    ///
    /// This feature is `true` by default.
    ///
    /// # Errors
    ///
    /// The validation of this feature is deferred until the engine is being built,
    /// and thus may cause `Engine::new` to fail if the `bulk_memory` feature is disabled.
    ///
    /// [proposal]: https://github.com/webassembly/reference-types
    pub fn wasm_reference_types(&mut self, enable: bool) -> &mut Self {
        self.features.reference_types = enable;
        self
    }
663
664    /// Configures whether the WebAssembly SIMD proposal will be
665    /// enabled for compilation.
666    ///
667    /// The [WebAssembly SIMD proposal][proposal]. This feature gates items such
668    /// as the `v128` type and all of its operators being in a module. Note that
669    /// this does not enable the [relaxed simd proposal] as that is not
670    /// implemented in Wasmtime at this time.
671    ///
672    /// On x86_64 platforms note that enabling this feature requires SSE 4.2 and
673    /// below to be available on the target platform. Compilation will fail if
674    /// the compile target does not include SSE 4.2.
675    ///
676    /// This is `true` by default.
677    ///
678    /// [proposal]: https://github.com/webassembly/simd
679    /// [relaxed simd proposal]: https://github.com/WebAssembly/relaxed-simd
    pub fn wasm_simd(&mut self, enable: bool) -> &mut Self {
        // Toggle acceptance of the fixed-width SIMD (`v128`) proposal.
        self.features.simd = enable;
        self
    }
684
685    /// Configures whether the WebAssembly Relaxed SIMD proposal will be
686    /// enabled for compilation.
687    ///
688    /// The [WebAssembly Relaxed SIMD proposal][proposal] is not, at the time of
689    /// this writing, at stage 4. The relaxed SIMD proposal adds new
690    /// instructions to WebAssembly which, for some specific inputs, are allowed
691    /// to produce different results on different hosts. More-or-less this
692    /// proposal enables exposing platform-specific semantics of SIMD
693    /// instructions in a controlled fashion to a WebAssembly program. From an
694    /// embedder's perspective this means that WebAssembly programs may execute
695    /// differently depending on whether the host is x86_64 or AArch64, for
696    /// example.
697    ///
698    /// By default Wasmtime lowers relaxed SIMD instructions to the fastest
699    /// lowering for the platform it's running on. This means that, by default,
700    /// some relaxed SIMD instructions may have different results for the same
701    /// inputs across x86_64 and AArch64. This behavior can be disabled through
702    /// the [`Config::relaxed_simd_deterministic`] option which will force
703    /// deterministic behavior across all platforms, as classified by the
704    /// specification, at the cost of performance.
705    ///
706    /// This is `false` by default.
707    ///
708    /// [proposal]: https://github.com/webassembly/relaxed-simd
    pub fn wasm_relaxed_simd(&mut self, enable: bool) -> &mut Self {
        // Toggle acceptance of relaxed-SIMD instructions; whether their
        // lowering is deterministic is controlled separately by
        // `Config::relaxed_simd_deterministic`.
        self.features.relaxed_simd = enable;
        self
    }
713
714    /// This option can be used to control the behavior of the [relaxed SIMD
715    /// proposal's][proposal] instructions.
716    ///
717    /// The relaxed SIMD proposal introduces instructions that are allowed to
718    /// have different behavior on different architectures, primarily to afford
719    /// an efficient implementation on all architectures. This means, however,
720    /// that the same module may execute differently on one host than another,
721    /// which typically is not otherwise the case. This option is provided to
722    /// force Wasmtime to generate deterministic code for all relaxed simd
723    /// instructions, at the cost of performance, for all architectures. When
724    /// this option is enabled then the deterministic behavior of all
725    /// instructions in the relaxed SIMD proposal is selected.
726    ///
727    /// This is `false` by default.
728    ///
729    /// [proposal]: https://github.com/webassembly/relaxed-simd
    pub fn relaxed_simd_deterministic(&mut self, enable: bool) -> &mut Self {
        // Stored as a codegen tunable (not a wasm feature flag): selects the
        // deterministic lowering of relaxed-SIMD instructions on all hosts.
        self.tunables.relaxed_simd_deterministic = enable;
        self
    }
734
735    /// Configures whether the [WebAssembly bulk memory operations
736    /// proposal][proposal] will be enabled for compilation.
737    ///
738    /// This feature gates items such as the `memory.copy` instruction, passive
739    /// data/table segments, etc, being in a module.
740    ///
741    /// This is `true` by default.
742    ///
743    /// Feature `reference_types`, which is also `true` by default, requires
744    /// this feature to be enabled. Thus disabling this feature must also disable
745    /// `reference_types` as well using [`wasm_reference_types`](crate::Config::wasm_reference_types).
746    ///
747    /// # Errors
748    ///
749    /// Disabling this feature without disabling `reference_types` will cause
750    /// `Engine::new` to fail.
751    ///
752    /// [proposal]: https://github.com/webassembly/bulk-memory-operations
    pub fn wasm_bulk_memory(&mut self, enable: bool) -> &mut Self {
        // Record the feature flag; `reference_types` depends on this one and
        // the combination is validated when the `Engine` is built.
        self.features.bulk_memory = enable;
        self
    }
757
758    /// Configures whether the WebAssembly multi-value [proposal] will
759    /// be enabled for compilation.
760    ///
761    /// This feature gates functions and blocks returning multiple values in a
762    /// module, for example.
763    ///
764    /// This is `true` by default.
765    ///
766    /// [proposal]: https://github.com/webassembly/multi-value
    pub fn wasm_multi_value(&mut self, enable: bool) -> &mut Self {
        // Toggle multi-value returns for functions and blocks.
        self.features.multi_value = enable;
        self
    }
771
772    /// Configures whether the WebAssembly multi-memory [proposal] will
773    /// be enabled for compilation.
774    ///
775    /// This feature gates modules having more than one linear memory
776    /// declaration or import.
777    ///
778    /// This is `false` by default.
779    ///
780    /// [proposal]: https://github.com/webassembly/multi-memory
    pub fn wasm_multi_memory(&mut self, enable: bool) -> &mut Self {
        // Toggle support for more than one linear memory per module.
        self.features.multi_memory = enable;
        self
    }
785
786    /// Configures whether the WebAssembly memory64 [proposal] will
787    /// be enabled for compilation.
788    ///
    /// Note that the upstream specification is not finalized and Wasmtime
790    /// may also have bugs for this feature since it hasn't been exercised
791    /// much.
792    ///
793    /// This is `false` by default.
794    ///
795    /// [proposal]: https://github.com/webassembly/memory64
    pub fn wasm_memory64(&mut self, enable: bool) -> &mut Self {
        // Toggle 64-bit linear memories (memory64 proposal).
        self.features.memory64 = enable;
        self
    }
800
801    /// Configures whether the WebAssembly component-model [proposal] will
802    /// be enabled for compilation.
803    ///
804    /// Note that this feature is a work-in-progress and is incomplete.
805    ///
806    /// This is `false` by default.
807    ///
808    /// [proposal]: https://github.com/webassembly/component-model
    #[cfg(feature = "component-model")]
    pub fn wasm_component_model(&mut self, enable: bool) -> &mut Self {
        // Toggle component-model support; this method only exists when the
        // `component-model` cargo feature is compiled in.
        self.features.component_model = enable;
        self
    }
814
815    /// Configures which compilation strategy will be used for wasm modules.
816    ///
817    /// This method can be used to configure which compiler is used for wasm
818    /// modules, and for more documentation consult the [`Strategy`] enumeration
819    /// and its documentation.
820    ///
821    /// The default value for this is `Strategy::Auto`.
    #[cfg(compiler)]
    #[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
    pub fn strategy(&mut self, strategy: Strategy) -> &mut Self {
        // Record which compiler backend (see `Strategy`) to use for modules.
        self.compiler_config.strategy = strategy;
        self
    }
828
829    /// Creates a default profiler based on the profiling strategy chosen.
830    ///
831    /// Profiler creation calls the type's default initializer where the purpose is
832    /// really just to put in place the type used for profiling.
833    ///
834    /// Some [`ProfilingStrategy`] require specific platforms or particular feature
835    /// to be enabled, such as `ProfilingStrategy::JitDump` requires the `jitdump`
836    /// feature.
837    ///
838    /// # Errors
839    ///
840    /// The validation of this field is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the required feature is disabled, or the platform is not
842    /// supported.
    pub fn profiler(&mut self, profile: ProfilingStrategy) -> &mut Self {
        // Only records the strategy here; platform/feature support is
        // validated later, when the `Engine` is built (see doc comment above).
        self.profiling_strategy = profile;
        self
    }
847
848    /// Configures whether the debug verifier of Cranelift is enabled or not.
849    ///
850    /// When Cranelift is used as a code generation backend this will configure
851    /// it to have the `enable_verifier` flag which will enable a number of debug
852    /// checks inside of Cranelift. This is largely only useful for the
853    /// developers of wasmtime itself.
854    ///
855    /// The default value for this is `false`
856    #[cfg(compiler)]
857    #[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
858    pub fn cranelift_debug_verifier(&mut self, enable: bool) -> &mut Self {
859        let val = if enable { "true" } else { "false" };
860        self.compiler_config
861            .settings
862            .insert("enable_verifier".to_string(), val.to_string());
863        self
864    }
865
866    /// Configures the Cranelift code generator optimization level.
867    ///
868    /// When the Cranelift code generator is used you can configure the
869    /// optimization level used for generated code in a few various ways. For
870    /// more information see the documentation of [`OptLevel`].
871    ///
872    /// The default value for this is `OptLevel::None`.
873    #[cfg(compiler)]
874    #[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
875    pub fn cranelift_opt_level(&mut self, level: OptLevel) -> &mut Self {
876        let val = match level {
877            OptLevel::None => "none",
878            OptLevel::Speed => "speed",
879            OptLevel::SpeedAndSize => "speed_and_size",
880        };
881        self.compiler_config
882            .settings
883            .insert("opt_level".to_string(), val.to_string());
884        self
885    }
886
887    /// Configures the Cranelift code generator to use its
888    /// "egraph"-based mid-end optimizer.
889    ///
890    /// This optimizer has replaced the compiler's more traditional
891    /// pipeline of optimization passes with a unified code-rewriting
892    /// system. It is on by default, but the traditional optimization
    /// pass structure is still available for now (it is deprecated and
894    /// will be removed in a future version).
895    ///
896    /// The default value for this is `true`.
897    #[cfg(compiler)]
898    #[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
899    #[deprecated(
900        since = "5.0.0",
901        note = "egraphs will be the default and this method will be removed in a future version."
902    )]
903    pub fn cranelift_use_egraphs(&mut self, enable: bool) -> &mut Self {
904        let val = if enable { "true" } else { "false" };
905        self.compiler_config
906            .settings
907            .insert("use_egraphs".to_string(), val.to_string());
908        self
909    }
910
911    /// Configures whether Cranelift should perform a NaN-canonicalization pass.
912    ///
913    /// When Cranelift is used as a code generation backend this will configure
914    /// it to replace NaNs with a single canonical value. This is useful for users
915    /// requiring entirely deterministic WebAssembly computation.
916    /// This is not required by the WebAssembly spec, so it is not enabled by default.
917    ///
918    /// The default value for this is `false`
919    #[cfg(compiler)]
920    #[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
921    pub fn cranelift_nan_canonicalization(&mut self, enable: bool) -> &mut Self {
922        let val = if enable { "true" } else { "false" };
923        self.compiler_config
924            .settings
925            .insert("enable_nan_canonicalization".to_string(), val.to_string());
926        self
927    }
928
929    /// Allows setting a Cranelift boolean flag or preset. This allows
930    /// fine-tuning of Cranelift settings.
931    ///
932    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
933    /// either; other `Config` functions should be preferred for stability.
934    ///
935    /// # Safety
936    ///
937    /// This is marked as unsafe, because setting the wrong flag might break invariants,
938    /// resulting in execution hazards.
939    ///
940    /// # Errors
941    ///
    /// The validation of the flags is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the flag's name does not exist, or the value is not appropriate
944    /// for the flag type.
    #[cfg(compiler)]
    #[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
    pub unsafe fn cranelift_flag_enable(&mut self, flag: &str) -> &mut Self {
        // Only records the flag name; whether it exists is validated later,
        // when the `Engine` is built (see doc comment above).
        self.compiler_config.flags.insert(flag.to_string());
        self
    }
951
    /// Allows setting another Cranelift flag defined by a flag name and value. This allows
953    /// fine-tuning of Cranelift settings.
954    ///
955    /// Since Cranelift flags may be unstable, this method should not be considered to be stable
956    /// either; other `Config` functions should be preferred for stability.
957    ///
958    /// # Safety
959    ///
960    /// This is marked as unsafe, because setting the wrong flag might break invariants,
961    /// resulting in execution hazards.
962    ///
963    /// # Errors
964    ///
    /// The validation of the flags is deferred until the engine is being built, and thus may
    /// cause `Engine::new` to fail if the flag's name does not exist, or is incompatible with other
967    /// settings.
968    ///
969    /// For example, feature `wasm_backtrace` will set `unwind_info` to `true`, but if it's
970    /// manually set to false then it will fail.
    #[cfg(compiler)]
    #[cfg_attr(nightlydoc, doc(cfg(feature = "cranelift")))] // see build.rs
    pub unsafe fn cranelift_flag_set(&mut self, name: &str, value: &str) -> &mut Self {
        // Only records the name/value pair; existence and compatibility are
        // validated later, when the `Engine` is built (see doc comment above).
        self.compiler_config
            .settings
            .insert(name.to_string(), value.to_string());
        self
    }
979
980    /// Loads cache configuration specified at `path`.
981    ///
982    /// This method will read the file specified by `path` on the filesystem and
983    /// attempt to load cache configuration from it. This method can also fail
984    /// due to I/O errors, misconfiguration, syntax errors, etc. For expected
985    /// syntax in the configuration file see the [documentation online][docs].
986    ///
987    /// By default cache configuration is not enabled or loaded.
988    ///
989    /// This method is only available when the `cache` feature of this crate is
990    /// enabled.
991    ///
992    /// # Errors
993    ///
994    /// This method can fail due to any error that happens when loading the file
995    /// pointed to by `path` and attempting to load the cache configuration.
996    ///
997    /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
    #[cfg(feature = "cache")]
    #[cfg_attr(nightlydoc, doc(cfg(feature = "cache")))]
    pub fn cache_config_load(&mut self, path: impl AsRef<Path>) -> Result<&mut Self> {
        // Parse the cache configuration file at `path`, replacing any
        // previously loaded cache configuration.
        self.cache_config = CacheConfig::from_file(Some(path.as_ref()))?;
        Ok(self)
    }
1004
1005    /// Disable caching.
1006    ///
1007    /// Every call to [`Module::new(my_wasm)`][crate::Module::new] will
1008    /// recompile `my_wasm`, even when it is unchanged.
1009    ///
1010    /// By default, new configs do not have caching enabled. This method is only
1011    /// useful for disabling a previous cache configuration.
1012    ///
1013    /// This method is only available when the `cache` feature of this crate is
1014    /// enabled.
    #[cfg(feature = "cache")]
    #[cfg_attr(nightlydoc, doc(cfg(feature = "cache")))]
    pub fn disable_cache(&mut self) -> &mut Self {
        // Replace any previously-loaded configuration with a disabled one.
        self.cache_config = CacheConfig::new_cache_disabled();
        self
    }
1021
1022    /// Loads cache configuration from the system default path.
1023    ///
    /// This method is the same as [`Config::cache_config_load`] except that it
1025    /// does not take a path argument and instead loads the default
1026    /// configuration present on the system. This is located, for example, on
1027    /// Unix at `$HOME/.config/wasmtime/config.toml` and is typically created
1028    /// with the `wasmtime config new` command.
1029    ///
1030    /// By default cache configuration is not enabled or loaded.
1031    ///
1032    /// This method is only available when the `cache` feature of this crate is
1033    /// enabled.
1034    ///
1035    /// # Errors
1036    ///
1037    /// This method can fail due to any error that happens when loading the
1038    /// default system configuration. Note that it is not an error if the
1039    /// default config file does not exist, in which case the default settings
1040    /// for an enabled cache are applied.
1041    ///
1042    /// [docs]: https://bytecodealliance.github.io/wasmtime/cli-cache.html
    #[cfg(feature = "cache")]
    #[cfg_attr(nightlydoc, doc(cfg(feature = "cache")))]
    pub fn cache_config_load_default(&mut self) -> Result<&mut Self> {
        // Passing `None` tells `CacheConfig` to locate and load the system's
        // default configuration file.
        self.cache_config = CacheConfig::from_file(None)?;
        Ok(self)
    }
1049
1050    /// Sets a custom memory creator.
1051    ///
1052    /// Custom memory creators are used when creating host `Memory` objects or when
1053    /// creating instance linear memories for the on-demand instance allocation strategy.
    pub fn with_host_memory(&mut self, mem_creator: Arc<dyn MemoryCreator>) -> &mut Self {
        // Wrap the user's creator in a proxy adapting it to the runtime's
        // internal memory-creator interface.
        self.mem_creator = Some(Arc::new(MemoryCreatorProxy(mem_creator)));
        self
    }
1058
1059    /// Sets the instance allocation strategy to use.
1060    ///
1061    /// When using the pooling instance allocation strategy, all linear memories
1062    /// will be created as "static" and the
1063    /// [`Config::static_memory_maximum_size`] and
1064    /// [`Config::static_memory_guard_size`] options will be used to configure
1065    /// the virtual memory allocations of linear memories.
    pub fn allocation_strategy(&mut self, strategy: InstanceAllocationStrategy) -> &mut Self {
        // Record whether instances are allocated on demand or from a pool.
        self.allocation_strategy = strategy;
        self
    }
1070
1071    /// Configures the maximum size, in bytes, where a linear memory is
1072    /// considered static, above which it'll be considered dynamic.
1073    ///
1074    /// > Note: this value has important performance ramifications, be sure to
1075    /// > understand what this value does before tweaking it and benchmarking.
1076    ///
1077    /// This function configures the threshold for wasm memories whether they're
1078    /// implemented as a dynamically relocatable chunk of memory or a statically
1079    /// located chunk of memory. The `max_size` parameter here is the size, in
1080    /// bytes, where if the maximum size of a linear memory is below `max_size`
1081    /// then it will be statically allocated with enough space to never have to
1082    /// move. If the maximum size of a linear memory is larger than `max_size`
1083    /// then wasm memory will be dynamically located and may move in memory
1084    /// through growth operations.
1085    ///
1086    /// Specifying a `max_size` of 0 means that all memories will be dynamic and
1087    /// may be relocated through `memory.grow`. Also note that if any wasm
1088    /// memory's maximum size is below `max_size` then it will still reserve
1089    /// `max_size` bytes in the virtual memory space.
1090    ///
1091    /// ## Static vs Dynamic Memory
1092    ///
1093    /// Linear memories represent contiguous arrays of bytes, but they can also
1094    /// be grown through the API and wasm instructions. When memory is grown if
1095    /// space hasn't been preallocated then growth may involve relocating the
1096    /// base pointer in memory. Memories in Wasmtime are classified in two
1097    /// different ways:
1098    ///
1099    /// * **static** - these memories preallocate all space necessary they'll
1100    ///   ever need, meaning that the base pointer of these memories is never
1101    ///   moved. Static memories may take more virtual memory space because of
1102    ///   pre-reserving space for memories.
1103    ///
1104    /// * **dynamic** - these memories are not preallocated and may move during
1105    ///   growth operations. Dynamic memories consume less virtual memory space
1106    ///   because they don't need to preallocate space for future growth.
1107    ///
1108    /// Static memories can be optimized better in JIT code because once the
1109    /// base address is loaded in a function it's known that we never need to
1110    /// reload it because it never changes, `memory.grow` is generally a pretty
1111    /// fast operation because the wasm memory is never relocated, and under
1112    /// some conditions bounds checks can be elided on memory accesses.
1113    ///
1114    /// Dynamic memories can't be quite as heavily optimized because the base
1115    /// address may need to be reloaded more often, they may require relocating
1116    /// lots of data on `memory.grow`, and dynamic memories require
1117    /// unconditional bounds checks on all memory accesses.
1118    ///
1119    /// ## Should you use static or dynamic memory?
1120    ///
1121    /// In general you probably don't need to change the value of this property.
1122    /// The defaults here are optimized for each target platform to consume a
1123    /// reasonable amount of physical memory while also generating speedy
1124    /// machine code.
1125    ///
1126    /// One of the main reasons you may want to configure this today is if your
1127    /// environment can't reserve virtual memory space for each wasm linear
1128    /// memory. On 64-bit platforms wasm memories require a 6GB reservation by
1129    /// default, and system limits may prevent this in some scenarios. In this
1130    /// case you may wish to force memories to be allocated dynamically meaning
1131    /// that the virtual memory footprint of creating a wasm memory should be
1132    /// exactly what's used by the wasm itself.
1133    ///
1134    /// For 32-bit memories a static memory must contain at least 4GB of
1135    /// reserved address space plus a guard page to elide any bounds checks at
1136    /// all. Smaller static memories will use similar bounds checks as dynamic
1137    /// memories.
1138    ///
1139    /// ## Default
1140    ///
1141    /// The default value for this property depends on the host platform. For
1142    /// 64-bit platforms there's lots of address space available, so the default
1143    /// configured here is 4GB. WebAssembly linear memories currently max out at
1144    /// 4GB which means that on 64-bit platforms Wasmtime by default always uses
1145    /// a static memory. This, coupled with a sufficiently sized guard region,
1146    /// should produce the fastest JIT code on 64-bit platforms, but does
1147    /// require a large address space reservation for each wasm memory.
1148    ///
1149    /// For 32-bit platforms this value defaults to 1GB. This means that wasm
1150    /// memories whose maximum size is less than 1GB will be allocated
1151    /// statically, otherwise they'll be considered dynamic.
1152    ///
1153    /// ## Static Memory and Pooled Instance Allocation
1154    ///
1155    /// When using the pooling instance allocator memories are considered to
1156    /// always be static memories, they are never dynamic. This setting
1157    /// configures the size of linear memory to reserve for each memory in the
1158    /// pooling allocator.
1159    pub fn static_memory_maximum_size(&mut self, max_size: u64) -> &mut Self {
1160        let max_pages = max_size / u64::from(wasmtime_environ::WASM_PAGE_SIZE);
1161        self.tunables.static_memory_bound = max_pages;
1162        self
1163    }
1164
1165    /// Indicates that the "static" style of memory should always be used.
1166    ///
1167    /// This configuration option enables selecting the "static" option for all
1168    /// linear memories created within this `Config`. This means that all
1169    /// memories will be allocated up-front and will never move. Additionally
1170    /// this means that all memories are synthetically limited by the
    /// [`Config::static_memory_maximum_size`] option, regardless of what the
1172    /// actual maximum size is on the memory's original type.
1173    ///
1174    /// For the difference between static and dynamic memories, see the
1175    /// [`Config::static_memory_maximum_size`].
    pub fn static_memory_forced(&mut self, force: bool) -> &mut Self {
        // When set, the static memory bound acts as a hard maximum, which
        // forces every linear memory to use the "static" style.
        self.tunables.static_memory_bound_is_maximum = force;
        self
    }
1180
1181    /// Configures the size, in bytes, of the guard region used at the end of a
1182    /// static memory's address space reservation.
1183    ///
1184    /// > Note: this value has important performance ramifications, be sure to
1185    /// > understand what this value does before tweaking it and benchmarking.
1186    ///
1187    /// All WebAssembly loads/stores are bounds-checked and generate a trap if
1188    /// they're out-of-bounds. Loads and stores are often very performance
1189    /// critical, so we want the bounds check to be as fast as possible!
1190    /// Accelerating these memory accesses is the motivation for a guard after a
1191    /// memory allocation.
1192    ///
1193    /// Memories (both static and dynamic) can be configured with a guard at the
1194    /// end of them which consists of unmapped virtual memory. This unmapped
1195    /// memory will trigger a memory access violation (e.g. segfault) if
1196    /// accessed. This allows JIT code to elide bounds checks if it can prove
1197    /// that an access, if out of bounds, would hit the guard region. This means
1198    /// that having such a guard of unmapped memory can remove the need for
1199    /// bounds checks in JIT code.
1200    ///
1201    /// For the difference between static and dynamic memories, see the
1202    /// [`Config::static_memory_maximum_size`].
1203    ///
1204    /// ## How big should the guard be?
1205    ///
1206    /// In general, like with configuring `static_memory_maximum_size`, you
1207    /// probably don't want to change this value from the defaults. Otherwise,
1208    /// though, the size of the guard region affects the number of bounds checks
1209    /// needed for generated wasm code. More specifically, loads/stores with
1210    /// immediate offsets will generate bounds checks based on how big the guard
1211    /// page is.
1212    ///
1213    /// For 32-bit memories a 4GB static memory is required to even start
1214    /// removing bounds checks. A 4GB guard size will guarantee that the module
1215    /// has zero bounds checks for memory accesses. A 2GB guard size will
1216    /// eliminate all bounds checks with an immediate offset less than 2GB. A
1217    /// guard size of zero means that all memory accesses will still have bounds
1218    /// checks.
1219    ///
1220    /// ## Default
1221    ///
1222    /// The default value for this property is 2GB on 64-bit platforms. This
1223    /// allows eliminating almost all bounds checks on loads/stores with an
1224    /// immediate offset of less than 2GB. On 32-bit platforms this defaults to
1225    /// 64KB.
1226    ///
1227    /// ## Errors
1228    ///
1229    /// The `Engine::new` method will return an error if this option is smaller
1230    /// than the value configured for [`Config::dynamic_memory_guard_size`].
1231    pub fn static_memory_guard_size(&mut self, guard_size: u64) -> &mut Self {
1232        let guard_size = round_up_to_pages(guard_size);
1233        self.tunables.static_memory_offset_guard_size = guard_size;
1234        self
1235    }
1236
1237    /// Configures the size, in bytes, of the guard region used at the end of a
1238    /// dynamic memory's address space reservation.
1239    ///
1240    /// For the difference between static and dynamic memories, see the
1241    /// [`Config::static_memory_maximum_size`]
1242    ///
1243    /// For more information about what a guard is, see the documentation on
1244    /// [`Config::static_memory_guard_size`].
1245    ///
1246    /// Note that the size of the guard region for dynamic memories is not super
1247    /// critical for performance. Making it reasonably-sized can improve
1248    /// generated code slightly, but for maximum performance you'll want to lean
1249    /// towards static memories rather than dynamic anyway.
1250    ///
1251    /// Also note that the dynamic memory guard size must be smaller than the
1252    /// static memory guard size, so if a large dynamic memory guard is
1253    /// specified then the static memory guard size will also be automatically
1254    /// increased.
1255    ///
1256    /// ## Default
1257    ///
1258    /// This value defaults to 64KB.
1259    ///
1260    /// ## Errors
1261    ///
1262    /// The `Engine::new` method will return an error if this option is larger
1263    /// than the value configured for [`Config::static_memory_guard_size`].
1264    pub fn dynamic_memory_guard_size(&mut self, guard_size: u64) -> &mut Self {
1265        let guard_size = round_up_to_pages(guard_size);
1266        self.tunables.dynamic_memory_offset_guard_size = guard_size;
1267        self
1268    }
1269
1270    /// Configures the size, in bytes, of the extra virtual memory space
1271    /// reserved after a "dynamic" memory for growing into.
1272    ///
1273    /// For the difference between static and dynamic memories, see the
1274    /// [`Config::static_memory_maximum_size`]
1275    ///
1276    /// Dynamic memories can be relocated in the process's virtual address space
1277    /// on growth and do not always reserve their entire space up-front. This
1278    /// means that a growth of the memory may require movement in the address
1279    /// space, which in the worst case can copy a large number of bytes from one
1280    /// region to another.
1281    ///
1282    /// This setting configures how many bytes are reserved after the initial
1283    /// reservation for a dynamic memory for growing into. A value of 0 here
1284    /// means that no extra bytes are reserved and all calls to `memory.grow`
1285    /// will need to relocate the wasm linear memory (copying all the bytes). A
1286    /// value of 1 megabyte, however, means that `memory.grow` can allocate up
1287    /// to a megabyte of extra memory before the memory needs to be moved in
1288    /// linear memory.
1289    ///
1290    /// Note that this is a currently simple heuristic for optimizing the growth
1291    /// of dynamic memories, primarily implemented for the memory64 proposal
1292    /// where all memories are currently "dynamic". This is unlikely to be a
1293    /// one-size-fits-all style approach and if you're an embedder running into
1294    /// issues with dynamic memories and growth and are interested in having
1295    /// other growth strategies available here please feel free to [open an
1296    /// issue on the Wasmtime repository][issue]!
1297    ///
    /// [issue]: https://github.com/bytecodealliance/wasmtime/issues/new
1299    ///
1300    /// ## Default
1301    ///
1302    /// For 64-bit platforms this defaults to 2GB, and for 32-bit platforms this
1303    /// defaults to 1MB.
1304    pub fn dynamic_memory_reserved_for_growth(&mut self, reserved: u64) -> &mut Self {
1305        self.tunables.dynamic_memory_growth_reserve = round_up_to_pages(reserved);
1306        self
1307    }
1308
    /// Indicates whether a guard region is present before allocations of
    /// linear memory.
    ///
    /// Guard regions before linear memories are never used during normal
    /// operation of WebAssembly modules, even if they have out-of-bounds
    /// loads. The only purpose for a preceding guard region in linear memory
    /// is extra protection against possible bugs in code generators like
    /// Cranelift. This setting does not affect performance in any way, but will
    /// result in larger virtual memory reservations for linear memories (it
    /// won't actually ever use more memory, just use more of the address
    /// space).
    ///
    /// The size of the guard region before linear memory is the same as the
    /// guard size that comes after linear memory, which is configured by
    /// [`Config::static_memory_guard_size`] and
    /// [`Config::dynamic_memory_guard_size`].
    ///
    /// ## Default
    ///
    /// This value defaults to `true`.
    pub fn guard_before_linear_memory(&mut self, guard: bool) -> &mut Self {
        // Persist the flag in the engine's tunables.
        self.tunables.guard_before_linear_memory = guard;
        self
    }
1333
1334    /// Configure the version information used in serialized and deserialzied [`crate::Module`]s.
1335    /// This effects the behavior of [`crate::Module::serialize()`], as well as
1336    /// [`crate::Module::deserialize()`] and related functions.
1337    ///
1338    /// The default strategy is to use the wasmtime crate's Cargo package version.
1339    pub fn module_version(&mut self, strategy: ModuleVersionStrategy) -> Result<&mut Self> {
1340        match strategy {
1341            // This case requires special precondition for assertion in SerializedModule::to_bytes
1342            ModuleVersionStrategy::Custom(ref v) => {
1343                if v.as_bytes().len() > 255 {
1344                    bail!("custom module version cannot be more than 255 bytes: {}", v);
1345                }
1346            }
1347            _ => {}
1348        }
1349        self.module_version = strategy;
1350        Ok(self)
1351    }
1352
    /// Configure whether wasmtime should compile a module using multiple
    /// threads.
    ///
    /// Disabling this will result in a single thread being used to compile
    /// the wasm bytecode.
    ///
    /// By default parallel compilation is enabled.
    #[cfg(feature = "parallel-compilation")]
    #[cfg_attr(nightlydoc, doc(cfg(feature = "parallel-compilation")))]
    pub fn parallel_compilation(&mut self, parallel: bool) -> &mut Self {
        self.parallel_compilation = parallel;
        self
    }
1366
    /// Configures whether compiled artifacts will contain information to map
    /// native program addresses back to the original wasm module.
    ///
    /// This configuration option is `true` by default and, if enabled,
    /// generates the appropriate tables in compiled modules to map from native
    /// address back to wasm source addresses. This is used for displaying wasm
    /// program counters in backtraces as well as generating filenames/line
    /// numbers if so configured as well (and the original wasm module has DWARF
    /// debugging information present).
    pub fn generate_address_map(&mut self, generate: bool) -> &mut Self {
        // Store the setting in the compilation tunables.
        self.tunables.generate_address_map = generate;
        self
    }
1380
    /// Configures whether copy-on-write memory-mapped data is used to
    /// initialize a linear memory.
    ///
    /// Initializing linear memory via a copy-on-write mapping can drastically
    /// improve instantiation costs of a WebAssembly module because copying
    /// memory is deferred. Additionally if a page of memory is only ever read
    /// from WebAssembly and never written to then the same underlying page of
    /// data will be reused between all instantiations of a module meaning that
    /// if a module is instantiated many times this can lower the overall memory
    /// needed to run that module.
    ///
    /// This feature is only applicable when a WebAssembly module meets specific
    /// criteria to be initialized in this fashion, such as:
    ///
    /// * Only memories defined in the module can be initialized this way.
    /// * Data segments for memory must use statically known offsets.
    /// * Data segments for memory must all be in-bounds.
    ///
    /// Modules which do not meet these criteria will fall back to
    /// initialization of linear memory based on copying memory.
    ///
    /// This feature of Wasmtime is also platform-specific:
    ///
    /// * Linux - this feature is supported for all instances of [`Module`].
    ///   Modules backed by an existing mmap (such as those created by
    ///   [`Module::deserialize_file`]) will reuse that mmap to cow-initialize
    ///   memory. Other instances of [`Module`] may use the `memfd_create`
    ///   syscall to create an initialization image to `mmap`.
    /// * Unix (not Linux) - this feature is only supported when loading modules
    ///   from a precompiled file via [`Module::deserialize_file`] where there
    ///   is a file descriptor to use to map data into the process. Note that
    ///   the module must have been compiled with this setting enabled as well.
    /// * Windows - there is no support for this feature at this time. Memory
    ///   initialization will always copy bytes.
    ///
    /// By default this option is enabled.
    ///
    /// [`Module::deserialize_file`]: crate::Module::deserialize_file
    /// [`Module`]: crate::Module
    pub fn memory_init_cow(&mut self, enable: bool) -> &mut Self {
        self.memory_init_cow = enable;
        self
    }
1424
    /// A configuration option to force the usage of `memfd_create` on Linux to
    /// be used as the backing source for a module's initial memory image.
    ///
    /// When [`Config::memory_init_cow`] is enabled, which is enabled by
    /// default, module memory initialization images are taken from a module's
    /// original mmap if possible. If a precompiled module was loaded from disk
    /// this means that the disk's file is used as an mmap source for the
    /// initial linear memory contents. This option can be used to force
    /// Wasmtime, on Linux, to instead create a new in-memory file with
    /// `memfd_create` to hold the contents of the initial image rather than
    /// using the original file on disk.
    ///
    /// This option can be used to avoid possibly loading the contents of memory
    /// from disk through a page fault. Instead with `memfd_create` the contents
    /// of memory are always in RAM, meaning that even page faults which
    /// initially populate a wasm linear memory will only work with RAM instead
    /// of ever hitting the disk that the original precompiled module is stored
    /// on.
    ///
    /// This option is disabled by default.
    pub fn force_memory_init_memfd(&mut self, enable: bool) -> &mut Self {
        self.force_memory_init_memfd = enable;
        self
    }
1449
1450    /// Configures the "guaranteed dense image size" for copy-on-write
1451    /// initialized memories.
1452    ///
1453    /// When using the [`Config::memory_init_cow`] feature to initialize memory
1454    /// efficiently (which is enabled by default), compiled modules contain an
1455    /// image of the module's initial heap. If the module has a fairly sparse
1456    /// initial heap, with just a few data segments at very different offsets,
1457    /// this could result in a large region of zero bytes in the image. In
1458    /// other words, it's not very memory-efficient.
1459    ///
1460    /// We normally use a heuristic to avoid this: if less than half
1461    /// of the initialized range (first non-zero to last non-zero
1462    /// byte) of any memory in the module has pages with nonzero
1463    /// bytes, then we avoid creating a memory image for the entire module.
1464    ///
1465    /// However, if the embedder always needs the instantiation-time efficiency
1466    /// of copy-on-write initialization, and is otherwise carefully controlling
1467    /// parameters of the modules (for example, by limiting the maximum heap
1468    /// size of the modules), then it may be desirable to ensure a memory image
1469    /// is created even if this could go against the heuristic above. Thus, we
1470    /// add another condition: there is a size of initialized data region up to
1471    /// which we *always* allow a memory image. The embedder can set this to a
1472    /// known maximum heap size if they desire to always get the benefits of
1473    /// copy-on-write images.
1474    ///
1475    /// In the future we may implement a "best of both worlds"
1476    /// solution where we have a dense image up to some limit, and
1477    /// then support a sparse list of initializers beyond that; this
1478    /// would get most of the benefit of copy-on-write and pay the incremental
1479    /// cost of eager initialization only for those bits of memory
1480    /// that are out-of-bounds. However, for now, an embedder desiring
1481    /// fast instantiation should ensure that this setting is as large
1482    /// as the maximum module initial memory content size.
1483    ///
1484    /// By default this value is 16 MiB.
1485    pub fn memory_guaranteed_dense_image_size(&mut self, size_in_bytes: u64) -> &mut Self {
1486        self.memory_guaranteed_dense_image_size = size_in_bytes;
1487        self
1488    }
1489
    /// Checks cross-option invariants of this `Config`, returning an error
    /// describing the first violation found.
    pub(crate) fn validate(&self) -> Result<()> {
        // Feature dependencies: both reference types and threads require
        // the bulk-memory proposal to also be enabled.
        if self.features.reference_types && !self.features.bulk_memory {
            bail!("feature 'reference_types' requires 'bulk_memory' to be enabled");
        }
        if self.features.threads && !self.features.bulk_memory {
            bail!("feature 'threads' requires 'bulk_memory' to be enabled");
        }
        // Wasm code runs on the async stack, so the wasm stack must fit.
        #[cfg(feature = "async")]
        if self.max_wasm_stack > self.async_stack_size {
            bail!("max_wasm_stack size cannot exceed the async_stack_size");
        }
        if self.max_wasm_stack == 0 {
            bail!("max_wasm_stack size cannot be zero");
        }
        // Guard-size sanity check between the static and dynamic memory
        // configurations.
        if self.tunables.static_memory_offset_guard_size
            < self.tunables.dynamic_memory_offset_guard_size
        {
            bail!("static memory guard size cannot be smaller than dynamic memory guard size");
        }

        Ok(())
    }
1512
    /// Constructs the instance allocator selected by
    /// `self.allocation_strategy` (on-demand or pooling).
    pub(crate) fn build_allocator(&self) -> Result<Box<dyn InstanceAllocator + Send + Sync>> {
        // Async execution stacks only exist when the `async` feature is
        // compiled in; otherwise the stack size is unused and set to 0.
        #[cfg(feature = "async")]
        let stack_size = self.async_stack_size;

        #[cfg(not(feature = "async"))]
        let stack_size = 0;

        match &self.allocation_strategy {
            InstanceAllocationStrategy::OnDemand => Ok(Box::new(OnDemandInstanceAllocator::new(
                self.mem_creator.clone(),
                stack_size,
            ))),
            #[cfg(feature = "pooling-allocator")]
            InstanceAllocationStrategy::Pooling(config) => {
                // Copy the user's pooling configuration and inject the
                // engine-level stack size before constructing the pool.
                let mut config = config.config;
                config.stack_size = stack_size;
                Ok(Box::new(wasmtime_runtime::PoolingInstanceAllocator::new(
                    &config,
                    &self.tunables,
                )?))
            }
        }
    }
1536
1537    pub(crate) fn build_profiler(&self) -> Result<Box<dyn ProfilingAgent>> {
1538        Ok(match self.profiling_strategy {
1539            ProfilingStrategy::PerfMap => Box::new(PerfMapAgent::new()?) as Box<dyn ProfilingAgent>,
1540            ProfilingStrategy::JitDump => Box::new(JitDumpAgent::new()?) as Box<dyn ProfilingAgent>,
1541            ProfilingStrategy::VTune => Box::new(VTuneAgent::new()?) as Box<dyn ProfilingAgent>,
1542            ProfilingStrategy::None => Box::new(NullProfilerAgent),
1543        })
1544    }
1545
1546    #[cfg(compiler)]
1547    pub(crate) fn build_compiler(&mut self) -> Result<Box<dyn wasmtime_environ::Compiler>> {
1548        let mut compiler = match self.compiler_config.strategy {
1549            Strategy::Auto | Strategy::Cranelift => wasmtime_cranelift::builder(),
1550        };
1551
1552        if let Some(target) = &self.compiler_config.target {
1553            compiler.target(target.clone())?;
1554        }
1555
1556        // If probestack is enabled for a target, Wasmtime will always use the
1557        // inline strategy which doesn't require us to define a `__probestack`
1558        // function or similar.
1559        self.compiler_config
1560            .settings
1561            .insert("probestack_strategy".into(), "inline".into());
1562
1563        let host = target_lexicon::Triple::host();
1564        let target = self.compiler_config.target.as_ref().unwrap_or(&host);
1565
1566        // On supported targets, we enable stack probing by default.
1567        // This is required on Windows because of the way Windows
1568        // commits its stacks, but it's also a good idea on other
1569        // platforms to ensure guard pages are hit for large frame
1570        // sizes.
1571        if probestack_supported(target.architecture) {
1572            self.compiler_config
1573                .flags
1574                .insert("enable_probestack".into());
1575        }
1576
1577        if self.native_unwind_info ||
1578             // Windows always needs unwind info, since it is part of the ABI.
1579             target.operating_system == target_lexicon::OperatingSystem::Windows
1580        {
1581            if !self
1582                .compiler_config
1583                .ensure_setting_unset_or_given("unwind_info", "true")
1584            {
1585                bail!("compiler option 'unwind_info' must be enabled profiling");
1586            }
1587        }
1588
1589        // We require frame pointers for correct stack walking, which is safety
1590        // critical in the presence of reference types, and otherwise it is just
1591        // really bad developer experience to get wrong.
1592        self.compiler_config
1593            .settings
1594            .insert("preserve_frame_pointers".into(), "true".into());
1595
1596        // check for incompatible compiler options and set required values
1597        if self.features.reference_types {
1598            if !self
1599                .compiler_config
1600                .ensure_setting_unset_or_given("enable_safepoints", "true")
1601            {
1602                bail!("compiler option 'enable_safepoints' must be enabled when 'reference types' is enabled");
1603            }
1604        }
1605        if self.features.simd {
1606            if !self
1607                .compiler_config
1608                .ensure_setting_unset_or_given("enable_simd", "true")
1609            {
1610                bail!("compiler option 'enable_simd' must be enabled when 'simd' is enabled");
1611            }
1612        }
1613
1614        if self.features.relaxed_simd && !self.features.simd {
1615            bail!("cannot disable the simd proposal but enable the relaxed simd proposal");
1616        }
1617
1618        // Apply compiler settings and flags
1619        for (k, v) in self.compiler_config.settings.iter() {
1620            compiler.set(k, v)?;
1621        }
1622        for flag in self.compiler_config.flags.iter() {
1623            compiler.enable(flag)?;
1624        }
1625
1626        if let Some(cache_store) = &self.compiler_config.cache_store {
1627            compiler.enable_incremental_compilation(cache_store.clone())?;
1628        }
1629
1630        compiler.build()
1631    }
1632
    /// Internal setting for whether adapter modules for components will have
    /// extra WebAssembly instructions inserted performing more debug checks
    /// than are necessary.
    #[cfg(feature = "component-model")]
    pub fn debug_adapter_modules(&mut self, debug: bool) -> &mut Self {
        self.tunables.debug_adapter_modules = debug;
        self
    }
1641}
1642
1643fn round_up_to_pages(val: u64) -> u64 {
1644    let page_size = wasmtime_runtime::page_size() as u64;
1645    debug_assert!(page_size.is_power_of_two());
1646    val.checked_add(page_size - 1)
1647        .map(|val| val & !(page_size - 1))
1648        .unwrap_or(u64::MAX / page_size + 1)
1649}
1650
impl Default for Config {
    /// Returns the same configuration produced by `Config::new`.
    fn default() -> Config {
        Config::new()
    }
}
1656
impl fmt::Debug for Config {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Only a representative subset of the configuration is printed here;
        // `Config` carries many more options than are shown.
        let mut f = f.debug_struct("Config");
        f.field("debug_info", &self.tunables.generate_native_debuginfo)
            .field("parse_wasm_debuginfo", &self.tunables.parse_wasm_debuginfo)
            .field("wasm_threads", &self.features.threads)
            .field("wasm_reference_types", &self.features.reference_types)
            .field("wasm_bulk_memory", &self.features.bulk_memory)
            .field("wasm_simd", &self.features.simd)
            .field("wasm_relaxed_simd", &self.features.relaxed_simd)
            .field("wasm_multi_value", &self.features.multi_value)
            .field(
                // The bound is stored in wasm pages; report it in bytes.
                "static_memory_maximum_size",
                &(u64::from(self.tunables.static_memory_bound)
                    * u64::from(wasmtime_environ::WASM_PAGE_SIZE)),
            )
            .field(
                "static_memory_guard_size",
                &self.tunables.static_memory_offset_guard_size,
            )
            .field(
                "dynamic_memory_guard_size",
                &self.tunables.dynamic_memory_offset_guard_size,
            )
            .field(
                "guard_before_linear_memory",
                &self.tunables.guard_before_linear_memory,
            )
            .field("parallel_compilation", &self.parallel_compilation);
        #[cfg(compiler)]
        {
            f.field("compiler_config", &self.compiler_config);
        }
        f.finish()
    }
}
1693
/// Possible Compilation strategies for a wasm module.
///
/// This is used as an argument to the [`Config::strategy`] method.
///
/// This enum is `#[non_exhaustive]`: more strategies may be added in future
/// versions of Wasmtime.
#[non_exhaustive]
#[derive(Clone, Debug, Copy)]
pub enum Strategy {
    /// An indicator that the compilation strategy should be automatically
    /// selected.
    ///
    /// This is generally what you want for most projects and indicates that the
    /// `wasmtime` crate itself should make the decision about what the best
    /// code generator for a wasm module is.
    ///
    /// Currently this always defaults to Cranelift, but the default value may
    /// change over time.
    Auto,

    /// Currently the default backend, Cranelift aims to be a reasonably fast
    /// code generator which generates high quality machine code.
    Cranelift,
}
1715
/// Possible optimization levels for the Cranelift codegen backend.
///
/// This enum is `#[non_exhaustive]`: additional levels may be added in future
/// versions of Wasmtime.
#[non_exhaustive]
#[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq)]
pub enum OptLevel {
    /// No optimizations performed, minimizes compilation time by disabling most
    /// optimizations.
    None,
    /// Generates the fastest possible code, but may take longer.
    Speed,
    /// Similar to `speed`, but also performs transformations aimed at reducing
    /// code size.
    SpeedAndSize,
}
1729
/// Select which profiling technique to support.
///
/// Strings accepted by the `FromStr` implementation below: `"none"`,
/// `"perfmap"`, `"jitdump"`, and `"vtune"`.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProfilingStrategy {
    /// No profiler support.
    None,

    /// Collect function name information as the "perf map" file format, used with `perf` on Linux.
    PerfMap,

    /// Collect profiling info for "jitdump" file format, used with `perf` on
    /// Linux.
    JitDump,

    /// Collect profiling info using the "ittapi" interface, used with `VTune` on Linux.
    VTune,
}
1746
1747impl FromStr for ProfilingStrategy {
1748    type Err = anyhow::Error;
1749
1750    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
1751        match s {
1752            "none" => Ok(Self::None),
1753            "perfmap" => Ok(Self::PerfMap),
1754            "jitdump" => Ok(Self::JitDump),
1755            "vtune" => Ok(Self::VTune),
1756            _ => anyhow::bail!("unknown value for profiling strategy"),
1757        }
1758    }
1759}
1760
/// Select how wasm backtrace detailed information is handled.
///
/// This is a tri-state: unconditionally enabled, unconditionally disabled, or
/// decided by an environment variable.
#[derive(Debug, Clone, Copy)]
pub enum WasmBacktraceDetails {
    /// Support is unconditionally enabled and wasmtime will parse and read
    /// debug information.
    Enable,

    /// Support is disabled, and wasmtime will not parse debug information for
    /// backtrace details.
    Disable,

    /// Support for backtrace details is conditional on the
    /// `WASMTIME_BACKTRACE_DETAILS` environment variable.
    Environment,
}
1776
/// Configuration options used with [`InstanceAllocationStrategy::Pooling`] to
/// change the behavior of the pooling instance allocator.
///
/// This structure has a builder-style API in the same manner as [`Config`] and
/// is configured with [`Config::allocation_strategy`].
#[cfg(feature = "pooling-allocator")]
#[derive(Debug, Clone, Default)]
pub struct PoolingAllocationConfig {
    // The underlying runtime configuration; mutated by the builder methods on
    // this type and consumed by `Config::build_allocator`.
    config: wasmtime_runtime::PoolingInstanceAllocatorConfig,
}
1787
1788#[cfg(feature = "pooling-allocator")]
1789impl PoolingAllocationConfig {
1790    /// Configures the maximum number of "unused warm slots" to retain in the
1791    /// pooling allocator.
1792    ///
1793    /// The pooling allocator operates over slots to allocate from, and each
1794    /// slot is considered "cold" if it's never been used before or "warm" if
1795    /// it's been used by some module in the past. Slots in the pooling
1796    /// allocator additionally track an "affinity" flag to a particular core
1797    /// wasm module. When a module is instantiated into a slot then the slot is
1798    /// considered affine to that module, even after the instance has been
1799    /// dealloocated.
1800    ///
1801    /// When a new instance is created then a slot must be chosen, and the
1802    /// current algorithm for selecting a slot is:
1803    ///
1804    /// * If there are slots that are affine to the module being instantiated,
1805    ///   then the most recently used slot is selected to be allocated from.
1806    ///   This is done to improve reuse of resources such as memory mappings and
1807    ///   additionally try to benefit from temporal locality for things like
1808    ///   caches.
1809    ///
1810    /// * Otherwise if there are more than N affine slots to other modules, then
1811    ///   one of those affine slots is chosen to be allocated. The slot chosen
1812    ///   is picked on a least-recently-used basis.
1813    ///
1814    /// * Finally, if there are less than N affine slots to other modules, then
1815    ///   the non-affine slots are allocated from.
1816    ///
1817    /// This setting, `max_unused_warm_slots`, is the value for N in the above
1818    /// algorithm. The purpose of this setting is to have a knob over the RSS
1819    /// impact of "unused slots" for a long-running wasm server.
1820    ///
1821    /// If this setting is set to 0, for example, then affine slots are
1822    /// aggressively resused on a least-recently-used basis. A "cold" slot is
1823    /// only used if there are no affine slots available to allocate from. This
1824    /// means that the set of slots used over the lifetime of a program is the
1825    /// same as the maximum concurrent number of wasm instances.
1826    ///
1827    /// If this setting is set to infinity, however, then cold slots are
1828    /// prioritized to be allocated from. This means that the set of slots used
1829    /// over the lifetime of a program will approach
1830    /// [`PoolingAllocationConfig::instance_count`], or the maximum number of
1831    /// slots in the pooling allocator.
1832    ///
1833    /// Wasmtime does not aggressively decommit all resources associated with a
1834    /// slot when the slot is not in use. For example the
1835    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] option can be
1836    /// used to keep memory associated with a slot, even when it's not in use.
1837    /// This means that the total set of used slots in the pooling instance
1838    /// allocator can impact the overall RSS usage of a program.
1839    ///
1840    /// The default value for this option is 100.
1841    pub fn max_unused_warm_slots(&mut self, max: u32) -> &mut Self {
1842        self.config.max_unused_warm_slots = max;
1843        self
1844    }
1845
    /// Configures whether or not stacks used for async futures are reset to
    /// zero after usage.
    ///
    /// When the [`async_support`](Config::async_support) method is enabled for
    /// Wasmtime and the [`call_async`] variant
    /// of calling WebAssembly is used then Wasmtime will create a separate
    /// runtime execution stack for each future produced by [`call_async`].
    /// During the deallocation process Wasmtime won't by default reset the
    /// contents of the stack back to zero.
    ///
    /// When this option is enabled it can be seen as a defense-in-depth
    /// mechanism to reset a stack back to zero. This is not required for
    /// correctness and can be a costly operation in highly concurrent
    /// environments due to modifications of the virtual address space requiring
    /// process-wide synchronization.
    ///
    /// This option defaults to `false`.
    ///
    /// [`call_async`]: crate::TypedFunc::call_async
    #[cfg(feature = "async")]
    #[cfg_attr(nightlydoc, doc(cfg(feature = "async")))]
    pub fn async_stack_zeroing(&mut self, enable: bool) -> &mut Self {
        // See also `async_stack_keep_resident` for how zeroing is performed.
        self.config.async_stack_zeroing = enable;
        self
    }
1871
1872    /// How much memory, in bytes, to keep resident for async stacks allocated
1873    /// with the pooling allocator.
1874    ///
1875    /// When [`PoolingAllocationConfig::async_stack_zeroing`] is enabled then
1876    /// Wasmtime will reset the contents of async stacks back to zero upon
1877    /// deallocation. This option can be used to perform the zeroing operation
1878    /// with `memset` up to a certain threshold of bytes instead of using system
1879    /// calls to reset the stack to zero.
1880    ///
1881    /// Note that when using this option the memory with async stacks will
1882    /// never be decommitted.
1883    #[cfg(feature = "async")]
1884    #[cfg_attr(nightlydoc, doc(cfg(feature = "async")))]
1885    pub fn async_stack_keep_resident(&mut self, size: usize) -> &mut Self {
1886        let size = round_up_to_pages(size as u64) as usize;
1887        self.config.async_stack_keep_resident = size;
1888        self
1889    }
1890
1891    /// How much memory, in bytes, to keep resident for each linear memory
1892    /// after deallocation.
1893    ///
1894    /// This option is only applicable on Linux and has no effect on other
1895    /// platforms.
1896    ///
1897    /// By default Wasmtime will use `madvise` to reset the entire contents of
1898    /// linear memory back to zero when a linear memory is deallocated. This
1899    /// option can be used to use `memset` instead to set memory back to zero
1900    /// which can, in some configurations, reduce the number of page faults
1901    /// taken when a slot is reused.
1902    pub fn linear_memory_keep_resident(&mut self, size: usize) -> &mut Self {
1903        let size = round_up_to_pages(size as u64) as usize;
1904        self.config.linear_memory_keep_resident = size;
1905        self
1906    }
1907
1908    /// How much memory, in bytes, to keep resident for each table after
1909    /// deallocation.
1910    ///
1911    /// This option is only applicable on Linux and has no effect on other
1912    /// platforms.
1913    ///
1914    /// This option is the same as
1915    /// [`PoolingAllocationConfig::linear_memory_keep_resident`] except that it
1916    /// is applicable to tables instead.
1917    pub fn table_keep_resident(&mut self, size: usize) -> &mut Self {
1918        let size = round_up_to_pages(size as u64) as usize;
1919        self.config.table_keep_resident = size;
1920        self
1921    }
1922
    /// The maximum number of concurrent instances supported (default is 1000).
    ///
    /// This value has a direct impact on the amount of memory allocated by the pooling
    /// instance allocator.
    ///
    /// The pooling instance allocator allocates three memory pools with sizes depending on this value:
    ///
    /// * An instance pool, where each entry in the pool can store the runtime representation
    ///   of an instance, including a maximal `VMContext` structure.
    ///
    /// * A memory pool, where each entry in the pool contains the reserved address space for each
    ///   linear memory supported by an instance.
    ///
    /// * A table pool, where each entry in the pool contains the space needed for each WebAssembly table
    ///   supported by an instance (see `table_elements` to control the size of each table).
    ///
    /// Additionally, this value will also control the maximum number of execution stacks allowed for
    /// asynchronous execution (one per instance), when enabled.
    ///
    /// The memory pool will reserve a large quantity of host process address space to elide the bounds
    /// checks required for correct WebAssembly memory semantics. Even for 64-bit address spaces, the
    /// address space is limited when dealing with a large number of supported instances.
    ///
    /// For example, on Linux x86_64, the userland address space limit is 128 TiB. That might seem like a lot,
    /// but each linear memory will *reserve* 6 GiB of space by default. Multiply that by the number of linear
    /// memories each instance supports and then by the number of supported instances and it becomes apparent
    /// that address space can be exhausted depending on the number of supported instances.
    pub fn instance_count(&mut self, count: u32) -> &mut Self {
        // Recorded on the runtime allocator's limits structure.
        self.config.limits.count = count;
        self
    }
1954
    /// The maximum size, in bytes, allocated for an instance and its
    /// `VMContext`.
    ///
    /// This amount of space is pre-allocated for `count` number of instances
    /// and is used to store the runtime `wasmtime_runtime::Instance` structure
    /// along with its adjacent `VMContext` structure. The `Instance` type has a
    /// static size but `VMContext` is dynamically sized depending on the module
    /// being instantiated. This size limit loosely correlates to the size of
    /// the wasm module, taking into account factors such as:
    ///
    /// * number of functions
    /// * number of globals
    /// * number of memories
    /// * number of tables
    /// * number of function types
    ///
    /// If the allocated size per instance is too small then instantiation of a
    /// module will fail at runtime with an error indicating how many bytes were
    /// needed. This amount of bytes are committed to memory per-instance when
    /// a pooling allocator is created.
    ///
    /// The default value for this is 1MB.
    pub fn instance_size(&mut self, size: usize) -> &mut Self {
        // Recorded on the runtime allocator's limits structure.
        self.config.limits.size = size;
        self
    }
1981
    /// The maximum number of defined tables for a module (default is 1).
    ///
    /// This value controls the capacity of the `VMTableDefinition` table in each instance's
    /// `VMContext` structure.
    ///
    /// The allocated size of the table will be `tables * sizeof(VMTableDefinition)` for each
    /// instance regardless of how many tables are defined by an instance's module.
    pub fn instance_tables(&mut self, tables: u32) -> &mut Self {
        // Recorded on the runtime allocator's limits structure.
        self.config.limits.tables = tables;
        self
    }
1993
1994    /// The maximum table elements for any table defined in a module (default is 10000).
1995    ///
1996    /// If a table's minimum element limit is greater than this value, the module will
1997    /// fail to instantiate.
1998    ///
1999    /// If a table's maximum element limit is unbounded or greater than this value,
2000    /// the maximum will be `table_elements` for the purpose of any `table.grow` instruction.
2001    ///
2002    /// This value is used to reserve the maximum space for each supported table; table elements
2003    /// are pointer-sized in the Wasmtime runtime.  Therefore, the space reserved for each instance
2004    /// is `tables * table_elements * sizeof::<*const ()>`.
2005    pub fn instance_table_elements(&mut self, elements: u32) -> &mut Self {
2006        self.config.limits.table_elements = elements;
2007        self
2008    }
2009
2010    /// The maximum number of defined linear memories for a module (default is 1).
2011    ///
2012    /// This value controls the capacity of the `VMMemoryDefinition` table in each instance's
2013    /// `VMContext` structure.
2014    ///
2015    /// The allocated size of the table will be `memories * sizeof(VMMemoryDefinition)` for each
2016    /// instance regardless of how many memories are defined by an instance's module.
2017    pub fn instance_memories(&mut self, memories: u32) -> &mut Self {
2018        self.config.limits.memories = memories;
2019        self
2020    }
2021
2022    /// The maximum number of pages for any linear memory defined in a module (default is 160).
2023    ///
2024    /// The default of 160 means at most 10 MiB of host memory may be committed for each instance.
2025    ///
2026    /// If a memory's minimum page limit is greater than this value, the module will
2027    /// fail to instantiate.
2028    ///
2029    /// If a memory's maximum page limit is unbounded or greater than this value,
2030    /// the maximum will be `memory_pages` for the purpose of any `memory.grow` instruction.
2031    ///
2032    /// This value is used to control the maximum accessible space for each linear memory of an instance.
2033    ///
2034    /// The reservation size of each linear memory is controlled by the
2035    /// `static_memory_maximum_size` setting and this value cannot
2036    /// exceed the configured static memory maximum size.
2037    pub fn instance_memory_pages(&mut self, pages: u64) -> &mut Self {
2038        self.config.limits.memory_pages = pages;
2039        self
2040    }
2041}
2042
2043pub(crate) fn probestack_supported(arch: Architecture) -> bool {
2044    matches!(
2045        arch,
2046        Architecture::X86_64 | Architecture::Aarch64(_) | Architecture::Riscv64(_)
2047    )
2048}