wasmtime_runtime/traphandlers.rs
//! WebAssembly trap handling, which is built on top of the lower-level
//! signal-handling mechanisms.

mod backtrace;

use crate::{VMContext, VMRuntimeLimits};
use anyhow::Error;
use std::any::Any;
use std::cell::{Cell, UnsafeCell};
use std::mem::MaybeUninit;
use std::ptr;
use std::sync::Once;

pub use self::backtrace::Backtrace;
pub use self::tls::{tls_eager_initialize, TlsRestore};

#[link(name = "wasmtime-helpers")]
extern "C" {
    #[allow(improper_ctypes)]
    fn wasmtime_setjmp(
        jmp_buf: *mut *const u8,
        callback: extern "C" fn(*mut u8, *mut VMContext),
        payload: *mut u8,
        callee: *mut VMContext,
    ) -> i32;
    fn wasmtime_longjmp(jmp_buf: *const u8) -> !;
}

cfg_if::cfg_if! {
    if #[cfg(all(target_os = "macos", not(feature = "posix-signals-on-macos")))] {
        mod macos;
        use macos as sys;
    } else if #[cfg(unix)] {
        mod unix;
        use unix as sys;
    } else if #[cfg(target_os = "windows")] {
        mod windows;
        use windows as sys;
    }
}

pub use sys::SignalHandler;

/// Globally-set callback to determine whether a program counter is actually a
/// wasm trap.
///
/// This is initialized during `init_traps` below. The definition lives within
/// `wasmtime` currently.
static mut IS_WASM_PC: fn(usize) -> bool = |_| false;

/// This function is required to be called before any WebAssembly is entered.
/// This will configure global state such as signal handlers to prepare the
/// process to receive wasm traps.
///
/// This function must not only be called globally once before entering
/// WebAssembly but it must also be called once per thread that enters
/// WebAssembly. Currently in wasmtime's integration this function is called on
/// creation of an `Engine`.
///
/// The `is_wasm_pc` argument is used when a trap happens to determine if a
/// program counter is the pc of an actual wasm trap or not. This is then used
/// to disambiguate faults that happen due to wasm and faults that happen due to
/// bugs in Rust or elsewhere.
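///
/// # Example
///
/// A minimal wiring sketch. The `module_contains_pc` lookup is hypothetical
/// and stands in for however the embedder tracks its compiled-code ranges:
///
/// ```ignore
/// fn module_contains_pc(pc: usize) -> bool {
///     // Consult the ranges of compiled wasm code recorded at load time.
///     false
/// }
///
/// // Idempotent: the underlying initialization runs at most once per process.
/// init_traps(module_contains_pc);
/// ```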
pub fn init_traps(is_wasm_pc: fn(usize) -> bool) {
    static INIT: Once = Once::new();
    INIT.call_once(|| unsafe {
        IS_WASM_PC = is_wasm_pc;
        sys::platform_init();
    });
}

/// Raises a trap immediately.
///
/// This function performs as-if a wasm trap was just executed. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub unsafe fn raise_trap(reason: TrapReason) -> ! {
    tls::with(|info| info.unwrap().unwind_with(UnwindReason::Trap(reason)))
}

/// Raises a user-defined trap immediately.
///
/// This function performs as-if a wasm trap was just executed, only the trap
/// has a dynamic payload associated with it which is user-provided. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
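///
/// # Example
///
/// A hedged sketch of a host function surfacing an error as a trap. It
/// assumes wasm frames are on the stack when called, and the
/// `trap_with_message` helper is hypothetical:
///
/// ```ignore
/// unsafe fn trap_with_message(msg: &str) -> ! {
///     // `true` asks the runtime to capture a wasm backtrace for this error.
///     raise_user_trap(anyhow::anyhow!("{}", msg), true)
/// }
/// ```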
pub unsafe fn raise_user_trap(error: Error, needs_backtrace: bool) -> ! {
    raise_trap(TrapReason::User {
        error,
        needs_backtrace,
    })
}

/// Raises a trap from inside library code immediately.
///
/// This function performs as-if a wasm trap was just executed. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub unsafe fn raise_lib_trap(trap: wasmtime_environ::Trap) -> ! {
    raise_trap(TrapReason::Wasm(trap))
}

/// Carries a Rust panic across wasm code and resumes the panic on the other
/// side.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called. Additionally no Rust destructors can be on the
/// stack. They will be skipped and not executed.
pub unsafe fn resume_panic(payload: Box<dyn Any + Send>) -> ! {
    tls::with(|info| info.unwrap().unwind_with(UnwindReason::Panic(payload)))
}

/// Stores trace message with backtrace.
#[derive(Debug)]
pub struct Trap {
    /// Original reason from where this trap originated.
    pub reason: TrapReason,
    /// Wasm backtrace of the trap, if any.
    pub backtrace: Option<Backtrace>,
}

/// Enumeration of different methods of raising a trap.
#[derive(Debug)]
pub enum TrapReason {
    /// A user-raised trap through `raise_user_trap`.
    User {
        /// The actual user trap error.
        error: Error,
        /// Whether we need to capture a backtrace for this error or not.
        needs_backtrace: bool,
    },

    /// A trap raised from Cranelift-generated code.
    Jit {
        /// The program counter where this trap originated.
        ///
        /// This is later used with side tables from compilation to translate
        /// the trapping address to a trap code.
        pc: usize,

        /// If the trap was a memory-related trap such as SIGSEGV then this
        /// field will contain the address of the inaccessible data.
        ///
        /// Note that wasm loads/stores are not guaranteed to fill in this
        /// information. Dynamically-bounds-checked memories, for example, will
        /// not access an invalid address but may instead load from NULL or may
        /// explicitly jump to a `ud2` instruction. This is only available for
        /// fault-based traps which are one of the main ways, but not the only
        /// way, to run wasm.
        faulting_addr: Option<usize>,
    },

    /// A trap raised from a wasm libcall.
    Wasm(wasmtime_environ::Trap),
}

impl TrapReason {
    /// Create a new `TrapReason::User` that does not have a backtrace yet.
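    ///
    /// A short usage sketch (the error text is illustrative):
    ///
    /// ```ignore
    /// let reason = TrapReason::user_without_backtrace(anyhow::anyhow!("oops"));
    /// // A backtrace will be captured later, when the trap actually unwinds.
    /// assert!(!reason.is_jit());
    /// ```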
    pub fn user_without_backtrace(error: Error) -> Self {
        TrapReason::User {
            error,
            needs_backtrace: true,
        }
    }

    /// Create a new `TrapReason::User` that already has a backtrace.
    pub fn user_with_backtrace(error: Error) -> Self {
        TrapReason::User {
            error,
            needs_backtrace: false,
        }
    }

    /// Is this a JIT trap?
    pub fn is_jit(&self) -> bool {
        matches!(self, TrapReason::Jit { .. })
    }
}

impl From<Error> for TrapReason {
    fn from(err: Error) -> Self {
        TrapReason::user_without_backtrace(err)
    }
}

impl From<wasmtime_environ::Trap> for TrapReason {
    fn from(code: wasmtime_environ::Trap) -> Self {
        TrapReason::Wasm(code)
    }
}

/// Catches any wasm traps that happen within the execution of `closure`,
/// returning them as a `Result`.
///
/// Highly unsafe since `closure` won't have any dtors run.
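///
/// A hedged usage sketch, assuming the caller already holds a valid
/// `*mut VMContext` for the instance being entered (`vmctx` below is
/// hypothetical):
///
/// ```ignore
/// let result = unsafe {
///     catch_traps(None, true, vmctx, |callee| {
///         // Call into wasm here, e.g. through a host-to-wasm trampoline.
///     })
/// };
/// match result {
///     Ok(()) => { /* wasm returned normally */ }
///     Err(trap) => eprintln!("wasm trapped: {:?}", trap.reason),
/// }
/// ```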
pub unsafe fn catch_traps<'a, F>(
    signal_handler: Option<*const SignalHandler<'static>>,
    capture_backtrace: bool,
    caller: *mut VMContext,
    mut closure: F,
) -> Result<(), Box<Trap>>
where
    F: FnMut(*mut VMContext),
{
    let limits = (*caller).instance_mut().runtime_limits();

    let result = CallThreadState::new(signal_handler, capture_backtrace, *limits).with(|cx| {
        wasmtime_setjmp(
            cx.jmp_buf.as_ptr(),
            call_closure::<F>,
            &mut closure as *mut F as *mut u8,
            caller,
        )
    });

    return match result {
        Ok(x) => Ok(x),
        Err((UnwindReason::Trap(reason), backtrace)) => Err(Box::new(Trap { reason, backtrace })),
        Err((UnwindReason::Panic(panic), _)) => std::panic::resume_unwind(panic),
    };

    extern "C" fn call_closure<F>(payload: *mut u8, caller: *mut VMContext)
    where
        F: FnMut(*mut VMContext),
    {
        unsafe { (*(payload as *mut F))(caller) }
    }
}

// Module to hide visibility of the `CallThreadState::prev` field and force
// usage of its accessor methods.
mod call_thread_state {
    use super::*;
    use std::mem;

    /// Temporary state stored on the stack which is registered in the `tls` module
    /// below for calls into wasm.
    pub struct CallThreadState {
        pub(super) unwind: UnsafeCell<MaybeUninit<(UnwindReason, Option<Backtrace>)>>,
        pub(super) jmp_buf: Cell<*const u8>,
        pub(super) signal_handler: Option<*const SignalHandler<'static>>,
        pub(super) capture_backtrace: bool,

        pub(crate) limits: *const VMRuntimeLimits,

        prev: Cell<tls::Ptr>,

        // The values of `VMRuntimeLimits::last_wasm_{exit_{pc,fp},entry_sp}` for
        // the *previous* `CallThreadState`. Our *current* last wasm PC/FP/SP are
        // saved in `self.limits`. We save a copy of the old registers here because
        // the `VMRuntimeLimits` typically doesn't change across nested calls into
        // Wasm (i.e. they are typically calls back into the same store and
        // `self.limits == self.prev.limits`) and we must maintain the list of
        // contiguous-Wasm-frames stack regions for backtracing purposes.
        old_last_wasm_exit_fp: Cell<usize>,
        old_last_wasm_exit_pc: Cell<usize>,
        old_last_wasm_entry_sp: Cell<usize>,
    }

    impl CallThreadState {
        #[inline]
        pub(super) fn new(
            signal_handler: Option<*const SignalHandler<'static>>,
            capture_backtrace: bool,
            limits: *const VMRuntimeLimits,
        ) -> CallThreadState {
            CallThreadState {
                unwind: UnsafeCell::new(MaybeUninit::uninit()),
                jmp_buf: Cell::new(ptr::null()),
                signal_handler,
                capture_backtrace,
                limits,
                prev: Cell::new(ptr::null()),
                old_last_wasm_exit_fp: Cell::new(0),
                old_last_wasm_exit_pc: Cell::new(0),
                old_last_wasm_entry_sp: Cell::new(0),
            }
        }

        /// Get the saved FP upon exit from Wasm for the previous `CallThreadState`.
        pub fn old_last_wasm_exit_fp(&self) -> usize {
            self.old_last_wasm_exit_fp.get()
        }

        /// Get the saved PC upon exit from Wasm for the previous `CallThreadState`.
        pub fn old_last_wasm_exit_pc(&self) -> usize {
            self.old_last_wasm_exit_pc.get()
        }

        /// Get the saved SP upon entry into Wasm for the previous `CallThreadState`.
        pub fn old_last_wasm_entry_sp(&self) -> usize {
            self.old_last_wasm_entry_sp.get()
        }

        /// Get the previous `CallThreadState`.
        pub fn prev(&self) -> tls::Ptr {
            self.prev.get()
        }

        /// Connect the link to the previous `CallThreadState`.
        ///
        /// Synchronizes the last wasm FP, PC, and SP on `self` and the old
        /// `self.prev` for the given new `prev`, and returns the old
        /// `self.prev`.
        pub unsafe fn set_prev(&self, prev: tls::Ptr) -> tls::Ptr {
            let old_prev = self.prev.get();

            // Restore the old `prev`'s saved registers in its
            // `VMRuntimeLimits`. This is necessary for when we are async
            // suspending the top `CallThreadState` and doing `set_prev(null)`
            // on it, and so any stack walking we do subsequently will start at
            // the old `prev` and look at its `VMRuntimeLimits` to get the
            // initial saved registers.
            if let Some(old_prev) = old_prev.as_ref() {
                *(*old_prev.limits).last_wasm_exit_fp.get() = self.old_last_wasm_exit_fp();
                *(*old_prev.limits).last_wasm_exit_pc.get() = self.old_last_wasm_exit_pc();
                *(*old_prev.limits).last_wasm_entry_sp.get() = self.old_last_wasm_entry_sp();
            }

            self.prev.set(prev);

            let mut old_last_wasm_exit_fp = 0;
            let mut old_last_wasm_exit_pc = 0;
            let mut old_last_wasm_entry_sp = 0;
            if let Some(prev) = prev.as_ref() {
                // We are entering a new `CallThreadState` or resuming a
                // previously suspended one. This means we will push new Wasm
                // frames that save the new Wasm FP/SP/PC registers into
                // `VMRuntimeLimits`, so we need to first save the old Wasm
                // FP/SP/PC registers into this new `CallThreadState` to
                // maintain our list of contiguous Wasm frame regions that we
                // use when capturing stack traces.
                //
                // NB: the Wasm<--->host trampolines saved the Wasm FP/SP/PC
                // registers in the active-at-that-time store's
                // `VMRuntimeLimits`. For the most recent FP/PC/SP that is the
                // `state.prev.limits` (since we haven't entered this
                // `CallThreadState` yet). And that can be a different
                // `VMRuntimeLimits` instance from the currently active
                // `state.limits`, which will be used by the upcoming call into
                // Wasm! Consider the case where we have multiple, nested calls
                // across stores (with host code in between, by necessity, since
                // only things in the same store can be linked directly
                // together):
                //
                //     | ...             |
                //     | Host            |  |
                //     +-----------------+  | stack
                //     | Wasm in store A |  | grows
                //     +-----------------+  | down
                //     | Host            |  |
                //     +-----------------+  |
                //     | Wasm in store B |  V
                //     +-----------------+
                //
                // In this scenario `state.limits != state.prev.limits`,
                // i.e. `B.limits != A.limits`! Therefore we must take care to
                // read the old FP/SP/PC from `state.prev.limits`, rather than
                // `state.limits`, and store those saved registers into the
                // current `state`.
                //
                // See also the comment above the
                // `CallThreadState::old_last_wasm_*` fields.
                old_last_wasm_exit_fp =
                    mem::replace(&mut *(*prev.limits).last_wasm_exit_fp.get(), 0);
                old_last_wasm_exit_pc =
                    mem::replace(&mut *(*prev.limits).last_wasm_exit_pc.get(), 0);
                old_last_wasm_entry_sp =
                    mem::replace(&mut *(*prev.limits).last_wasm_entry_sp.get(), 0);
            }

            self.old_last_wasm_exit_fp.set(old_last_wasm_exit_fp);
            self.old_last_wasm_exit_pc.set(old_last_wasm_exit_pc);
            self.old_last_wasm_entry_sp.set(old_last_wasm_entry_sp);

            old_prev
        }
    }
}
pub use call_thread_state::*;

enum UnwindReason {
    Panic(Box<dyn Any + Send>),
    Trap(TrapReason),
}

impl CallThreadState {
    fn with(
        mut self,
        closure: impl FnOnce(&CallThreadState) -> i32,
    ) -> Result<(), (UnwindReason, Option<Backtrace>)> {
        let ret = tls::set(&mut self, |me| closure(me));
        if ret != 0 {
            Ok(())
        } else {
            Err(unsafe { self.read_unwind() })
        }
    }

    #[cold]
    unsafe fn read_unwind(&self) -> (UnwindReason, Option<Backtrace>) {
        (*self.unwind.get()).as_ptr().read()
    }

    fn unwind_with(&self, reason: UnwindReason) -> ! {
        let backtrace = match reason {
            // Panics don't need backtraces. There is nowhere to attach the
            // hypothetical backtrace to and it doesn't really make sense to try
            // in the first place since this is a Rust problem rather than a
            // Wasm problem.
            UnwindReason::Panic(_)
            // And if we are just propagating an existing trap that already has
            // a backtrace attached to it, then there is no need to capture a
            // new backtrace either.
            | UnwindReason::Trap(TrapReason::User {
                needs_backtrace: false,
                ..
            }) => None,
            UnwindReason::Trap(_) => self.capture_backtrace(None),
        };
        unsafe {
            (*self.unwind.get()).as_mut_ptr().write((reason, backtrace));
            wasmtime_longjmp(self.jmp_buf.get());
        }
    }

    /// Trap handler using our thread-local state.
    ///
    /// * `pc` - the program counter the trap happened at
    /// * `call_handler` - a closure used to invoke the platform-specific
    ///   signal handler for each instance, if available.
    ///
    /// Attempts to handle the trap if it's a wasm trap. Returns a few
    /// different things:
    ///
    /// * null - the trap didn't look like a wasm trap and should continue as a
    ///   trap
    /// * 1 as a pointer - the trap was handled by a custom trap handler on an
    ///   instance, and the trap handler should quickly return.
    /// * a different pointer - a jmp_buf buffer to longjmp to, meaning that
    ///   the wasm trap was successfully handled.
    #[cfg_attr(target_os = "macos", allow(dead_code))] // macOS is more raw and doesn't use this
    fn take_jmp_buf_if_trap(
        &self,
        pc: *const u8,
        call_handler: impl Fn(&SignalHandler) -> bool,
    ) -> *const u8 {
        // If we haven't even started to handle traps yet, bail out.
        if self.jmp_buf.get().is_null() {
            return ptr::null();
        }

        // First up see if any instance registered has a custom trap handler,
        // in which case run them all. If anything handles the trap then we
        // return that the trap was handled.
        if let Some(handler) = self.signal_handler {
            if unsafe { call_handler(&*handler) } {
                return 1 as *const _;
            }
        }

        // If this fault wasn't in wasm code, then it's not our problem.
        if unsafe { !IS_WASM_PC(pc as usize) } {
            return ptr::null();
        }

        // If all that passed then this is indeed a wasm trap, so return the
        // `jmp_buf` passed to `wasmtime_longjmp` to resume.
        self.jmp_buf.replace(ptr::null())
    }

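    /// Records a trap originating in JIT-compiled code at `pc`, capturing a
    /// backtrace now (while the trapping frames are still on the stack) so it
    /// can be read back out by `catch_traps` after the longjmp.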
    fn set_jit_trap(&self, pc: *const u8, fp: usize, faulting_addr: Option<usize>) {
        let backtrace = self.capture_backtrace(Some((pc as usize, fp)));
        unsafe {
            (*self.unwind.get()).as_mut_ptr().write((
                UnwindReason::Trap(TrapReason::Jit {
                    pc: pc as usize,
                    faulting_addr,
                }),
                backtrace,
            ));
        }
    }

    fn capture_backtrace(&self, pc_and_fp: Option<(usize, usize)>) -> Option<Backtrace> {
        if !self.capture_backtrace {
            return None;
        }

        Some(unsafe { Backtrace::new_with_trap_state(self, pc_and_fp) })
    }

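    /// Iterates over the linked list of `CallThreadState`s on this thread,
    /// yielding `self` first and then each older activation via `prev`.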
    pub(crate) fn iter<'a>(&'a self) -> impl Iterator<Item = &Self> + 'a {
        let mut state = Some(self);
        std::iter::from_fn(move || {
            let this = state?;
            state = unsafe { this.prev().as_ref() };
            Some(this)
        })
    }
}

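// Drop guard that restores a `Cell` to a saved value when it falls out of
// scope, so the state is reset even on early returns or unwinds.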
struct ResetCell<'a, T: Copy>(&'a Cell<T>, T);

impl<T: Copy> Drop for ResetCell<'_, T> {
    #[inline]
    fn drop(&mut self) {
        self.0.set(self.1);
    }
}

// A private inner module for managing the TLS state that we require across
// calls in wasm. The WebAssembly code is called from C++ and then a trap may
// happen which requires us to read some contextual state to figure out what to
// do with the trap. This `tls` module is used to persist that information from
// the caller to the trap site.
mod tls {
    use super::CallThreadState;
    use std::ptr;

    pub use raw::Ptr;

    // An even *more* inner module for dealing with TLS. This actually has the
    // thread local variable and has functions to access the variable.
    //
    // Note that this is specially done to fully encapsulate that the accessors
    // for tls may or may not be inlined. Wasmtime's async support employs stack
    // switching which can resume execution on different OS threads. This means
    // that borrows of our TLS pointer must never live across accesses because
    // otherwise the access may be split across two threads and cause unsafety.
    //
    // This also means that extra care is taken by the runtime to save/restore
    // these TLS values when the runtime may have crossed threads.
    //
    // Note, though, that if async support is disabled at compile time then
    // these functions are free to be inlined.
    mod raw {
        use super::CallThreadState;
        use std::cell::Cell;
        use std::ptr;

        pub type Ptr = *const CallThreadState;

        // The first entry here is the `Ptr` which is what's used as part of the
        // public interface of this module. The second entry is a boolean which
        // allows the runtime to perform per-thread initialization if necessary
        // for handling traps (e.g. setting up ports on macOS and sigaltstack on
        // Unix).
        thread_local!(static PTR: Cell<(Ptr, bool)> = const { Cell::new((ptr::null(), false)) });

        #[cfg_attr(feature = "async", inline(never))] // see module docs
        #[cfg_attr(not(feature = "async"), inline)]
        pub fn replace(val: Ptr) -> Ptr {
            PTR.with(|p| {
                // When a new value is configured that means that we may be
                // entering WebAssembly so check to see if this thread has
                // performed per-thread initialization for traps.
                let (prev, initialized) = p.get();
                if !initialized {
                    super::super::sys::lazy_per_thread_init();
                }
                p.set((val, true));
                prev
            })
        }

        /// Eagerly initialize thread-local runtime functionality. This will be performed
        /// lazily by the runtime if users do not perform it eagerly.
        #[cfg_attr(feature = "async", inline(never))] // see module docs
        #[cfg_attr(not(feature = "async"), inline)]
        pub fn initialize() {
            PTR.with(|p| {
                let (state, initialized) = p.get();
                if initialized {
                    return;
                }
                super::super::sys::lazy_per_thread_init();
                p.set((state, true));
            })
        }

        #[cfg_attr(feature = "async", inline(never))] // see module docs
        #[cfg_attr(not(feature = "async"), inline)]
        pub fn get() -> Ptr {
            PTR.with(|p| p.get().0)
        }
    }

    pub use raw::initialize as tls_eager_initialize;

    /// Opaque state used to help control TLS state across stack switches for
    /// async support.
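    ///
    /// A hedged sketch of the intended take/replace pairing around a stack
    /// switch; the suspension points are assumptions about the embedder's
    /// fiber code:
    ///
    /// ```ignore
    /// // On the fiber that is about to suspend:
    /// let restore = unsafe { TlsRestore::take() };
    /// // ... switch stacks, possibly resuming on another OS thread ...
    /// // On resumption:
    /// unsafe { restore.replace() };
    /// ```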
    pub struct TlsRestore {
        state: raw::Ptr,
    }

    impl TlsRestore {
        /// Takes the TLS state that is currently configured and returns a
        /// token that is used to replace it later.
        ///
        /// This is not a safe operation since it's intended to only be used
        /// with stack switching found with fibers and async wasmtime.
        pub unsafe fn take() -> TlsRestore {
            // Our tls pointer must be set at this time, and it must not be
            // null. We need to restore the previous pointer since we're
            // removing ourselves from the call-stack, and in the process we
            // null out our own previous field for safety in case it's
            // accidentally used later.
            let state = raw::get();
            if let Some(state) = state.as_ref() {
                let prev_state = state.set_prev(ptr::null());
                raw::replace(prev_state);
            } else {
                // Null case: we aren't in a wasm context, so there's no tls to
                // save for restoration.
            }

            TlsRestore { state }
        }

        /// Restores a previous tls state back into this thread's TLS.
        ///
        /// This is unsafe because it's intended to only be used within the
        /// context of stack switching within wasmtime.
        pub unsafe fn replace(self) {
            // Null case: we aren't in a wasm context, so there's no tls
            // to restore.
            if self.state.is_null() {
                return;
            }

            // We need to configure our previous TLS pointer to whatever is in
            // TLS at this time, and then we set the current state to ourselves.
            let prev = raw::get();
            assert!((*self.state).prev().is_null());
            (*self.state).set_prev(prev);
            raw::replace(self.state);
        }
    }

    /// Configures thread local state such that for the duration of the
    /// execution of `closure` any call to `with` will yield `state`, unless
    /// this is recursively called again.
    #[inline]
    pub fn set<R>(state: &mut CallThreadState, closure: impl FnOnce(&CallThreadState) -> R) -> R {
        struct Reset<'a> {
            state: &'a CallThreadState,
        }

        impl Drop for Reset<'_> {
            #[inline]
            fn drop(&mut self) {
                unsafe {
                    let prev = self.state.set_prev(ptr::null());
                    let old_state = raw::replace(prev);
                    debug_assert!(std::ptr::eq(old_state, self.state));
                }
            }
        }

        let prev = raw::replace(state);

        unsafe {
            state.set_prev(prev);

            let reset = Reset { state };
            closure(reset.state)
        }
    }

    /// Returns the last pointer configured with `set` above, if any.
    pub fn with<R>(closure: impl FnOnce(Option<&CallThreadState>) -> R) -> R {
        let p = raw::get();
        unsafe { closure(if p.is_null() { None } else { Some(&*p) }) }
    }
}
693}