wasmtime_runtime/mmap.rs

//! Low-level abstraction for allocating and managing zero-filled pages
//! of memory.

use anyhow::anyhow;
use anyhow::{Context, Result};
use std::convert::TryFrom;
use std::fs::File;
use std::ops::Range;
use std::path::Path;
use std::ptr;
use std::slice;
use std::sync::Arc;

/// A simple struct consisting of a page-aligned pointer to page-aligned
/// and initially-zeroed memory and a length.
#[derive(Debug)]
pub struct Mmap {
    // Note that this is stored as a `usize` instead of a `*const` or `*mut`
    // pointer to allow this structure to be natively `Send` and `Sync` without
    // `unsafe impl`. This type is sendable across threads and shareable since
    // the coordination all happens at the OS layer.
    ptr: usize,
    len: usize,
    file: Option<Arc<File>>,
}

impl Mmap {
    /// Construct a new empty instance of `Mmap`.
    pub fn new() -> Self {
        // Rust's slices require non-null pointers, even when empty. `Vec`
        // contains code to create a non-null dangling pointer value when
        // constructed empty, so we reuse that here.
        let empty = Vec::<u8>::new();
        Self {
            ptr: empty.as_ptr() as usize,
            len: 0,
            file: None,
        }
    }

    /// Create a new `Mmap` pointing to at least `size` bytes of page-aligned accessible memory.
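    ///
    /// An illustrative sketch (not from the original docs): the requested
    /// size is rounded up to the host page size, so the resulting length is
    /// a page-size multiple of at least `size` bytes.
    ///
    /// ```ignore
    /// let map = Mmap::with_at_least(100)?;
    /// assert!(map.len() >= 100);
    /// assert_eq!(map.len() % crate::page_size(), 0);
    /// ```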
    pub fn with_at_least(size: usize) -> Result<Self> {
        let page_size = crate::page_size();
        // Round `size` up to the next multiple of the (power-of-two) page size.
        let rounded_size = (size + (page_size - 1)) & !(page_size - 1);
        Self::accessible_reserved(rounded_size, rounded_size)
    }

    /// Creates a new `Mmap` by opening the file located at `path` and mapping
    /// it into memory.
    ///
    /// The memory is mapped in read-only mode for the entire file. If portions
    /// of the file need to be modified then the `region` crate can be used to
    /// alter permissions of each page.
    ///
    /// The memory mapping and the length of the file within the mapping are
    /// returned.
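    ///
    /// An illustrative sketch (the file name is hypothetical): the returned
    /// mapping exposes the file's contents as an in-memory byte slice.
    ///
    /// ```ignore
    /// let map = Mmap::from_file(std::path::Path::new("module.cwasm"))?;
    /// let bytes: &[u8] = map.as_slice();
    /// assert_eq!(bytes.len(), map.len());
    /// ```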
    pub fn from_file(path: &Path) -> Result<Self> {
        #[cfg(unix)]
        {
            let file = File::open(path).context("failed to open file")?;
            let len = file
                .metadata()
                .context("failed to get file metadata")?
                .len();
            let len = usize::try_from(len).map_err(|_| anyhow!("file too large to map"))?;
            let ptr = unsafe {
                rustix::mm::mmap(
                    ptr::null_mut(),
                    len,
                    rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE,
                    rustix::mm::MapFlags::PRIVATE,
                    &file,
                    0,
                )
                .context(format!("mmap failed to allocate {:#x} bytes", len))?
            };

            Ok(Self {
                ptr: ptr as usize,
                len,
                file: Some(Arc::new(file)),
            })
        }

        #[cfg(windows)]
        {
            use std::fs::OpenOptions;
            use std::io;
            use std::os::windows::prelude::*;
            use windows_sys::Win32::Foundation::*;
            use windows_sys::Win32::Storage::FileSystem::*;
            use windows_sys::Win32::System::Memory::*;

            unsafe {
                // Open the file with read/execute access and only share for
                // read. This will enable us to perform the proper mmap below
                // while also disallowing other processes modifying the file
                // and having those modifications show up in our address space.
                let file = OpenOptions::new()
                    .read(true)
                    .access_mode(FILE_GENERIC_READ | FILE_GENERIC_EXECUTE)
                    .share_mode(FILE_SHARE_READ)
                    .open(path)
                    .context("failed to open file")?;

                let len = file
                    .metadata()
                    .context("failed to get file metadata")?
                    .len();
                let len = usize::try_from(len).map_err(|_| anyhow!("file too large to map"))?;

                // Create a file mapping that allows PAGE_EXECUTE_WRITECOPY.
                // This sets the maximum permissions for the mapping, but we
                // won't leave all of these permissions active at all times.
                // Execution is necessary for the generated code from Cranelift
                // and the WRITECOPY part is needed for possibly resolving
                // relocations, but otherwise writes don't happen.
                let mapping = CreateFileMappingW(
                    file.as_raw_handle() as isize,
                    ptr::null_mut(),
                    PAGE_EXECUTE_WRITECOPY,
                    0,
                    0,
                    ptr::null(),
                );
                if mapping == 0 {
                    return Err(io::Error::last_os_error())
                        .context("failed to create file mapping");
                }

                // Create a view for the entire file using all our requisite
                // permissions so that we can change the virtual permissions
                // later on.
                let ptr = MapViewOfFile(
                    mapping,
                    FILE_MAP_READ | FILE_MAP_EXECUTE | FILE_MAP_COPY,
                    0,
                    0,
                    len,
                );
                let err = io::Error::last_os_error();
                CloseHandle(mapping);
                if ptr.is_null() {
                    return Err(err)
                        .context(format!("failed to create map view of {:#x} bytes", len));
                }

                let ret = Self {
                    ptr: ptr as usize,
                    len,
                    file: Some(Arc::new(file)),
                };

                // Protect the entire file as PAGE_WRITECOPY to start (i.e.
                // remove the execute bit)
                let mut old = 0;
                if VirtualProtect(ret.ptr as *mut _, ret.len, PAGE_WRITECOPY, &mut old) == 0 {
                    return Err(io::Error::last_os_error())
                        .context("failed to change pages to `PAGE_WRITECOPY`");
                }

                Ok(ret)
            }
        }
    }

    /// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned accessible memory,
    /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
    /// must be native page-size multiples.
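    ///
    /// An illustrative sketch of the reserve-then-commit pattern (sizes are
    /// hypothetical and assume a 4 KiB page size): reserve a large mapping up
    /// front, then commit pages later with `make_accessible`.
    ///
    /// ```ignore
    /// // Reserve 1 MiB of address space with only the first 64 KiB accessible.
    /// let mut map = Mmap::accessible_reserved(0x10000, 0x100000)?;
    /// // Later, commit the next 64 KiB as read/write memory.
    /// map.make_accessible(0x10000, 0x10000)?;
    /// ```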
    #[cfg(not(target_os = "windows"))]
    pub fn accessible_reserved(accessible_size: usize, mapping_size: usize) -> Result<Self> {
        let page_size = crate::page_size();
        assert!(accessible_size <= mapping_size);
        assert_eq!(mapping_size & (page_size - 1), 0);
        assert_eq!(accessible_size & (page_size - 1), 0);

        // Mmap may return EINVAL if the size is zero, so just
        // special-case that.
        if mapping_size == 0 {
            return Ok(Self::new());
        }

        Ok(if accessible_size == mapping_size {
            // Allocate a single read-write region at once.
            let ptr = unsafe {
                rustix::mm::mmap_anonymous(
                    ptr::null_mut(),
                    mapping_size,
                    rustix::mm::ProtFlags::READ | rustix::mm::ProtFlags::WRITE,
                    rustix::mm::MapFlags::PRIVATE,
                )
                .context(format!("mmap failed to allocate {:#x} bytes", mapping_size))?
            };

            Self {
                ptr: ptr as usize,
                len: mapping_size,
                file: None,
            }
        } else {
            // Reserve the mapping size.
            let ptr = unsafe {
                rustix::mm::mmap_anonymous(
                    ptr::null_mut(),
                    mapping_size,
                    rustix::mm::ProtFlags::empty(),
                    rustix::mm::MapFlags::PRIVATE,
                )
                .context(format!("mmap failed to allocate {:#x} bytes", mapping_size))?
            };

            let mut result = Self {
                ptr: ptr as usize,
                len: mapping_size,
                file: None,
            };

            if accessible_size != 0 {
                // Commit the accessible size.
                result.make_accessible(0, accessible_size)?;
            }

            result
        })
    }

    /// Create a new `Mmap` pointing to `accessible_size` bytes of page-aligned accessible memory,
    /// within a reserved mapping of `mapping_size` bytes. `accessible_size` and `mapping_size`
    /// must be native page-size multiples.
    #[cfg(target_os = "windows")]
    pub fn accessible_reserved(accessible_size: usize, mapping_size: usize) -> Result<Self> {
        use anyhow::bail;
        use std::io;
        use windows_sys::Win32::System::Memory::*;

        if mapping_size == 0 {
            return Ok(Self::new());
        }

        let page_size = crate::page_size();
        assert!(accessible_size <= mapping_size);
        assert_eq!(mapping_size & (page_size - 1), 0);
        assert_eq!(accessible_size & (page_size - 1), 0);

        Ok(if accessible_size == mapping_size {
            // Allocate a single read-write region at once.
            let ptr = unsafe {
                VirtualAlloc(
                    ptr::null_mut(),
                    mapping_size,
                    MEM_RESERVE | MEM_COMMIT,
                    PAGE_READWRITE,
                )
            };
            if ptr.is_null() {
                bail!("VirtualAlloc failed: {}", io::Error::last_os_error());
            }

            Self {
                ptr: ptr as usize,
                len: mapping_size,
                file: None,
            }
        } else {
            // Reserve the mapping size.
            let ptr =
                unsafe { VirtualAlloc(ptr::null_mut(), mapping_size, MEM_RESERVE, PAGE_NOACCESS) };
            if ptr.is_null() {
                bail!("VirtualAlloc failed: {}", io::Error::last_os_error());
            }

            let mut result = Self {
                ptr: ptr as usize,
                len: mapping_size,
                file: None,
            };

            if accessible_size != 0 {
                // Commit the accessible size.
                result.make_accessible(0, accessible_size)?;
            }

            result
        })
    }

    /// Make the memory starting at `start` and extending for `len` bytes accessible.
    /// `start` and `len` must be native page-size multiples and describe a range within
    /// `self`'s reserved memory.
    #[cfg(not(target_os = "windows"))]
    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<()> {
        use rustix::mm::{mprotect, MprotectFlags};

        let page_size = crate::page_size();
        assert_eq!(start & (page_size - 1), 0);
        assert_eq!(len & (page_size - 1), 0);
        assert!(len <= self.len);
        assert!(start <= self.len - len);

        // Commit the accessible size.
        let ptr = self.ptr as *mut u8;
        unsafe {
            mprotect(
                ptr.add(start).cast(),
                len,
                MprotectFlags::READ | MprotectFlags::WRITE,
            )?;
        }

        Ok(())
    }

    /// Make the memory starting at `start` and extending for `len` bytes accessible.
    /// `start` and `len` must be native page-size multiples and describe a range within
    /// `self`'s reserved memory.
    #[cfg(target_os = "windows")]
    pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<()> {
        use anyhow::bail;
        use std::ffi::c_void;
        use std::io;
        use windows_sys::Win32::System::Memory::*;

        let page_size = crate::page_size();
        assert_eq!(start & (page_size - 1), 0);
        assert_eq!(len & (page_size - 1), 0);
        assert!(len <= self.len);
        assert!(start <= self.len - len);

        // Commit the accessible size.
        let ptr = self.ptr as *const u8;
        if unsafe {
            VirtualAlloc(
                ptr.add(start) as *mut c_void,
                len,
                MEM_COMMIT,
                PAGE_READWRITE,
            )
        }
        .is_null()
        {
            bail!("VirtualAlloc failed: {}", io::Error::last_os_error());
        }

        Ok(())
    }

    /// Return the allocated memory as a slice of u8.
    pub fn as_slice(&self) -> &[u8] {
        unsafe { slice::from_raw_parts(self.ptr as *const u8, self.len) }
    }

    /// Return the allocated memory as a mutable slice of u8.
    pub fn as_mut_slice(&mut self) -> &mut [u8] {
        unsafe { slice::from_raw_parts_mut(self.ptr as *mut u8, self.len) }
    }

    /// Return the allocated memory as a pointer to u8.
    pub fn as_ptr(&self) -> *const u8 {
        self.ptr as *const u8
    }

    /// Return the allocated memory as a mutable pointer to u8.
    pub fn as_mut_ptr(&self) -> *mut u8 {
        self.ptr as *mut u8
    }

    /// Return the length of the allocated memory.
    pub fn len(&self) -> usize {
        self.len
    }

    /// Return whether any memory has been allocated.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Makes the specified `range` within this `Mmap` read/execute.
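    ///
    /// An illustrative sketch (`code_len` is a hypothetical length of
    /// compiled code previously copied into the mapping):
    ///
    /// ```ignore
    /// unsafe {
    ///     mmap.make_executable(0..code_len, false)?;
    /// }
    /// ```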
    pub unsafe fn make_executable(
        &self,
        range: Range<usize>,
        enable_branch_protection: bool,
    ) -> Result<()> {
        assert!(range.start <= self.len());
        assert!(range.end <= self.len());
        assert!(range.start <= range.end);
        assert!(
            range.start % crate::page_size() == 0,
            "changing of protections isn't page-aligned",
        );
        let base = self.as_ptr().add(range.start) as *mut _;
        let len = range.end - range.start;

        #[cfg(windows)]
        {
            use std::io;
            use windows_sys::Win32::System::Memory::*;

            let flags = if enable_branch_protection {
                // TODO: We use this check to avoid an unused variable warning,
                // but some of the CFG-related flags might be applicable
                PAGE_EXECUTE_READ
            } else {
                PAGE_EXECUTE_READ
            };
            let mut old = 0;
            let result = VirtualProtect(base, len, flags, &mut old);
            if result == 0 {
                return Err(io::Error::last_os_error().into());
            }
        }

        #[cfg(not(windows))]
        {
            use rustix::mm::{mprotect, MprotectFlags};

            let flags = MprotectFlags::READ | MprotectFlags::EXEC;
            let flags = if enable_branch_protection {
                #[cfg(all(target_arch = "aarch64", target_os = "linux"))]
                if std::arch::is_aarch64_feature_detected!("bti") {
                    MprotectFlags::from_bits_unchecked(flags.bits() | /* PROT_BTI */ 0x10)
                } else {
                    flags
                }

                #[cfg(not(all(target_arch = "aarch64", target_os = "linux")))]
                flags
            } else {
                flags
            };

            mprotect(base, len, flags)?;
        }

        Ok(())
    }

    /// Makes the specified `range` within this `Mmap` read-only.
    pub unsafe fn make_readonly(&self, range: Range<usize>) -> Result<()> {
        assert!(range.start <= self.len());
        assert!(range.end <= self.len());
        assert!(range.start <= range.end);
        assert!(
            range.start % crate::page_size() == 0,
            "changing of protections isn't page-aligned",
        );
        let base = self.as_ptr().add(range.start) as *mut _;
        let len = range.end - range.start;

        #[cfg(windows)]
        {
            use std::io;
            use windows_sys::Win32::System::Memory::*;

            let mut old = 0;
            let result = VirtualProtect(base, len, PAGE_READONLY, &mut old);
            if result == 0 {
                return Err(io::Error::last_os_error().into());
            }
        }

        #[cfg(not(windows))]
        {
            use rustix::mm::{mprotect, MprotectFlags};
            mprotect(base, len, MprotectFlags::READ)?;
        }

        Ok(())
    }

    /// Returns the underlying file that this mmap is mapping, if present.
    pub fn original_file(&self) -> Option<&Arc<File>> {
        self.file.as_ref()
    }
}

impl Drop for Mmap {
    #[cfg(not(target_os = "windows"))]
    fn drop(&mut self) {
        if self.len != 0 {
            unsafe { rustix::mm::munmap(self.ptr as *mut std::ffi::c_void, self.len) }
                .expect("munmap failed");
        }
    }

    #[cfg(target_os = "windows")]
    fn drop(&mut self) {
        if self.len != 0 {
            use std::ffi::c_void;
            use windows_sys::Win32::System::Memory::*;

            if self.file.is_none() {
                let r = unsafe { VirtualFree(self.ptr as *mut c_void, 0, MEM_RELEASE) };
                assert_ne!(r, 0);
            } else {
                let r = unsafe { UnmapViewOfFile(self.ptr as *mut c_void) };
                assert_ne!(r, 0);
            }
        }
    }
}

fn _assert() {
    fn _assert_send_sync<T: Send + Sync>() {}
    _assert_send_sync::<Mmap>();
}