cranelift_codegen/ir/memflags.rs

//! Memory operation flags.

use core::fmt;

#[cfg(feature = "enable-serde")]
use serde::{Deserialize, Serialize};

/// A single flag bit in a `MemFlags` value. Bit positions correspond to the
/// entries of `NAMES` below.
enum FlagBit {
    /// The access is known not to trap; see `MemFlags::notrap`.
    Notrap,
    /// The effective address is known to be aligned; see `MemFlags::aligned`.
    Aligned,
    /// The accessed memory is never mutated while the function executes;
    /// see `MemFlags::readonly`.
    Readonly,
    /// The access is explicitly little-endian.
    LittleEndian,
    /// The access is explicitly big-endian.
    BigEndian,
    /// Accesses only the "heap" part of abstract state. Used for
    /// alias analysis. Mutually exclusive with "table" and "vmctx".
    Heap,
    /// Accesses only the "table" part of abstract state. Used for
    /// alias analysis. Mutually exclusive with "heap" and "vmctx".
    Table,
    /// Accesses only the "vmctx" part of abstract state. Used for
    /// alias analysis. Mutually exclusive with "heap" and "table".
    Vmctx,
}

const NAMES: [&str; 8] = [
    "notrap", "aligned", "readonly", "little", "big", "heap", "table", "vmctx",
];

/// Endianness of a memory access.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
pub enum Endianness {
    /// Little-endian
    Little,
    /// Big-endian
    Big,
}

/// Flags for memory operations like load/store.
///
/// Each of these flags introduces a limited form of undefined behavior. The flags each enable
/// certain optimizations that need to make additional assumptions. Generally, the semantics of a
/// program do not change when a flag is removed, but adding a flag may change them.
///
/// In addition, the flags determine the endianness of the memory access. By default,
/// any memory access uses the native endianness determined by the target ISA. This can
/// be overridden for individual accesses by explicitly specifying little- or big-endian
/// semantics via the flags.
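///
/// A brief usage sketch (illustrative only; it assumes the usual re-export of
/// `MemFlags` from `cranelift_codegen::ir`):
///
/// ```
/// use cranelift_codegen::ir::MemFlags;
///
/// // Start with no flags: the access may trap and may be unaligned.
/// let mut flags = MemFlags::new();
/// assert!(!flags.notrap());
///
/// // Opt into optimizations by adding flags.
/// flags.set_notrap();
/// flags.set_aligned();
/// assert_eq!(flags, MemFlags::trusted());
/// ```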
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
#[cfg_attr(feature = "enable-serde", derive(Serialize, Deserialize))]
pub struct MemFlags {
    bits: u8,
}

impl MemFlags {
    /// Create a new empty set of flags.
    pub fn new() -> Self {
        Self { bits: 0 }
    }

    /// Create a set of flags representing an access from a "trusted" address, meaning it's
    /// known to be aligned and non-trapping.
    pub fn trusted() -> Self {
        let mut result = Self::new();
        result.set_notrap();
        result.set_aligned();
        result
    }

    /// Read a flag bit.
    fn read(self, bit: FlagBit) -> bool {
        self.bits & (1 << bit as usize) != 0
    }

    /// Set a flag bit.
    fn set(&mut self, bit: FlagBit) {
        self.bits |= 1 << bit as usize
    }

    /// Set a flag bit by name.
    ///
    /// Returns true if the flag was found and set, false for an unknown flag name.
    /// Will also return false when trying to set inconsistent endianness flags.
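    ///
    /// An illustrative example (it assumes the usual re-export of `MemFlags`
    /// from `cranelift_codegen::ir`):
    ///
    /// ```
    /// use cranelift_codegen::ir::MemFlags;
    ///
    /// let mut flags = MemFlags::new();
    /// assert!(flags.set_by_name("readonly")); // known flag name
    /// assert!(flags.set_by_name("little")); // explicit little-endian
    /// assert!(!flags.set_by_name("big")); // conflicts with "little"
    /// assert!(!flags.set_by_name("bogus")); // unknown flag name
    /// assert!(flags.readonly());
    /// ```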
    pub fn set_by_name(&mut self, name: &str) -> bool {
        match NAMES.iter().position(|&s| s == name) {
            Some(bit) => {
                let bits = self.bits | 1 << bit;
                if (bits & (1 << FlagBit::LittleEndian as usize)) != 0
                    && (bits & (1 << FlagBit::BigEndian as usize)) != 0
                {
                    false
                } else {
                    self.bits = bits;
                    true
                }
            }
            None => false,
        }
    }

    /// Return endianness of the memory access. This will return the endianness
    /// explicitly specified by the flags if any, and will default to the native
    /// endianness otherwise. The native endianness has to be provided by the
    /// caller since it is not explicitly encoded in CLIF IR -- this allows a
    /// front end to create IR without having to know the target endianness.
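    ///
    /// For example (illustrative only; it assumes the usual re-export of these
    /// types from `cranelift_codegen::ir`):
    ///
    /// ```
    /// use cranelift_codegen::ir::{Endianness, MemFlags};
    ///
    /// // With no explicit flag, the caller-provided native endianness is used.
    /// let flags = MemFlags::new();
    /// assert_eq!(flags.endianness(Endianness::Little), Endianness::Little);
    ///
    /// // An explicit endianness flag overrides the native endianness.
    /// let flags = flags.with_endianness(Endianness::Big);
    /// assert_eq!(flags.endianness(Endianness::Little), Endianness::Big);
    /// ```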
    pub fn endianness(self, native_endianness: Endianness) -> Endianness {
        if self.read(FlagBit::LittleEndian) {
            Endianness::Little
        } else if self.read(FlagBit::BigEndian) {
            Endianness::Big
        } else {
            native_endianness
        }
    }

    /// Set endianness of the memory access.
    pub fn set_endianness(&mut self, endianness: Endianness) {
        match endianness {
            Endianness::Little => self.set(FlagBit::LittleEndian),
            Endianness::Big => self.set(FlagBit::BigEndian),
        };
        assert!(!(self.read(FlagBit::LittleEndian) && self.read(FlagBit::BigEndian)));
    }

    /// Set endianness of the memory access, returning new flags.
    pub fn with_endianness(mut self, endianness: Endianness) -> Self {
        self.set_endianness(endianness);
        self
    }

    /// Test if the `notrap` flag is set.
    ///
    /// Normally, trapping is part of the semantics of a load/store operation. If the platform
    /// would cause a trap when accessing the effective address, the Cranelift memory operation is
    /// also required to trap.
    ///
    /// The `notrap` flag tells Cranelift that the memory is *accessible*, which means that
    /// accesses will not trap. This makes it possible to delete an unused load or a dead store
    /// instruction.
    pub fn notrap(self) -> bool {
        self.read(FlagBit::Notrap)
    }

    /// Set the `notrap` flag.
    pub fn set_notrap(&mut self) {
        self.set(FlagBit::Notrap)
    }

    /// Set the `notrap` flag, returning new flags.
    pub fn with_notrap(mut self) -> Self {
        self.set_notrap();
        self
    }

    /// Test if the `aligned` flag is set.
    ///
    /// By default, Cranelift memory instructions work with any unaligned effective address. If the
    /// `aligned` flag is set, the instruction is permitted to trap or return a wrong result if the
    /// effective address is misaligned.
    pub fn aligned(self) -> bool {
        self.read(FlagBit::Aligned)
    }

    /// Set the `aligned` flag.
    pub fn set_aligned(&mut self) {
        self.set(FlagBit::Aligned)
    }

    /// Set the `aligned` flag, returning new flags.
    pub fn with_aligned(mut self) -> Self {
        self.set_aligned();
        self
    }

    /// Test if the `readonly` flag is set.
    ///
    /// Loads with this flag have no memory dependencies.
    /// This results in undefined behavior if the dereferenced memory is mutated at any time
    /// between when the function is called and when it is exited.
    pub fn readonly(self) -> bool {
        self.read(FlagBit::Readonly)
    }

    /// Set the `readonly` flag.
    pub fn set_readonly(&mut self) {
        self.set(FlagBit::Readonly)
    }

    /// Set the `readonly` flag, returning new flags.
    pub fn with_readonly(mut self) -> Self {
        self.set_readonly();
        self
    }

    /// Test if the `heap` bit is set.
    ///
    /// Loads and stores with this flag access the "heap" part of
    /// abstract state. This is disjoint from the "table", "vmctx",
    /// and "other" parts of abstract state. In concrete terms, this
    /// means that behavior is undefined if the same memory is also
    /// accessed by another load/store with one of the other
    /// alias-analysis bits (`table`, `vmctx`) set, or `heap` not set.
    pub fn heap(self) -> bool {
        self.read(FlagBit::Heap)
    }

    /// Set the `heap` bit. See the notes about mutual exclusion with
    /// other bits in `heap()`.
    pub fn set_heap(&mut self) {
        assert!(!self.table() && !self.vmctx());
        self.set(FlagBit::Heap);
    }

    /// Set the `heap` bit, returning new flags.
    pub fn with_heap(mut self) -> Self {
        self.set_heap();
        self
    }

    /// Test if the `table` bit is set.
    ///
    /// Loads and stores with this flag access the "table" part of
    /// abstract state. This is disjoint from the "heap", "vmctx",
    /// and "other" parts of abstract state. In concrete terms, this
    /// means that behavior is undefined if the same memory is also
    /// accessed by another load/store with one of the other
    /// alias-analysis bits (`heap`, `vmctx`) set, or `table` not set.
    pub fn table(self) -> bool {
        self.read(FlagBit::Table)
    }

    /// Set the `table` bit. See the notes about mutual exclusion with
    /// other bits in `table()`.
    pub fn set_table(&mut self) {
        assert!(!self.heap() && !self.vmctx());
        self.set(FlagBit::Table);
    }

    /// Set the `table` bit, returning new flags.
    pub fn with_table(mut self) -> Self {
        self.set_table();
        self
    }

    /// Test if the `vmctx` bit is set.
    ///
    /// Loads and stores with this flag access the "vmctx" part of
    /// abstract state. This is disjoint from the "heap", "table",
    /// and "other" parts of abstract state. In concrete terms, this
    /// means that behavior is undefined if the same memory is also
    /// accessed by another load/store with one of the other
    /// alias-analysis bits (`heap`, `table`) set, or `vmctx` not set.
    pub fn vmctx(self) -> bool {
        self.read(FlagBit::Vmctx)
    }

    /// Set the `vmctx` bit. See the notes about mutual exclusion with
    /// other bits in `vmctx()`.
    pub fn set_vmctx(&mut self) {
        assert!(!self.heap() && !self.table());
        self.set(FlagBit::Vmctx);
    }

    /// Set the `vmctx` bit, returning new flags.
    pub fn with_vmctx(mut self) -> Self {
        self.set_vmctx();
        self
    }
}

impl fmt::Display for MemFlags {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        for (i, n) in NAMES.iter().enumerate() {
            if self.bits & (1 << i) != 0 {
                write!(f, " {}", n)?;
            }
        }
        Ok(())
    }
}
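
// A small illustrative test module sketching the behavior documented above.
// It relies only on items defined in this module (`use super::*`).
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn trusted_is_notrap_and_aligned() {
        let flags = MemFlags::trusted();
        assert!(flags.notrap());
        assert!(flags.aligned());
        assert!(!flags.readonly());
    }

    #[test]
    fn set_by_name_rejects_conflicting_endianness() {
        let mut flags = MemFlags::new();
        assert!(flags.set_by_name("little"));
        // "big" conflicts with the already-set "little" bit and is refused.
        assert!(!flags.set_by_name("big"));
        assert_eq!(flags.endianness(Endianness::Big), Endianness::Little);
    }

    #[test]
    fn display_lists_set_flags_in_name_order() {
        let flags = MemFlags::new().with_notrap().with_readonly();
        // `Display` prints each set flag preceded by a space.
        assert_eq!(flags.to_string(), " notrap readonly");
    }

    #[test]
    #[should_panic]
    fn alias_bits_are_mutually_exclusive() {
        // Setting `table` after `heap` violates the documented exclusivity
        // and trips the assertion in `set_table`.
        let _ = MemFlags::new().with_heap().with_table();
    }
}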