/// Helper macro that provides shared implementations of the ISLE prelude's
/// external constructors and extractors; it is expanded inside each ISLE
/// environment's `Context` impl.
#[macro_export]
#[doc(hidden)]
macro_rules! isle_common_prelude_methods {
    () => {
        #[inline]
        fn unit(&mut self) -> Unit {
            ()
        }

        #[inline]
        fn u8_as_u32(&mut self, x: u8) -> u32 {
            x.into()
        }

        #[inline]
        fn u8_as_u64(&mut self, x: u8) -> u64 {
            x.into()
        }

        #[inline]
        fn u16_as_u64(&mut self, x: u16) -> u64 {
            x.into()
        }

        #[inline]
        fn u32_as_u64(&mut self, x: u32) -> u64 {
            x.into()
        }

        #[inline]
        fn i64_as_u64(&mut self, x: i64) -> u64 {
            x as u64
        }

        #[inline]
        fn u64_as_i32(&mut self, x: u64) -> i32 {
            x as i32
        }

        #[inline]
        fn i64_neg(&mut self, x: i64) -> i64 {
            x.wrapping_neg()
        }

        #[inline]
        fn u64_add(&mut self, x: u64, y: u64) -> u64 {
            x.wrapping_add(y)
        }

        #[inline]
        fn u64_sub(&mut self, x: u64, y: u64) -> u64 {
            x.wrapping_sub(y)
        }

        #[inline]
        fn u64_mul(&mut self, x: u64, y: u64) -> u64 {
            x.wrapping_mul(y)
        }

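        // Signed 64-bit division; fails (returns `None`) on division by zero
        // or on `i64::MIN / -1` overflow, mirroring `i64::checked_div`.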
        #[inline]
        fn u64_sdiv(&mut self, x: u64, y: u64) -> Option<u64> {
            let x = x as i64;
            let y = y as i64;
            x.checked_div(y).map(|d| d as u64)
        }

        #[inline]
        fn u64_udiv(&mut self, x: u64, y: u64) -> Option<u64> {
            x.checked_div(y)
        }

        #[inline]
        fn u64_and(&mut self, x: u64, y: u64) -> u64 {
            x & y
        }

        #[inline]
        fn u64_or(&mut self, x: u64, y: u64) -> u64 {
            x | y
        }

        #[inline]
        fn u64_xor(&mut self, x: u64, y: u64) -> u64 {
            x ^ y
        }

        #[inline]
        fn u64_shl(&mut self, x: u64, y: u64) -> u64 {
            x << y
        }

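        // Constant-fold a left shift of an `Imm64` of type `ty`: the shift
        // amount is masked to `ty.bits() - 1` and the result is truncated to
        // `ty`'s width.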
        #[inline]
        fn imm64_shl(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
            let shift_mask = (ty.bits() - 1) as u64;
            let y = (y.bits() as u64) & shift_mask;

            let ty_mask = self.ty_mask(ty) as i64;
            Imm64::new((x.bits() << y) & ty_mask)
        }

        #[inline]
        fn imm64_ushr(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
            let ty_mask = self.ty_mask(ty);
            let x = (x.bits() as u64) & ty_mask;

            let shift_mask = (ty.bits() - 1) as u64;
            let y = (y.bits() as u64) & shift_mask;

            Imm64::new((x >> y) as i64)
        }

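        // Constant-fold an arithmetic right shift of an `Imm64` of type `ty`:
        // `x` is first sign-extended from `ty`'s width, the shift amount is
        // masked to `ty.bits() - 1`, and the result is truncated to `ty`.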
        #[inline]
        fn imm64_sshr(&mut self, ty: Type, x: Imm64, y: Imm64) -> Imm64 {
            let shift = u32::checked_sub(64, ty.bits()).unwrap_or(0);
            let x = (x.bits() << shift) >> shift;

            let shift_mask = (ty.bits() - 1) as i64;
            let y = y.bits() & shift_mask;

            let ty_mask = self.ty_mask(ty) as i64;
            Imm64::new((x >> y) & ty_mask)
        }

        #[inline]
        fn u64_not(&mut self, x: u64) -> u64 {
            !x
        }

        #[inline]
        fn u64_eq(&mut self, x: u64, y: u64) -> bool {
            x == y
        }

        #[inline]
        fn u64_le(&mut self, x: u64, y: u64) -> bool {
            x <= y
        }

        #[inline]
        fn u64_lt(&mut self, x: u64, y: u64) -> bool {
            x < y
        }

        #[inline]
        fn u64_is_zero(&mut self, value: u64) -> bool {
            0 == value
        }

        #[inline]
        fn u64_is_odd(&mut self, x: u64) -> bool {
            x & 1 == 1
        }

        #[inline]
        fn i64_sextend_imm64(&mut self, ty: Type, mut x: Imm64) -> i64 {
            x.sign_extend_from_width(ty.bits());
            x.bits()
        }

        #[inline]
        fn u64_uextend_imm64(&mut self, ty: Type, x: Imm64) -> u64 {
            (x.bits() as u64) & self.ty_mask(ty)
        }

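        // Evaluate an integer comparison on two `Imm64` constants of type
        // `ty`, using the unsigned or signed extension the condition code
        // requires; returns `Imm64` 1 if the comparison holds, 0 otherwise.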
        #[inline]
        fn imm64_icmp(&mut self, ty: Type, cc: &IntCC, x: Imm64, y: Imm64) -> Imm64 {
            let ux = self.u64_uextend_imm64(ty, x);
            let uy = self.u64_uextend_imm64(ty, y);
            let sx = self.i64_sextend_imm64(ty, x);
            let sy = self.i64_sextend_imm64(ty, y);
            let result = match cc {
                IntCC::Equal => ux == uy,
                IntCC::NotEqual => ux != uy,
                IntCC::UnsignedGreaterThanOrEqual => ux >= uy,
                IntCC::UnsignedGreaterThan => ux > uy,
                IntCC::UnsignedLessThanOrEqual => ux <= uy,
                IntCC::UnsignedLessThan => ux < uy,
                IntCC::SignedGreaterThanOrEqual => sx >= sy,
                IntCC::SignedGreaterThan => sx > sy,
                IntCC::SignedLessThanOrEqual => sx <= sy,
                IntCC::SignedLessThan => sx < sy,
            };
            Imm64::new(result.into())
        }

        #[inline]
        fn ty_bits(&mut self, ty: Type) -> u8 {
            use std::convert::TryInto;
            ty.bits().try_into().unwrap()
        }

        #[inline]
        fn ty_bits_u16(&mut self, ty: Type) -> u16 {
            ty.bits() as u16
        }

        #[inline]
        fn ty_bits_u64(&mut self, ty: Type) -> u64 {
            ty.bits() as u64
        }

        #[inline]
        fn ty_bytes(&mut self, ty: Type) -> u16 {
            u16::try_from(ty.bytes()).unwrap()
        }

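        // Bitmask with the low `ty.bits()` bits set; only defined for types of
        // at most 64 bits.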
        #[inline]
        fn ty_mask(&mut self, ty: Type) -> u64 {
            let ty_bits = ty.bits();
            debug_assert_ne!(ty_bits, 0);
            let shift = 64_u64
                .checked_sub(ty_bits.into())
                .expect("unimplemented for > 64 bits");
            u64::MAX >> shift
        }

        #[inline]
        fn ty_umin(&mut self, _ty: Type) -> u64 {
            0
        }

        #[inline]
        fn ty_umax(&mut self, ty: Type) -> u64 {
            self.ty_mask(ty)
        }

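        // Bit pattern of `ty`'s minimum signed value, zero-extended to `u64`
        // (`ty_smax` below is the analogous maximum).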
        #[inline]
        fn ty_smin(&mut self, ty: Type) -> u64 {
            let ty_bits = ty.bits();
            debug_assert_ne!(ty_bits, 0);
            let shift = 64_u64
                .checked_sub(ty_bits.into())
                .expect("unimplemented for > 64 bits");
            (i64::MIN as u64) >> shift
        }

        #[inline]
        fn ty_smax(&mut self, ty: Type) -> u64 {
            let ty_bits = ty.bits();
            debug_assert_ne!(ty_bits, 0);
            let shift = 64_u64
                .checked_sub(ty_bits.into())
                .expect("unimplemented for > 64 bits");
            (i64::MAX as u64) >> shift
        }

        fn fits_in_16(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 16 && !ty.is_dynamic_vector() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn fits_in_32(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 32 && !ty.is_dynamic_vector() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn lane_fits_in_32(&mut self, ty: Type) -> Option<Type> {
            if !ty.is_vector() && !ty.is_dynamic_vector() {
                None
            } else if ty.lane_type().bits() <= 32 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn fits_in_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 64 && !ty.is_dynamic_vector() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_int_ref_scalar_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() <= 64 && !ty.is_float() && !ty.is_vector() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_32(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 32 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_32_or_64(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 32 || ty.bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_8_or_16(&mut self, ty: Type) -> Option<Type> {
            if ty.bits() == 8 || ty.bits() == 16 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn int_fits_in_32(&mut self, ty: Type) -> Option<Type> {
            match ty {
                I8 | I16 | I32 => Some(ty),
                _ => None,
            }
        }

        #[inline]
        fn ty_int_ref_64(&mut self, ty: Type) -> Option<Type> {
            match ty {
                I64 | R64 => Some(ty),
                _ => None,
            }
        }

        #[inline]
        fn ty_int(&mut self, ty: Type) -> Option<Type> {
            ty.is_int().then(|| ty)
        }

        #[inline]
        fn ty_scalar_float(&mut self, ty: Type) -> Option<Type> {
            match ty {
                F32 | F64 => Some(ty),
                _ => None,
            }
        }

        #[inline]
        fn ty_float_or_vec(&mut self, ty: Type) -> Option<Type> {
            match ty {
                F32 | F64 => Some(ty),
                ty if ty.is_vector() => Some(ty),
                _ => None,
            }
        }

        fn ty_vector_float(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.lane_type().is_float() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vector_not_float(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && !ty.lane_type().is_float() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vec64_ctor(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vec64(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vec128(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 128 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_dyn_vec64(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && dynamic_to_fixed(ty).bits() == 64 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_dyn_vec128(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && dynamic_to_fixed(ty).bits() == 128 {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vec64_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 64 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_vec128_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_vector() && ty.bits() == 128 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_addr64(&mut self, ty: Type) -> Option<Type> {
            match ty {
                I64 | R64 => Some(ty),
                _ => None,
            }
        }

        #[inline]
        fn u64_from_imm64(&mut self, imm: Imm64) -> u64 {
            imm.bits() as u64
        }

        #[inline]
        fn imm64_power_of_two(&mut self, x: Imm64) -> Option<u64> {
            let x = i64::from(x);
            let x = u64::try_from(x).ok()?;
            if x.is_power_of_two() {
                Some(x.trailing_zeros().into())
            } else {
                None
            }
        }

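        // Canonical `u64` encoding of a boolean: all ones for `true`, zero
        // for `false`.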
        #[inline]
        fn u64_from_bool(&mut self, b: bool) -> u64 {
            if b {
                u64::MAX
            } else {
                0
            }
        }

        #[inline]
        fn multi_lane(&mut self, ty: Type) -> Option<(u32, u32)> {
            if ty.lane_count() > 1 {
                Some((ty.lane_bits(), ty.lane_count()))
            } else {
                None
            }
        }

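        // For dynamic vectors, return `(lane_bits, minimum_lane_count)`.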
        #[inline]
        fn dynamic_lane(&mut self, ty: Type) -> Option<(u32, u32)> {
            if ty.is_dynamic_vector() {
                Some((ty.lane_bits(), ty.min_lane_count()))
            } else {
                None
            }
        }

        #[inline]
        fn dynamic_int_lane(&mut self, ty: Type) -> Option<u32> {
            if ty.is_dynamic_vector() && crate::machinst::ty_has_int_representation(ty.lane_type())
            {
                Some(ty.lane_bits())
            } else {
                None
            }
        }

        #[inline]
        fn dynamic_fp_lane(&mut self, ty: Type) -> Option<u32> {
            if ty.is_dynamic_vector()
                && crate::machinst::ty_has_float_or_vec_representation(ty.lane_type())
            {
                Some(ty.lane_bits())
            } else {
                None
            }
        }

        #[inline]
        fn ty_dyn64_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && ty.min_bits() == 64 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        #[inline]
        fn ty_dyn128_int(&mut self, ty: Type) -> Option<Type> {
            if ty.is_dynamic_vector() && ty.min_bits() == 128 && ty.lane_type().is_int() {
                Some(ty)
            } else {
                None
            }
        }

        fn u32_from_ieee32(&mut self, val: Ieee32) -> u32 {
            val.bits()
        }

        fn u64_from_ieee64(&mut self, val: Ieee64) -> u64 {
            val.bits()
        }

        fn u8_from_uimm8(&mut self, val: Uimm8) -> u8 {
            val
        }

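        // Negative matchers: succeed only when `ty` is *not* a 32x2 vector
        // (respectively not `I64X2`).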
        fn not_vec32x2(&mut self, ty: Type) -> Option<Type> {
            if ty.lane_bits() == 32 && ty.lane_count() == 2 {
                None
            } else {
                Some(ty)
            }
        }

        fn not_i64x2(&mut self, ty: Type) -> Option<()> {
            if ty == I64X2 {
                None
            } else {
                Some(())
            }
        }

        fn trap_code_division_by_zero(&mut self) -> TrapCode {
            TrapCode::IntegerDivisionByZero
        }

        fn trap_code_integer_overflow(&mut self) -> TrapCode {
            TrapCode::IntegerOverflow
        }

        fn trap_code_bad_conversion_to_integer(&mut self) -> TrapCode {
            TrapCode::BadConversionToInteger
        }

        fn nonzero_u64_from_imm64(&mut self, val: Imm64) -> Option<u64> {
            match val.bits() {
                0 => None,
                n => Some(n as u64),
            }
        }

        #[inline]
        fn u32_add(&mut self, a: u32, b: u32) -> u32 {
            a.wrapping_add(b)
        }

        #[inline]
        fn s32_add_fallible(&mut self, a: u32, b: u32) -> Option<u32> {
            let a = a as i32;
            let b = b as i32;
            a.checked_add(b).map(|sum| sum as u32)
        }

        #[inline]
        fn u32_nonnegative(&mut self, x: u32) -> Option<u32> {
            if (x as i32) >= 0 {
                Some(x)
            } else {
                None
            }
        }

        #[inline]
        fn u32_lteq(&mut self, a: u32, b: u32) -> Option<()> {
            if a <= b {
                Some(())
            } else {
                None
            }
        }

        #[inline]
        fn u8_lteq(&mut self, a: u8, b: u8) -> Option<()> {
            if a <= b {
                Some(())
            } else {
                None
            }
        }

        #[inline]
        fn u8_lt(&mut self, a: u8, b: u8) -> Option<()> {
            if a < b {
                Some(())
            } else {
                None
            }
        }

        #[inline]
        fn imm64(&mut self, x: u64) -> Imm64 {
            Imm64::new(x as i64)
        }

        #[inline]
        fn imm64_masked(&mut self, ty: Type, x: u64) -> Imm64 {
            Imm64::new((x & self.ty_mask(ty)) as i64)
        }

        #[inline]
        fn simm32(&mut self, x: Imm64) -> Option<u32> {
            let x64: i64 = x.into();
            let x32: i32 = x64.try_into().ok()?;
            Some(x32 as u32)
        }

        #[inline]
        fn uimm8(&mut self, x: Imm64) -> Option<u8> {
            let x64: i64 = x.into();
            let x8: u8 = x64.try_into().ok()?;
            Some(x8)
        }

        #[inline]
        fn offset32(&mut self, x: Offset32) -> u32 {
            let x: i32 = x.into();
            x as u32
        }

        #[inline]
        fn u8_and(&mut self, a: u8, b: u8) -> u8 {
            a & b
        }

        #[inline]
        fn lane_type(&mut self, ty: Type) -> Type {
            ty.lane_type()
        }

        #[inline]
        fn offset32_to_u32(&mut self, offset: Offset32) -> u32 {
            let offset: i32 = offset.into();
            offset as u32
        }

        #[inline]
        fn u32_to_offset32(&mut self, offset: u32) -> Offset32 {
            Offset32::new(offset as i32)
        }

        fn range(&mut self, start: usize, end: usize) -> Range {
            (start, end)
        }

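        // Decompose a `(start, end)` range into either `Empty` or the first
        // index plus the remaining range, for iteration from ISLE.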
        fn range_view(&mut self, (start, end): Range) -> RangeView {
            if start >= end {
                RangeView::Empty
            } else {
                RangeView::NonEmpty {
                    index: start,
                    rest: (start + 1, end),
                }
            }
        }

        #[inline]
        fn mem_flags_trusted(&mut self) -> MemFlags {
            MemFlags::trusted()
        }

        #[inline]
        fn intcc_unsigned(&mut self, x: &IntCC) -> IntCC {
            x.unsigned()
        }

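        // Return the condition code only if it is one of the signed
        // comparisons; unsigned codes and (in)equality yield `None`.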
        #[inline]
        fn signed_cond_code(&mut self, cc: &condcodes::IntCC) -> Option<condcodes::IntCC> {
            match cc {
                IntCC::Equal
                | IntCC::UnsignedGreaterThanOrEqual
                | IntCC::UnsignedGreaterThan
                | IntCC::UnsignedLessThanOrEqual
                | IntCC::UnsignedLessThan
                | IntCC::NotEqual => None,
                IntCC::SignedGreaterThanOrEqual
                | IntCC::SignedGreaterThan
                | IntCC::SignedLessThanOrEqual
                | IntCC::SignedLessThan => Some(*cc),
            }
        }

        #[inline]
        fn intcc_reverse(&mut self, cc: &IntCC) -> IntCC {
            cc.reverse()
        }

        #[inline]
        fn intcc_inverse(&mut self, cc: &IntCC) -> IntCC {
            cc.inverse()
        }

        #[inline]
        fn floatcc_reverse(&mut self, cc: &FloatCC) -> FloatCC {
            cc.reverse()
        }

        #[inline]
        fn floatcc_inverse(&mut self, cc: &FloatCC) -> FloatCC {
            cc.inverse()
        }

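        // Whether the condition code is satisfied when the operands are
        // unordered (i.e. at least one is NaN).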
        fn floatcc_unordered(&mut self, cc: &FloatCC) -> bool {
            match *cc {
                FloatCC::Unordered
                | FloatCC::UnorderedOrEqual
                | FloatCC::UnorderedOrLessThan
                | FloatCC::UnorderedOrLessThanOrEqual
                | FloatCC::UnorderedOrGreaterThan
                | FloatCC::UnorderedOrGreaterThanOrEqual => true,
                _ => false,
            }
        }

        #[inline]
        fn unpack_value_array_2(&mut self, arr: &ValueArray2) -> (Value, Value) {
            let [a, b] = *arr;
            (a, b)
        }

        #[inline]
        fn pack_value_array_2(&mut self, a: Value, b: Value) -> ValueArray2 {
            [a, b]
        }

        #[inline]
        fn unpack_value_array_3(&mut self, arr: &ValueArray3) -> (Value, Value, Value) {
            let [a, b, c] = *arr;
            (a, b, c)
        }

        #[inline]
        fn pack_value_array_3(&mut self, a: Value, b: Value, c: Value) -> ValueArray3 {
            [a, b, c]
        }

        #[inline]
        fn unpack_block_array_2(&mut self, arr: &BlockArray2) -> (BlockCall, BlockCall) {
            let [a, b] = *arr;
            (a, b)
        }

        #[inline]
        fn pack_block_array_2(&mut self, a: BlockCall, b: BlockCall) -> BlockArray2 {
            [a, b]
        }

        fn u128_as_u64(&mut self, val: u128) -> Option<u64> {
            u64::try_from(val).ok()
        }

        fn u64_as_u32(&mut self, val: u64) -> Option<u32> {
            u32::try_from(val).ok()
        }
    };
}
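
// Usage sketch (illustrative; the exact trait and type names depend on the
// ISLE environment that expands this macro): the macro is invoked inside an
// `impl` of the generated ISLE `Context` trait so that the prelude's extern
// constructors and extractors resolve to the methods above, e.g.:
//
//     impl generated_code::Context for MyIsleContext<'_> {
//         isle_common_prelude_methods!();
//         // ...environment-specific externs follow here...
//     }
//
// `MyIsleContext` is a hypothetical name for whatever context type the
// embedding environment defines.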