use super::{
    spill_weight_from_constraint, Env, LiveBundleIndex, LiveBundleVec, LiveRangeFlag,
    LiveRangeIndex, LiveRangeKey, LiveRangeList, LiveRangeListEntry, PRegIndex, RegTraversalIter,
    Requirement, SpillWeight, UseList, VRegIndex,
};
use crate::{
    ion::data_structures::{
        CodeRange, BUNDLE_MAX_NORMAL_SPILL_WEIGHT, MAX_SPLITS_PER_SPILLSET,
        MINIMAL_BUNDLE_SPILL_WEIGHT, MINIMAL_FIXED_BUNDLE_SPILL_WEIGHT,
    },
    Allocation, Function, Inst, InstPosition, OperandConstraint, OperandKind, PReg, ProgPoint,
    RegAllocError,
};
use fxhash::FxHashSet;
use smallvec::{smallvec, SmallVec};
use std::fmt::Debug;

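/// Result of trying to allocate a bundle to a particular physical register
/// (see `try_to_allocate_bundle_to_reg`): either it fits, or we report what
/// is in the way.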
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum AllocRegResult {
    Allocated(Allocation),
    Conflict(LiveBundleVec, ProgPoint),
    ConflictWithFixed(u32, ProgPoint),
    ConflictHighCost,
}

impl<'a, F: Function> Env<'a, F> {
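    /// Main allocation loop: pop bundles (with their register hints) off the
    /// priority queue and try to allocate each one until the queue is empty.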
    pub fn process_bundles(&mut self) -> Result<(), RegAllocError> {
        while let Some((bundle, reg_hint)) = self.allocation_queue.pop() {
            self.stats.process_bundle_count += 1;
            self.process_bundle(bundle, reg_hint)?;
        }
        self.stats.final_liverange_count = self.ranges.len();
        self.stats.final_bundle_count = self.bundles.len();
        self.stats.spill_bundle_count = self.spilled_bundles.len();

        Ok(())
    }

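    /// Try to assign `bundle` to physical register `reg` by scanning the
    /// register's allocation map for overlaps with the bundle's ranges. If
    /// `max_allowable_cost` is given, give up with `ConflictHighCost` as
    /// soon as the conflicting bundles' maximum spill weight exceeds it.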
    pub fn try_to_allocate_bundle_to_reg(
        &mut self,
        bundle: LiveBundleIndex,
        reg: PRegIndex,
        max_allowable_cost: Option<u32>,
    ) -> AllocRegResult {
        trace!("try_to_allocate_bundle_to_reg: {:?} -> {:?}", bundle, reg);
        let mut conflicts = smallvec![];
        self.conflict_set.clear();
        let mut max_conflict_weight = 0;
        let bundle_ranges = &self.bundles[bundle.index()].ranges;
        let from_key = LiveRangeKey::from_range(&CodeRange {
            from: bundle_ranges.first().unwrap().range.from,
            to: bundle_ranges.first().unwrap().range.from,
        });
        let mut preg_range_iter = self.pregs[reg.index()]
            .allocations
            .btree
            .range(from_key..)
            .peekable();
        trace!(
            "alloc map for {:?} in range {:?}..: {:?}",
            reg,
            from_key,
            self.pregs[reg.index()].allocations.btree
        );
        let mut first_conflict: Option<ProgPoint> = None;

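        // Walk the bundle's ranges and the preg's allocation btree in
        // lockstep; both are ordered by code position.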
        'ranges: for entry in bundle_ranges {
            trace!(" -> range LR {:?}: {:?}", entry.index, entry.range);
            let key = LiveRangeKey::from_range(&entry.range);

            let mut skips = 0;
            'alloc: loop {
                trace!(" -> PReg range {:?}", preg_range_iter.peek());

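                // Skip preg ranges that end before the current bundle range
                // begins. If we have skipped many entries in a row, re-seek
                // the btree iterator directly to this range's start instead
                // of stepping one entry at a time.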
                if preg_range_iter.peek().is_some() && *preg_range_iter.peek().unwrap().0 < key {
                    trace!(
                        "Skipping PReg range {:?}",
                        preg_range_iter.peek().unwrap().0
                    );
                    preg_range_iter.next();
                    skips += 1;
                    if skips >= 16 {
                        let from_pos = entry.range.from;
                        let from_key = LiveRangeKey::from_range(&CodeRange {
                            from: from_pos,
                            to: from_pos,
                        });
                        preg_range_iter = self.pregs[reg.index()]
                            .allocations
                            .btree
                            .range(from_key..)
                            .peekable();
                        skips = 0;
                    }
                    continue 'alloc;
                }
                skips = 0;

                if preg_range_iter.peek().is_none() {
                    trace!(" -> no more PReg allocations; so no conflict possible!");
                    break 'ranges;
                }

                if *preg_range_iter.peek().unwrap().0 > key {
                    trace!(
                        " -> next PReg allocation is at {:?}; moving to next VReg range",
                        preg_range_iter.peek().unwrap().0
                    );
                    break 'alloc;
                }

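                // Neither strictly before nor strictly after: under
                // `LiveRangeKey`'s ordering this means the two ranges
                // overlap.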
                let preg_key = *preg_range_iter.peek().unwrap().0;
                debug_assert_eq!(preg_key, key);
                let preg_range = preg_range_iter.next().unwrap().1;

                trace!(" -> btree contains range {:?} that overlaps", preg_range);
                if preg_range.is_valid() {
                    trace!(" -> from vreg {:?}", self.ranges[preg_range.index()].vreg);
                    let conflict_bundle = self.ranges[preg_range.index()].bundle;
                    trace!(" -> conflict bundle {:?}", conflict_bundle);
                    if self.conflict_set.insert(conflict_bundle) {
                        conflicts.push(conflict_bundle);
                        max_conflict_weight = std::cmp::max(
                            max_conflict_weight,
                            self.bundles[conflict_bundle.index()].cached_spill_weight(),
                        );
                        if max_allowable_cost.is_some()
                            && max_conflict_weight > max_allowable_cost.unwrap()
                        {
                            trace!(" -> reached high cost, retrying early");
                            return AllocRegResult::ConflictHighCost;
                        }
                    }

                    if first_conflict.is_none() {
                        first_conflict = Some(ProgPoint::from_index(std::cmp::max(
                            preg_key.from,
                            key.from,
                        )));
                    }
                } else {
                    trace!(" -> conflict with fixed reservation");
                    return AllocRegResult::ConflictWithFixed(
                        max_conflict_weight,
                        ProgPoint::from_index(preg_key.from),
                    );
                }
            }
        }

        if conflicts.len() > 0 {
            return AllocRegResult::Conflict(conflicts, first_conflict.unwrap());
        }

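        // No conflicts: commit the assignment by recording each of the
        // bundle's ranges in the preg's allocation map.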
        let preg = PReg::from_index(reg.index());
        trace!(" -> bundle {:?} assigned to preg {:?}", bundle, preg);
        self.bundles[bundle.index()].allocation = Allocation::reg(preg);
        for entry in &self.bundles[bundle.index()].ranges {
            self.pregs[reg.index()]
                .allocations
                .btree
                .insert(LiveRangeKey::from_range(&entry.range), entry.index);
        }

        AllocRegResult::Allocated(Allocation::reg(preg))
    }

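    /// Evict `bundle` from its currently assigned register, if any: remove
    /// its ranges from the preg's allocation map and put the bundle back on
    /// the allocation queue so it can be retried elsewhere.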
    pub fn evict_bundle(&mut self, bundle: LiveBundleIndex) {
        trace!(
            "evicting bundle {:?}: alloc {:?}",
            bundle,
            self.bundles[bundle.index()].allocation
        );
        let preg = match self.bundles[bundle.index()].allocation.as_reg() {
            Some(preg) => preg,
            None => {
                trace!(
                    " -> has no allocation! {:?}",
                    self.bundles[bundle.index()].allocation
                );
                return;
            }
        };
        let preg_idx = PRegIndex::new(preg.index());
        self.bundles[bundle.index()].allocation = Allocation::none();
        for entry in &self.bundles[bundle.index()].ranges {
            trace!(" -> removing LR {:?} from reg {:?}", entry.index, preg_idx);
            self.pregs[preg_idx.index()]
                .allocations
                .btree
                .remove(&LiveRangeKey::from_range(&entry.range));
        }
        let prio = self.bundles[bundle.index()].prio;
        trace!(" -> prio {}; back into queue", prio);
        self.allocation_queue
            .insert(bundle, prio as usize, PReg::invalid());
    }

    pub fn bundle_spill_weight(&self, bundle: LiveBundleIndex) -> u32 {
        self.bundles[bundle.index()].cached_spill_weight()
    }

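    /// Maximum cached spill weight across a set of bundles (0 if the set is
    /// empty); used to price evicting the whole set.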
    pub fn maximum_spill_weight_in_bundle_set(&self, bundles: &LiveBundleVec) -> u32 {
        trace!("maximum_spill_weight_in_bundle_set: {:?}", bundles);
        let m = bundles
            .iter()
            .map(|&b| {
                let w = self.bundles[b.index()].cached_spill_weight();
                trace!("bundle{}: {}", b.index(), w);
                w
            })
            .max()
            .unwrap_or(0);
        trace!(" -> max: {}", m);
        m
    }

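    /// Recompute a bundle's priority, cached spill weight, and cached
    /// properties (minimal, fixed, fixed-def, stack) from its ranges and
    /// their uses.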
    pub fn recompute_bundle_properties(&mut self, bundle: LiveBundleIndex) {
        trace!("recompute bundle properties: bundle {:?}", bundle);

        let minimal;
        let mut fixed = false;
        let mut fixed_def = false;
        let mut stack = false;
        let bundledata = &self.bundles[bundle.index()];
        let first_range = bundledata.ranges[0].index;
        let first_range_data = &self.ranges[first_range.index()];

        self.bundles[bundle.index()].prio = self.compute_bundle_prio(bundle);

        if first_range_data.vreg.is_invalid() {
            trace!(" -> no vreg; minimal and fixed");
            minimal = true;
            fixed = true;
        } else {
            for u in &first_range_data.uses {
                trace!(" -> use: {:?}", u);
                if let OperandConstraint::FixedReg(_) = u.operand.constraint() {
                    trace!(" -> fixed operand at {:?}: {:?}", u.pos, u.operand);
                    fixed = true;
                    if u.operand.kind() == OperandKind::Def {
                        trace!(" -> is fixed def");
                        fixed_def = true;
                    }
                }
                if let OperandConstraint::Stack = u.operand.constraint() {
                    trace!(" -> stack operand at {:?}: {:?}", u.pos, u.operand);
                    stack = true;
                }
                if stack && fixed {
                    break;
                }
            }
            trace!(" -> first range has range {:?}", first_range_data.range);
            let bundle_start = self.bundles[bundle.index()]
                .ranges
                .first()
                .unwrap()
                .range
                .from;
            let bundle_end = self.bundles[bundle.index()].ranges.last().unwrap().range.to;
            minimal = bundle_start.inst() == bundle_end.prev().inst();
            trace!(" -> minimal: {}", minimal);
        }

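        // Minimal bundles (a single instruction, or no vreg at all) get one
        // of two fixed sentinel spill weights; all other bundles sum their
        // ranges' use weights and normalize by the bundle's priority.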
        let spill_weight = if minimal {
            if fixed {
                trace!(" -> fixed and minimal");
                MINIMAL_FIXED_BUNDLE_SPILL_WEIGHT
            } else {
                trace!(" -> non-fixed and minimal");
                MINIMAL_BUNDLE_SPILL_WEIGHT
            }
        } else {
            let mut total = SpillWeight::zero();
            for entry in &self.bundles[bundle.index()].ranges {
                let range_data = &self.ranges[entry.index.index()];
                trace!(
                    " -> uses spill weight: +{:?}",
                    range_data.uses_spill_weight()
                );
                total = total + range_data.uses_spill_weight();
            }

            if self.bundles[bundle.index()].prio > 0 {
                let final_weight = (total.to_f32() as u32) / self.bundles[bundle.index()].prio;
                trace!(
                    " -> dividing by prio {}; final weight {}",
                    self.bundles[bundle.index()].prio,
                    final_weight
                );
                std::cmp::min(BUNDLE_MAX_NORMAL_SPILL_WEIGHT, final_weight)
            } else {
                0
            }
        };

        self.bundles[bundle.index()].set_cached_spill_weight_and_props(
            spill_weight,
            minimal,
            fixed,
            fixed_def,
            stack,
        );
    }

    pub fn minimal_bundle(&self, bundle: LiveBundleIndex) -> bool {
        self.bundles[bundle.index()].cached_minimal()
    }

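    /// Recompute a live range's cached spill weight (the sum of its uses'
    /// weights) and its `StartsAtDef` flag.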
    pub fn recompute_range_properties(&mut self, range: LiveRangeIndex) {
        let rangedata = &mut self.ranges[range.index()];
        let mut w = SpillWeight::zero();
        for u in &rangedata.uses {
            w = w + SpillWeight::from_bits(u.weight);
            trace!("range{}: use {:?}", range.index(), u);
        }
        rangedata.set_uses_spill_weight(w);
        if rangedata.uses.len() > 0 && rangedata.uses[0].operand.kind() == OperandKind::Def {
            rangedata.set_flag(LiveRangeFlag::StartsAtDef);
        }
    }

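    /// Return the spill bundle for `bundle`'s spillset, optionally creating
    /// and registering one if none exists yet.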
    pub fn get_or_create_spill_bundle(
        &mut self,
        bundle: LiveBundleIndex,
        create_if_absent: bool,
    ) -> Option<LiveBundleIndex> {
        let ssidx = self.bundles[bundle.index()].spillset;
        let idx = self.spillsets[ssidx.index()].spill_bundle;
        if idx.is_valid() {
            Some(idx)
        } else if create_if_absent {
            let idx = self.create_bundle();
            self.spillsets[ssidx.index()].spill_bundle = idx;
            self.bundles[idx.index()].spillset = ssidx;
            self.spilled_bundles.push(idx);
            Some(idx)
        } else {
            None
        }
    }

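    /// Split `bundle` into two at (roughly) `split_at` and requeue both
    /// halves, passing `reg_hint` along. Use-free regions at the new edges
    /// can optionally be trimmed into the spill bundle. Once a spillset has
    /// been split too many times, this falls back to
    /// `split_into_minimal_bundles` instead.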
    pub fn split_and_requeue_bundle(
        &mut self,
        bundle: LiveBundleIndex,
        mut split_at: ProgPoint,
        reg_hint: PReg,
        trim_ends_into_spill_bundle: bool,
    ) {
        self.stats.splits += 1;
        trace!(
            "split bundle {:?} at {:?} and requeue with reg hint (for first part) {:?}",
            bundle,
            split_at,
            reg_hint,
        );

        let spillset = self.bundles[bundle.index()].spillset;

        if self.spillsets[spillset.index()].splits >= MAX_SPLITS_PER_SPILLSET {
            self.split_into_minimal_bundles(bundle, reg_hint);
            return;
        }
        self.spillsets[spillset.index()].splits += 1;

        debug_assert!(!self.bundles[bundle.index()].ranges.is_empty());
        let bundle_start = self.bundles[bundle.index()]
            .ranges
            .first()
            .unwrap()
            .range
            .from;
        debug_assert!(split_at >= bundle_start);
        let bundle_end = self.bundles[bundle.index()].ranges.last().unwrap().range.to;
        debug_assert!(split_at < bundle_end);

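        // Nudge the split point so that it falls strictly inside the bundle
        // and at a Before position: if it is at the very start, advance past
        // the first use (or at least one instruction); otherwise round it up
        // to the next instruction boundary and pull it back if that runs
        // past the end.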
        if split_at == bundle_start {
            let mut first_use = None;
            'outer: for entry in &self.bundles[bundle.index()].ranges {
                for u in &self.ranges[entry.index.index()].uses {
                    first_use = Some(u.pos);
                    break 'outer;
                }
            }
            trace!(" -> first use loc is {:?}", first_use);
            split_at = match first_use {
                Some(pos) => {
                    if pos.inst() == bundle_start.inst() {
                        ProgPoint::before(pos.inst().next())
                    } else {
                        ProgPoint::before(pos.inst())
                    }
                }
                None => ProgPoint::before(
                    self.bundles[bundle.index()]
                        .ranges
                        .first()
                        .unwrap()
                        .range
                        .from
                        .inst()
                        .next(),
                ),
            };
            trace!(
                "split point is at bundle start; advancing to {:?}",
                split_at
            );
        } else {
            if split_at.pos() == InstPosition::After {
                split_at = split_at.next();
            }
            if split_at >= bundle_end {
                split_at = split_at.prev().prev();
            }
        }

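        // The split point is now final. Find the last range that stays in
        // the old bundle and the first range that moves to the new bundle;
        // these coincide when the split point falls inside a single range.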
        debug_assert!(split_at > bundle_start && split_at < bundle_end);

        trace!(" -> LRs: {:?}", self.bundles[bundle.index()].ranges);

        let mut last_lr_in_old_bundle_idx = 0;
        let mut first_lr_in_new_bundle_idx = 0;
        for (i, entry) in self.bundles[bundle.index()].ranges.iter().enumerate() {
            if split_at > entry.range.from {
                last_lr_in_old_bundle_idx = i;
                first_lr_in_new_bundle_idx = i;
            }
            if split_at < entry.range.to {
                first_lr_in_new_bundle_idx = i;
                break;
            }
        }

        trace!(
            " -> last LR in old bundle: LR {:?}",
            self.bundles[bundle.index()].ranges[last_lr_in_old_bundle_idx]
        );
        trace!(
            " -> first LR in new bundle: LR {:?}",
            self.bundles[bundle.index()].ranges[first_lr_in_new_bundle_idx]
        );

        let mut new_lr_list: LiveRangeList = self.bundles[bundle.index()]
            .ranges
            .iter()
            .cloned()
            .skip(first_lr_in_new_bundle_idx)
            .collect();
        self.bundles[bundle.index()]
            .ranges
            .truncate(last_lr_in_old_bundle_idx + 1);
        self.bundles[bundle.index()].ranges.shrink_to_fit();

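        // If the split point lands strictly inside a live range, split that
        // range in two: uses at or after the split point move to a freshly
        // created range that becomes the head of the new bundle's list.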
        if split_at > new_lr_list[0].range.from {
            debug_assert_eq!(last_lr_in_old_bundle_idx, first_lr_in_new_bundle_idx);
            let orig_lr = new_lr_list[0].index;
            let new_lr = self.create_liverange(CodeRange {
                from: split_at,
                to: new_lr_list[0].range.to,
            });
            self.ranges[new_lr.index()].vreg = self.ranges[orig_lr.index()].vreg;
            trace!(" -> splitting LR {:?} into {:?}", orig_lr, new_lr);
            let first_use = self.ranges[orig_lr.index()]
                .uses
                .iter()
                .position(|u| u.pos >= split_at)
                .unwrap_or(self.ranges[orig_lr.index()].uses.len());
            let rest_uses: UseList = self.ranges[orig_lr.index()]
                .uses
                .iter()
                .cloned()
                .skip(first_use)
                .collect();
            self.ranges[new_lr.index()].uses = rest_uses;
            self.ranges[orig_lr.index()].uses.truncate(first_use);
            self.ranges[orig_lr.index()].uses.shrink_to_fit();
            self.recompute_range_properties(orig_lr);
            self.recompute_range_properties(new_lr);
            new_lr_list[0].index = new_lr;
            new_lr_list[0].range = self.ranges[new_lr.index()].range;
            self.ranges[orig_lr.index()].range.to = split_at;
            self.bundles[bundle.index()].ranges[last_lr_in_old_bundle_idx].range =
                self.ranges[orig_lr.index()].range;

            self.vregs[self.ranges[new_lr.index()].vreg.index()]
                .ranges
                .push(LiveRangeListEntry {
                    range: self.ranges[new_lr.index()].range,
                    index: new_lr,
                });
        }

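        // Hand the tail ranges to a freshly created bundle that shares the
        // same spillset.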
        let new_bundle = self.create_bundle();
        trace!(" -> creating new bundle {:?}", new_bundle);
        self.bundles[new_bundle.index()].spillset = spillset;
        for entry in &new_lr_list {
            self.ranges[entry.index.index()].bundle = new_bundle;
        }
        self.bundles[new_bundle.index()].ranges = new_lr_list;

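        // Optionally trim use-free regions at the split edges (the tail of
        // the old bundle and the head of the new one) into the spill bundle,
        // so that the parts which actually need a register stay as short as
        // possible.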
        if trim_ends_into_spill_bundle {
            while let Some(entry) = self.bundles[bundle.index()].ranges.last().cloned() {
                let end = entry.range.to;
                let vreg = self.ranges[entry.index.index()].vreg;
                let last_use = self.ranges[entry.index.index()].uses.last().map(|u| u.pos);
                if last_use.is_none() {
                    let spill = self
                        .get_or_create_spill_bundle(bundle, true)
                        .unwrap();
                    trace!(
                        " -> bundle {:?} range {:?}: no uses; moving to spill bundle {:?}",
                        bundle,
                        entry.index,
                        spill
                    );
                    self.bundles[spill.index()].ranges.push(entry);
                    self.bundles[bundle.index()].ranges.pop();
                    self.ranges[entry.index.index()].bundle = spill;
                    continue;
                }
                let last_use = last_use.unwrap();
                let split = ProgPoint::before(last_use.inst().next());
                if split < end {
                    let spill = self
                        .get_or_create_spill_bundle(bundle, true)
                        .unwrap();
                    self.bundles[bundle.index()]
                        .ranges
                        .last_mut()
                        .unwrap()
                        .range
                        .to = split;
                    self.ranges[self.bundles[bundle.index()]
                        .ranges
                        .last()
                        .unwrap()
                        .index
                        .index()]
                    .range
                    .to = split;
                    let range = CodeRange {
                        from: split,
                        to: end,
                    };
                    let empty_lr = self.create_liverange(range);
                    self.bundles[spill.index()].ranges.push(LiveRangeListEntry {
                        range,
                        index: empty_lr,
                    });
                    self.ranges[empty_lr.index()].bundle = spill;
                    self.vregs[vreg.index()].ranges.push(LiveRangeListEntry {
                        range,
                        index: empty_lr,
                    });
                    trace!(
                        " -> bundle {:?} range {:?}: last use implies split point {:?}",
                        bundle,
                        entry.index,
                        split
                    );
                    trace!(
                        " -> moving trailing empty region to new spill bundle {:?} with new LR {:?}",
                        spill,
                        empty_lr
                    );
                }
                break;
            }
            while let Some(entry) = self.bundles[new_bundle.index()].ranges.first().cloned() {
                if self.ranges[entry.index.index()].has_flag(LiveRangeFlag::StartsAtDef) {
                    break;
                }
                let start = entry.range.from;
                let vreg = self.ranges[entry.index.index()].vreg;
                let first_use = self.ranges[entry.index.index()].uses.first().map(|u| u.pos);
                if first_use.is_none() {
                    let spill = self
                        .get_or_create_spill_bundle(new_bundle, true)
                        .unwrap();
                    trace!(
                        " -> bundle {:?} range {:?}: no uses; moving to spill bundle {:?}",
                        new_bundle,
                        entry.index,
                        spill
                    );
                    self.bundles[spill.index()].ranges.push(entry);
                    self.bundles[new_bundle.index()].ranges.drain(..1);
                    self.ranges[entry.index.index()].bundle = spill;
                    continue;
                }
                let first_use = first_use.unwrap();
                let split = ProgPoint::before(first_use.inst());
                if split > start {
                    let spill = self
                        .get_or_create_spill_bundle(new_bundle, true)
                        .unwrap();
                    self.bundles[new_bundle.index()]
                        .ranges
                        .first_mut()
                        .unwrap()
                        .range
                        .from = split;
                    self.ranges[self.bundles[new_bundle.index()]
                        .ranges
                        .first()
                        .unwrap()
                        .index
                        .index()]
                    .range
                    .from = split;
                    let range = CodeRange {
                        from: start,
                        to: split,
                    };
                    let empty_lr = self.create_liverange(range);
                    self.bundles[spill.index()].ranges.push(LiveRangeListEntry {
                        range,
                        index: empty_lr,
                    });
                    self.ranges[empty_lr.index()].bundle = spill;
                    self.vregs[vreg.index()].ranges.push(LiveRangeListEntry {
                        range,
                        index: empty_lr,
                    });
                    trace!(
                        " -> bundle {:?} range {:?}: first use implies split point {:?}",
                        bundle,
                        entry.index,
                        first_use,
                    );
                    trace!(
                        " -> moving leading empty region to new spill bundle {:?} with new LR {:?}",
                        spill,
                        empty_lr
                    );
                }
                break;
            }
        }

        if self.bundles[bundle.index()].ranges.len() > 0 {
            self.recompute_bundle_properties(bundle);
            let prio = self.bundles[bundle.index()].prio;
            self.allocation_queue
                .insert(bundle, prio as usize, reg_hint);
        }
        if self.bundles[new_bundle.index()].ranges.len() > 0 {
            self.recompute_bundle_properties(new_bundle);
            let prio = self.bundles[new_bundle.index()].prio;
            self.allocation_queue
                .insert(new_bundle, prio as usize, reg_hint);
        }
    }

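    /// Last-resort split: break `bundle` into minimal per-instruction
    /// bundles, one per used instruction, and put all of the gaps between
    /// uses into the spillset's spill bundle.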
    pub fn split_into_minimal_bundles(&mut self, bundle: LiveBundleIndex, reg_hint: PReg) {
        let mut removed_lrs: FxHashSet<LiveRangeIndex> = FxHashSet::default();
        let mut removed_lrs_vregs: FxHashSet<VRegIndex> = FxHashSet::default();
        let mut new_lrs: SmallVec<[(VRegIndex, LiveRangeIndex); 16]> = smallvec![];
        let mut new_bundles: SmallVec<[LiveBundleIndex; 16]> = smallvec![];

        let spill = self
            .get_or_create_spill_bundle(bundle, true)
            .unwrap();

        trace!(
            "Splitting bundle {:?} into minimal bundles with reg hint {}",
            bundle,
            reg_hint
        );

        let mut last_lr: Option<LiveRangeIndex> = None;
        let mut last_bundle: Option<LiveBundleIndex> = None;
        let mut last_inst: Option<Inst> = None;
        let mut last_vreg: Option<VRegIndex> = None;

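        // For each use in each range: append to the previous minimal LR if
        // the use is on the same instruction and vreg; if only the
        // instruction matches, open a new LR in the same bundle; otherwise
        // open a new LR in a brand-new single-instruction bundle, routing
        // any gap since the previous use into the spill bundle.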
        for entry_idx in 0..self.bundles[bundle.index()].ranges.len() {
            let entry = self.bundles[bundle.index()].ranges[entry_idx];
            let lr_from = entry.range.from;
            let lr_to = entry.range.to;

            removed_lrs.insert(entry.index);
            let vreg = self.ranges[entry.index.index()].vreg;
            removed_lrs_vregs.insert(vreg);
            trace!(" -> removing old LR {:?} for vreg {:?}", entry.index, vreg);

            let mut last_live_pos = entry.range.from;
            for use_idx in 0..self.ranges[entry.index.index()].uses.len() {
                let u = self.ranges[entry.index.index()].uses[use_idx];
                trace!(" -> use {:?} (last_live_pos {:?})", u, last_live_pos);

                if Some(u.pos.inst()) == last_inst && Some(vreg) == last_vreg {
                    self.ranges[last_lr.unwrap().index()].uses.push(u);
                    trace!(" -> appended to last LR {:?}", last_lr.unwrap());
                    continue;
                }

                let to = std::cmp::min(ProgPoint::before(u.pos.inst().next()), lr_to);

                if Some(u.pos.inst()) == last_inst {
                    let cr = CodeRange { from: u.pos, to };
                    let lr = self.create_liverange(cr);
                    new_lrs.push((vreg, lr));
                    self.ranges[lr.index()].uses.push(u);
                    self.ranges[lr.index()].vreg = vreg;

                    trace!(
                        " -> created new LR {:?} but adding to existing bundle {:?}",
                        lr,
                        last_bundle.unwrap()
                    );
                    self.bundles[last_bundle.unwrap().index()]
                        .ranges
                        .last_mut()
                        .unwrap()
                        .range
                        .to = u.pos;
                    self.ranges[last_lr.unwrap().index()].range.to = u.pos;
                    self.bundles[last_bundle.unwrap().index()]
                        .ranges
                        .push(LiveRangeListEntry {
                            range: cr,
                            index: lr,
                        });
                    self.ranges[lr.index()].bundle = last_bundle.unwrap();
                    last_live_pos = ProgPoint::before(u.pos.inst().next());
                    continue;
                }

                let pos = ProgPoint::before(u.pos.inst());
                let pos = std::cmp::max(lr_from, pos);
                let cr = CodeRange { from: pos, to };
                let lr = self.create_liverange(cr);
                new_lrs.push((vreg, lr));
                self.ranges[lr.index()].uses.push(u);
                self.ranges[lr.index()].vreg = vreg;

                let new_bundle = self.create_bundle();
                self.ranges[lr.index()].bundle = new_bundle;
                self.bundles[new_bundle.index()].spillset = self.bundles[bundle.index()].spillset;
                self.bundles[new_bundle.index()]
                    .ranges
                    .push(LiveRangeListEntry {
                        range: cr,
                        index: lr,
                    });
                new_bundles.push(new_bundle);

                if u.operand.kind() == OperandKind::Def {
                    self.ranges[lr.index()].set_flag(LiveRangeFlag::StartsAtDef);
                }

                trace!(
                    " -> created new LR {:?} range {:?} with new bundle {:?} for this use",
                    lr,
                    cr,
                    new_bundle
                );

                if pos > last_live_pos {
                    let cr = CodeRange {
                        from: last_live_pos,
                        to: pos,
                    };
                    let spill_lr = self.create_liverange(cr);
                    self.ranges[spill_lr.index()].vreg = vreg;
                    self.ranges[spill_lr.index()].bundle = spill;
                    new_lrs.push((vreg, spill_lr));
                    self.bundles[spill.index()].ranges.push(LiveRangeListEntry {
                        range: cr,
                        index: spill_lr,
                    });
                    self.ranges[spill_lr.index()].bundle = spill;
                    trace!(
                        " -> put intervening range {:?} in new LR {:?} in spill bundle {:?}",
                        cr,
                        spill_lr,
                        spill
                    );
                }
                last_live_pos = ProgPoint::before(u.pos.inst().next());

                last_lr = Some(lr);
                last_bundle = Some(new_bundle);
                last_inst = Some(u.pos.inst());
                last_vreg = Some(vreg);
            }

            self.ranges[entry.index.index()].uses = Default::default();

            if entry.range.to > last_live_pos {
                let cr = CodeRange {
                    from: last_live_pos,
                    to: entry.range.to,
                };
                let spill_lr = self.create_liverange(cr);
                self.ranges[spill_lr.index()].vreg = vreg;
                self.ranges[spill_lr.index()].bundle = spill;
                new_lrs.push((vreg, spill_lr));
                self.bundles[spill.index()].ranges.push(LiveRangeListEntry {
                    range: cr,
                    index: spill_lr,
                });
                self.ranges[spill_lr.index()].bundle = spill;
                trace!(
                    " -> put trailing range {:?} in new LR {:?} in spill bundle {:?}",
                    cr,
                    spill_lr,
                    spill
                );
            }
        }

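        // The original bundle gives up all of its ranges. Rewrite the
        // affected vregs' range lists to drop the removed LRs and add the
        // new ones, then recompute and enqueue every freshly created bundle.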
        self.bundles[bundle.index()].ranges.clear();
        self.bundles[bundle.index()].ranges.shrink_to_fit();

        for vreg in removed_lrs_vregs {
            self.vregs[vreg.index()]
                .ranges
                .retain(|entry| !removed_lrs.contains(&entry.index));
        }

        for (vreg, lr) in new_lrs {
            let range = self.ranges[lr.index()].range;
            let entry = LiveRangeListEntry { range, index: lr };
            self.vregs[vreg.index()].ranges.push(entry);
        }

        for bundle in new_bundles {
            if self.bundles[bundle.index()].ranges.len() > 0 {
                self.recompute_bundle_properties(bundle);
                let prio = self.bundles[bundle.index()].prio;
                self.allocation_queue
                    .insert(bundle, prio as usize, reg_hint);
            }
        }
    }

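    /// Attempt to allocate a single bundle: compute its requirement, probe
    /// candidate registers, and on failure either evict the conflicting
    /// bundles, split this bundle, or send it to a spill bundle.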
    pub fn process_bundle(
        &mut self,
        bundle: LiveBundleIndex,
        reg_hint: PReg,
    ) -> Result<(), RegAllocError> {
        let class = self.spillsets[self.bundles[bundle.index()].spillset.index()].class;
        let mut hint_reg = if reg_hint != PReg::invalid() {
            reg_hint
        } else {
            self.spillsets[self.bundles[bundle.index()].spillset.index()].reg_hint
        };
        if self.pregs[hint_reg.index()].is_stack {
            hint_reg = PReg::invalid();
        }
        trace!("process_bundle: bundle {:?} hint {:?}", bundle, hint_reg,);

        let req = match self.compute_requirement(bundle) {
            Ok(req) => req,
            Err(conflict) => {
                debug_assert!(
                    !self.minimal_bundle(bundle),
                    "Minimal bundle with conflict!"
                );
                self.split_and_requeue_bundle(
                    bundle,
                    conflict.suggested_split_point(),
                    reg_hint,
                    conflict.should_trim_edges_around_split(),
                );
                return Ok(());
            }
        };

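        // A bundle with no register requirement at all can be moved
        // wholesale into its spillset's spill bundle, provided one already
        // exists; this skips register probing entirely.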
        match req {
            Requirement::Any => {
                if let Some(spill) = self.get_or_create_spill_bundle(bundle, false) {
                    let mut list =
                        std::mem::replace(&mut self.bundles[bundle.index()].ranges, smallvec![]);
                    for entry in &list {
                        self.ranges[entry.index.index()].bundle = spill;
                    }
                    self.bundles[spill.index()].ranges.extend(list.drain(..));
                    return Ok(());
                }
            }
            _ => {}
        }

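        // Probe candidate registers (hint first, then the rest of the class
        // in an offset-rotated order), remembering both the cheapest set of
        // bundles we could evict and the cheapest split point implied by a
        // conflict, in case no register is free.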
        let mut attempts = 0;
        loop {
            attempts += 1;
            trace!("attempt {}, req {:?}", attempts, req);
            debug_assert!(attempts < 100 * self.func.num_insts());

            let fixed_preg = match req {
                Requirement::FixedReg(preg) | Requirement::FixedStack(preg) => Some(preg),
                Requirement::Register => None,
                Requirement::Stack => {
                    self.spillsets[self.bundles[bundle.index()].spillset.index()].required = true;
                    return Ok(());
                }

                Requirement::Any => {
                    self.spilled_bundles.push(bundle);
                    return Ok(());
                }
            };
            let mut lowest_cost_evict_conflict_set: Option<LiveBundleVec> = None;
            let mut lowest_cost_evict_conflict_cost: Option<u32> = None;

            let mut lowest_cost_split_conflict_cost: Option<u32> = None;
            let mut lowest_cost_split_conflict_point = ProgPoint::before(Inst::new(0));
            let mut lowest_cost_split_conflict_reg = PReg::invalid();

            let scan_offset = self.ranges[self.bundles[bundle.index()].ranges[0].index.index()]
                .range
                .from
                .inst()
                .index()
                + bundle.index();

            self.stats.process_bundle_reg_probe_start_any += 1;
            for preg in RegTraversalIter::new(
                self.env,
                class,
                hint_reg,
                PReg::invalid(),
                scan_offset,
                fixed_preg,
            ) {
                self.stats.process_bundle_reg_probes_any += 1;
                let preg_idx = PRegIndex::new(preg.index());
                trace!("trying preg {:?}", preg_idx);

                let scan_limit_cost = match (
                    lowest_cost_evict_conflict_cost,
                    lowest_cost_split_conflict_cost,
                ) {
                    (Some(a), Some(b)) => Some(std::cmp::max(a, b)),
                    _ => None,
                };
                match self.try_to_allocate_bundle_to_reg(bundle, preg_idx, scan_limit_cost) {
                    AllocRegResult::Allocated(alloc) => {
                        self.stats.process_bundle_reg_success_any += 1;
                        trace!(" -> allocated to any {:?}", preg_idx);
                        self.spillsets[self.bundles[bundle.index()].spillset.index()].reg_hint =
                            alloc.as_reg().unwrap();
                        return Ok(());
                    }
                    AllocRegResult::Conflict(bundles, first_conflict_point) => {
                        trace!(
                            " -> conflict with bundles {:?}, first conflict at {:?}",
                            bundles,
                            first_conflict_point
                        );

                        let conflict_cost = self.maximum_spill_weight_in_bundle_set(&bundles);

                        if lowest_cost_evict_conflict_cost.is_none()
                            || conflict_cost < lowest_cost_evict_conflict_cost.unwrap()
                        {
                            lowest_cost_evict_conflict_cost = Some(conflict_cost);
                            lowest_cost_evict_conflict_set = Some(bundles);
                        }

                        let loop_depth = self.cfginfo.approx_loop_depth
                            [self.cfginfo.insn_block[first_conflict_point.inst().index()].index()];
                        let move_cost = spill_weight_from_constraint(
                            OperandConstraint::Reg,
                            loop_depth as usize,
                            true,
                        )
                        .to_int();
                        if lowest_cost_split_conflict_cost.is_none()
                            || (conflict_cost + move_cost)
                                < lowest_cost_split_conflict_cost.unwrap()
                        {
                            lowest_cost_split_conflict_cost = Some(conflict_cost + move_cost);
                            lowest_cost_split_conflict_point = first_conflict_point;
                            lowest_cost_split_conflict_reg = preg;
                        }
                    }
                    AllocRegResult::ConflictWithFixed(max_cost, point) => {
                        trace!(" -> conflict with fixed alloc; cost of other bundles up to point is {}, conflict at {:?}", max_cost, point);

                        let loop_depth = self.cfginfo.approx_loop_depth
                            [self.cfginfo.insn_block[point.inst().index()].index()];
                        let move_cost = spill_weight_from_constraint(
                            OperandConstraint::Reg,
                            loop_depth as usize,
                            true,
                        )
                        .to_int();

                        if lowest_cost_split_conflict_cost.is_none()
                            || (max_cost + move_cost) < lowest_cost_split_conflict_cost.unwrap()
                        {
                            lowest_cost_split_conflict_cost = Some(max_cost + move_cost);
                            lowest_cost_split_conflict_point = point;
                            lowest_cost_split_conflict_reg = preg;
                        }
                    }
                    AllocRegResult::ConflictHighCost => {
                        continue;
                    }
                }
            }

            trace!(
                " -> lowest cost evict: set {:?}, cost {:?}",
                lowest_cost_evict_conflict_set,
                lowest_cost_evict_conflict_cost,
            );
            trace!(
                " -> lowest cost split: cost {:?}, point {:?}, reg {:?}",
                lowest_cost_split_conflict_cost,
                lowest_cost_split_conflict_point,
                lowest_cost_split_conflict_reg
            );

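            // No register could take the bundle outright. Choose between
            // evicting the cheapest conflicting set and splitting at the
            // cheapest conflict point, based on relative spill weights.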
            debug_assert!(
                lowest_cost_split_conflict_cost.is_some()
                    || lowest_cost_evict_conflict_cost.is_some()
            );

            let our_spill_weight = self.bundle_spill_weight(bundle);
            trace!(" -> our spill weight: {}", our_spill_weight);

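            // A minimal bundle cannot be split further. If eviction cannot
            // help either, then either the program genuinely has too many
            // live registers at this point or the allocator has a bug.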
            if self.minimal_bundle(bundle)
                && (attempts >= 2
                    || lowest_cost_evict_conflict_cost.is_none()
                    || lowest_cost_evict_conflict_cost.unwrap() >= our_spill_weight)
            {
                if let Requirement::Register = req {
                    let range = self.bundles[bundle.index()].ranges[0].range;
                    trace!("checking for too many live regs");
                    let mut min_bundles_assigned = 0;
                    let mut fixed_assigned = 0;
                    let mut total_regs = 0;
                    for preg in self.env.preferred_regs_by_class[class as u8 as usize]
                        .iter()
                        .chain(self.env.non_preferred_regs_by_class[class as u8 as usize].iter())
                    {
                        trace!(" -> PR {:?}", preg);
                        let start = LiveRangeKey::from_range(&CodeRange {
                            from: range.from.prev(),
                            to: range.from.prev(),
                        });
                        for (key, lr) in self.pregs[preg.index()].allocations.btree.range(start..) {
                            let preg_range = key.to_range();
                            if preg_range.to <= range.from {
                                continue;
                            }
                            if preg_range.from >= range.to {
                                break;
                            }
                            if lr.is_valid() {
                                if self.minimal_bundle(self.ranges[lr.index()].bundle) {
                                    trace!(" -> min bundle {:?}", lr);
                                    min_bundles_assigned += 1;
                                } else {
                                    trace!(" -> non-min bundle {:?}", lr);
                                }
                            } else {
                                trace!(" -> fixed bundle");
                                fixed_assigned += 1;
                            }
                        }
                        total_regs += 1;
                    }
                    trace!(
                        " -> total {}, fixed {}, min {}",
                        total_regs,
                        fixed_assigned,
                        min_bundles_assigned
                    );
                    if min_bundles_assigned + fixed_assigned >= total_regs {
                        return Err(RegAllocError::TooManyLiveRegs);
                    }
                }

                panic!("Could not allocate minimal bundle, but the allocation problem should be possible to solve");
            }

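            // Split once our own spill weight no longer exceeds the cheapest
            // eviction cost (or after a failed attempt, or when there is
            // nothing to evict); otherwise evict the cheapest conflicting
            // bundles and retry this bundle.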
            if !self.minimal_bundle(bundle)
                && (attempts >= 2
                    || lowest_cost_evict_conflict_cost.is_none()
                    || our_spill_weight <= lowest_cost_evict_conflict_cost.unwrap())
            {
                trace!(
                    " -> deciding to split: our spill weight is {}",
                    self.bundle_spill_weight(bundle)
                );
                let bundle_start = self.bundles[bundle.index()].ranges[0].range.from;
                let mut split_at_point =
                    std::cmp::max(lowest_cost_split_conflict_point, bundle_start);
                let requeue_with_reg = lowest_cost_split_conflict_reg;

                let bundle_start_depth = self.cfginfo.approx_loop_depth
                    [self.cfginfo.insn_block[bundle_start.inst().index()].index()];
                let split_at_depth = self.cfginfo.approx_loop_depth
                    [self.cfginfo.insn_block[split_at_point.inst().index()].index()];
                if split_at_depth > bundle_start_depth {
                    for block in (self.cfginfo.insn_block[bundle_start.inst().index()].index() + 1)
                        ..=self.cfginfo.insn_block[split_at_point.inst().index()].index()
                    {
                        if self.cfginfo.approx_loop_depth[block] > bundle_start_depth {
                            split_at_point = self.cfginfo.block_entry[block];
                            break;
                        }
                    }
                }

                self.split_and_requeue_bundle(
                    bundle,
                    split_at_point,
                    requeue_with_reg,
                    true,
                );
                return Ok(());
            } else {
                self.stats.evict_bundle_event += 1;
                for &bundle in &lowest_cost_evict_conflict_set.unwrap() {
                    trace!(" -> evicting {:?}", bundle);
                    self.evict_bundle(bundle);
                    self.stats.evict_bundle_count += 1;
                }
            }
        }
    }
}