//===- InlineSpiller.cpp - Insert spills and restores inline --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//
#include "SplitKit.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalCalc.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/Spiller.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <tuple>
#include <utility>
#include <vector>
using namespace llvm;
#define DEBUG_TYPE "regalloc"
STATISTIC(NumSpilledRanges, "Number of spilled live ranges");
STATISTIC(NumSnippets, "Number of spilled snippets");
STATISTIC(NumSpills, "Number of spills inserted");
STATISTIC(NumSpillsRemoved, "Number of spills removed");
STATISTIC(NumReloads, "Number of reloads inserted");
STATISTIC(NumReloadsRemoved, "Number of reloads removed");
STATISTIC(NumFolded, "Number of folded stack accesses");
STATISTIC(NumFoldedLoads, "Number of folded loads");
STATISTIC(NumRemats, "Number of rematerialized defs for spilling");
static cl::opt<bool> DisableHoisting("disable-spill-hoist", cl::Hidden,
cl::desc("Disable inline spill hoisting"));
static cl::opt<bool>
RestrictStatepointRemat("restrict-statepoint-remat",
cl::init(false), cl::Hidden,
cl::desc("Restrict remat for statepoint operands"));
namespace {
class HoistSpillHelper : private LiveRangeEdit::Delegate {
MachineFunction &MF;
LiveIntervals &LIS;
LiveStacks &LSS;
AliasAnalysis *AA;
MachineDominatorTree &MDT;
MachineLoopInfo &Loops;
VirtRegMap &VRM;
MachineRegisterInfo &MRI;
const TargetInstrInfo &TII;
const TargetRegisterInfo &TRI;
const MachineBlockFrequencyInfo &MBFI;
InsertPointAnalysis IPA;
// Map from StackSlot to the LiveInterval of the original register.
// Note the LiveInterval of the original register may have been deleted
// after it is spilled. We keep a copy here to track the range where
// spills can be moved.
DenseMap<int, std::unique_ptr<LiveInterval>> StackSlotToOrigLI;
// Map from pair of (StackSlot and Original VNI) to a set of spills which
// have the same stackslot and have equal values defined by Original VNI.
// These spills are mergeable and are hoist candidates.
using MergeableSpillsMap =
MapVector<std::pair<int, VNInfo *>, SmallPtrSet<MachineInstr *, 16>>;
MergeableSpillsMap MergeableSpills;
/// This is the map from original register to a set containing all its
/// siblings. To hoist a spill to another BB, we need to find a live
/// sibling there and use it as the source of the new spill.
DenseMap<Register, SmallSetVector<Register, 16>> Virt2SiblingsMap;
bool isSpillCandBB(LiveInterval &OrigLI, VNInfo &OrigVNI,
MachineBasicBlock &BB, Register &LiveReg);
void rmRedundantSpills(
SmallPtrSet<MachineInstr *, 16> &Spills,
SmallVectorImpl<MachineInstr *> &SpillsToRm,
DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
void getVisitOrders(
MachineBasicBlock *Root, SmallPtrSet<MachineInstr *, 16> &Spills,
SmallVectorImpl<MachineDomTreeNode *> &Orders,
SmallVectorImpl<MachineInstr *> &SpillsToRm,
DenseMap<MachineDomTreeNode *, unsigned> &SpillsToKeep,
DenseMap<MachineDomTreeNode *, MachineInstr *> &SpillBBToSpill);
void runHoistSpills(LiveInterval &OrigLI, VNInfo &OrigVNI,
SmallPtrSet<MachineInstr *, 16> &Spills,
SmallVectorImpl<MachineInstr *> &SpillsToRm,
DenseMap<MachineBasicBlock *, unsigned> &SpillsToIns);
public:
HoistSpillHelper(MachineFunctionPass &pass, MachineFunction &mf,
VirtRegMap &vrm)
: MF(mf), LIS(pass.getAnalysis<LiveIntervals>()),
LSS(pass.getAnalysis<LiveStacks>()),
AA(&pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
MDT(pass.getAnalysis<MachineDominatorTree>()),
Loops(pass.getAnalysis<MachineLoopInfo>()), VRM(vrm),
MRI(mf.getRegInfo()), TII(*mf.getSubtarget().getInstrInfo()),
TRI(*mf.getSubtarget().getRegisterInfo()),
MBFI(pass.getAnalysis<MachineBlockFrequencyInfo>()),
IPA(LIS, mf.getNumBlockIDs()) {}
void addToMergeableSpills(MachineInstr &Spill, int StackSlot,
unsigned Original);
bool rmFromMergeableSpills(MachineInstr &Spill, int StackSlot);
void hoistAllSpills();
void LRE_DidCloneVirtReg(Register, Register) override;
};
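/// InlineSpiller - Spills a virtual register together with its connected
/// snippet copies, trying rematerialization and memory operand folding
/// before inserting actual spill and reload instructions.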
class InlineSpiller : public Spiller {
MachineFunction &MF;
LiveIntervals &LIS;
LiveStacks &LSS;
AliasAnalysis *AA;
MachineDominatorTree &MDT;
MachineLoopInfo &Loops;
VirtRegMap &VRM;
MachineRegisterInfo &MRI;
const TargetInstrInfo &TII;
const TargetRegisterInfo &TRI;
const MachineBlockFrequencyInfo &MBFI;
// Variables that are valid during spill(), but used by multiple methods.
LiveRangeEdit *Edit;
LiveInterval *StackInt;
int StackSlot;
Register Original;
// All registers to spill to StackSlot, including the main register.
SmallVector<Register, 8> RegsToSpill;
// All COPY instructions to/from snippets.
// They are ignored since both operands refer to the same stack slot.
SmallPtrSet<MachineInstr*, 8> SnippetCopies;
// Values that failed to remat at some point.
SmallPtrSet<VNInfo*, 8> UsedValues;
// Dead defs generated during spilling.
SmallVector<MachineInstr*, 8> DeadDefs;
// Object that records spill information and does the hoisting.
HoistSpillHelper HSpiller;
// Live range weight calculator.
VirtRegAuxInfo &VRAI;
~InlineSpiller() override = default;
public:
InlineSpiller(MachineFunctionPass &Pass, MachineFunction &MF, VirtRegMap &VRM,
VirtRegAuxInfo &VRAI)
: MF(MF), LIS(Pass.getAnalysis<LiveIntervals>()),
LSS(Pass.getAnalysis<LiveStacks>()),
AA(&Pass.getAnalysis<AAResultsWrapperPass>().getAAResults()),
MDT(Pass.getAnalysis<MachineDominatorTree>()),
Loops(Pass.getAnalysis<MachineLoopInfo>()), VRM(VRM),
MRI(MF.getRegInfo()), TII(*MF.getSubtarget().getInstrInfo()),
TRI(*MF.getSubtarget().getRegisterInfo()),
MBFI(Pass.getAnalysis<MachineBlockFrequencyInfo>()),
HSpiller(Pass, MF, VRM), VRAI(VRAI) {}
void spill(LiveRangeEdit &) override;
void postOptimization() override;
private:
bool isSnippet(const LiveInterval &SnipLI);
void collectRegsToSpill();
bool isRegToSpill(Register Reg) { return is_contained(RegsToSpill, Reg); }
bool isSibling(Register Reg);
bool hoistSpillInsideBB(LiveInterval &SpillLI, MachineInstr &CopyMI);
void eliminateRedundantSpills(LiveInterval &LI, VNInfo *VNI);
void markValueUsed(LiveInterval*, VNInfo*);
bool canGuaranteeAssignmentAfterRemat(Register VReg, MachineInstr &MI);
bool reMaterializeFor(LiveInterval &, MachineInstr &MI);
void reMaterializeAll();
bool coalesceStackAccess(MachineInstr *MI, Register Reg);
bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>>,
MachineInstr *LoadMI = nullptr);
void insertReload(Register VReg, SlotIndex, MachineBasicBlock::iterator MI);
void insertSpill(Register VReg, bool isKill, MachineBasicBlock::iterator MI);
void spillAroundUses(Register Reg);
void spillAll();
};
} // end anonymous namespace
Spiller::~Spiller() = default;
void Spiller::anchor() {}
Spiller *llvm::createInlineSpiller(MachineFunctionPass &Pass,
MachineFunction &MF, VirtRegMap &VRM,
VirtRegAuxInfo &VRAI) {
return new InlineSpiller(Pass, MF, VRM, VRAI);
}
//===----------------------------------------------------------------------===//
// Snippets
//===----------------------------------------------------------------------===//
// When spilling a virtual register, we also spill any snippets it is connected
// to. The snippets are small live ranges that only have a single real use,
// leftovers from live range splitting. Spilling them enables memory operand
// folding or tightens the live range around the single use.
//
// This minimizes register pressure and maximizes the store-to-load distance for
// spill slots, which can be important in tight loops.
/// isFullCopyOf - If MI is a COPY to or from Reg, return the other register,
/// otherwise return 0.
static Register isFullCopyOf(const MachineInstr &MI, Register Reg) {
if (!MI.isFullCopy())
return Register();
if (MI.getOperand(0).getReg() == Reg)
return MI.getOperand(1).getReg();
if (MI.getOperand(1).getReg() == Reg)
return MI.getOperand(0).getReg();
return Register();
}
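// Ensure LiveIntervals has an interval for every virtual register defined by
// MI. LiveIntervals::getInterval computes the interval on demand, so merely
// querying it here is enough to create intervals for newly inserted defs.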
static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
for (const MachineOperand &MO : MI.operands())
if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
LIS.getInterval(MO.getReg());
}
/// isSnippet - Identify if a live interval is a snippet that should be spilled.
/// It is assumed that SnipLI is a virtual register with the same original as
/// Edit->getReg().
bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
Register Reg = Edit->getReg();
// A snippet is a tiny live range with only a single instruction using it
// besides copies to/from Reg or spills/fills. We accept:
//
// %snip = COPY %Reg / FILL fi#
// %snip = USE %snip
// %Reg = COPY %snip / SPILL %snip, fi#
//
if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
return false;
MachineInstr *UseMI = nullptr;
// Check that all uses satisfy our criteria.
for (MachineRegisterInfo::reg_instr_nodbg_iterator
RI = MRI.reg_instr_nodbg_begin(SnipLI.reg()),
E = MRI.reg_instr_nodbg_end();
RI != E;) {
MachineInstr &MI = *RI++;
// Allow copies to/from Reg.
if (isFullCopyOf(MI, Reg))
continue;
// Allow stack slot loads.
int FI;
if (SnipLI.reg() == TII.isLoadFromStackSlot(MI, FI) && FI == StackSlot)
continue;
// Allow stack slot stores.
if (SnipLI.reg() == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot)
continue;
// Allow a single additional instruction.
if (UseMI && &MI != UseMI)
return false;
UseMI = &MI;
}
return true;
}
/// collectRegsToSpill - Collect live range snippets that only have a single
/// real use.
void InlineSpiller::collectRegsToSpill() {
Register Reg = Edit->getReg();
// Main register always spills.
RegsToSpill.assign(1, Reg);
SnippetCopies.clear();
// Snippets all have the same original, so there can't be any for an original
// register.
if (Original == Reg)
return;
for (MachineInstr &MI :
llvm::make_early_inc_range(MRI.reg_instructions(Reg))) {
Register SnipReg = isFullCopyOf(MI, Reg);
if (!isSibling(SnipReg))
continue;
LiveInterval &SnipLI = LIS.getInterval(SnipReg);
if (!isSnippet(SnipLI))
continue;
SnippetCopies.insert(&MI);
if (isRegToSpill(SnipReg))
continue;
RegsToSpill.push_back(SnipReg);
LLVM_DEBUG(dbgs() << "\talso spill snippet " << SnipLI << '\n');
++NumSnippets;
}
}
bool InlineSpiller::isSibling(Register Reg) {
return Reg.isVirtual() && VRM.getOriginal(Reg) == Original;
}
/// It is beneficial to spill to an earlier place in the same BB when there
/// is an alternative def earlier in the same MBB. Hoist the spill as far
/// as possible in SpillMBB; this can ease register pressure:
///
/// x = def
/// y = use x
/// s = copy x
///
/// Hoisting the spill of s to immediately after the def removes the
/// interference between x and y:
///
/// x = def
/// spill x
/// y = use killed x
///
/// This hoist only helps when the copy kills its source.
///
bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
MachineInstr &CopyMI) {
SlotIndex Idx = LIS.getInstructionIndex(CopyMI);
#ifndef NDEBUG
VNInfo *VNI = SpillLI.getVNInfoAt(Idx.getRegSlot());
assert(VNI && VNI->def == Idx.getRegSlot() && "Not defined by copy");
#endif
Register SrcReg = CopyMI.getOperand(1).getReg();
LiveInterval &SrcLI = LIS.getInterval(SrcReg);
VNInfo *SrcVNI = SrcLI.getVNInfoAt(Idx);
LiveQueryResult SrcQ = SrcLI.Query(Idx);
MachineBasicBlock *DefMBB = LIS.getMBBFromIndex(SrcVNI->def);
if (DefMBB != CopyMI.getParent() || !SrcQ.isKill())
return false;
// Conservatively extend the stack slot range to the range of the original
// value. We may be able to do better with stack slot coloring by being more
// careful here.
assert(StackInt && "No stack slot assigned yet.");
LiveInterval &OrigLI = LIS.getInterval(Original);
VNInfo *OrigVNI = OrigLI.getVNInfoAt(Idx);
StackInt->MergeValueInAsValue(OrigLI, OrigVNI, StackInt->getValNumInfo(0));
LLVM_DEBUG(dbgs() << "\tmerged orig valno " << OrigVNI->id << ": "
<< *StackInt << '\n');
// We are going to spill SrcVNI immediately after its def, so clear out
// any later spills of the same value.
eliminateRedundantSpills(SrcLI, SrcVNI);
MachineBasicBlock *MBB = LIS.getMBBFromIndex(SrcVNI->def);
MachineBasicBlock::iterator MII;
if (SrcVNI->isPHIDef())
MII = MBB->SkipPHIsLabelsAndDebug(MBB->begin());
else {
MachineInstr *DefMI = LIS.getInstructionFromIndex(SrcVNI->def);
assert(DefMI && "Defining instruction disappeared");
MII = DefMI;
++MII;
}
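// MIS marks the span of instructions that storeRegToStackSlot is about to
// insert, so the new spill code can be registered with LiveIntervals below.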
MachineInstrSpan MIS(MII, MBB);
// Insert spill without kill flag immediately after def.
TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
MRI.getRegClass(SrcReg), &TRI);
LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
for (const MachineInstr &MI : make_range(MIS.begin(), MII))
getVDefInterval(MI, LIS);
--MII; // Point to store instruction.
LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
// If only one store instruction is required for the spill, add it to the
// mergeable list. On X86 AMX, two instructions are required to store, so
// we disable merging in that case.
if (MIS.begin() == MII)
HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
++NumSpills;
return true;
}
/// eliminateRedundantSpills - SLI:VNI is known to be on the stack. Remove any
/// redundant spills of this value in SLI.reg and sibling copies.
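///
/// For example (illustrative pseudo-MIR), a later store of the same value
/// to the same slot through a sibling copy is redundant:
///
///   SPILL %x, fi#0
///   %y = COPY %x
///   SPILL %y, fi#0   ; same value is already on the stack
///
/// The redundant store is turned into a KILL and queued for deletion.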
void InlineSpiller::eliminateRedundantSpills(LiveInterval &SLI, VNInfo *VNI) {
assert(VNI && "Missing value");
SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
WorkList.push_back(std::make_pair(&SLI, VNI));
assert(StackInt && "No stack slot assigned yet.");
do {
LiveInterval *LI;
std::tie(LI, VNI) = WorkList.pop_back_val();
Register Reg = LI->reg();
LLVM_DEBUG(dbgs() << "Checking redundant spills for " << VNI->id << '@'
<< VNI->def << " in " << *LI << '\n');
// Regs to spill are taken care of.
if (isRegToSpill(Reg))
continue;
// Add all of VNI's live range to StackInt.
StackInt->MergeValueInAsValue(*LI, VNI, StackInt->getValNumInfo(0));
LLVM_DEBUG(dbgs() << "Merged to stack int: " << *StackInt << '\n');
// Find all spills and copies of VNI.
for (MachineInstr &MI :
llvm::make_early_inc_range(MRI.use_nodbg_instructions(Reg))) {
if (!MI.isCopy() && !MI.mayStore())
continue;
SlotIndex Idx = LIS.getInstructionIndex(MI);
if (LI->getVNInfoAt(Idx) != VNI)
continue;
// Follow sibling copies down the dominator tree.
if (Register DstReg = isFullCopyOf(MI, Reg)) {
if (isSibling(DstReg)) {
LiveInterval &DstLI = LIS.getInterval(DstReg);
VNInfo *DstVNI = DstLI.getVNInfoAt(Idx.getRegSlot());
assert(DstVNI && "Missing defined value");
assert(DstVNI->def == Idx.getRegSlot() && "Wrong copy def slot");
WorkList.push_back(std::make_pair(&DstLI, DstVNI));
}
continue;
}
// Erase spills.
int FI;
if (Reg == TII.isStoreToStackSlot(MI, FI) && FI == StackSlot) {
LLVM_DEBUG(dbgs() << "Redundant spill " << Idx << '\t' << MI);
// eliminateDeadDefs won't normally remove stores, so switch opcode.
MI.setDesc(TII.get(TargetOpcode::KILL));
DeadDefs.push_back(&MI);
++NumSpillsRemoved;
if (HSpiller.rmFromMergeableSpills(MI, StackSlot))
--NumSpills;
}
}
} while (!WorkList.empty());
}
//===----------------------------------------------------------------------===//
// Rematerialization
//===----------------------------------------------------------------------===//
/// markValueUsed - Remember that VNI failed to rematerialize, so its defining
/// instruction cannot be eliminated. See through snippet copies.
void InlineSpiller::markValueUsed(LiveInterval *LI, VNInfo *VNI) {
SmallVector<std::pair<LiveInterval*, VNInfo*>, 8> WorkList;
WorkList.push_back(std::make_pair(LI, VNI));
do {
std::tie(LI, VNI) = WorkList.pop_back_val();
if (!UsedValues.insert(VNI).second)
continue;
if (VNI->isPHIDef()) {
MachineBasicBlock *MBB = LIS.getMBBFromIndex(VNI->def);
for (MachineBasicBlock *P : MBB->predecessors()) {
VNInfo *PVNI = LI->getVNInfoBefore(LIS.getMBBEndIdx(P));
if (PVNI)
WorkList.push_back(std::make_pair(LI, PVNI));
}
continue;
}
// Follow snippet copies.
MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
if (!SnippetCopies.count(MI))
continue;
LiveInterval &SnipLI = LIS.getInterval(MI->getOperand(1).getReg());
assert(isRegToSpill(SnipLI.reg()) && "Unexpected register in copy");
VNInfo *SnipVNI = SnipLI.getVNInfoAt(VNI->def.getRegSlot(true));
assert(SnipVNI && "Snippet undefined before copy");
WorkList.push_back(std::make_pair(&SnipLI, SnipVNI));
} while (!WorkList.empty());
}
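/// Check whether a register rematerialized for MI is guaranteed to actually
/// be assignable afterwards; see the STATEPOINT discussion in the function
/// body for the case this guards against.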
bool InlineSpiller::canGuaranteeAssignmentAfterRemat(Register VReg,
MachineInstr &MI) {
if (!RestrictStatepointRemat)
return true;
// Here's a quick explanation of the problem we're trying to handle here:
// * There are some pseudo instructions with more vreg uses than there are
// physical registers on the machine.
// * This is normally handled by spilling the vreg, and folding the reload
// into the user instruction. (Thus decreasing the number of used vregs
// until the remainder can be assigned to physregs.)
// * However, since we may try to spill vregs in any order, we can end up
// trying to spill each operand to the instruction, and then rematting it
// instead. When that happens, the new live intervals (for the remats) are
// expected to be trivially assignable (i.e. RS_Done). However, since we
// may have more remats than physregs, we're guaranteed to fail to assign
// one.
// At the moment, we only handle this for STATEPOINTs since they're the only
// pseudo op where we've seen this. If we start seeing other instructions
// with the same problem, we need to revisit this.
if (MI.getOpcode() != TargetOpcode::STATEPOINT)
return true;
// For STATEPOINTs we allow rematerialization for fixed arguments only, hoping
// that the number of physical registers is enough to cover all fixed
// arguments. If that turns out not to be true, we need to revisit this.
for (unsigned Idx = StatepointOpers(&MI).getVarIdx(),
EndIdx = MI.getNumOperands();
Idx < EndIdx; ++Idx) {
MachineOperand &MO = MI.getOperand(Idx);
if (MO.isReg() && MO.getReg() == VReg)
return false;
}
return true;
}
/// reMaterializeFor - Attempt to rematerialize before MI instead of reloading.
bool InlineSpiller::reMaterializeFor(LiveInterval &VirtReg, MachineInstr &MI) {
// Analyze instruction
SmallVector<std::pair<MachineInstr *, unsigned>, 8> Ops;
VirtRegInfo RI = AnalyzeVirtRegInBundle(MI, VirtReg.reg(), &Ops);
if (!RI.Reads)
return false;
SlotIndex UseIdx = LIS.getInstructionIndex(MI).getRegSlot(true);
VNInfo *ParentVNI = VirtReg.getVNInfoAt(UseIdx.getBaseIndex());
if (!ParentVNI) {
LLVM_DEBUG(dbgs() << "\tadding <undef> flags: ");
for (MachineOperand &MO : MI.operands())
if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg())
MO.setIsUndef();
LLVM_DEBUG(dbgs() << UseIdx << '\t' << MI);
return true;
}
if (SnippetCopies.count(&MI))
return false;
LiveInterval &OrigLI = LIS.getInterval(Original);
VNInfo *OrigVNI = OrigLI.getVNInfoAt(UseIdx);
LiveRangeEdit::Remat RM(ParentVNI);
RM.OrigMI = LIS.getInstructionFromIndex(OrigVNI->def);
if (!Edit->canRematerializeAt(RM, OrigVNI, UseIdx, false)) {
markValueUsed(&VirtReg, ParentVNI);
LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
return false;
}
// If the instruction also writes VirtReg.reg, it had better not require the
// same register for uses and defs.
if (RI.Tied) {
markValueUsed(&VirtReg, ParentVNI);
LLVM_DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << MI);
return false;
}
// Before rematerializing into a register for a single instruction, try to
// fold a load into the instruction. That avoids allocating a new register.
if (RM.OrigMI->canFoldAsLoad() &&
foldMemoryOperand(Ops, RM.OrigMI)) {
Edit->markRematerialized(RM.ParentVNI);
++NumFoldedLoads;
return true;
}
// If we can't guarantee that we'll be able to actually assign the new vreg,
// we can't remat.
if (!canGuaranteeAssignmentAfterRemat(VirtReg.reg(), MI)) {
markValueUsed(&VirtReg, ParentVNI);
LLVM_DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << MI);
return false;
}
// Allocate a new register for the remat.
Register NewVReg = Edit->createFrom(Original);
// Finally we can rematerialize OrigMI before MI.
SlotIndex DefIdx =
Edit->rematerializeAt(*MI.getParent(), MI, NewVReg, RM, TRI);
// We take the DebugLoc from MI, since OrigMI may be attributed to a
// different source location.
auto *NewMI = LIS.getInstructionFromIndex(DefIdx);
NewMI->setDebugLoc(MI.getDebugLoc());
(void)DefIdx;
LLVM_DEBUG(dbgs() << "\tremat: " << DefIdx << '\t'
<< *LIS.getInstructionFromIndex(DefIdx));
// Replace operands
for (const auto &OpPair : Ops) {
MachineOperand &MO = OpPair.first->getOperand(OpPair.second);
if (MO.isReg() && MO.isUse() && MO.getReg() == VirtReg.reg()) {
MO.setReg(NewVReg);
MO.setIsKill();
}
}
LLVM_DEBUG(dbgs() << "\t " << UseIdx << '\t' << MI << '\n');
++NumRemats;
return true;
}
/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
if (!Edit->anyRematerializable(AA))
return;
UsedValues.clear();
// Try to remat before all uses of snippets.
bool anyRemat = false;
for (Register Reg : RegsToSpill) {
LiveInterval &LI = LIS.getInterval(Reg);
for (MachineInstr &MI : llvm::make_early_inc_range(MRI.reg_bundles(Reg))) {
// Debug values are not allowed to affect codegen.
if (MI.isDebugValue())
continue;
assert(!MI.isDebugInstr() && "Did not expect to find a use in debug "
"instruction that isn't a DBG_VALUE");
anyRemat |= reMaterializeFor(LI, MI);
}
}
if (!anyRemat)
return;
// Remove any values that were completely rematted.
for (Register Reg : RegsToSpill) {
LiveInterval &LI = LIS.getInterval(Reg);
for (VNInfo *VNI : llvm::make_range(LI.vni_begin(), LI.vni_end())) {
if (VNI->isUnused() || VNI->isPHIDef() || UsedValues.count(VNI))
continue;
MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
MI->addRegisterDead(Reg, &TRI);
if (!MI->allDefsAreDead())
continue;
LLVM_DEBUG(dbgs() << "All defs dead: " << *MI);
DeadDefs.push_back(MI);
}
}
// Eliminate dead code after remat. Note that some snippet copies may be
// deleted here.
if (DeadDefs.empty())
return;
LLVM_DEBUG(dbgs() << "Remat created " << DeadDefs.size() << " dead defs.\n");
Edit->eliminateDeadDefs(DeadDefs, RegsToSpill, AA);
// LiveRangeEdit::eliminateDeadDef is used to remove dead defining instructions
// after rematerialization. To remove a VNI for a vreg from its LiveInterval,
// LiveIntervals::removeVRegDefAt is used. However, after all non-PHI VNIs are
// removed, PHI VNIs are still left in the LiveInterval.
// So to get rid of an unused reg, we need to check whether it has any non-dbg
// references rather than whether it has a non-empty interval.
unsigned ResultPos = 0;
for (Register Reg : RegsToSpill) {
if (MRI.reg_nodbg_empty(Reg)) {
Edit->eraseVirtReg(Reg);
continue;
}
assert(LIS.hasInterval(Reg) &&
(!LIS.getInterval(Reg).empty() || !MRI.reg_nodbg_empty(Reg)) &&
"Empty and not used live-range?!");
RegsToSpill[ResultPos++] = Reg;
}
RegsToSpill.erase(RegsToSpill.begin() + ResultPos, RegsToSpill.end());
LLVM_DEBUG(dbgs() << RegsToSpill.size()
<< " registers to spill after remat.\n");
}
//===----------------------------------------------------------------------===//
// Spilling
//===----------------------------------------------------------------------===//
/// If MI is a load or store of StackSlot, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI, Register Reg) {
int FI = 0;
Register InstrReg = TII.isLoadFromStackSlot(*MI, FI);
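// isLoadFromStackSlot returns the destination register of a stack load, or a
// null Register otherwise, so a valid InstrReg means MI is a reload.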
bool IsLoad = InstrReg;
if (!IsLoad)
InstrReg = TII.isStoreToStackSlot(*MI, FI);
// We have a stack access. Is it the right register and slot?
if (InstrReg != Reg || FI != StackSlot)
return false;
if (!IsLoad)
HSpiller.rmFromMergeableSpills(*MI, StackSlot);
LLVM_DEBUG(dbgs() << "Coalescing stack access: " << *MI);
LIS.RemoveMachineInstrFromMaps(*MI);
MI->eraseFromParent();
if (IsLoad) {
++NumReloadsRemoved;
--NumReloads;
} else {
++NumSpillsRemoved;
--NumSpills;
}
return true;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD
// Dump the range of instructions from B to E with their slot indexes.
static void dumpMachineInstrRangeWithSlotIndex(MachineBasicBlock::iterator B,
MachineBasicBlock::iterator E,
LiveIntervals const &LIS,
const char *const header,
Register VReg = Register()) {
char NextLine = '\n';
char SlotIndent = '\t';
if (std::next(B) == E) {
NextLine = ' ';
SlotIndent = ' ';
}
dbgs() << '\t' << header << ": " << NextLine;
for (MachineBasicBlock::iterator I = B; I != E; ++I) {
SlotIndex Idx = LIS.getInstructionIndex(*I).getRegSlot();
// If a register was passed in and this instruction has it as a
// destination that is marked as an early clobber, print the
// early-clobber slot index.
if (VReg) {
MachineOperand *MO = I->findRegisterDefOperand(VReg);
if (MO && MO->isEarlyClobber())
Idx = Idx.getRegSlot(true);
}
dbgs() << SlotIndent << Idx << '\t' << *I;
}
}
#endif
/// foldMemoryOperand - Try folding stack slot references in Ops into their
/// instructions.
///
/// @param Ops Operand indices from AnalyzeVirtRegInBundle().
/// @param LoadMI Load instruction to use instead of stack slot when non-null.
/// @return True on success.
bool InlineSpiller::
foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
MachineInstr *LoadMI) {
if (Ops.empty())
return false;
// Don't attempt folding in bundles.
MachineInstr *MI = Ops.front().first;
if (Ops.back().first != MI || MI->isBundled())
return false;
bool WasCopy = MI->isCopy();
Register ImpReg;
// TII::foldMemoryOperand will do what we need here for statepoint (fold the
// load into the use and remove the corresponding def). We will replace uses
// of the removed def with loads (spillAroundUses).
// For that to work we need to untie the def and use so they can be passed
// through foldMemoryOperand, and signal foldPatchpoint that it is allowed
// to fold them.
bool UntieRegs = MI->getOpcode() == TargetOpcode::STATEPOINT;
// Spill subregs if the target allows it.
// We always want to spill subregs for stackmap/patchpoint pseudos.
bool SpillSubRegs = TII.isSubregFoldable() ||
MI->getOpcode() == TargetOpcode::STATEPOINT ||
MI->getOpcode() == TargetOpcode::PATCHPOINT ||
MI->getOpcode() == TargetOpcode::STACKMAP;
// TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
// operands.
SmallVector<unsigned, 8> FoldOps;
for (const auto &OpPair : Ops) {
unsigned Idx = OpPair.second;
assert(MI == OpPair.first && "Instruction conflict during operand folding");
MachineOperand &MO = MI->getOperand(Idx);
if (MO.isImplicit()) {
ImpReg = MO.getReg();
continue;
}
if (!SpillSubRegs && MO.getSubReg())
return false;
// We cannot fold a load instruction into a def.
if (LoadMI && MO.isDef())
return false;
// Tied use operands should not be passed to foldMemoryOperand.
if (UntieRegs || !MI->isRegTiedToDefOperand(Idx))
FoldOps.push_back(Idx);
}
// If we only have implicit uses, we won't be able to fold that.
// Moreover, TargetInstrInfo::foldMemoryOperand will assert if we try!
if (FoldOps.empty())
return false;
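// MIS captures any instructions the target creates while folding, so they
// can be added to the LiveIntervals maps afterwards.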
MachineInstrSpan MIS(MI, MI->getParent());
SmallVector<std::pair<unsigned, unsigned> > TiedOps;
if (UntieRegs)
for (unsigned Idx : FoldOps) {
MachineOperand &MO = MI->getOperand(Idx);
if (!MO.isTied())
continue;
unsigned Tied = MI->findTiedOperandIdx(Idx);
if (MO.isUse())
TiedOps.emplace_back(Tied, Idx);
else {
assert(MO.isDef() && "Tied to not use and def?");
TiedOps.emplace_back(Idx, Tied);
}
MI->untieRegOperand(Idx);
}
MachineInstr *FoldMI =
LoadMI ? TII.foldMemoryOperand(*MI, FoldOps, *LoadMI, &LIS)
: TII.foldMemoryOperand(*MI, FoldOps, StackSlot, &LIS, &VRM);
if (!FoldMI) {
// Re-tie operands.
for (auto Tied : TiedOps)
MI->tieOperands(Tied.first, Tied.second);
return false;
}
// Remove LIS for any dead defs in the original MI not in FoldMI.
for (MIBundleOperands MO(*MI); MO.isValid(); ++MO) {
if (!MO->isReg())
continue;
Register Reg = MO->getReg();
if (!Reg || Register::isVirtualRegister(Reg) || MRI.isReserved(Reg)) {
continue;
}
// Skip non-Defs, including undef uses and internal reads.
if (MO->isUse())
continue;
PhysRegInfo RI = AnalyzePhysRegInBundle(*FoldMI, Reg, &TRI);
if (RI.FullyDefined)
continue;
// FoldMI does not define this physreg. Remove the LI segment.
assert(MO->isDead() && "Cannot fold physreg def");
SlotIndex Idx = LIS.getInstructionIndex(*MI).getRegSlot();
LIS.removePhysRegDefAt(Reg.asMCReg(), Idx);
}
int FI;
if (TII.isStoreToStackSlot(*MI, FI) &&
HSpiller.rmFromMergeableSpills(*MI, FI))
--NumSpills;
LIS.ReplaceMachineInstrInMaps(*MI, *FoldMI);
// Update the call site info.
if (MI->isCandidateForCallSiteEntry())
MI->getMF()->moveCallSiteInfo(MI, FoldMI);
// If we've folded a store into an instruction labelled with debug-info,
// record a substitution from the old operand to the memory operand. Handle
// the simple common case where operand 0 is the one being folded, plus when
// the destination operand is also a tied def. More values could be
// substituted / preserved with more analysis.
if (MI->peekDebugInstrNum() && Ops[0].second == 0) {
// Helper lambda.
auto MakeSubstitution = [this,FoldMI,MI,&Ops]() {
// Substitute old operand zero with the new instruction's memory operand.
unsigned OldOperandNum = Ops[0].second;
unsigned NewNum = FoldMI->getDebugInstrNum();
unsigned OldNum = MI->getDebugInstrNum();
MF.makeDebugValueSubstitution({OldNum, OldOperandNum},
{NewNum, MachineFunction::DebugOperandMemNumber});
};
const MachineOperand &Op0 = MI->getOperand(Ops[0].second);
if (Ops.size() == 1 && Op0.isDef()) {
MakeSubstitution();
} else if (Ops.size() == 2 && Op0.isDef() && MI->getOperand(1).isTied() &&
Op0.getReg() == MI->getOperand(1).getReg()) {
MakeSubstitution();
}
} else if (MI->peekDebugInstrNum()) {
// This is a debug-labelled instruction, but the operand being folded isn't
// at operand zero. Most likely this means it's a load being folded in.
// Substitute any register defs from operand zero up to the one being
// folded -- past that point, we don't know what the new operand indexes
// will be.
MF.substituteDebugValuesForInst(*MI, *FoldMI, Ops[0].second);
}
MI->eraseFromParent();
// Insert any new instructions other than FoldMI into the LIS maps.
assert(!MIS.empty() && "Unexpected empty span of instructions!");
for (MachineInstr &MI : MIS)
if (&MI != FoldMI)
LIS.InsertMachineInstrInMaps(MI);
// TII.foldMemoryOperand may have left some implicit operands on the
// instruction. Strip them.
if (ImpReg)
for (unsigned i = FoldMI->getNumOperands(); i; --i) {
MachineOperand &MO = FoldMI->getOperand(i - 1);
if (!MO.isReg() || !MO.isImplicit())
break;
if (MO.getReg() == ImpReg)
FoldMI->RemoveOperand(i - 1);
}
LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MIS.end(), LIS,
"folded"));
if (!WasCopy)
++NumFolded;
else if (Ops.front().second == 0) {
++NumSpills;
// If only one store instruction is required for the spill, add it to the
// mergeable list. On X86 AMX, two instructions are required to store, so
// we disable merging in that case.
if (std::distance(MIS.begin(), MIS.end()) <= 1)
HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
} else
++NumReloads;
return true;
}
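/// insertReload - Insert a reload of NewVReg before MI.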
void InlineSpiller::insertReload(Register NewVReg,
SlotIndex Idx,
MachineBasicBlock::iterator MI) {
MachineBasicBlock &MBB = *MI->getParent();
MachineInstrSpan MIS(MI, &MBB);
TII.loadRegFromStackSlot(MBB, MI, NewVReg, StackSlot,
MRI.getRegClass(NewVReg), &TRI);
LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
LLVM_DEBUG(dumpMachineInstrRangeWithSlotIndex(MIS.begin(), MI, LIS, "reload",
NewVReg));
++NumReloads;