//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "inline-cost"
STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");
static cl::opt<int>
DefaultThreshold("inlinedefault-threshold", cl::Hidden, cl::init(225),
cl::ZeroOrMore,
cl::desc("Default amount of inlining to perform"));
static cl::opt<bool> PrintInstructionComments(
"print-instruction-comments", cl::Hidden, cl::init(false),
cl::desc("Prints comments for instruction based on inline cost analysis"));
static cl::opt<int> InlineThreshold(
"inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
cl::desc("Control the amount of inlining to perform (default = 225)"));
static cl::opt<int> HintThreshold(
"inlinehint-threshold", cl::Hidden, cl::init(325), cl::ZeroOrMore,
cl::desc("Threshold for inlining functions with inline hint"));
static cl::opt<int>
ColdCallSiteThreshold("inline-cold-callsite-threshold", cl::Hidden,
cl::init(45), cl::ZeroOrMore,
cl::desc("Threshold for inlining cold callsites"));
static cl::opt<bool> InlineEnableCostBenefitAnalysis(
"inline-enable-cost-benefit-analysis", cl::Hidden, cl::init(false),
cl::desc("Enable the cost-benefit analysis for the inliner"));
static cl::opt<int> InlineSavingsMultiplier(
"inline-savings-multiplier", cl::Hidden, cl::init(8), cl::ZeroOrMore,
cl::desc("Multiplier to multiply cycle savings by during inlining"));
static cl::opt<int>
InlineSizeAllowance("inline-size-allowance", cl::Hidden, cl::init(100),
cl::ZeroOrMore,
cl::desc("The maximum size of a callee that get's "
"inlined without sufficient cycle savings"));
// We introduce this threshold to help performance of instrumentation based
// PGO before we actually hook up inliner with analysis passes such as BPI and
// BFI.
static cl::opt<int> ColdThreshold(
"inlinecold-threshold", cl::Hidden, cl::init(45), cl::ZeroOrMore,
cl::desc("Threshold for inlining functions with cold attribute"));
static cl::opt<int>
HotCallSiteThreshold("hot-callsite-threshold", cl::Hidden, cl::init(3000),
cl::ZeroOrMore,
cl::desc("Threshold for hot callsites "));
static cl::opt<int> LocallyHotCallSiteThreshold(
"locally-hot-callsite-threshold", cl::Hidden, cl::init(525), cl::ZeroOrMore,
cl::desc("Threshold for locally hot callsites "));
static cl::opt<int> ColdCallSiteRelFreq(
"cold-callsite-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
cl::desc("Maximum block frequency, expressed as a percentage of caller's "
"entry frequency, for a callsite to be cold in the absence of "
"profile information."));
static cl::opt<int> HotCallSiteRelFreq(
"hot-callsite-rel-freq", cl::Hidden, cl::init(60), cl::ZeroOrMore,
cl::desc("Minimum block frequency, expressed as a multiple of caller's "
"entry frequency, for a callsite to be hot in the absence of "
"profile information."));
static cl::opt<int> CallPenalty(
"inline-call-penalty", cl::Hidden, cl::init(25),
cl::desc("Call penalty that is applied per callsite when inlining"));
static cl::opt<bool> OptComputeFullInlineCost(
"inline-cost-full", cl::Hidden, cl::init(false), cl::ZeroOrMore,
cl::desc("Compute the full inline cost of a call site even when the cost "
"exceeds the threshold."));
static cl::opt<bool> InlineCallerSupersetNoBuiltin(
"inline-caller-superset-nobuiltin", cl::Hidden, cl::init(true),
cl::ZeroOrMore,
cl::desc("Allow inlining when caller has a superset of callee's nobuiltin "
"attributes."));
static cl::opt<bool> DisableGEPConstOperand(
"disable-gep-const-evaluation", cl::Hidden, cl::init(false),
cl::desc("Disables evaluation of GetElementPtr with constant operands"));
namespace {
class InlineCostCallAnalyzer;
/// This function behaves more like CallBase::hasFnAttr: when it looks for the
/// requested attribute, it checks both the call instruction and the called
/// function (if it's available and operand bundles don't prohibit that).
Attribute getFnAttr(CallBase &CB, StringRef AttrKind) {
Attribute CallAttr = CB.getFnAttr(AttrKind);
if (CallAttr.isValid())
return CallAttr;
// Operand bundles override attributes on the called function, but don't
// override attributes directly present on the call instruction.
if (!CB.isFnAttrDisallowedByOpBundle(AttrKind))
if (const Function *F = CB.getCalledFunction())
return F->getFnAttribute(AttrKind);
return {};
}
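// Parse a string function attribute as a base-10 integer. Used below for
// attributes such as "function-inline-cost" and "call-threshold-bonus";
// returns None when the attribute is absent or not a valid integer.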
Optional<int> getStringFnAttrAsInt(CallBase &CB, StringRef AttrKind) {
Attribute Attr = getFnAttr(CB, AttrKind);
int AttrValue;
if (Attr.getValueAsString().getAsInteger(10, AttrValue))
return None;
return AttrValue;
}
// This struct is used to store information about inline cost of a
// particular instruction
struct InstructionCostDetail {
int CostBefore = 0;
int CostAfter = 0;
int ThresholdBefore = 0;
int ThresholdAfter = 0;
int getThresholdDelta() const { return ThresholdAfter - ThresholdBefore; }
int getCostDelta() const { return CostAfter - CostBefore; }
bool hasThresholdChanged() const { return ThresholdAfter != ThresholdBefore; }
};
class InlineCostAnnotationWriter : public AssemblyAnnotationWriter {
private:
InlineCostCallAnalyzer *const ICCA;
public:
InlineCostAnnotationWriter(InlineCostCallAnalyzer *ICCA) : ICCA(ICCA) {}
virtual void emitInstructionAnnot(const Instruction *I,
formatted_raw_ostream &OS) override;
};
/// Carry out call site analysis, in order to evaluate inlinability.
/// NOTE: the type is currently used as an implementation detail of functions
/// such as llvm::getInlineCost. Note the function_ref constructor parameters - the
/// expectation is that they come from the outer scope, from the wrapper
/// functions. If we want to support constructing CallAnalyzer objects where
/// lambdas are provided inline at construction, or where the object needs to
/// otherwise survive past the scope of the provided functions, we need to
/// revisit the argument types.
class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
typedef InstVisitor<CallAnalyzer, bool> Base;
friend class InstVisitor<CallAnalyzer, bool>;
protected:
virtual ~CallAnalyzer() = default;
/// The TargetTransformInfo available for this compilation.
const TargetTransformInfo &TTI;
/// Getter for the cache of @llvm.assume intrinsics.
function_ref<AssumptionCache &(Function &)> GetAssumptionCache;
/// Getter for BlockFrequencyInfo
function_ref<BlockFrequencyInfo &(Function &)> GetBFI;
/// Profile summary information.
ProfileSummaryInfo *PSI;
/// The called function.
Function &F;
// Cache the DataLayout since we use it a lot.
const DataLayout &DL;
/// The OptimizationRemarkEmitter available for this compilation.
OptimizationRemarkEmitter *ORE;
/// The candidate callsite being analyzed. Please do not use this to do
/// analysis in the caller function; we want the inline cost query to be
/// easily cacheable. Instead, use the cover function paramHasAttr.
CallBase &CandidateCall;
/// Extension points for handling callsite features.
/// Called before a basic block is analyzed.
virtual void onBlockStart(const BasicBlock *BB) {}
/// Called after a basic block has been analyzed.
virtual void onBlockAnalyzed(const BasicBlock *BB) {}
/// Called before an instruction is analyzed.
virtual void onInstructionAnalysisStart(const Instruction *I) {}
/// Called after an instruction has been analyzed.
virtual void onInstructionAnalysisFinish(const Instruction *I) {}
/// Called at the end of the analysis of the callsite. Return the outcome of
/// the analysis, i.e. 'InlineResult::success()' if the inlining may happen,
/// or the reason it can't.
virtual InlineResult finalizeAnalysis() { return InlineResult::success(); }
/// Called when we're about to start processing a basic block, and every time
/// we are done processing an instruction. Return true if there is no point in
/// continuing the analysis (e.g. we've already determined the call site is
/// too expensive to inline).
virtual bool shouldStop() { return false; }
/// Called before the analysis of the callee body starts (with callsite
/// contexts propagated). It checks callsite-specific information. Return a
/// reason analysis can't continue if that's the case, or 'true' if it may
/// continue.
virtual InlineResult onAnalysisStart() { return InlineResult::success(); }
/// Called if the analysis engine decides SROA cannot be done for the given
/// alloca.
virtual void onDisableSROA(AllocaInst *Arg) {}
/// Called when the analysis engine determines load elimination won't happen.
virtual void onDisableLoadElimination() {}
/// Called when we visit a CallBase, before the analysis starts. Return false
/// to stop further processing of the instruction.
virtual bool onCallBaseVisitStart(CallBase &Call) { return true; }
/// Called to account for a call.
virtual void onCallPenalty() {}
/// Called to account for the expectation the inlining would result in a load
/// elimination.
virtual void onLoadEliminationOpportunity() {}
/// Called to account for the cost of argument setup for the Call in the
/// callee's body (not the callsite currently under analysis).
virtual void onCallArgumentSetup(const CallBase &Call) {}
/// Called to account for a load relative intrinsic.
virtual void onLoadRelativeIntrinsic() {}
/// Called to account for a lowered call.
virtual void onLoweredCall(Function *F, CallBase &Call, bool IsIndirectCall) {
}
/// Account for a jump table of given size. Return false to stop further
/// processing of the switch instruction.
virtual bool onJumpTable(unsigned JumpTableSize) { return true; }
/// Account for a case cluster of given size. Return false to stop further
/// processing of the instruction.
virtual bool onCaseCluster(unsigned NumCaseCluster) { return true; }
/// Called at the end of processing a switch instruction, with the given
/// number of case clusters.
virtual void onFinalizeSwitch(unsigned JumpTableSize,
unsigned NumCaseCluster) {}
/// Called to account for any other instruction not specifically accounted
/// for.
virtual void onMissedSimplification() {}
/// Start accounting potential benefits due to SROA for the given alloca.
virtual void onInitializeSROAArg(AllocaInst *Arg) {}
/// Account SROA savings for the AllocaInst value.
virtual void onAggregateSROAUse(AllocaInst *V) {}
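// If \p V maps to an SROA-candidate alloca, either credit the use as an SROA
// saving (when \p DoNotDisable is true) or else disable SROA for that alloca.
// Returns true only when savings were credited.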
bool handleSROA(Value *V, bool DoNotDisable) {
// Check for SROA candidates in comparisons.
if (auto *SROAArg = getSROAArgForValueOrNull(V)) {
if (DoNotDisable) {
onAggregateSROAUse(SROAArg);
return true;
}
disableSROAForArg(SROAArg);
}
return false;
}
bool IsCallerRecursive = false;
bool IsRecursiveCall = false;
bool ExposesReturnsTwice = false;
bool HasDynamicAlloca = false;
bool ContainsNoDuplicateCall = false;
bool HasReturn = false;
bool HasIndirectBr = false;
bool HasUninlineableIntrinsic = false;
bool InitsVargArgs = false;
/// Number of bytes allocated statically by the callee.
uint64_t AllocatedSize = 0;
unsigned NumInstructions = 0;
unsigned NumVectorInstructions = 0;
/// While we walk the potentially-inlined instructions, we build up and
/// maintain a mapping of simplified values specific to this callsite. The
/// idea is to propagate any special information we have about arguments to
/// this call through the inlinable section of the function, and account for
/// likely simplifications post-inlining. The most important aspect we track
/// is CFG altering simplifications -- when we prove a basic block dead, that
/// can cause dramatic shifts in the cost of inlining a function.
DenseMap<Value *, Constant *> SimplifiedValues;
/// Keep track of the values which map back (through function arguments) to
/// allocas on the caller stack which could be simplified through SROA.
DenseMap<Value *, AllocaInst *> SROAArgValues;
/// Keep track of Allocas for which we believe we may get SROA optimization.
DenseSet<AllocaInst *> EnabledSROAAllocas;
/// Keep track of values which map to a pointer base and constant offset.
DenseMap<Value *, std::pair<Value *, APInt>> ConstantOffsetPtrs;
/// Keep track of dead blocks due to the constant arguments.
SetVector<BasicBlock *> DeadBlocks;
/// The mapping of the blocks to their known unique successors due to the
/// constant arguments.
DenseMap<BasicBlock *, BasicBlock *> KnownSuccessors;
/// Model the elimination of repeated loads that is expected to happen
/// whenever we simplify away the stores that would otherwise force those
/// loads to be repeated.
bool EnableLoadElimination = true;
/// Whether we allow inlining for recursive calls.
bool AllowRecursiveCall = false;
SmallPtrSet<Value *, 16> LoadAddrSet;
AllocaInst *getSROAArgForValueOrNull(Value *V) const {
auto It = SROAArgValues.find(V);
if (It == SROAArgValues.end() || EnabledSROAAllocas.count(It->second) == 0)
return nullptr;
return It->second;
}
// Custom simplification helper routines.
bool isAllocaDerivedArg(Value *V);
void disableSROAForArg(AllocaInst *SROAArg);
void disableSROA(Value *V);
void findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB);
void disableLoadElimination();
bool isGEPFree(GetElementPtrInst &GEP);
bool canFoldInboundsGEP(GetElementPtrInst &I);
bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
bool simplifyCallSite(Function *F, CallBase &Call);
template <typename Callable>
bool simplifyInstruction(Instruction &I, Callable Evaluate);
bool simplifyIntrinsicCallIsConstant(CallBase &CB);
ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);
/// Return true if the given argument to the function being considered for
/// inlining has the given attribute set either at the call site or the
/// function declaration. Primarily used to inspect call site specific
/// attributes since these can be more precise than the ones on the callee
/// itself.
bool paramHasAttr(Argument *A, Attribute::AttrKind Attr);
/// Return true if the given value is known non null within the callee if
/// inlined through this particular callsite.
bool isKnownNonNullInCallee(Value *V);
/// Return true if size growth is allowed when inlining the callee at \p Call.
bool allowSizeGrowth(CallBase &Call);
// Custom analysis routines.
InlineResult analyzeBlock(BasicBlock *BB,
SmallPtrSetImpl<const Value *> &EphValues);
// Disable several entry points to the visitor so we don't accidentally use
// them by declaring but not defining them here.
void visit(Module *);
void visit(Module &);
void visit(Function *);
void visit(Function &);
void visit(BasicBlock *);
void visit(BasicBlock &);
// Provide base case for our instruction visit.
bool visitInstruction(Instruction &I);
// Our visit overrides.
bool visitAlloca(AllocaInst &I);
bool visitPHI(PHINode &I);
bool visitGetElementPtr(GetElementPtrInst &I);
bool visitBitCast(BitCastInst &I);
bool visitPtrToInt(PtrToIntInst &I);
bool visitIntToPtr(IntToPtrInst &I);
bool visitCastInst(CastInst &I);
bool visitCmpInst(CmpInst &I);
bool visitSub(BinaryOperator &I);
bool visitBinaryOperator(BinaryOperator &I);
bool visitFNeg(UnaryOperator &I);
bool visitLoad(LoadInst &I);
bool visitStore(StoreInst &I);
bool visitExtractValue(ExtractValueInst &I);
bool visitInsertValue(InsertValueInst &I);
bool visitCallBase(CallBase &Call);
bool visitReturnInst(ReturnInst &RI);
bool visitBranchInst(BranchInst &BI);
bool visitSelectInst(SelectInst &SI);
bool visitSwitchInst(SwitchInst &SI);
bool visitIndirectBrInst(IndirectBrInst &IBI);
bool visitResumeInst(ResumeInst &RI);
bool visitCleanupReturnInst(CleanupReturnInst &RI);
bool visitCatchReturnInst(CatchReturnInst &RI);
bool visitUnreachableInst(UnreachableInst &I);
public:
CallAnalyzer(Function &Callee, CallBase &Call, const TargetTransformInfo &TTI,
function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
ProfileSummaryInfo *PSI = nullptr,
OptimizationRemarkEmitter *ORE = nullptr)
: TTI(TTI), GetAssumptionCache(GetAssumptionCache), GetBFI(GetBFI),
PSI(PSI), F(Callee), DL(F.getParent()->getDataLayout()), ORE(ORE),
CandidateCall(Call) {}
InlineResult analyze();
Optional<Constant *> getSimplifiedValue(Instruction *I) {
if (SimplifiedValues.find(I) != SimplifiedValues.end())
return SimplifiedValues[I];
return None;
}
// Keep a bunch of stats about the cost savings found so we can print them
// out when debugging.
unsigned NumConstantArgs = 0;
unsigned NumConstantOffsetPtrArgs = 0;
unsigned NumAllocaArgs = 0;
unsigned NumConstantPtrCmps = 0;
unsigned NumConstantPtrDiffs = 0;
unsigned NumInstructionsSimplified = 0;
void dump();
};
// Considering forming a binary search, we should find the number of nodes
// which is the same as the number of comparisons when lowered. For a given
// number of clusters, n, we can define a recursive function, f(n), to find
// the number of nodes in the tree. The recursion is:
// f(n) = 1 + f(n/2) + f(n - n/2), when n > 3,
// and f(n) = n, when n <= 3.
// This will lead to a binary tree where each leaf should be either f(2) or
// f(3) when n > 3. So, the number of comparisons from leaves should be n,
// while the number from non-leaf nodes should be:
// 2^(log2(n) - 1) - 1
// = 2^log2(n) * 2^-1 - 1
// = n / 2 - 1.
// Considering comparisons from leaf and non-leaf nodes, we can estimate the
// number of comparisons in a simple closed form:
// n + n / 2 - 1 = n * 3 / 2 - 1
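// For example, with n = 8 case clusters: f(2) = 2, f(4) = 1 + f(2) + f(2) = 5,
// and f(8) = 1 + f(4) + f(4) = 11, matching 3 * 8 / 2 - 1 = 11.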
int64_t getExpectedNumberOfCompare(int NumCaseCluster) {
return 3 * static_cast<int64_t>(NumCaseCluster) / 2 - 1;
}
/// FIXME: if it is necessary to derive from InlineCostCallAnalyzer, note
/// the FIXME in onLoweredCall, when instantiating an InlineCostCallAnalyzer
class InlineCostCallAnalyzer final : public CallAnalyzer {
const int CostUpperBound = INT_MAX - InlineConstants::InstrCost - 1;
const bool ComputeFullInlineCost;
int LoadEliminationCost = 0;
/// Bonus to be applied when percentage of vector instructions in callee is
/// high (see more details in updateThreshold).
int VectorBonus = 0;
/// Bonus to be applied when the callee has only one reachable basic block.
int SingleBBBonus = 0;
/// Tunable parameters that control the analysis.
const InlineParams &Params;
// This DenseMap stores the delta change in cost and threshold after
// accounting for the given instruction. The map is filled only with the
// flag PrintInstructionComments on.
DenseMap<const Instruction *, InstructionCostDetail> InstructionCostDetailMap;
/// Upper bound for the inlining cost. Bonuses are applied to account
/// for speculative "expected profit" of the inlining decision.
int Threshold = 0;
/// Attempt to evaluate indirect calls to boost their inline cost.
const bool BoostIndirectCalls;
/// Ignore the threshold when finalizing analysis.
const bool IgnoreThreshold;
// True if the cost-benefit-analysis-based inliner is enabled.
const bool CostBenefitAnalysisEnabled;
/// Inlining cost measured in abstract units; it accounts for all the
/// instructions expected to be executed for a given function invocation.
/// Instructions that are statically proven to be dead based on call-site
/// arguments are not counted here.
int Cost = 0;
// The cumulative cost at the beginning of the basic block being analyzed. At
// the end of analyzing each basic block, "Cost - CostAtBBStart" represents
// the size of that basic block.
int CostAtBBStart = 0;
// The static size of live but cold basic blocks. This is "static" in the
// sense that it's not weighted by profile counts at all.
int ColdSize = 0;
// Whether inlining is decided by cost-threshold analysis.
bool DecidedByCostThreshold = false;
// Whether inlining is decided by cost-benefit analysis.
bool DecidedByCostBenefit = false;
// The cost-benefit pair computed by cost-benefit analysis.
Optional<CostBenefitPair> CostBenefit = None;
bool SingleBB = true;
unsigned SROACostSavings = 0;
unsigned SROACostSavingsLost = 0;
/// The mapping of caller Alloca values to their accumulated cost savings. If
/// we have to disable SROA for one of the allocas, this tells us how much
/// cost must be added.
DenseMap<AllocaInst *, int> SROAArgCosts;
/// Return true if \p Call is a cold callsite.
bool isColdCallSite(CallBase &Call, BlockFrequencyInfo *CallerBFI);
/// Update Threshold based on callsite properties such as callee
/// attributes and callee hotness for PGO builds. The Callee is explicitly
/// passed to support analyzing indirect calls whose target is inferred by
/// analysis.
void updateThreshold(CallBase &Call, Function &Callee);
/// Return a higher threshold if \p Call is a hot callsite.
Optional<int> getHotCallSiteThreshold(CallBase &Call,
BlockFrequencyInfo *CallerBFI);
/// Handle a capped 'int' increment for Cost.
void addCost(int64_t Inc, int64_t UpperBound = INT_MAX) {
assert(UpperBound > 0 && UpperBound <= INT_MAX && "invalid upper bound");
Cost = std::min<int>(UpperBound, Cost + Inc);
}
void onDisableSROA(AllocaInst *Arg) override {
auto CostIt = SROAArgCosts.find(Arg);
if (CostIt == SROAArgCosts.end())
return;
addCost(CostIt->second);
SROACostSavings -= CostIt->second;
SROACostSavingsLost += CostIt->second;
SROAArgCosts.erase(CostIt);
}
void onDisableLoadElimination() override {
addCost(LoadEliminationCost);
LoadEliminationCost = 0;
}
bool onCallBaseVisitStart(CallBase &Call) override {
if (Optional<int> AttrCallThresholdBonus =
getStringFnAttrAsInt(Call, "call-threshold-bonus"))
Threshold += *AttrCallThresholdBonus;
if (Optional<int> AttrCallCost =
getStringFnAttrAsInt(Call, "call-inline-cost")) {
addCost(*AttrCallCost);
// Prevent further processing of the call since we want to override its
// inline cost, not just add to it.
return false;
}
return true;
}
void onCallPenalty() override { addCost(CallPenalty); }
void onCallArgumentSetup(const CallBase &Call) override {
// Pay the price of the argument setup. We account for the average 1
// instruction per call argument setup here.
addCost(Call.arg_size() * InlineConstants::InstrCost);
}
void onLoadRelativeIntrinsic() override {
// This is normally lowered to 4 LLVM instructions.
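// Only 3 are charged here; the fourth is presumably the InstrCost charged
// for the call instruction itself via onMissedSimplification.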
addCost(3 * InlineConstants::InstrCost);
}
void onLoweredCall(Function *F, CallBase &Call,
bool IsIndirectCall) override {
// We account for the average 1 instruction per call argument setup here.
addCost(Call.arg_size() * InlineConstants::InstrCost);
// If we have a constant that we are calling as a function, we can peer
// through it and see the function target. This happens not infrequently
// during devirtualization and so we want to give it a hefty bonus for
// inlining, but cap that bonus in the event that inlining wouldn't pan out.
// Pretend to inline the function, with a custom threshold.
if (IsIndirectCall && BoostIndirectCalls) {
auto IndirectCallParams = Params;
IndirectCallParams.DefaultThreshold =
InlineConstants::IndirectCallThreshold;
/// FIXME: if InlineCostCallAnalyzer is derived from, this may need
/// to instantiate the derived class.
InlineCostCallAnalyzer CA(*F, Call, IndirectCallParams, TTI,
GetAssumptionCache, GetBFI, PSI, ORE, false);
if (CA.analyze().isSuccess()) {
// We were able to inline the indirect call! Subtract the cost from the
// threshold to get the bonus we want to apply, but don't go below zero.
Cost -= std::max(0, CA.getThreshold() - CA.getCost());
}
} else
// Otherwise simply add the cost for merely making the call.
addCost(CallPenalty);
}
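// For example, under the accounting below a jump table with 16 entries costs
// (16 + 4) * InstrCost, while 10 case clusters lowered to a binary search
// cost (3 * 10 / 2 - 1) * 2 * InstrCost = 28 * InstrCost.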
void onFinalizeSwitch(unsigned JumpTableSize,
unsigned NumCaseCluster) override {
// If suitable for a jump table, consider the cost for the table size and
// branch to destination.
// The cost added here may be large, so it is capped at CostUpperBound.
if (JumpTableSize) {
int64_t JTCost =
static_cast<int64_t>(JumpTableSize) * InlineConstants::InstrCost +
4 * InlineConstants::InstrCost;
addCost(JTCost, static_cast<int64_t>(CostUpperBound));
return;
}
if (NumCaseCluster <= 3) {
// Suppose a comparison includes one compare and one conditional branch.
addCost(NumCaseCluster * 2 * InlineConstants::InstrCost);
return;
}
int64_t ExpectedNumberOfCompare =
getExpectedNumberOfCompare(NumCaseCluster);
int64_t SwitchCost =
ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;
addCost(SwitchCost, static_cast<int64_t>(CostUpperBound));
}
void onMissedSimplification() override {
addCost(InlineConstants::InstrCost);
}
void onInitializeSROAArg(AllocaInst *Arg) override {
assert(Arg != nullptr &&
"Should not initialize SROA costs for null value.");
SROAArgCosts[Arg] = 0;
}
void onAggregateSROAUse(AllocaInst *SROAArg) override {
auto CostIt = SROAArgCosts.find(SROAArg);
assert(CostIt != SROAArgCosts.end() &&
"expected this argument to have a cost");
CostIt->second += InlineConstants::InstrCost;
SROACostSavings += InlineConstants::InstrCost;
}
void onBlockStart(const BasicBlock *BB) override { CostAtBBStart = Cost; }
void onBlockAnalyzed(const BasicBlock *BB) override {
if (CostBenefitAnalysisEnabled) {
// Keep track of the static size of live but cold basic blocks. For now,
// we define a cold basic block to be one that's never executed.
assert(GetBFI && "GetBFI must be available");
BlockFrequencyInfo *BFI = &(GetBFI(F));
assert(BFI && "BFI must be available");
auto ProfileCount = BFI->getBlockProfileCount(BB);
assert(ProfileCount.hasValue());
if (ProfileCount.getValue() == 0)
ColdSize += Cost - CostAtBBStart;
}
auto *TI = BB->getTerminator();
// If we had any successors at this point, then post-inlining is likely to
// have them as well. Note that we assume any basic blocks which existed
// due to branches or switches which folded above will also fold after
// inlining.
if (SingleBB && TI->getNumSuccessors() > 1) {
// Take off the bonus we applied to the threshold.
Threshold -= SingleBBBonus;
SingleBB = false;
}
}
void onInstructionAnalysisStart(const Instruction *I) override {
// This function is called to store the initial cost of inlining before
// the given instruction was assessed.
if (!PrintInstructionComments)
return;
InstructionCostDetailMap[I].CostBefore = Cost;
InstructionCostDetailMap[I].ThresholdBefore = Threshold;
}
void onInstructionAnalysisFinish(const Instruction *I) override {
// This function is called to find new values of cost and threshold after
// the instruction has been assessed.
if (!PrintInstructionComments)
return;
InstructionCostDetailMap[I].CostAfter = Cost;
InstructionCostDetailMap[I].ThresholdAfter = Threshold;
}
bool isCostBenefitAnalysisEnabled() {
if (!PSI || !PSI->hasProfileSummary())
return false;
if (!GetBFI)
return false;
if (InlineEnableCostBenefitAnalysis.getNumOccurrences()) {
// Honor the explicit request from the user.
if (!InlineEnableCostBenefitAnalysis)
return false;
} else {
// Otherwise, require instrumentation profile.
if (!PSI->hasInstrumentationProfile())
return false;
}
auto *Caller = CandidateCall.getParent()->getParent();
if (!Caller->getEntryCount())
return false;
BlockFrequencyInfo *CallerBFI = &(GetBFI(*Caller));
if (!CallerBFI)
return false;
// For now, limit to hot call site.
if (!PSI->isHotCallSite(CandidateCall, CallerBFI))
return false;
// Make sure we have a nonzero entry count.
auto EntryCount = F.getEntryCount();
if (!EntryCount || !EntryCount->getCount())
return false;
BlockFrequencyInfo *CalleeBFI = &(GetBFI(F));
if (!CalleeBFI)
return false;
return true;
}
// Determine whether we should inline the given call site, taking into account
// both the size cost and the cycle savings. Return None if we don't have
// sufficient profiling information to decide.
Optional<bool> costBenefitAnalysis() {
if (!CostBenefitAnalysisEnabled)
return None;
// buildInlinerPipeline in the pass builder sets HotCallSiteThreshold to 0
// for the prelink phase of the AutoFDO + ThinLTO build. Honor the logic by
// falling back to the cost-based metric.
// TODO: Improve this hacky condition.
if (Threshold == 0)
return None;
assert(GetBFI);
BlockFrequencyInfo *CalleeBFI = &(GetBFI(F));
assert(CalleeBFI);
// The cycle savings expressed as the sum of InlineConstants::InstrCost
// multiplied by the estimated dynamic count of each instruction we can
// avoid. Savings come from the call site cost, such as argument setup and
// the call instruction, as well as the instructions that are folded.
//
// We use 128-bit APInt here to avoid potential overflow. This variable
// should stay well below 10^^24 (or 2^^80) in practice. This "worst" case
// assumes that we can avoid or fold a billion instructions, each with a
// profile count of 10^^15 -- roughly the number of cycles for a 24-hour
// period on a 4GHz machine.
APInt CycleSavings(128, 0);
for (auto &BB : F) {
APInt CurrentSavings(128, 0);
for (auto &I : BB) {
if (BranchInst *BI = dyn_cast<BranchInst>(&I)) {
// Count a conditional branch as savings if it becomes unconditional.
if (BI->isConditional() &&
isa_and_nonnull<ConstantInt>(
SimplifiedValues.lookup(BI->getCondition()))) {
CurrentSavings += InlineConstants::InstrCost;
}
} else if (Value *V = dyn_cast<Value>(&I)) {
// Count an instruction as savings if we can fold it.
if (SimplifiedValues.count(V)) {
CurrentSavings += InlineConstants::InstrCost;
}
}
}
auto ProfileCount = CalleeBFI->getBlockProfileCount(&BB);
assert(ProfileCount.hasValue());
CurrentSavings *= ProfileCount.getValue();
CycleSavings += CurrentSavings;
}
// Compute the cycle savings per call.
auto EntryProfileCount = F.getEntryCount();
assert(EntryProfileCount.hasValue() && EntryProfileCount->getCount());
auto EntryCount = EntryProfileCount->getCount();
CycleSavings += EntryCount / 2;
CycleSavings = CycleSavings.udiv(EntryCount);
// Compute the total savings for the call site.
auto *CallerBB = CandidateCall.getParent();
BlockFrequencyInfo *CallerBFI = &(GetBFI(*(CallerBB->getParent())));
CycleSavings += getCallsiteCost(this->CandidateCall, DL);
CycleSavings *= CallerBFI->getBlockProfileCount(CallerBB).getValue();
// Remove the cost of the cold basic blocks.
int Size = Cost - ColdSize;
// Allow tiny callees to be inlined regardless of whether they meet the
// savings threshold.
Size = Size > InlineSizeAllowance ? Size - InlineSizeAllowance : 1;
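// For example, with the default allowance of 100, a size of 120 becomes 20,
// while a size of 90 (within the allowance) clamps to 1.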
CostBenefit.emplace(APInt(128, Size), CycleSavings);
// Return true if the savings justify the cost of inlining. Specifically,
// we evaluate the following inequality:
//
//   CycleSavings      PSI->getOrCompHotCountThreshold()
//   ------------  >=  ---------------------------------
//       Size              InlineSavingsMultiplier
//
// Note that the left hand side is specific to a call site. The right hand
// side is a constant for the entire executable.
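// As an illustration with made-up numbers: CycleSavings = 1000, Size = 50,
// the default InlineSavingsMultiplier of 8, and a hot count threshold of 100
// give LHS = 8000 and RHS = 5000, so the call site qualifies for inlining.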
APInt LHS = CycleSavings;
LHS *= InlineSavingsMultiplier;
APInt RHS(128, PSI->getOrCompHotCountThreshold());
RHS *= Size;
return LHS.uge(RHS);
}
InlineResult finalizeAnalysis() override {
// Loops generally act a lot like calls in that they act like barriers to
// movement, require a certain amount of setup, etc. So when optimising for
// size, we penalise any call sites that perform loops. We do this after all
// other costs here, so will likely only be dealing with relatively small
// functions (and hence DT and LI will hopefully be cheap).
auto *Caller = CandidateCall.getFunction();
if (Caller->hasMinSize()) {
DominatorTree DT(F);
LoopInfo LI(DT);
int NumLoops = 0;
for (Loop *L : LI) {
// Ignore loops that will not be executed
if (DeadBlocks.count(L->getHeader()))
continue;
NumLoops++;
}
addCost(NumLoops * InlineConstants::LoopPenalty);
}
// We applied the maximum possible vector bonus at the beginning. Now,
// subtract the excess bonus, if any, from the Threshold before
// comparing against Cost.
if (NumVectorInstructions <= NumInstructions / 10)
Threshold -= VectorBonus;
else if (NumVectorInstructions <= NumInstructions / 2)
Threshold -= VectorBonus / 2;
if (Optional<int> AttrCost =
getStringFnAttrAsInt(CandidateCall, "function-inline-cost"))
Cost = *AttrCost;
if (Optional<int> AttrThreshold =
getStringFnAttrAsInt(CandidateCall, "function-inline-threshold"))
Threshold = *AttrThreshold;
if (auto Result = costBenefitAnalysis()) {
DecidedByCostBenefit = true;
if (Result.getValue())
return InlineResult::success();
else
return InlineResult::failure("Cost over threshold.");
}
if (IgnoreThreshold)
return InlineResult::success();
DecidedByCostThreshold = true;
return Cost < std::max(1, Threshold)
? InlineResult::success()
: InlineResult::failure("Cost over threshold.");
}
bool shouldStop() override {
if (IgnoreThreshold || ComputeFullInlineCost)
return false;
// Bail out the moment we cross the threshold. This means we'll under-count
// the cost, but only when undercounting doesn't matter.
if (Cost < Threshold)
return false;
DecidedByCostThreshold = true;
return true;
}
void onLoadEliminationOpportunity() override {
LoadEliminationCost += InlineConstants::InstrCost;
}
InlineResult onAnalysisStart() override {
// Perform some tweaks to the cost and threshold based on the direct
// callsite information.
// We want to more aggressively inline vector-dense kernels, so up the
// threshold, and we'll lower it if the % of vector instructions gets too
// low. Note that these bonuses are somewhat arbitrary and evolved over
// time by accident as much as because they are principled bonuses.
//
// FIXME: It would be nice to remove all such bonuses. At least it would be
// nice to base the bonus values on something more scientific.
assert(NumInstructions == 0);
assert(NumVectorInstructions == 0);
// Update the threshold based on callsite properties
updateThreshold(CandidateCall, F);
// While Threshold depends on commandline options that can take negative
// values, we want to enforce the invariant that the computed threshold and
// bonuses are non-negative.
assert(Threshold >= 0);
assert(SingleBBBonus >= 0);
assert(VectorBonus >= 0);
// Speculatively apply all possible bonuses to Threshold. If cost exceeds
// this Threshold at any point (and cost cannot decrease), we can stop
// processing the rest of the function body.
Threshold += (SingleBBBonus + VectorBonus);
// Give out bonuses for the callsite, as the instructions setting them up
// will be gone after inlining.
addCost(-getCallsiteCost(this->CandidateCall, DL));
// If this function uses the coldcc calling convention, prefer not to inline
// it.
if (F.getCallingConv() == CallingConv::Cold)
Cost += InlineConstants::ColdccPenalty;
// Check if we're done. This can happen due to bonuses and penalties.
if (Cost >= Threshold && !ComputeFullInlineCost)
return InlineResult::failure("high cost");
return InlineResult::success();
}
public:
InlineCostCallAnalyzer(
Function &Callee, CallBase &Call, const InlineParams &Params,
const TargetTransformInfo &TTI,
function_ref<AssumptionCache &(Function &)> GetAssumptionCache,
function_ref<BlockFrequencyInfo &(Function &)> GetBFI = nullptr,
ProfileSummaryInfo *PSI = nullptr,
OptimizationRemarkEmitter *ORE = nullptr, bool BoostIndirect = true,
bool IgnoreThreshold = false)
: CallAnalyzer(Callee, Call, TTI, GetAssumptionCache, GetBFI, PSI, ORE),
ComputeFullInlineCost(OptComputeFullInlineCost ||
Params.ComputeFullInlineCost || ORE ||
isCostBenefitAnalysisEnabled()),