llvm.org GIT mirror llvm / 39c1e3d
[LoopUnroll] Implement profile-based loop peeling

This implements PGO-driven loop peeling. The basic idea is that when the average dynamic trip-count of a loop is known, based on PGO, to be low, we can expect a performance win by peeling off the first several iterations of that loop. Unlike unrolling based on a known trip count, or a trip count multiple, this doesn't save us the conditional check and branch on each iteration. However, it does allow us to simplify the straight-line code we get (constant-folding, etc.). This is important given that we know that we will usually only hit this code, and not the actual loop.

This is currently disabled by default.

Differential Revision: https://reviews.llvm.org/D25963

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@288274 91177308-0d34-0410-b5e6-96231b3b80d8

Michael Kuperstein
9 changed file(s) with 637 addition(s) and 35 deletion(s).
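As a quick illustration of the idea (not part of the patch; the function and the trip-count assumption are made up), peeling a loop whose profile says it almost always runs a couple of iterations turns the common case into straight-line code:

    // Original loop: PGO says %k is almost always <= 2.
    void copy_ids(int *p, int k) {
      for (int i = 0; i < k; ++i)
        p[i] = i;
    }

    // Conceptual shape after peeling two iterations. Every peeled copy still
    // checks the exit condition, but each copy is straight-line code that
    // later passes can simplify (i is now the literal 0 or 1).
    void copy_ids_peeled(int *p, int k) {
      if (k <= 0) return;
      p[0] = 0;                     // peeled iteration 0
      if (k <= 1) return;
      p[1] = 1;                     // peeled iteration 1
      for (int i = 2; i < k; ++i)   // residual loop, rarely entered
        p[i] = i;
    }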
264264 /// transformation will select an unrolling factor based on the current cost
265265 /// threshold and other factors.
266266 unsigned Count;
267 /// A forced peeling factor (the number of bodies of the original loop
268 /// that should be peeled off before the loop body). When set to 0, the
269 /// unrolling transformation will select a peeling factor based on profile
270 /// information and other factors.
271 unsigned PeelCount;
267272 /// Default unroll count for loops with run-time trip count.
268273 unsigned DefaultUnrollRuntimeCount;
269274 // Set the maximum unrolling factor. The unrolling factor may be selected
297302 bool Force;
298303 /// Allow using trip count upper bound to unroll loops.
299304 bool UpperBound;
305 /// Allow peeling off loop iterations for loops with low dynamic tripcount.
306 bool AllowPeeling;
300307 };
301308
302309 /// \brief Get target-customized preferences for the generic loop unrolling
1515 #ifndef LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
1616 #define LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
1717
18 // Needed because we can't forward-declare the nested struct
19 // TargetTransformInfo::UnrollingPreferences
20 #include "llvm/Analysis/TargetTransformInfo.h"
1821
1922 namespace llvm {
2023
3235 bool UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
3336 bool AllowRuntime, bool AllowExpensiveTripCount,
3437 bool PreserveCondBr, bool PreserveOnlyFirst,
35 unsigned TripMultiple, LoopInfo *LI, ScalarEvolution *SE,
36 DominatorTree *DT, AssumptionCache *AC,
38 unsigned TripMultiple, unsigned PeelCount, LoopInfo *LI,
39 ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
3740 OptimizationRemarkEmitter *ORE, bool PreserveLCSSA);
3841
3942 bool UnrollRuntimeLoopRemainder(Loop *L, unsigned Count,
4245 ScalarEvolution *SE, DominatorTree *DT,
4346 bool PreserveLCSSA);
4447
48 void computePeelCount(Loop *L, unsigned LoopSize,
49 TargetTransformInfo::UnrollingPreferences &UP);
50
51 bool peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI, ScalarEvolution *SE,
52 DominatorTree *DT, bool PreserveLCSSA);
53
4554 MDNode *GetUnrollMetadata(MDNode *LoopID, StringRef Name);
4655 }
4756
2323 #include "llvm/Analysis/OptimizationDiagnosticInfo.h"
2424 #include "llvm/Analysis/ScalarEvolution.h"
2525 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
2726 #include "llvm/IR/DataLayout.h"
2827 #include "llvm/IR/Dominators.h"
2928 #include "llvm/IR/InstVisitor.h"
107106 "threshold, the loop is considered as flat and will be less "
108107 "aggressively unrolled."));
109108
109 static cl::opt<bool>
110 UnrollAllowPeeling("unroll-allow-peeling", cl::Hidden,
111 cl::desc("Allows loops to be peeled when the dynamic "
112 "trip count is known to be low."));
113
110114 /// A magic value for use with the Threshold parameter to indicate
111115 /// that the loop unroll should be performed regardless of how much
112116 /// code expansion would result.
128132 UP.PartialThreshold = UP.Threshold;
129133 UP.PartialOptSizeThreshold = 0;
130134 UP.Count = 0;
135 UP.PeelCount = 0;
131136 UP.DefaultUnrollRuntimeCount = 8;
132137 UP.MaxCount = UINT_MAX;
133138 UP.FullUnrollMaxCount = UINT_MAX;
138143 UP.AllowExpensiveTripCount = false;
139144 UP.Force = false;
140145 UP.UpperBound = false;
146 UP.AllowPeeling = false;
141147
142148 // Override with any target specific settings
143149 TTI.getUnrollingPreferences(L, UP);
170176 UP.Runtime = UnrollRuntime;
171177 if (UnrollMaxUpperBound == 0)
172178 UP.UpperBound = false;
179 if (UnrollAllowPeeling.getNumOccurrences() > 0)
180 UP.AllowPeeling = UnrollAllowPeeling;
173181
174182 // Apply user values provided by argument
175183 if (UserThreshold.hasValue()) {
752760 bool PragmaEnableUnroll = HasUnrollEnablePragma(L);
753761 bool ExplicitUnroll = PragmaCount > 0 || PragmaFullUnroll ||
754762 PragmaEnableUnroll || UserUnrollCount;
755
756 // Check if the runtime trip count is too small when profile is available.
757 if (L->getHeader()->getParent()->getEntryCount() && TripCount == 0) {
758 if (auto ProfileTripCount = getLoopEstimatedTripCount(L)) {
759 if (*ProfileTripCount < FlatLoopTripCountThreshold)
760 return false;
761 else
762 UP.AllowExpensiveTripCount = true;
763 }
764 }
765763
766764 if (ExplicitUnroll && TripCount != 0) {
767765 // If the loop has an unrolling pragma, we want to be more aggressive with
877875 << "Unable to fully unroll loop as directed by unroll(full) pragma "
878876 "because loop has a runtime trip count.");
879877
880 // 5th priority is runtime unrolling.
878 // 5th priority is loop peeling.
879 computePeelCount(L, LoopSize, UP);
880 if (UP.PeelCount) {
881 UP.Runtime = false;
882 UP.Count = 1;
883 return ExplicitUnroll;
884 }
885
886 // 6th priority is runtime unrolling.
881887 // Don't unroll a runtime trip count loop when it is disabled.
882888 if (HasRuntimeUnrollDisablePragma(L)) {
883889 UP.Count = 0;
884890 return false;
885891 }
892
893 // Check if the runtime trip count is too small when profile is available.
894 if (L->getHeader()->getParent()->getEntryCount()) {
895 if (auto ProfileTripCount = getLoopEstimatedTripCount(L)) {
896 if (*ProfileTripCount < FlatLoopTripCountThreshold)
897 return false;
898 else
899 UP.AllowExpensiveTripCount = true;
900 }
901 }
902
886903 // Reduce count based on the type of unrolling and the threshold values.
887904 UP.Runtime |= PragmaEnableUnroll || PragmaCount > 0 || UserUnrollCount;
888905 if (!UP.Runtime) {
10411058 // Unroll the loop.
10421059 if (!UnrollLoop(L, UP.Count, TripCount, UP.Force, UP.Runtime,
10431060 UP.AllowExpensiveTripCount, UseUpperBound, MaxOrZero,
1044 TripMultiple, LI, SE, &DT, &AC, &ORE, PreserveLCSSA))
1061 TripMultiple, UP.PeelCount, LI, SE, &DT, &AC, &ORE,
1062 PreserveLCSSA))
10451063 return false;
10461064
10471065 // If loop has an unroll count pragma or unrolled by explicitly set count
10481066 // mark loop as unrolled to prevent unrolling beyond that requested.
1049 if (IsCountSetExplicitly)
1067 // If the loop was peeled, we already "used up" the profile information
1068 // we had, so we don't want to unroll or peel again.
1069 if (IsCountSetExplicitly || UP.PeelCount)
10501070 SetLoopAlreadyUnrolled(L);
1071
10511072 return true;
10521073 }
10531074
2525 Local.cpp
2626 LoopSimplify.cpp
2727 LoopUnroll.cpp
28 LoopUnrollPeel.cpp
2829 LoopUnrollRuntime.cpp
2930 LoopUtils.cpp
3031 LoopVersioning.cpp
201201 /// runtime-unroll the loop if computing RuntimeTripCount will be expensive and
202202 /// AllowExpensiveTripCount is false.
203203 ///
204 /// If we want to perform PGO-based loop peeling, PeelCount is set to the
205 /// number of iterations we want to peel off.
206 ///
204207 /// The LoopInfo Analysis that is passed will be kept consistent.
205208 ///
206209 /// This utility preserves LoopInfo. It will also preserve ScalarEvolution and
208211 bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
209212 bool AllowRuntime, bool AllowExpensiveTripCount,
210213 bool PreserveCondBr, bool PreserveOnlyFirst,
211 unsigned TripMultiple, LoopInfo *LI, ScalarEvolution *SE,
212 DominatorTree *DT, AssumptionCache *AC,
213 OptimizationRemarkEmitter *ORE, bool PreserveLCSSA) {
214 unsigned TripMultiple, unsigned PeelCount, LoopInfo *LI,
215 ScalarEvolution *SE, DominatorTree *DT,
216 AssumptionCache *AC, OptimizationRemarkEmitter *ORE,
217 bool PreserveLCSSA) {
218
214219 BasicBlock *Preheader = L->getLoopPreheader();
215220 if (!Preheader) {
216221 DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n");
256261 if (TripCount != 0 && Count > TripCount)
257262 Count = TripCount;
258263
259 // Don't enter the unroll code if there is nothing to do. This way we don't
260 // need to support "partial unrolling by 1".
261 if (TripCount == 0 && Count < 2)
264 // Don't enter the unroll code if there is nothing to do.
265 if (TripCount == 0 && Count < 2 && PeelCount == 0)
262266 return false;
263267
264268 assert(Count > 0);
286290 // figure out the loop trip count and the unroll-runtime
287291 // flag is specified.
288292 bool RuntimeTripCount = (TripCount == 0 && Count > 0 && AllowRuntime);
293
294 assert((!RuntimeTripCount || !PeelCount) &&
295 "Did not expect runtime trip-count unrolling "
296 "and peeling for the same loop");
297
298 if (PeelCount)
299 peelLoop(L, PeelCount, LI, SE, DT, PreserveLCSSA);
289300
290301 // Loops containing convergent instructions must have a count that divides
291302 // their TripMultiple.
300311 "Unroll count must divide trip multiple if loop contains a "
301312 "convergent operation.");
302313 });
303 // Don't output the runtime loop remainder if Count is a multiple of
304 // TripMultiple. Such a remainder is never needed, and is unsafe if the loop
305 // contains a convergent instruction.
314
306315 if (RuntimeTripCount && TripMultiple % Count != 0 &&
307316 !UnrollRuntimeLoopRemainder(L, Count, AllowExpensiveTripCount,
308317 UnrollRuntimeEpilog, LI, SE, DT,
338347 L->getHeader())
339348 << "completely unrolled loop with "
340349 << NV("UnrollCount", TripCount) << " iterations");
350 } else if (PeelCount) {
351 DEBUG(dbgs() << "PEELING loop %" << Header->getName()
352 << " with iteration count " << PeelCount << "!\n");
353 ORE->emit(OptimizationRemark(DEBUG_TYPE, "Peeled", L->getStartLoc(),
354 L->getHeader())
355 << " peeled loop by " << NV("PeelCount", PeelCount)
356 << " iterations");
341357 } else {
342358 OptimizationRemark Diag(DEBUG_TYPE, "PartialUnrolled", L->getStartLoc(),
343359 L->getHeader());
627643 DEBUG(DT->verifyDomTree());
628644
629645 // Simplify any new induction variables in the partially unrolled loop.
630 if (SE && !CompletelyUnroll) {
646 if (SE && !CompletelyUnroll && Count > 1) {
631647 SmallVector<WeakVH, 16> DeadInsts;
632648 simplifyLoopIVs(L, SE, DT, LI, DeadInsts);
633649
0 //===-- LoopUnrollPeel.cpp - Loop peeling utilities -----------------------===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements some loop unrolling utilities for peeling loops
10 // with dynamically inferred (from PGO) trip counts. See LoopUnroll.cpp for
11 // unrolling loops with compile-time constant trip counts.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "llvm/ADT/Statistic.h"
16 #include "llvm/Analysis/LoopIterator.h"
17 #include "llvm/Analysis/LoopPass.h"
18 #include "llvm/Analysis/ScalarEvolution.h"
19 #include "llvm/Analysis/TargetTransformInfo.h"
20 #include "llvm/IR/BasicBlock.h"
21 #include "llvm/IR/Dominators.h"
22 #include "llvm/IR/MDBuilder.h"
23 #include "llvm/IR/Metadata.h"
24 #include "llvm/IR/Module.h"
25 #include "llvm/Support/Debug.h"
26 #include "llvm/Support/raw_ostream.h"
27 #include "llvm/Transforms/Scalar.h"
28 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
29 #include "llvm/Transforms/Utils/Cloning.h"
30 #include "llvm/Transforms/Utils/LoopUtils.h"
31 #include "llvm/Transforms/Utils/UnrollLoop.h"
32 #include
33
34 using namespace llvm;
35
36 #define DEBUG_TYPE "loop-unroll"
37 STATISTIC(NumPeeled, "Number of loops peeled");
38
39 static cl::opt<unsigned> UnrollPeelMaxCount(
40 "unroll-peel-max-count", cl::init(7), cl::Hidden,
41 cl::desc("Max average trip count which will cause loop peeling."));
42
43 static cl::opt<unsigned> UnrollForcePeelCount(
44 "unroll-force-peel-count", cl::init(0), cl::Hidden,
45 cl::desc("Force a peel count regardless of profiling information."));
46
47 // Check whether we are capable of peeling this loop.
48 static bool canPeel(Loop *L) {
49 // Make sure the loop is in simplified form
50 if (!L->isLoopSimplifyForm())
51 return false;
52
53 // Only peel loops that contain a single exit
54 if (!L->getExitingBlock() || !L->getUniqueExitBlock())
55 return false;
56
57 return true;
58 }
59
60 // Compute the number of iterations we want to peel off and record it in UP.PeelCount.
61 void llvm::computePeelCount(Loop *L, unsigned LoopSize,
62 TargetTransformInfo::UnrollingPreferences &UP) {
63 UP.PeelCount = 0;
64 if (!canPeel(L))
65 return;
66
67 // Only try to peel innermost loops.
68 if (!L->empty())
69 return;
70
71 // If the user provided a peel count, use that.
72 bool UserPeelCount = UnrollForcePeelCount.getNumOccurrences() > 0;
73 if (UserPeelCount) {
74 DEBUG(dbgs() << "Force-peeling first " << UnrollForcePeelCount
75 << " iterations.\n");
76 UP.PeelCount = UnrollForcePeelCount;
77 return;
78 }
79
80 // If we don't know the trip count, but have reason to believe the average
81 // trip count is low, peeling should be beneficial, since we will usually
82 // hit the peeled section.
83 // We only do this in the presence of profile information, since otherwise
84 // our estimates of the trip count are not reliable enough.
85 if (UP.AllowPeeling && L->getHeader()->getParent()->getEntryCount()) {
86 Optional<unsigned> PeelCount = getLoopEstimatedTripCount(L);
87 if (!PeelCount)
88 return;
89
90 DEBUG(dbgs() << "Profile-based estimated trip count is " << *PeelCount
91 << "\n");
92
93 if (*PeelCount) {
94 if ((*PeelCount <= UnrollPeelMaxCount) &&
95 (LoopSize * (*PeelCount + 1) <= UP.Threshold)) {
96 DEBUG(dbgs() << "Peeling first " << *PeelCount << " iterations.\n");
97 UP.PeelCount = *PeelCount;
98 return;
99 }
100 DEBUG(dbgs() << "Requested peel count: " << *PeelCount << "\n");
101 DEBUG(dbgs() << "Max peel count: " << UnrollPeelMaxCount << "\n");
102 DEBUG(dbgs() << "Peel cost: " << LoopSize * (*PeelCount + 1) << "\n");
103 DEBUG(dbgs() << "Max peel cost: " << UP.Threshold << "\n");
104 }
105 }
106
107 return;
108 }
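// Worked example with illustrative numbers (not from the patch): if LoopSize
// is 12, UP.Threshold is 150 and the profile-estimated trip count is 3,
// peeling is accepted because 3 <= UnrollPeelMaxCount (7) and the peeled-code
// cost 12 * (3 + 1) = 48 stays within the threshold. An estimate of 20
// iterations would fail the UnrollPeelMaxCount check and leave UP.PeelCount
// at 0.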
109
110 /// \brief Update the branch weights of the latch of a peeled-off loop
111 /// iteration.
112 /// This sets the branch weights for the latch of the recently peeled off loop
113 /// iteration correctly.
114 /// Our goal is to make sure that:
115 /// a) The total weight of all the copies of the loop body is preserved.
116 /// b) The total weight of the loop exit is preserved.
117 /// c) The body weight is reasonably distributed between the peeled iterations.
118 ///
119 /// \param Header The copy of the header block that belongs to next iteration.
120 /// \param LatchBR The copy of the latch branch that belongs to this iteration.
121 /// \param IterNumber The serial number of the iteration that was just
122 /// peeled off.
123 /// \param AvgIters The average number of iterations we expect the loop to have.
124 /// \param[in,out] PeeledHeaderWeight The total number of dynamic loop
125 /// iterations that are unaccounted for. As an input, it represents the number
126 /// of times we expect to enter the header of the iteration currently being
127 /// peeled off. The output is the number of times we expect to enter the
128 /// header of the next iteration.
129 static void updateBranchWeights(BasicBlock *Header, BranchInst *LatchBR,
130 unsigned IterNumber, unsigned AvgIters,
131 uint64_t &PeeledHeaderWeight) {
132
133 // FIXME: Pick a more realistic distribution.
134 // Currently the proportion of weight we assign to the fall-through
135 // side of the branch drops linearly with the iteration number, and we use
136 // a 0.9 fudge factor to make the drop-off less sharp...
137 if (PeeledHeaderWeight) {
138 uint64_t FallThruWeight =
139 PeeledHeaderWeight * ((float)(AvgIters - IterNumber) / AvgIters * 0.9);
140 uint64_t ExitWeight = PeeledHeaderWeight - FallThruWeight;
141 PeeledHeaderWeight -= ExitWeight;
142
143 unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0 : 1);
144 MDBuilder MDB(LatchBR->getContext());
145 MDNode *WeightNode =
146 HeaderIdx ? MDB.createBranchWeights(ExitWeight, FallThruWeight)
147 : MDB.createBranchWeights(FallThruWeight, ExitWeight);
148 LatchBR->setMetadata(LLVMContext::MD_prof, WeightNode);
149 }
150 }
151
152 /// \brief Clones the body of the loop L, putting it between \p InsertTop and \p
153 /// InsertBot.
154 /// \param IterNumber The serial number of the iteration currently being
155 /// peeled off.
156 /// \param Exit The exit block of the original loop.
157 /// \param[out] NewBlocks A list of the blocks in the newly created clone.
158 /// \param[out] VMap The value map between the loop and the new clone.
159 /// \param LoopBlocks A helper for DFS-traversal of the loop.
160 /// \param LVMap A value-map that maps instructions from the original loop to
161 /// instructions in the last peeled-off iteration.
162 static void cloneLoopBlocks(Loop *L, unsigned IterNumber, BasicBlock *InsertTop,
163 BasicBlock *InsertBot, BasicBlock *Exit,
164 SmallVectorImpl<BasicBlock *> &NewBlocks,
165 LoopBlocksDFS &LoopBlocks, ValueToValueMapTy &VMap,
166 ValueToValueMapTy &LVMap, LoopInfo *LI) {
167
168 BasicBlock *Header = L->getHeader();
169 BasicBlock *Latch = L->getLoopLatch();
170 BasicBlock *PreHeader = L->getLoopPreheader();
171
172 Function *F = Header->getParent();
173 LoopBlocksDFS::RPOIterator BlockBegin = LoopBlocks.beginRPO();
174 LoopBlocksDFS::RPOIterator BlockEnd = LoopBlocks.endRPO();
175 Loop *ParentLoop = L->getParentLoop();
176
177 // For each block in the original loop, create a new copy,
178 // and update the value map with the newly created values.
179 for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
180 BasicBlock *NewBB = CloneBasicBlock(*BB, VMap, ".peel", F);
181 NewBlocks.push_back(NewBB);
182
183 if (ParentLoop)
184 ParentLoop->addBasicBlockToLoop(NewBB, *LI);
185
186 VMap[*BB] = NewBB;
187 }
188
189 // Hook-up the control flow for the newly inserted blocks.
190 // The new header is hooked up directly to the "top", which is either
191 // the original loop preheader (for the first iteration) or the previous
192 // iteration's exiting block (for every other iteration)
193 InsertTop->getTerminator()->setSuccessor(0, cast<BasicBlock>(VMap[Header]));
194
195 // Similarly, for the latch:
196 // The original exiting edge is still hooked up to the loop exit.
197 // The backedge now goes to the "bottom", which is either the loop's real
198 // header (for the last peeled iteration) or the copied header of the next
199 // iteration (for every other iteration)
200 BranchInst *LatchBR =
201 cast<BranchInst>(cast<BasicBlock>(VMap[Latch])->getTerminator());
202 unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0 : 1);
203 LatchBR->setSuccessor(HeaderIdx, InsertBot);
204 LatchBR->setSuccessor(1 - HeaderIdx, Exit);
205
206 // The new copy of the loop body starts with a bunch of PHI nodes
207 // that pick an incoming value from either the preheader, or the previous
208 // loop iteration. Since this copy is no longer part of the loop, we
209 // resolve this statically:
210 // For the first iteration, we use the value from the preheader directly.
211 // For any other iteration, we replace the phi with the value generated by
212 // the immediately preceding clone of the loop body (which represents
213 // the previous iteration).
214 for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
215 PHINode *NewPHI = cast<PHINode>(VMap[&*I]);
216 if (IterNumber == 0) {
217 VMap[&*I] = NewPHI->getIncomingValueForBlock(PreHeader);
218 } else {
219 Value *LatchVal = NewPHI->getIncomingValueForBlock(Latch);
220 Instruction *LatchInst = dyn_cast<Instruction>(LatchVal);
221 if (LatchInst && L->contains(LatchInst))
222 VMap[&*I] = LVMap[LatchInst];
223 else
224 VMap[&*I] = LatchVal;
225 }
226 cast<BasicBlock>(VMap[Header])->getInstList().erase(NewPHI);
227 }
228
229 // Fix up the outgoing values - we need to add a value for the iteration
230 // we've just created. Note that this must happen *after* the incoming
231 // values are adjusted, since the value going out of the latch may also be
232 // a value coming into the header.
233 for (BasicBlock::iterator I = Exit->begin(); isa<PHINode>(I); ++I) {
234 PHINode *PHI = cast<PHINode>(I);
235 Value *LatchVal = PHI->getIncomingValueForBlock(Latch);
236 Instruction *LatchInst = dyn_cast<Instruction>(LatchVal);
237 if (LatchInst && L->contains(LatchInst))
238 LatchVal = VMap[LatchVal];
239 PHI->addIncoming(LatchVal, cast<BasicBlock>(VMap[Latch]));
240 }
241
242 // LastValueMap is updated with the values for the current loop
243 // which are used the next time this function is called.
244 for (const auto &KV : VMap)
245 LVMap[KV.first] = KV.second;
246 }
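// As a concrete illustration (taken from the IR tests further down in this
// diff): for the header PHI
//   %i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
// the first peeled copy (IterNumber == 0) resolves %i.05 to the constant 0,
// while every later copy resolves it, through LVMap, to the previous copy's
// clone of %inc.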
247
248 /// \brief Peel off the first \p PeelCount iterations of loop \p L.
249 ///
250 /// Note that this does not peel them off as a single straight-line block.
251 /// Rather, each iteration is peeled off separately, and needs to check the
252 /// exit condition.
253 /// For loops that dynamically execute \p PeelCount iterations or less
254 /// this provides a benefit, since the peeled off iterations, which account
255 /// for the bulk of dynamic execution, can be further simplified by scalar
256 /// optimizations.
257 bool llvm::peelLoop(Loop *L, unsigned PeelCount, LoopInfo *LI,
258 ScalarEvolution *SE, DominatorTree *DT,
259 bool PreserveLCSSA) {
260 if (!canPeel(L))
261 return false;
262
263 LoopBlocksDFS LoopBlocks(L);
264 LoopBlocks.perform(LI);
265
266 BasicBlock *Header = L->getHeader();
267 BasicBlock *PreHeader = L->getLoopPreheader();
268 BasicBlock *Latch = L->getLoopLatch();
269 BasicBlock *Exit = L->getUniqueExitBlock();
270
271 Function *F = Header->getParent();
272
273 // Set up all the necessary basic blocks. It is convenient to split the
274 // preheader into 3 parts - two blocks to anchor the peeled copy of the loop
275 // body, and a new preheader for the "real" loop.
276
277 // Peeling the first iteration transforms
278 //
279 // PreHeader:
280 // ...
281 // Header:
282 // LoopBody
283 // If (cond) goto Header
284 // Exit:
285 //
286 // into
287 //
288 // InsertTop:
289 // LoopBody
290 // If (!cond) goto Exit
291 // InsertBot:
292 // NewPreHeader:
293 // ...
294 // Header:
295 // LoopBody
296 // If (cond) goto Header
297 // Exit:
298 //
299 // Each following iteration will split the current bottom anchor in two,
300 // and put the new copy of the loop body between these two blocks. That is,
301 // after peeling another iteration from the example above, we'll split
302 // InsertBot, and get:
303 //
304 // InsertTop:
305 // LoopBody
306 // If (!cond) goto Exit
307 // InsertBot:
308 // LoopBody
309 // If (!cond) goto Exit
310 // InsertBot.next:
311 // NewPreHeader:
312 // ...
313 // Header:
314 // LoopBody
315 // If (cond) goto Header
316 // Exit:
317
318 BasicBlock *InsertTop = SplitEdge(PreHeader, Header, DT, LI);
319 BasicBlock *InsertBot =
320 SplitBlock(InsertTop, InsertTop->getTerminator(), DT, LI);
321 BasicBlock *NewPreHeader =
322 SplitBlock(InsertBot, InsertBot->getTerminator(), DT, LI);
323
324 InsertTop->setName(Header->getName() + ".peel.begin");
325 InsertBot->setName(Header->getName() + ".peel.next");
326 NewPreHeader->setName(PreHeader->getName() + ".peel.newph");
327
328 ValueToValueMapTy LVMap;
329
330 // If we have branch weight information, we'll want to update it for the
331 // newly created branches.
332 BranchInst *LatchBR =
333 cast<BranchInst>(cast<BasicBlock>(Latch)->getTerminator());
334 unsigned HeaderIdx = (LatchBR->getSuccessor(0) == Header ? 0 : 1);
335
336 uint64_t TrueWeight, FalseWeight;
337 uint64_t ExitWeight = 0, BackEdgeWeight = 0;
338 if (LatchBR->extractProfMetadata(TrueWeight, FalseWeight)) {
339 ExitWeight = HeaderIdx ? TrueWeight : FalseWeight;
340 BackEdgeWeight = HeaderIdx ? FalseWeight : TrueWeight;
341 }
342
343 // For each peeled-off iteration, make a copy of the loop.
344 for (unsigned Iter = 0; Iter < PeelCount; ++Iter) {
345 SmallVector<BasicBlock *, 8> NewBlocks;
346 ValueToValueMapTy VMap;
347
348 // The exit weight of the previous iteration is the header entry weight
349 // of the current iteration. So this is exactly how many dynamic iterations
350 // the current peeled-off static iteration uses up.
351 // FIXME: due to the way the distribution is constructed, we need a
352 // guard here to make sure we don't end up with non-positive weights.
353 if (ExitWeight < BackEdgeWeight)
354 BackEdgeWeight -= ExitWeight;
355 else
356 BackEdgeWeight = 1;
357
358 cloneLoopBlocks(L, Iter, InsertTop, InsertBot, Exit,
359 NewBlocks, LoopBlocks, VMap, LVMap, LI);
360 updateBranchWeights(InsertBot, cast<BranchInst>(VMap[LatchBR]), Iter,
361 PeelCount, ExitWeight);
362
363 InsertTop = InsertBot;
364 InsertBot = SplitBlock(InsertBot, InsertBot->getTerminator(), DT, LI);
365 InsertBot->setName(Header->getName() + ".peel.next");
366
367 F->getBasicBlockList().splice(InsertTop->getIterator(),
368 F->getBasicBlockList(),
369 NewBlocks[0]->getIterator(), F->end());
370
371 // Remap to use values from the current iteration instead of the
372 // previous one.
373 remapInstructionsInBlocks(NewBlocks, VMap);
374 }
375
376 // Now adjust the phi nodes in the loop header to get their initial values
377 // from the last peeled-off iteration instead of the preheader.
378 for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
379 PHINode *PHI = cast<PHINode>(I);
380 Value *NewVal = PHI->getIncomingValueForBlock(Latch);
381 Instruction *LatchInst = dyn_cast<Instruction>(NewVal);
382 if (LatchInst && L->contains(LatchInst))
383 NewVal = LVMap[LatchInst];
384
385 PHI->setIncomingValue(PHI->getBasicBlockIndex(NewPreHeader), NewVal);
386 }
387
388 // Adjust the branch weights on the loop exit.
389 if (ExitWeight) {
390 MDBuilder MDB(LatchBR->getContext());
391 MDNode *WeightNode =
392 HeaderIdx ? MDB.createBranchWeights(ExitWeight, BackEdgeWeight)
393 : MDB.createBranchWeights(BackEdgeWeight, ExitWeight);
394 LatchBR->setMetadata(LLVMContext::MD_prof, WeightNode);
395 }
396
397 // If the loop is nested, we changed the parent loop, update SE.
398 if (Loop *ParentLoop = L->getParentLoop())
399 SE->forgetLoop(ParentLoop);
400
401 NumPeeled++;
402
403 return true;
404 }
10891089 // from the raw counts to provide a better probability estimate. Remove
10901090 // the adjustment by subtracting 1 from both weights.
10911091 uint64_t TrueVal, FalseVal;
1092 if (!LatchBR->extractProfMetadata(TrueVal, FalseVal) || (TrueVal <= 1) ||
1093 (FalseVal <= 1))
1092 if (!LatchBR->extractProfMetadata(TrueVal, FalseVal))
10941093 return None;
10951094
1096 TrueVal -= 1;
1097 FalseVal -= 1;
1098
1099 // Divide the count of the backedge by the count of the edge exiting the loop.
1095 if (!TrueVal || !FalseVal)
1096 return 0;
1097
1098 // Divide the count of the backedge by the count of the edge exiting the loop,
1099 // rounding to nearest.
11001100 if (LatchBR->getSuccessor(0) == L->getHeader())
1101 return TrueVal / FalseVal;
1101 return (TrueVal + (FalseVal / 2)) / FalseVal;
11021102 else
1103 return FalseVal / TrueVal;
1104 }
1103 return (FalseVal + (TrueVal / 2)) / TrueVal;
1104 }
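A brief worked example of the rounding-to-nearest estimate introduced here (the 7:2 pair is made up; the 3001:1001 pair comes from the peeling test below): a latch with backedge weight 7 and exit weight 2 now yields (7 + 2/2) / 2 = 4 rather than the truncated 3, and the 3001:1001 weights below yield (3001 + 500) / 1001 = 3 estimated iterations.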
0 ; RUN: opt < %s -S -debug-only=loop-unroll -loop-unroll -unroll-allow-peeling 2>&1 | FileCheck %s
1 ; REQUIRES: asserts
2
3 ; Make sure we use the profile information correctly to peel-off 3 iterations
4 ; from the loop, and update the branch weights for the peeled loop properly.
5 ; CHECK: PEELING loop %for.body with iteration count 3!
6 ; CHECK-LABEL: @basic
7 ; CHECK: br i1 %{{.*}}, label %[[NEXT0:.*]], label %for.cond.for.end_crit_edge, !prof !1
8 ; CHECK: [[NEXT0]]:
9 ; CHECK: br i1 %{{.*}}, label %[[NEXT1:.*]], label %for.cond.for.end_crit_edge, !prof !2
10 ; CHECK: [[NEXT1]]:
11 ; CHECK: br i1 %{{.*}}, label %[[NEXT2:.*]], label %for.cond.for.end_crit_edge, !prof !3
12 ; CHECK: [[NEXT2]]:
13 ; CHECK: br i1 %{{.*}}, label %for.body, label %{{.*}}, !prof !4
14
15 define void @basic(i32* %p, i32 %k) #0 !prof !0 {
16 entry:
17 %cmp3 = icmp slt i32 0, %k
18 br i1 %cmp3, label %for.body.lr.ph, label %for.end
19
20 for.body.lr.ph: ; preds = %entry
21 br label %for.body
22
23 for.body: ; preds = %for.body.lr.ph, %for.body
24 %i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
25 %p.addr.04 = phi i32* [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
26 %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.04, i32 1
27 store i32 %i.05, i32* %p.addr.04, align 4
28 %inc = add nsw i32 %i.05, 1
29 %cmp = icmp slt i32 %inc, %k
30 br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge, !prof !1
31
32 for.cond.for.end_crit_edge: ; preds = %for.body
33 br label %for.end
34
35 for.end: ; preds = %for.cond.for.end_crit_edge, %entry
36 ret void
37 }
38
39 !0 = !{!"function_entry_count", i64 1}
40 !1 = !{!"branch_weights", i32 3001, i32 1001}
41
42 ;CHECK: !1 = !{!"branch_weights", i32 900, i32 101}
43 ;CHECK: !2 = !{!"branch_weights", i32 540, i32 360}
44 ;CHECK: !3 = !{!"branch_weights", i32 162, i32 378}
45 ;CHECK: !4 = !{!"branch_weights", i32 560, i32 162}
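; A rough sanity check of the checked weights above, assuming the linear 0.9
; drop-off used by updateBranchWeights (estimated trip count 3, exit weight
; 1001 and backedge weight 3001 taken from the input branch_weights):
;   iter 0: fall-through = 1001 * 3/3 * 0.9 = 900, exit = 1001 - 900 = 101    (!1)
;   iter 1: fall-through =  900 * 2/3 * 0.9 = 540, exit =  900 - 540 = 360    (!2)
;   iter 2: fall-through =  540 * 1/3 * 0.9 = 162, exit =  540 - 162 = 378    (!3)
;   remaining loop latch: backedge = 3001 - 1001 - 900 - 540 = 560, exit = 162 (!4)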
46
0 ; RUN: opt < %s -S -loop-unroll -unroll-force-peel-count=3 -simplifycfg -instcombine | FileCheck %s
1
2 ; Basic loop peeling - check that we can peel-off the first 3 loop iterations
3 ; when explicitly requested.
4 ; CHECK-LABEL: @basic
5 ; CHECK: %[[CMP0:.*]] = icmp sgt i32 %k, 0
6 ; CHECK: br i1 %[[CMP0]], label %[[NEXT0:.*]], label %for.end
7 ; CHECK: [[NEXT0]]:
8 ; CHECK: store i32 0, i32* %p, align 4
9 ; CHECK: %[[CMP1:.*]] = icmp eq i32 %k, 1
10 ; CHECK: br i1 %[[CMP1]], label %for.end, label %[[NEXT1:.*]]
11 ; CHECK: [[NEXT1]]:
12 ; CHECK: %[[INC1:.*]] = getelementptr inbounds i32, i32* %p, i64 1
13 ; CHECK: store i32 1, i32* %[[INC1]], align 4
14 ; CHECK: %[[CMP2:.*]] = icmp sgt i32 %k, 2
15 ; CHECK: br i1 %[[CMP2]], label %[[NEXT2:.*]], label %for.end
16 ; CHECK: [[NEXT2]]:
17 ; CHECK: %[[INC2:.*]] = getelementptr inbounds i32, i32* %p, i64 2
18 ; CHECK: store i32 2, i32* %[[INC2]], align 4
19 ; CHECK: %[[CMP3:.*]] = icmp eq i32 %k, 3
20 ; CHECK: br i1 %[[CMP3]], label %for.end, label %[[LOOP:.*]]
21 ; CHECK: [[LOOP]]:
22 ; CHECK: %[[IV:.*]] = phi i32 [ {{.*}}, %[[LOOP]] ], [ 3, %[[NEXT2]] ]
23
24 define void @basic(i32* %p, i32 %k) #0 {
25 entry:
26 %cmp3 = icmp slt i32 0, %k
27 br i1 %cmp3, label %for.body.lr.ph, label %for.end
28
29 for.body.lr.ph: ; preds = %entry
30 br label %for.body
31
32 for.body: ; preds = %for.body.lr.ph, %for.body
33 %i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
34 %p.addr.04 = phi i32* [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
35 %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.04, i32 1
36 store i32 %i.05, i32* %p.addr.04, align 4
37 %inc = add nsw i32 %i.05, 1
38 %cmp = icmp slt i32 %inc, %k
39 br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
40
41 for.cond.for.end_crit_edge: ; preds = %for.body
42 br label %for.end
43
44 for.end: ; preds = %for.cond.for.end_crit_edge, %entry
45 ret void
46 }
47
48 ; Make sure peeling works correctly when a value defined in a loop is used
49 ; in later code - we need to correctly plumb the phi depending on which
50 ; iteration is actually used.
51 ; CHECK-LABEL: @output
52 ; CHECK: %[[CMP0:.*]] = icmp sgt i32 %k, 0
53 ; CHECK: br i1 %[[CMP0]], label %[[NEXT0:.*]], label %for.end
54 ; CHECK: [[NEXT0]]:
55 ; CHECK: store i32 0, i32* %p, align 4
56 ; CHECK: %[[CMP1:.*]] = icmp eq i32 %k, 1
57 ; CHECK: br i1 %[[CMP1]], label %for.end, label %[[NEXT1:.*]]
58 ; CHECK: [[NEXT1]]:
59 ; CHECK: %[[INC1:.*]] = getelementptr inbounds i32, i32* %p, i64 1
60 ; CHECK: store i32 1, i32* %[[INC1]], align 4
61 ; CHECK: %[[CMP2:.*]] = icmp sgt i32 %k, 2
62 ; CHECK: br i1 %[[CMP2]], label %[[NEXT2:.*]], label %for.end
63 ; CHECK: [[NEXT2]]:
64 ; CHECK: %[[INC2:.*]] = getelementptr inbounds i32, i32* %p, i64 2
65 ; CHECK: store i32 2, i32* %[[INC2]], align 4
66 ; CHECK: %[[CMP3:.*]] = icmp eq i32 %k, 3
67 ; CHECK: br i1 %[[CMP3]], label %for.end, label %[[LOOP:.*]]
68 ; CHECK: [[LOOP]]:
69 ; CHECK: %[[IV:.*]] = phi i32 [ %[[IV:.*]], %[[LOOP]] ], [ 3, %[[NEXT2]] ]
70 ; CHECK: %ret = phi i32 [ 0, %entry ], [ 1, %[[NEXT0]] ], [ 2, %[[NEXT1]] ], [ 3, %[[NEXT2]] ], [ %[[IV]], %[[LOOP]] ]
71 ; CHECK: ret i32 %ret
72 define i32 @output(i32* %p, i32 %k) #0 {
73 entry:
74 %cmp3 = icmp slt i32 0, %k
75 br i1 %cmp3, label %for.body.lr.ph, label %for.end
76
77 for.body.lr.ph: ; preds = %entry
78 br label %for.body
79
80 for.body: ; preds = %for.body.lr.ph, %for.body
81 %i.05 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
82 %p.addr.04 = phi i32* [ %p, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
83 %incdec.ptr = getelementptr inbounds i32, i32* %p.addr.04, i32 1
84 store i32 %i.05, i32* %p.addr.04, align 4
85 %inc = add nsw i32 %i.05, 1
86 %cmp = icmp slt i32 %inc, %k
87 br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge
88
89 for.cond.for.end_crit_edge: ; preds = %for.body
90 br label %for.end
91
92 for.end: ; preds = %for.cond.for.end_crit_edge, %entry
93 %ret = phi i32 [ 0, %entry], [ %inc, %for.cond.for.end_crit_edge ]
94 ret i32 %ret
95 }