llvm.org GIT mirror: llvm / 5be18e8
Teach SCEVExpander to expand arithmetic involving pointers into GEP instructions. It attempts to create high-level multi-operand GEPs, though in cases where this isn't possible it falls back to casting the pointer to i8* and emitting a GEP with that. Using GEP instructions instead of ptrtoint+arithmetic+inttoptr helps pointer analyses that don't use ScalarEvolution, such as BasicAliasAnalysis.

Also, make the AddrModeMatcher more aggressive in handling GEPs. Previously it assumed that operand 0 of a GEP would require a register in almost all cases. It now does extra checking and can do more matching if operand 0 of the GEP is foldable. This fixes a problem that was exposed by SCEVExpander using GEPs.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@72093 91177308-0d34-0410-b5e6-96231b3b80d8

Dan Gohman, 10 years ago
9 changed files with 271 additions and 73 deletions.
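For illustration (an editorial sketch, not part of the commit): the kind of expansion this change replaces and the form it produces instead, in LLVM IR of this era. Function and value names are invented; assume a target where i32 is 4 bytes.

define i32* @before(i32* %p) {
entry:
  ; What SCEVExpander used to emit: a round-trip through integers,
  ; which hides the pointer from BasicAliasAnalysis.
  %p.int = ptrtoint i32* %p to i64
  %sum = add i64 %p.int, 4
  %q = inttoptr i64 %sum to i32*
  ret i32* %q
}

define i32* @after(i32* %p) {
entry:
  ; What it emits now: the same address as a GEP, one i32 element
  ; (4 bytes) past %p, which pointer analyses understand directly.
  %q = getelementptr i32* %p, i64 1
  ret i32* %q
}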
 ///
 class ScalarEvolution : public FunctionPass {
   friend class SCEVCallbackVH;
+  friend class SCEVExpander;
 
   /// F - The function we are analyzing.
   ///
 /// memory.
 struct SCEVExpander : public SCEVVisitor<SCEVExpander, Value*> {
   ScalarEvolution &SE;
-  LoopInfo &LI;
   std::map<SCEVHandle, Value*> InsertedExpressions;
   std::set<Value*> InsertedValues;
 
 
   friend struct SCEVVisitor<SCEVExpander, Value*>;
 public:
-  SCEVExpander(ScalarEvolution &se, LoopInfo &li)
-    : SE(se), LI(li) {}
-
-  LoopInfo &getLoopInfo() const { return LI; }
+  explicit SCEVExpander(ScalarEvolution &se)
+    : SE(se) {}
 
   /// clear - Erase the contents of the InsertedExpressions map so that users
   /// trying to expand the same expression into multiple BasicBlocks or
...
 
   /// expandCodeFor - Insert code to directly compute the specified SCEV
   /// expression into the program. The inserted code is inserted into the
-  /// SCEVExpander's current insertion point.
-  Value *expandCodeFor(SCEVHandle SH, const Type *Ty);
+  /// SCEVExpander's current insertion point. If a type is specified, the
+  /// result will be expanded to have that type, with a cast if necessary.
+  Value *expandCodeFor(SCEVHandle SH, const Type *Ty = 0);
 
   /// expandCodeFor - Insert code to directly compute the specified SCEV
   /// expression into the program. The inserted code is inserted into the
...
                      Value *RHS, BasicBlock::iterator InsertPt);
 
 private:
+  /// expandAddToGEP - Expand a SCEVAddExpr with a pointer type into a GEP
+  /// instead of using ptrtoint+arithmetic+inttoptr.
+  Value *expandAddToGEP(const SCEVAddExpr *S, const PointerType *PTy,
+                        const Type *Ty, Value *V);
+
   Value *expand(const SCEV *S);
 
   Value *visitConstant(const SCEVConstant *S) {
     if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
       const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
 
+      // Order pointer values after integer values. This helps SCEVExpander
+      // form GEPs.
+      if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
+        return false;
+      if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
+        return true;
+
       // Compare getValueID values.
       if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
         return LU->getValue()->getValueID() < RU->getValue()->getValueID();
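Why this ordering helps (an editorial sketch, not part of the commit): visitAddExpr expands the last operand of an add first, so with pointer values ordered last, a pointer operand naturally becomes the base of the emitted GEP. Names below are invented.

; For a SCEV like ((4 * %i) + %p), the pointer %p sorts last and is
; emitted as the GEP base rather than as an integer addend.
define i8* @base_last(i8* %p, i64 %i) {
entry:
  %scaled = mul i64 %i, 4
  %scevgep = getelementptr i8* %p, i64 %scaled
  ret i8* %scevgep
}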
 
 #include "llvm/Analysis/ScalarEvolutionExpander.h"
 #include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Target/TargetData.h"
 using namespace llvm;
 
 /// InsertCastOfTo - Insert a cast of V to the specified type, doing what
...
     BasicBlock::iterator IP = InsertPt;
     --IP;
     for (; ScanLimit; --IP, --ScanLimit) {
-      if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(IP))
-        if (BinOp->getOpcode() == Opcode && BinOp->getOperand(0) == LHS &&
-            BinOp->getOperand(1) == RHS)
-          return BinOp;
+      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
+          IP->getOperand(1) == RHS)
+        return IP;
       if (IP == BlockBegin) break;
     }
   }
...
   return BO;
 }
 
+/// expandAddToGEP - Expand a SCEVAddExpr with a pointer type into a GEP
+/// instead of using ptrtoint+arithmetic+inttoptr.
+Value *SCEVExpander::expandAddToGEP(const SCEVAddExpr *S,
+                                    const PointerType *PTy,
+                                    const Type *Ty,
+                                    Value *V) {
+  const Type *ElTy = PTy->getElementType();
+  SmallVector<Value*, 4> GepIndices;
+  std::vector<SCEVHandle> Ops = S->getOperands();
+  bool AnyNonZeroIndices = false;
+  Ops.pop_back();
+
+  // Descend down the pointer's type and attempt to convert the other
+  // operands into GEP indices, at each level. The first index in a GEP
+  // indexes into the array implied by the pointer operand; the rest of
+  // the indices index into the element or field type selected by the
+  // preceding index.
+  for (;;) {
+    APInt ElSize = APInt(SE.getTypeSizeInBits(Ty),
+                         ElTy->isSized() ? SE.TD->getTypeAllocSize(ElTy) : 0);
+    std::vector<SCEVHandle> NewOps;
+    std::vector<SCEVHandle> ScaledOps;
+    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+      if (ElSize != 0) {
+        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i]))
+          if (!C->getValue()->getValue().srem(ElSize)) {
+            ConstantInt *CI =
+              ConstantInt::get(C->getValue()->getValue().sdiv(ElSize));
+            SCEVHandle Div = SE.getConstant(CI);
+            ScaledOps.push_back(Div);
+            continue;
+          }
+        if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i]))
+          if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
+            if (C->getValue()->getValue() == ElSize) {
+              for (unsigned j = 1, f = M->getNumOperands(); j != f; ++j)
+                ScaledOps.push_back(M->getOperand(j));
+              continue;
+            }
+        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i]))
+          if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getValue()))
+            if (BO->getOpcode() == Instruction::Mul)
+              if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1)))
+                if (CI->getValue() == ElSize) {
+                  ScaledOps.push_back(SE.getUnknown(BO->getOperand(0)));
+                  continue;
+                }
+        if (ElSize == 1) {
+          ScaledOps.push_back(Ops[i]);
+          continue;
+        }
+      }
+      NewOps.push_back(Ops[i]);
+    }
+    Ops = NewOps;
+    AnyNonZeroIndices |= !ScaledOps.empty();
+    Value *Scaled = ScaledOps.empty() ?
+                    Constant::getNullValue(Ty) :
+                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
+    GepIndices.push_back(Scaled);
+
+    // Collect struct field index operands.
+    if (!Ops.empty())
+      while (const StructType *STy = dyn_cast<StructType>(ElTy)) {
+        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
+          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
+            const StructLayout &SL = *SE.TD->getStructLayout(STy);
+            uint64_t FullOffset = C->getValue()->getZExtValue();
+            if (FullOffset < SL.getSizeInBytes()) {
+              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
+              GepIndices.push_back(ConstantInt::get(Type::Int32Ty, ElIdx));
+              ElTy = STy->getTypeAtIndex(ElIdx);
+              Ops[0] =
+                SE.getConstant(ConstantInt::get(Ty,
+                                                FullOffset -
+                                                SL.getElementOffset(ElIdx)));
+              AnyNonZeroIndices = true;
+              continue;
+            }
+          }
+        break;
+      }
+
+    if (const ArrayType *ATy = dyn_cast<ArrayType>(ElTy)) {
+      ElTy = ATy->getElementType();
+      continue;
+    }
+    break;
+  }
+
+  // If none of the operands were convertible to proper GEP indices, cast
+  // the base to i8* and do an ugly getelementptr with that. It's still
+  // better than ptrtoint+arithmetic+inttoptr at least.
+  if (!AnyNonZeroIndices) {
+    V = InsertNoopCastOfTo(V,
+                           Type::Int8Ty->getPointerTo(PTy->getAddressSpace()));
+    Value *Idx = expand(SE.getAddExpr(Ops));
+    Idx = InsertNoopCastOfTo(Idx, Ty);
+
+    // Fold a GEP with constant operands.
+    if (Constant *CLHS = dyn_cast<Constant>(V))
+      if (Constant *CRHS = dyn_cast<Constant>(Idx))
+        return ConstantExpr::get(Instruction::GetElementPtr, CLHS, CRHS);
+
+    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
+    unsigned ScanLimit = 6;
+    BasicBlock::iterator BlockBegin = InsertPt->getParent()->begin();
+    if (InsertPt != BlockBegin) {
+      // Scanning starts from the last instruction before InsertPt.
+      BasicBlock::iterator IP = InsertPt;
+      --IP;
+      for (; ScanLimit; --IP, --ScanLimit) {
+        if (IP->getOpcode() == Instruction::GetElementPtr &&
+            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
+          return IP;
+        if (IP == BlockBegin) break;
+      }
+    }
+
+    Value *GEP = GetElementPtrInst::Create(V, Idx, "scevgep", InsertPt);
+    InsertedValues.insert(GEP);
+    return GEP;
+  }
+
+  // Insert a pretty getelementptr.
+  Value *GEP = GetElementPtrInst::Create(V,
+                                         GepIndices.begin(),
+                                         GepIndices.end(),
+                                         "scevgep", InsertPt);
+  Ops.push_back(SE.getUnknown(GEP));
+  InsertedValues.insert(GEP);
+  return expand(SE.getAddExpr(Ops));
+}
+
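Concretely (an editorial sketch, not part of the commit; types and names are invented, assuming an x86-64 data layout like the test below): when the add's constant offset divides evenly into element and field offsets, expandAddToGEP emits a "pretty" multi-operand GEP; an arbitrary byte offset takes the i8* fallback.

%struct.Pair = type { i32, i32 }

define i32* @pretty(%struct.Pair* %S) {
entry:
  ; byte offset 4 lands exactly on field 1, so it becomes struct indices
  %scevgep = getelementptr %struct.Pair* %S, i64 0, i32 1
  ret i32* %scevgep
}

define i8* @fallback(%struct.Pair* %S, i64 %n) {
entry:
  ; an arbitrary byte offset %n cannot be divided into indices, so the
  ; base is cast to i8* and indexed in raw bytes
  %base = bitcast %struct.Pair* %S to i8*
  %scevgep = getelementptr i8* %base, i64 %n
  ret i8* %scevgep
}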
 Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
   const Type *Ty = SE.getEffectiveSCEVType(S->getType());
   Value *V = expand(S->getOperand(S->getNumOperands()-1));
+
+  // Turn things like ptrtoint+arithmetic+inttoptr into GEP. This helps
+  // BasicAliasAnalysis analyze the result. However, it suffers from the
+  // underlying bug described in PR2831. Addition in LLVM currently always
+  // has two's complement wrapping guaranteed. However, the semantics for
+  // getelementptr overflow are ambiguous. In the common case though, this
+  // expansion gets used when a GEP in the original code has been converted
+  // into integer arithmetic, in which case the resulting code will be no
+  // more undefined than it was originally.
+  if (SE.TD)
+    if (const PointerType *PTy = dyn_cast<PointerType>(V->getType()))
+      return expandAddToGEP(S, PTy, Ty, V);
+
   V = InsertNoopCastOfTo(V, Ty);
 
   // Emit a bunch of add instructions
...
   }
   return V;
 }
 
 Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
   const Type *Ty = SE.getEffectiveSCEVType(S->getType());
   int FirstOp = 0;  // Set if we should emit a subtract.
...
 
   // {X,+,F} --> X + {0,+,F}
   if (!S->getStart()->isZero()) {
-    Value *Start = expand(S->getStart());
-    Start = InsertNoopCastOfTo(Start, Ty);
-    std::vector<SCEVHandle> NewOps(S->op_begin(), S->op_end());
+    std::vector<SCEVHandle> NewOps(S->getOperands());
     NewOps[0] = SE.getIntegerSCEV(0, Ty);
     Value *Rest = expand(SE.getAddRecExpr(NewOps, L));
-    Rest = InsertNoopCastOfTo(Rest, Ty);
-
-    // FIXME: look for an existing add to use.
-    return InsertBinop(Instruction::Add, Rest, Start, InsertPt);
+    return expand(SE.getAddExpr(S->getStart(), SE.getUnknown(Rest)));
   }
 
   // {0,+,1} --> Insert a canonical induction variable into the loop!
...
   // point loop. If we can, move the multiply to the outermost loop that it
   // is safe to be in.
   BasicBlock::iterator MulInsertPt = getInsertionPoint();
-  Loop *InsertPtLoop = LI.getLoopFor(MulInsertPt->getParent());
+  Loop *InsertPtLoop = SE.LI->getLoopFor(MulInsertPt->getParent());
   if (InsertPtLoop != L && InsertPtLoop &&
       L->contains(InsertPtLoop->getHeader())) {
     do {
...
 
 Value *SCEVExpander::expandCodeFor(SCEVHandle SH, const Type *Ty) {
   // Expand the code for this SCEV.
-  assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
-         "non-trivial casts should be done with the SCEVs directly!");
   Value *V = expand(SH);
-  return InsertNoopCastOfTo(V, Ty);
+  if (Ty) {
+    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
+           "non-trivial casts should be done with the SCEVs directly!");
+    V = InsertNoopCastOfTo(V, Ty);
+  }
+  return V;
 }
 
 Value *SCEVExpander::expand(const SCEV *S) {
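The AddRec change above composes with the GEP expansion (an editorial sketch, not part of the commit; names invented): {X,+,F} is now rewritten as X + {0,+,F} through getAddExpr, so when the start value X is a pointer it sorts last and visitAddExpr can emit it as a GEP base, yielding loops like:

define void @walk(i8* %p) {
entry:
  br label %loop

loop:
  ; the integer recurrence {0,+,1} ...
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  ; ... plus the pointer start %p, expanded as a GEP
  %scevgep = getelementptr i8* %p, i64 %iv
  store i8 0, i8* %scevgep
  %iv.next = add i64 %iv, 1
  %done = icmp eq i64 %iv.next, 100
  br i1 %done, label %exit, label %loop

exit:
  ret void
}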
 
   // Expand the code for the iteration count into the preheader of the loop.
   BasicBlock *Preheader = L->getLoopPreheader();
-  Value *ExitCnt = Rewriter.expandCodeFor(RHS, IndVar->getType(),
+  Value *ExitCnt = Rewriter.expandCodeFor(RHS, CmpIndVar->getType(),
                                           Preheader->getTerminator());
 
   // Insert a new icmp_ne or icmp_eq instruction before the branch.
...
 
   // Scan all of the instructions in the loop, looking at those that have
   // extra-loop users and which are recurrences.
-  SCEVExpander Rewriter(*SE, *LI);
+  SCEVExpander Rewriter(*SE);
 
   // We insert the code into the preheader of the loop if the loop contains
   // multiple exit blocks, or in the exit block if there is exactly one.
...
   }
 
   // Create a rewriter object which we'll use to transform the code with.
-  SCEVExpander Rewriter(*SE, *LI);
+  SCEVExpander Rewriter(*SE);
 
   // Now that we know the largest of the induction variable expressions
   // in this loop, insert a canonical induction variable of the largest size.
...
       BasicBlock::iterator I = Rewriter.getInsertionPoint();
       // Expand loop-invariant values in the loop preheader. They will
       // be sunk to the exit block later, if possible.
       NewVal =
         Rewriter.expandCodeFor(AR, LargestType,
                                L->getLoopPreheader()->getTerminator());
       Rewriter.setInsertionPoint(I);
...
     NewAR = SE->getAddExpr(NewAR, PromotedOffset);
 
     // Expand the addrec into instructions.
-    Value *V = Rewriter.expandCodeFor(NewAR, LargestType);
+    Value *V = Rewriter.expandCodeFor(NewAR);
 
     // Insert an explicit cast if necessary to truncate the value
     // down to the original stride type. This is done outside of
...
     if (SE->getTypeSizeInBits(IVTy) != SE->getTypeSizeInBits(LargestType))
       NewAR = SE->getTruncateExpr(NewAR, IVTy);
     if (Rewriter.isInsertedExpression(NewAR))
-      V = Rewriter.expandCodeFor(NewAR, IVTy);
+      V = Rewriter.expandCodeFor(NewAR);
     else {
       V = Rewriter.InsertCastOfTo(CastInst::getCastOpcode(V, false,
                                                           IVTy, false),
   void RewriteInstructionToUseNewBase(const SCEVHandle &NewBase,
                                       Instruction *InsertPt,
                                       SCEVExpander &Rewriter, Loop *L, Pass *P,
+                                      LoopInfo &LI,
                                       SmallVectorImpl<Instruction*> &DeadInsts);
 
   Value *InsertCodeForBaseAtPosition(const SCEVHandle &NewBase,
                                      const Type *Ty,
                                      SCEVExpander &Rewriter,
-                                     Instruction *IP, Loop *L);
+                                     Instruction *IP, Loop *L,
+                                     LoopInfo &LI);
   void dump() const;
 };
 }
...
 Value *BasedUser::InsertCodeForBaseAtPosition(const SCEVHandle &NewBase,
                                               const Type *Ty,
                                               SCEVExpander &Rewriter,
-                                              Instruction *IP, Loop *L) {
+                                              Instruction *IP, Loop *L,
+                                              LoopInfo &LI) {
   // Figure out where we *really* want to insert this code. In particular, if
   // the user is inside of a loop that is nested inside of L, we really don't
   // want to insert this expression before the user, we'd rather pull it out as
   // many loops as possible.
-  LoopInfo &LI = Rewriter.getLoopInfo();
   Instruction *BaseInsertPt = IP;
 
   // Figure out the most-nested loop that IP is in.
...
     InsertLoop = InsertLoop->getParentLoop();
   }
 
-  Value *Base = Rewriter.expandCodeFor(NewBase, NewBase->getType(),
-                                       BaseInsertPt);
+  Value *Base = Rewriter.expandCodeFor(NewBase, 0, BaseInsertPt);
 
   SCEVHandle NewValSCEV = SE->getUnknown(Base);
 
...
 void BasedUser::RewriteInstructionToUseNewBase(const SCEVHandle &NewBase,
                                                Instruction *NewBasePt,
                                                SCEVExpander &Rewriter, Loop *L, Pass *P,
+                                               LoopInfo &LI,
                                                SmallVectorImpl<Instruction*> &DeadInsts) {
   if (!isa<PHINode>(Inst)) {
     // By default, insert code at the user instruction.
...
   }
   Value *NewVal = InsertCodeForBaseAtPosition(NewBase,
                                               OperandValToReplace->getType(),
-                                              Rewriter, InsertPt, L);
+                                              Rewriter, InsertPt, L, LI);
   // Replace the use of the operand Value with the new Phi we just created.
   Inst->replaceUsesOfWith(OperandValToReplace, NewVal);
 
...
                          PN->getIncomingBlock(i)->getTerminator() :
                          OldLoc->getParent()->getTerminator();
         Code = InsertCodeForBaseAtPosition(NewBase, PN->getType(),
-                                           Rewriter, InsertPt, L);
+                                           Rewriter, InsertPt, L, LI);
 
         DOUT << "      Changing PHI use to ";
         DEBUG(WriteAsOperand(*DOUT, Code, /*PrintType=*/false));
...
          << *Stride << ":\n"
          << "  Common base: " << *CommonExprs << "\n";
 
-  SCEVExpander Rewriter(*SE, *LI);
-  SCEVExpander PreheaderRewriter(*SE, *LI);
+  SCEVExpander Rewriter(*SE);
+  SCEVExpander PreheaderRewriter(*SE);
 
   BasicBlock *Preheader = L->getLoopPreheader();
   Instruction *PreInsertPt = Preheader->getTerminator();
...
   // Emit the code for Base into the preheader.
   Value *BaseV = 0;
   if (!Base->isZero()) {
-    BaseV = PreheaderRewriter.expandCodeFor(Base, Base->getType(),
-                                            PreInsertPt);
+    BaseV = PreheaderRewriter.expandCodeFor(Base, 0, PreInsertPt);
 
     DOUT << "  INSERTING code for BASE = " << *Base << ":";
     if (BaseV->hasName())
...
       RewriteExpr = SE->getAddExpr(RewriteExpr, SE->getUnknown(BaseV));
 
     User.RewriteInstructionToUseNewBase(RewriteExpr, NewBasePt,
-                                        Rewriter, L, this,
+                                        Rewriter, L, this, *LI,
                                         DeadInsts);
 
     // Mark old value we replaced as possibly dead, so that it is eliminated
 
     // Save the valid addressing mode in case we can't match.
     ExtAddrMode BackupAddrMode = AddrMode;
-
-    // Check that this has no base reg yet. If so, we won't have a place to
-    // put the base of the GEP (assuming it is not a null ptr).
-    bool SetBaseReg = true;
-    if (isa<ConstantPointerNull>(AddrInst->getOperand(0)))
-      SetBaseReg = false;  // null pointer base doesn't need representation.
-    else if (AddrMode.HasBaseReg)
-      return false;  // Base register already specified, can't match GEP.
-    else {
-      // Otherwise, we'll use the GEP base as the BaseReg.
+    unsigned OldSize = AddrModeInsts.size();
+
+    // See if the scale and offset amount is valid for this target.
+    AddrMode.BaseOffs += ConstantOffset;
+
+    // Match the base operand of the GEP.
+    if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) {
+      // If it couldn't be matched, just stuff the value in a register.
+      if (AddrMode.HasBaseReg) {
+        AddrMode = BackupAddrMode;
+        AddrModeInsts.resize(OldSize);
+        return false;
+      }
       AddrMode.HasBaseReg = true;
       AddrMode.BaseReg = AddrInst->getOperand(0);
     }
-
-    // See if the scale and offset amount is valid for this target.
-    AddrMode.BaseOffs += ConstantOffset;
-
+
+    // Match the remaining variable portion of the GEP.
     if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                           Depth)) {
+      // If it couldn't be matched, try stuffing the base into a register
+      // instead of matching it, and retrying the match of the scale.
       AddrMode = BackupAddrMode;
-      return false;
-    }
-
-    // If we have a null as the base of the GEP, folding in the constant offset
-    // plus variable scale is all we can do.
-    if (!SetBaseReg) return true;
-
-    // If this match succeeded, we know that we can form an address with the
-    // GepBase as the basereg. Match the base pointer of the GEP more
-    // aggressively by zeroing out BaseReg and rematching. If the base is
-    // (for example) another GEP, this allows merging in that other GEP into
-    // the addressing mode we're forming.
-    AddrMode.HasBaseReg = false;
-    AddrMode.BaseReg = 0;
-    bool Success = MatchAddr(AddrInst->getOperand(0), Depth+1);
-    assert(Success && "MatchAddr should be able to fill in BaseReg!");
-    Success=Success;
+      AddrModeInsts.resize(OldSize);
+      if (AddrMode.HasBaseReg)
+        return false;
+      AddrMode.HasBaseReg = true;
+      AddrMode.BaseReg = AddrInst->getOperand(0);
+      AddrMode.BaseOffs += ConstantOffset;
+      if (!MatchScaledValue(AddrInst->getOperand(VariableOperand),
+                            VariableScale, Depth)) {
+        // If even that didn't work, bail.
+        AddrMode = BackupAddrMode;
+        AddrModeInsts.resize(OldSize);
+        return false;
+      }
+    }
+
     return true;
   }
 }
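What the more aggressive matching buys (an editorial sketch, not part of the commit; names invented): previously, a GEP whose operand 0 was itself a GEP forced the inner GEP into the base register; now the matcher recurses into operand 0 with MatchAddr, so both levels can fold into one addressing mode (e.g. base + 4*index + 40 on x86).

define i32 @load_elem(i32* %p, i64 %i) {
entry:
  ; the inner GEP is a constant offset and the outer adds a scaled
  ; index; both can now fold into a single addressing mode
  %inner = getelementptr i32* %p, i64 10
  %outer = getelementptr i32* %inner, i64 %i
  %v = load i32* %outer
  ret i32 %v
}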
+; RUN: llvm-as < %s | opt -indvars | llvm-dis > %t
+; RUN: not grep ptrtoint %t
+; RUN: not grep inttoptr %t
+; RUN: grep getelementptr %t | count 1
+
+; Indvars shouldn't leave getelementptrs expanded out as
+; inttoptr+ptrtoint in its output in common cases.
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+target triple = "x86_64-unknown-linux-gnu"
+%struct.Foo = type { i32, i32, [10 x i32], i32 }
+
+define void @me(%struct.Foo* nocapture %Bar) nounwind {
+entry:
+  br i1 false, label %return, label %bb.nph
+
+bb.nph:    ; preds = %entry
+  br label %bb
+
+bb:    ; preds = %bb1, %bb.nph
+  %i.01 = phi i64 [ %4, %bb1 ], [ 0, %bb.nph ]    ; <i64> [#uses=3]
+  %0 = getelementptr %struct.Foo* %Bar, i64 %i.01, i32 2, i64 3    ; <i32*> [#uses=1]
+  %1 = load i32* %0, align 4    ; <i32> [#uses=1]
+  %2 = mul i32 %1, 113    ; <i32> [#uses=1]
+  %3 = getelementptr %struct.Foo* %Bar, i64 %i.01, i32 2, i64 3    ; <i32*> [#uses=1]
+  store i32 %2, i32* %3, align 4
+  %4 = add i64 %i.01, 1    ; <i64> [#uses=2]
+  br label %bb1
+
+bb1:    ; preds = %bb
+  %phitmp = icmp sgt i64 %4, 19999    ; <i1> [#uses=1]
+  br i1 %phitmp, label %bb1.return_crit_edge, label %bb
+
+bb1.return_crit_edge:    ; preds = %bb1
+  br label %return
+
+return:    ; preds = %bb1.return_crit_edge, %entry
+  ret void
+}
-; RUN: llvm-as < %s | opt -loop-reduce | llvm-dis | grep {mul.*%lsr.iv} | count 2
+; RUN: llvm-as < %s | opt -loop-reduce | llvm-dis \
+; RUN: | grep {getelementptr.*%lsr.iv.*%lsr.iv.*}
 ; The multiply in bb2 must not be reduced to an add, as the sext causes the
 ; %1 argument to become negative after a while.
 ; ModuleID = '<stdin>'