llvm.org GIT mirror llvm / 670bbd6
[ScalarEvolution] Apply Depth limit to getMulExpr This is a fix for PR33292 that shows a case of extremely long compilation of a single .c file with clang, with most time spent within SCEV. We have a mechanism of limiting recursion depth for getAddExpr to avoid long analysis in SCEV. However, there are calls from getAddExpr to getMulExpr and back that do not propagate the info about depth. As a result of this, a chain getAddExpr -> ... -> getAddExpr -> getMulExpr -> getAddExpr -> ... -> getAddExpr can be extremely long, with every segment of getAddExpr's being up to max depth long. This leads either to long compilation or crash by stack overflow. We face this situation while analyzing big SCEVs in the test of PR33292. This patch applies the same limit on max expression depth for getAddExpr and getMulExpr. Differential Revision: https://reviews.llvm.org/D33984 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@305463 91177308-0d34-0410-b5e6-96231b3b80d8 Max Kazantsev 2 years ago
4 changed file(s) with 150 addition(s) and 73 deletion(s). Raw diff Collapse all Expand all
12131213 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
12141214 unsigned Depth = 0);
12151215 const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
1216 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
1216 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
1217 unsigned Depth = 0) {
12171218 SmallVector Ops = {LHS, RHS};
1218 return getAddExpr(Ops, Flags);
1219 return getAddExpr(Ops, Flags, Depth);
12191220 }
12201221 const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
1221 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
1222 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
1223 unsigned Depth = 0) {
12221224 SmallVector Ops = {Op0, Op1, Op2};
1223 return getAddExpr(Ops, Flags);
1225 return getAddExpr(Ops, Flags, Depth);
12241226 }
12251227 const SCEV *getMulExpr(SmallVectorImpl &Ops,
1226 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
1228 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
1229 unsigned Depth = 0);
12271230 const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
1228 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
1231 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
1232 unsigned Depth = 0) {
12291233 SmallVector Ops = {LHS, RHS};
1230 return getMulExpr(Ops, Flags);
1234 return getMulExpr(Ops, Flags, Depth);
12311235 }
12321236 const SCEV *getMulExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
1233 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
1237 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
1238 unsigned Depth = 0) {
12341239 SmallVector Ops = {Op0, Op1, Op2};
1235 return getMulExpr(Ops, Flags);
1240 return getMulExpr(Ops, Flags, Depth);
12361241 }
12371242 const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
12381243 const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
12861291
12871292 /// Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
12881293 const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
1289 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
1294 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
1295 unsigned Depth = 0);
12901296
12911297 /// Return a SCEV corresponding to a conversion of the input value to the
12921298 /// specified type. If the type must be extended, it is zero extended.
16921698 bool doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, bool IsSigned,
16931699 bool NoWrap);
16941700
1695 /// Get add expr already created or create a new one
1701 /// Get add expr already created or create a new one.
16961702 const SCEV *getOrCreateAddExpr(SmallVectorImpl &Ops,
1703 SCEV::NoWrapFlags Flags);
1704
1705 /// Get mul expr already created or create a new one.
1706 const SCEV *getOrCreateMulExpr(SmallVectorImpl &Ops,
16971707 SCEV::NoWrapFlags Flags);
16981708
16991709 private:
148148 cl::init(2));
149149
150150 static cl::opt
151 MaxAddExprDepth("scalar-evolution-max-addexpr-depth", cl::Hidden,
152 cl::desc("Maximum depth of recursive AddExpr"),
153 cl::init(32));
151 MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
152 cl::desc("Maximum depth of recursive arithmetics"),
153 cl::init(32));
154154
155155 static cl::opt MaxConstantEvolvingDepth(
156156 "scalar-evolution-max-constant-evolving-depth", cl::Hidden,
22752275 if (Ops.size() == 1) return Ops[0];
22762276 }
22772277
2278 // Limit recursion calls depth
2279 if (Depth > MaxAddExprDepth)
2278 // Limit recursion calls depth.
2279 if (Depth > MaxArithDepth)
22802280 return getOrCreateAddExpr(Ops, Flags);
22812281
22822282 // Okay, check to see if the same value occurs in the operand list more than
22922292 ++Count;
22932293 // Merge the values into a multiply.
22942294 const SCEV *Scale = getConstant(Ty, Count);
2295 const SCEV *Mul = getMulExpr(Scale, Ops[i]);
2295 const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
22962296 if (Ops.size() == Count)
22972297 return Mul;
22982298 Ops[i] = Mul;
23422342 }
23432343 }
23442344 if (Ok)
2345 LargeOps.push_back(getMulExpr(LargeMulOps));
2345 LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
23462346 } else {
23472347 Ok = false;
23482348 break;
24162416 if (MulOp.first != 0)
24172417 Ops.push_back(getMulExpr(
24182418 getConstant(MulOp.first),
2419 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1)));
2419 getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
2420 SCEV::FlagAnyWrap, Depth + 1));
24202421 if (Ops.empty())
24212422 return getZero(Ty);
24222423 if (Ops.size() == 1)
24442445 SmallVector MulOps(Mul->op_begin(),
24452446 Mul->op_begin()+MulOp);
24462447 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2447 InnerMul = getMulExpr(MulOps);
2448 InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
24482449 }
24492450 SmallVector TwoOps = {getOne(Ty), InnerMul};
24502451 const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2451 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
2452 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
2453 SCEV::FlagAnyWrap, Depth + 1);
24522454 if (Ops.size() == 2) return OuterMul;
24532455 if (AddOp < Idx) {
24542456 Ops.erase(Ops.begin()+AddOp);
24772479 SmallVector MulOps(Mul->op_begin(),
24782480 Mul->op_begin()+MulOp);
24792481 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
2480 InnerMul1 = getMulExpr(MulOps);
2482 InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
24812483 }
24822484 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
24832485 if (OtherMul->getNumOperands() != 2) {
24842486 SmallVector MulOps(OtherMul->op_begin(),
24852487 OtherMul->op_begin()+OMulOp);
24862488 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
2487 InnerMul2 = getMulExpr(MulOps);
2489 InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
24882490 }
24892491 SmallVector TwoOps = {InnerMul1, InnerMul2};
24902492 const SCEV *InnerMulSum =
24912493 getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
2492 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
2494 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
2495 SCEV::FlagAnyWrap, Depth + 1);
24932496 if (Ops.size() == 2) return OuterMul;
24942497 Ops.erase(Ops.begin()+Idx);
24952498 Ops.erase(Ops.begin()+OtherMulIdx-1);
26202623 return S;
26212624 }
26222625
2626 const SCEV *
2627 ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl &Ops,
2628 SCEV::NoWrapFlags Flags) {
2629 FoldingSetNodeID ID;
2630 ID.AddInteger(scMulExpr);
2631 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2632 ID.AddPointer(Ops[i]);
2633 void *IP = nullptr;
2634 SCEVMulExpr *S =
2635 static_cast(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2636 if (!S) {
2637 const SCEV **O = SCEVAllocator.Allocate(Ops.size());
2638 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2639 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2640 O, Ops.size());
2641 UniqueSCEVs.InsertNode(S, IP);
2642 }
2643 S->setNoWrapFlags(Flags);
2644 return S;
2645 }
2646
26232647 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
26242648 uint64_t k = i*j;
26252649 if (j > 1 && k / j != i) Overflow = true;
26722696
26732697 /// Get a canonical multiply expression, or something simpler if possible.
26742698 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl &Ops,
2675 SCEV::NoWrapFlags Flags) {
2699 SCEV::NoWrapFlags Flags,
2700 unsigned Depth) {
26762701 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
26772702 "only nuw or nsw allowed");
26782703 assert(!Ops.empty() && "Cannot get empty mul!");
26892714
26902715 Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);
26912716
2717 // Limit recursion calls depth.
2718 if (Depth > MaxArithDepth)
2719 return getOrCreateMulExpr(Ops, Flags);
2720
26922721 // If there are any constants, fold them together.
26932722 unsigned Idx = 0;
26942723 if (const SCEVConstant *LHSC = dyn_cast(Ops[0])) {
27002729 // apply this transformation as well.
27012730 if (Add->getNumOperands() == 2)
27022731 if (containsConstantSomewhere(Add))
2703 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
2704 getMulExpr(LHSC, Add->getOperand(1)));
2732 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
2733 SCEV::FlagAnyWrap, Depth + 1),
2734 getMulExpr(LHSC, Add->getOperand(1),
2735 SCEV::FlagAnyWrap, Depth + 1),
2736 SCEV::FlagAnyWrap, Depth + 1);
27052737
27062738 ++Idx;
27072739 while (const SCEVConstant *RHSC = dyn_cast(Ops[Idx])) {
27292761 SmallVector NewOps;
27302762 bool AnyFolded = false;
27312763 for (const SCEV *AddOp : Add->operands()) {
2732 const SCEV *Mul = getMulExpr(Ops[0], AddOp);
2764 const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
2765 Depth + 1);
27332766 if (!isa(Mul)) AnyFolded = true;
27342767 NewOps.push_back(Mul);
27352768 }
27362769 if (AnyFolded)
2737 return getAddExpr(NewOps);
2770 return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
27382771 } else if (const auto *AddRec = dyn_cast(Ops[1])) {
27392772 // Negation preserves a recurrence's no self-wrap property.
27402773 SmallVector Operands;
27412774 for (const SCEV *AddRecOp : AddRec->operands())
2742 Operands.push_back(getMulExpr(Ops[0], AddRecOp));
2775 Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
2776 Depth + 1));
27432777
27442778 return getAddRecExpr(Operands, AddRec->getLoop(),
27452779 AddRec->getNoWrapFlags(SCEV::FlagNW));
27612795 while (const SCEVMulExpr *Mul = dyn_cast(Ops[Idx])) {
27622796 if (Ops.size() > MulOpsInlineThreshold)
27632797 break;
2764 // If we have an mul, expand the mul operands onto the end of the operands
2765 // list.
2798 // If we have an mul, expand the mul operands onto the end of the
2799 // operands list.
27662800 Ops.erase(Ops.begin()+Idx);
27672801 Ops.append(Mul->op_begin(), Mul->op_end());
27682802 DeletedMul = true;
27692803 }
27702804
2771 // If we deleted at least one mul, we added operands to the end of the list,
2772 // and they are not necessarily sorted. Recurse to resort and resimplify
2773 // any operands we just acquired.
2805 // If we deleted at least one mul, we added operands to the end of the
2806 // list, and they are not necessarily sorted. Recurse to resort and
2807 // resimplify any operands we just acquired.
27742808 if (DeletedMul)
2775 return getMulExpr(Ops);
2809 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
27762810 }
27772811
27782812 // If there are any add recurrences in the operands list, see if any other
27832817
27842818 // Scan over all recurrences, trying to fold loop invariants into them.
27852819 for (; Idx < Ops.size() && isa(Ops[Idx]); ++Idx) {
2786 // Scan all of the other operands to this mul and add them to the vector if
2787 // they are loop invariant w.r.t. the recurrence.
2820 // Scan all of the other operands to this mul and add them to the vector
2821 // if they are loop invariant w.r.t. the recurrence.
27882822 SmallVector LIOps;
27892823 const SCEVAddRecExpr *AddRec = cast(Ops[Idx]);
27902824 const Loop *AddRecLoop = AddRec->getLoop();
28002834 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
28012835 SmallVector NewOps;
28022836 NewOps.reserve(AddRec->getNumOperands());
2803 const SCEV *Scale = getMulExpr(LIOps);
2837 const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
28042838 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
2805 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
2839 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
2840 SCEV::FlagAnyWrap, Depth + 1));
28062841
28072842 // Build the new addrec. Propagate the NUW and NSW flags if both the
28082843 // outer mul and the inner addrec are guaranteed to have no overflow.
28212856 Ops[i] = NewRec;
28222857 break;
28232858 }
2824 return getMulExpr(Ops);
2825 }
2826
2827 // Okay, if there weren't any loop invariants to be folded, check to see if
2828 // there are multiple AddRec's with the same loop induction variable being
2829 // multiplied together. If so, we can fold them.
2859 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
2860 }
2861
2862 // Okay, if there weren't any loop invariants to be folded, check to see
2863 // if there are multiple AddRec's with the same loop induction variable
2864 // being multiplied together. If so, we can fold them.
28302865
28312866 // {A1,+,A2,+,...,+,An} * {B1,+,B2,+,...,+,Bn}
28322867 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
28682903 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
28692904 const SCEV *Term1 = AddRec->getOperand(y-z);
28702905 const SCEV *Term2 = OtherAddRec->getOperand(z);
2871 Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
2906 Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2,
2907 SCEV::FlagAnyWrap, Depth + 1),
2908 SCEV::FlagAnyWrap, Depth + 1);
28722909 }
28732910 }
28742911 AddRecOps.push_back(Term);
28862923 }
28872924 }
28882925 if (OpsModified)
2889 return getMulExpr(Ops);
2926 return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
28902927
28912928 // Otherwise couldn't fold anything into this recurrence. Move onto the
28922929 // next one.
28942931
28952932 // Okay, it looks like we really DO need an mul expr. Check to see if we
28962933 // already have one, otherwise create a new one.
2897 FoldingSetNodeID ID;
2898 ID.AddInteger(scMulExpr);
2899 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2900 ID.AddPointer(Ops[i]);
2901 void *IP = nullptr;
2902 SCEVMulExpr *S =
2903 static_cast(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2904 if (!S) {
2905 const SCEV **O = SCEVAllocator.Allocate(Ops.size());
2906 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2907 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2908 O, Ops.size());
2909 UniqueSCEVs.InsertNode(S, IP);
2910 }
2911 S->setNoWrapFlags(Flags);
2912 return S;
2934 return getOrCreateMulExpr(Ops, Flags);
29132935 }
29142936
29152937 /// Get a canonical unsigned division expression, or something simpler if
37123734 }
37133735
37143736 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
3715 SCEV::NoWrapFlags Flags) {
3737 SCEV::NoWrapFlags Flags,
3738 unsigned Depth) {
37163739 // Fast path: X - X --> 0.
37173740 if (LHS == RHS)
37183741 return getZero(LHS->getType());
37463769 // larger scope than intended.
37473770 auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
37483771
3749 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags);
3772 return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
37503773 }
37513774
37523775 const SCEV *
351351 return false;
352352
353353 typedef const SCEV *(ScalarEvolution::*OperationFunctionTy)(
354 const SCEV *, const SCEV *, SCEV::NoWrapFlags);
354 const SCEV *, const SCEV *, SCEV::NoWrapFlags, unsigned);
355355 typedef const SCEV *(ScalarEvolution::*ExtensionFunctionTy)(
356356 const SCEV *, Type *);
357357
405405 IntegerType::get(NarrowTy->getContext(), NarrowTy->getBitWidth() * 2);
406406
407407 const SCEV *A =
408 (SE->*Extension)((SE->*Operation)(LHS, RHS, SCEV::FlagAnyWrap), WideTy);
408 (SE->*Extension)((SE->*Operation)(LHS, RHS, SCEV::FlagAnyWrap, 0u),
409 WideTy);
409410 const SCEV *B =
410411 (SE->*Operation)((SE->*Extension)(LHS, WideTy),
411 (SE->*Extension)(RHS, WideTy), SCEV::FlagAnyWrap);
412 (SE->*Extension)(RHS, WideTy), SCEV::FlagAnyWrap, 0u);
412413
413414 if (A != B)
414415 return false;
529530 return false;
530531
531532 const SCEV *(ScalarEvolution::*GetExprForBO)(const SCEV *, const SCEV *,
532 SCEV::NoWrapFlags);
533
533 SCEV::NoWrapFlags, unsigned);
534534 switch (BO->getOpcode()) {
535535 default:
536536 return false;
559559 const SCEV *ExtendAfterOp = SE->getZeroExtendExpr(SE->getSCEV(BO), WideTy);
560560 const SCEV *OpAfterExtend = (SE->*GetExprForBO)(
561561 SE->getZeroExtendExpr(LHS, WideTy), SE->getZeroExtendExpr(RHS, WideTy),
562 SCEV::FlagAnyWrap);
562 SCEV::FlagAnyWrap, 0u);
563563 if (ExtendAfterOp == OpAfterExtend) {
564564 BO->setHasNoUnsignedWrap();
565565 SE->forgetValue(BO);
571571 const SCEV *ExtendAfterOp = SE->getSignExtendExpr(SE->getSCEV(BO), WideTy);
572572 const SCEV *OpAfterExtend = (SE->*GetExprForBO)(
573573 SE->getSignExtendExpr(LHS, WideTy), SE->getSignExtendExpr(RHS, WideTy),
574 SCEV::FlagAnyWrap);
574 SCEV::FlagAnyWrap, 0u);
575575 if (ExtendAfterOp == OpAfterExtend) {
576576 BO->setHasNoSignedWrap();
577577 SE->forgetValue(BO);
; RUN: opt -scalar-evolution-max-arith-depth=0 -analyze -scalar-evolution < %s | FileCheck %s

; Check that depth set to 0 prevents getAddExpr and getMulExpr from making
; transformations in SCEV. We expect the result to be very straightforward.
; (Reconstructed: the diff viewer had fused its own line numbers into every
; line of this test; they are removed here so the file is valid LLVM IR.)

define void @test_add(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f) {
; CHECK-LABEL: @test_add
; CHECK: %s2 = add i32 %s1, %p3
; CHECK-NEXT: --> (%a + %a + %b + %b + %c + %c + %d + %d + %e + %e + %f + %f)
  %tmp0 = add i32 %a, %b
  %tmp1 = add i32 %b, %c
  %tmp2 = add i32 %c, %d
  %tmp3 = add i32 %d, %e
  %tmp4 = add i32 %e, %f
  %tmp5 = add i32 %f, %a

  %p1 = add i32 %tmp0, %tmp3
  %p2 = add i32 %tmp1, %tmp4
  %p3 = add i32 %tmp2, %tmp5

  %s1 = add i32 %p1, %p2
  %s2 = add i32 %s1, %p3
  ret void
}

define void @test_mul(i32 %a, i32 %b, i32 %c, i32 %d, i32 %e, i32 %f) {
; CHECK-LABEL: @test_mul
; CHECK: %s2 = mul i32 %s1, %p3
; CHECK-NEXT: --> (2 * 3 * 4 * 5 * 6 * 7 * %a * %b * %c * %d * %e * %f)
  %tmp0 = mul i32 %a, 2
  %tmp1 = mul i32 %b, 3
  %tmp2 = mul i32 %c, 4
  %tmp3 = mul i32 %d, 5
  %tmp4 = mul i32 %e, 6
  %tmp5 = mul i32 %f, 7

  %p1 = mul i32 %tmp0, %tmp3
  %p2 = mul i32 %tmp1, %tmp4
  %p3 = mul i32 %tmp2, %tmp5

  %s1 = mul i32 %p1, %p2
  %s2 = mul i32 %s1, %p3
  ret void
}