84#include "llvm/Config/llvm-config.h"
135using namespace PatternMatch;
137#define DEBUG_TYPE "scalar-evolution"
140 "Number of loop exits with predictable exit counts");
142 "Number of loop exits without predictable exit counts");
144 "Number of loops with trip counts computed by force");
146#ifdef EXPENSIVE_CHECKS
154 cl::desc(
"Maximum number of iterations SCEV will "
155 "symbolically execute a constant "
161 cl::desc(
"Verify ScalarEvolution's backedge taken counts (slow)"));
164 cl::desc(
"Enable stricter verification with -verify-scev is passed"));
168 cl::desc(
"Verify IR correctness when making sensitive SCEV queries (slow)"),
173 cl::desc(
"Threshold for inlining multiplication operands into a SCEV"),
178 cl::desc(
"Threshold for inlining addition operands into a SCEV"),
182 "scalar-evolution-max-scev-compare-depth",
cl::Hidden,
183 cl::desc(
"Maximum depth of recursive SCEV complexity comparisons"),
187 "scalar-evolution-max-scev-operations-implication-depth",
cl::Hidden,
188 cl::desc(
"Maximum depth of recursive SCEV operations implication analysis"),
192 "scalar-evolution-max-value-compare-depth",
cl::Hidden,
193 cl::desc(
"Maximum depth of recursive value complexity comparisons"),
198 cl::desc(
"Maximum depth of recursive arithmetics"),
202 "scalar-evolution-max-constant-evolving-depth",
cl::Hidden,
207 cl::desc(
"Maximum depth of recursive SExt/ZExt/Trunc"),
212 cl::desc(
"Max coefficients in AddRec during evolving"),
217 cl::desc(
"Size of the expression which is considered huge"),
222 cl::desc(
"Threshold for switching to iteratively computing SCEV ranges"),
228 cl::desc(
"When printing analysis, include information on every instruction"));
231 "scalar-evolution-use-expensive-range-sharpening",
cl::Hidden,
233 cl::desc(
"Use more powerful methods of sharpening expression ranges. May "
234 "be costly in terms of compile time"));
237 "scalar-evolution-max-scc-analysis-depth",
cl::Hidden,
238 cl::desc(
"Maximum amount of nodes to process while searching SCEVUnknown "
239 "Phi strongly connected components"),
244 cl::desc(
"Handle <= and >= in finite loops"),
248 "scalar-evolution-use-context-for-no-wrap-flag-strenghening",
cl::Hidden,
249 cl::desc(
"Infer nuw/nsw flags using context where suitable"),
260#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
270 cast<SCEVConstant>(
this)->getValue()->printAsOperand(
OS,
false);
278 OS <<
"(ptrtoint " << *
Op->getType() <<
" " << *
Op <<
" to "
279 << *PtrToInt->
getType() <<
")";
285 OS <<
"(trunc " << *
Op->getType() <<
" " << *
Op <<
" to "
292 OS <<
"(zext " << *
Op->getType() <<
" " << *
Op <<
" to "
299 OS <<
"(sext " << *
Op->getType() <<
" " << *
Op <<
" to "
328 const char *OpStr =
nullptr;
341 OpStr =
" umin_seq ";
347 ListSeparator LS(OpStr);
371 cast<SCEVUnknown>(
this)->getValue()->printAsOperand(
OS,
false);
374 OS <<
"***COULDNOTCOMPUTE***";
383 return cast<SCEVConstant>(
this)->getType();
385 return cast<SCEVVScale>(
this)->getType();
390 return cast<SCEVCastExpr>(
this)->getType();
392 return cast<SCEVAddRecExpr>(
this)->getType();
394 return cast<SCEVMulExpr>(
this)->getType();
399 return cast<SCEVMinMaxExpr>(
this)->getType();
401 return cast<SCEVSequentialMinMaxExpr>(
this)->getType();
403 return cast<SCEVAddExpr>(
this)->getType();
405 return cast<SCEVUDivExpr>(
this)->getType();
407 return cast<SCEVUnknown>(
this)->getType();
424 return cast<SCEVCastExpr>(
this)->operands();
433 return cast<SCEVNAryExpr>(
this)->operands();
435 return cast<SCEVUDivExpr>(
this)->operands();
443 if (
const SCEVConstant *SC = dyn_cast<SCEVConstant>(
this))
444 return SC->getValue()->isZero();
449 if (
const SCEVConstant *SC = dyn_cast<SCEVConstant>(
this))
450 return SC->getValue()->isOne();
455 if (
const SCEVConstant *SC = dyn_cast<SCEVConstant>(
this))
456 return SC->getValue()->isMinusOne();
462 if (!
Mul)
return false;
466 if (!SC)
return false;
469 return SC->getAPInt().isNegative();
484 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
486 UniqueSCEVs.InsertNode(S, IP);
505 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
508 UniqueSCEVs.InsertNode(S, IP);
527 "Must be a non-bit-width-changing pointer-to-integer cast!");
539 "Cannot truncate non-integer value!");
546 "Cannot zero extend non-integer value!");
553 "Cannot sign extend non-integer value!");
556void SCEVUnknown::deleted() {
558 SE->forgetMemoizedResults(
this);
561 SE->UniqueSCEVs.RemoveNode(
this);
567void SCEVUnknown::allUsesReplacedWith(
Value *New) {
569 SE->forgetMemoizedResults(
this);
572 SE->UniqueSCEVs.RemoveNode(
this);
611 if (LIsPointer != RIsPointer)
612 return (
int)LIsPointer - (int)RIsPointer;
617 return (
int)LID - (int)RID;
620 if (
const auto *LA = dyn_cast<Argument>(LV)) {
621 const auto *
RA = cast<Argument>(RV);
622 unsigned LArgNo = LA->getArgNo(), RArgNo =
RA->getArgNo();
623 return (
int)LArgNo - (int)RArgNo;
626 if (
const auto *LGV = dyn_cast<GlobalValue>(LV)) {
627 const auto *RGV = cast<GlobalValue>(RV);
629 const auto IsGVNameSemantic = [&](
const GlobalValue *GV) {
630 auto LT = GV->getLinkage();
637 if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
638 return LGV->getName().compare(RGV->getName());
643 if (
const auto *LInst = dyn_cast<Instruction>(LV)) {
644 const auto *RInst = cast<Instruction>(RV);
649 if (LParent != RParent) {
652 if (LDepth != RDepth)
653 return (
int)LDepth - (int)RDepth;
657 unsigned LNumOps = LInst->getNumOperands(),
658 RNumOps = RInst->getNumOperands();
659 if (LNumOps != RNumOps)
660 return (
int)LNumOps - (int)RNumOps;
662 for (
unsigned Idx :
seq(LNumOps)) {
680static std::optional<int>
692 return (
int)LType - (int)RType;
722 unsigned LBitWidth = LA.
getBitWidth(), RBitWidth =
RA.getBitWidth();
723 if (LBitWidth != RBitWidth)
724 return (
int)LBitWidth - (int)RBitWidth;
725 return LA.
ult(
RA) ? -1 : 1;
729 const auto *LTy = cast<IntegerType>(cast<SCEVVScale>(
LHS)->
getType());
730 const auto *RTy = cast<IntegerType>(cast<SCEVVScale>(
RHS)->
getType());
731 return LTy->getBitWidth() - RTy->getBitWidth();
742 if (LLoop != RLoop) {
744 assert(LHead != RHead &&
"Two loops share the same header?");
748 "No dominance between recurrences used by one SCEV?");
771 unsigned LNumOps = LOps.
size(), RNumOps = ROps.
size();
772 if (LNumOps != RNumOps)
773 return (
int)LNumOps - (int)RNumOps;
775 for (
unsigned i = 0; i != LNumOps; ++i) {
777 ROps[i], DT,
Depth + 1);
802 if (Ops.
size() < 2)
return;
811 return Complexity && *Complexity < 0;
813 if (Ops.
size() == 2) {
817 if (IsLessComplex(
RHS,
LHS))
824 return IsLessComplex(
LHS,
RHS);
831 for (
unsigned i = 0, e = Ops.
size(); i != e-2; ++i) {
832 const SCEV *S = Ops[i];
837 for (
unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
842 if (i == e-2)
return;
928 APInt OddFactorial(W, 1);
930 for (
unsigned i = 3; i <= K; ++i) {
933 OddFactorial *= (i >> TwoFactors);
937 unsigned CalculationBits = W +
T;
951 for (
unsigned i = 1; i != K; ++i) {
984 for (
unsigned i = 1, e =
Operands.size(); i != e; ++i) {
989 if (isa<SCEVCouldNotCompute>(Coeff))
1004 "getLosslessPtrToIntExpr() should self-recurse at most once.");
1008 if (!
Op->getType()->isPointerTy())
1019 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1038 if (
auto *U = dyn_cast<SCEVUnknown>(
Op)) {
1043 if (isa<ConstantPointerNull>(U->getValue()))
1049 SCEV *S =
new (SCEVAllocator)
1051 UniqueSCEVs.InsertNode(S, IP);
1056 assert(
Depth == 0 &&
"getLosslessPtrToIntExpr() should not self-recurse for "
1057 "non-SCEVUnknown's.");
1069 class SCEVPtrToIntSinkingRewriter
1077 SCEVPtrToIntSinkingRewriter
Rewriter(SE);
1087 return Base::visit(S);
1092 bool Changed =
false;
1102 bool Changed =
false;
1112 "Should only reach pointer-typed SCEVUnknown's.");
1118 const SCEV *IntOp = SCEVPtrToIntSinkingRewriter::rewrite(
Op, *
this);
1120 "We must have succeeded in sinking the cast, "
1121 "and ending up with an integer-typed expression!");
1129 if (isa<SCEVCouldNotCompute>(IntOp))
1138 "This is not a truncating conversion!");
1140 "This is not a conversion to a SCEVable type!");
1141 assert(!
Op->getType()->isPointerTy() &&
"Can't truncate pointer!");
1149 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1171 UniqueSCEVs.InsertNode(S, IP);
1180 if (isa<SCEVAddExpr>(
Op) || isa<SCEVMulExpr>(
Op)) {
1181 auto *CommOp = cast<SCEVCommutativeExpr>(
Op);
1183 unsigned numTruncs = 0;
1184 for (
unsigned i = 0, e = CommOp->getNumOperands(); i != e && numTruncs < 2;
1187 if (!isa<SCEVIntegralCastExpr>(CommOp->getOperand(i)) &&
1188 isa<SCEVTruncateExpr>(S))
1192 if (numTruncs < 2) {
1193 if (isa<SCEVAddExpr>(
Op))
1195 if (isa<SCEVMulExpr>(
Op))
1202 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
1209 for (
const SCEV *
Op : AddRec->operands())
1224 UniqueSCEVs.InsertNode(S, IP);
1264struct ExtendOpTraitsBase {
1270template <
typename ExtendOp>
struct ExtendOpTraits {
1286 static const GetExtendExprTy GetExtendExpr;
1288 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1295const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1302 static const GetExtendExprTy GetExtendExpr;
1304 static const SCEV *getOverflowLimitForStep(
const SCEV *Step,
1311const ExtendOpTraitsBase::GetExtendExprTy ExtendOpTraits<
1323template <
typename ExtendOpTy>
1326 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1327 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1334 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
1343 for (
auto It = DiffOps.
begin(); It != DiffOps.
end(); ++It)
1356 auto PreStartFlags =
1374 const SCEV *OperandExtendedStart =
1376 (SE->*GetExtendExpr)(Step, WideTy,
Depth));
1377 if ((SE->*GetExtendExpr)(Start, WideTy,
Depth) == OperandExtendedStart) {
1389 const SCEV *OverflowLimit =
1390 ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(Step, &Pred, SE);
1392 if (OverflowLimit &&
1400template <
typename ExtendOpTy>
1404 auto GetExtendExpr = ExtendOpTraits<ExtendOpTy>::GetExtendExpr;
1406 const SCEV *PreStart = getPreStartForExtend<ExtendOpTy>(AR, Ty, SE,
Depth);
1412 (SE->*GetExtendExpr)(PreStart, Ty,
Depth));
1447template <
typename ExtendOpTy>
1448bool ScalarEvolution::proveNoWrapByVaryingStart(
const SCEV *Start,
1451 auto WrapType = ExtendOpTraits<ExtendOpTy>::WrapType;
1457 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start);
1463 for (
unsigned Delta : {-2, -1, 1, 2}) {
1468 ID.AddPointer(PreStart);
1469 ID.AddPointer(Step);
1477 if (PreAR && PreAR->getNoWrapFlags(WrapType)) {
1480 const SCEV *Limit = ExtendOpTraits<ExtendOpTy>::getOverflowLimitForStep(
1481 DeltaS, &Pred,
this);
1499 const unsigned BitWidth =
C.getBitWidth();
1517 const APInt &ConstantStart,
1536 auto &UserIDs = FoldCacheUser[
I.first->second];
1537 assert(
count(UserIDs,
ID) == 1 &&
"unexpected duplicates in UserIDs");
1538 for (
unsigned I = 0;
I != UserIDs.size(); ++
I)
1539 if (UserIDs[
I] ==
ID) {
1544 I.first->second = S;
1546 auto R = FoldCacheUser.insert({S, {}});
1547 R.first->second.push_back(
ID);
1553 "This is not an extending conversion!");
1555 "This is not a conversion to a SCEVable type!");
1556 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1560 auto Iter = FoldCache.find(
ID);
1561 if (Iter != FoldCache.end())
1562 return Iter->second;
1565 if (!isa<SCEVZeroExtendExpr>(S))
1573 "This is not an extending conversion!");
1575 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1592 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1596 UniqueSCEVs.InsertNode(S, IP);
1605 const SCEV *
X = ST->getOperand();
1619 if (AR->isAffine()) {
1620 const SCEV *Start = AR->getStart();
1621 const SCEV *Step = AR->getStepRecurrence(*
this);
1623 const Loop *L = AR->getLoop();
1627 if (AR->hasNoUnsignedWrap()) {
1629 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
Depth + 1);
1643 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1648 const SCEV *CastedMaxBECount =
1652 if (MaxBECount == RecastedMaxBECount) {
1662 const SCEV *WideMaxBECount =
1664 const SCEV *OperandExtendedAdd =
1670 if (ZAdd == OperandExtendedAdd) {
1674 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1681 OperandExtendedAdd =
1687 if (ZAdd == OperandExtendedAdd) {
1692 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1707 if (!isa<SCEVCouldNotCompute>(MaxBECount) || HasGuards ||
1710 auto NewFlags = proveNoUnsignedWrapViaInduction(AR);
1712 if (AR->hasNoUnsignedWrap()) {
1718 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
Depth + 1);
1735 Start = getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
1746 if (
const auto *SC = dyn_cast<SCEVConstant>(Start)) {
1747 const APInt &
C = SC->getAPInt();
1751 const SCEV *SResidual =
1760 if (proveNoWrapByVaryingStart<SCEVZeroExtendExpr>(Start, Step, L)) {
1763 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this,
Depth + 1);
1779 if (
auto *Div = dyn_cast<SCEVUDivExpr>(
Op))
1783 if (
auto *SA = dyn_cast<SCEVAddExpr>(
Op)) {
1785 if (SA->hasNoUnsignedWrap()) {
1789 for (
const auto *
Op : SA->operands())
1802 if (
const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1806 const SCEV *SResidual =
1816 if (
auto *SM = dyn_cast<SCEVMulExpr>(
Op)) {
1818 if (SM->hasNoUnsignedWrap()) {
1822 for (
const auto *
Op : SM->operands())
1839 if (SM->getNumOperands() == 2)
1840 if (
auto *MulLHS = dyn_cast<SCEVConstant>(SM->getOperand(0)))
1841 if (MulLHS->getAPInt().isPowerOf2())
1842 if (
auto *TruncRHS = dyn_cast<SCEVTruncateExpr>(SM->getOperand(1))) {
1844 MulLHS->getAPInt().logBase2();
1856 if (isa<SCEVUMinExpr>(
Op) || isa<SCEVUMaxExpr>(
Op)) {
1857 auto *
MinMax = cast<SCEVMinMaxExpr>(
Op);
1859 for (
auto *Operand :
MinMax->operands())
1861 if (isa<SCEVUMinExpr>(
MinMax))
1867 if (
auto *
MinMax = dyn_cast<SCEVSequentialMinMaxExpr>(
Op)) {
1868 assert(isa<SCEVSequentialUMinExpr>(
MinMax) &&
"Not supported!");
1870 for (
auto *Operand :
MinMax->operands())
1877 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1880 UniqueSCEVs.InsertNode(S, IP);
1888 "This is not an extending conversion!");
1890 "This is not a conversion to a SCEVable type!");
1891 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1895 auto Iter = FoldCache.find(
ID);
1896 if (Iter != FoldCache.end())
1897 return Iter->second;
1900 if (!isa<SCEVSignExtendExpr>(S))
1908 "This is not an extending conversion!");
1910 assert(!
Op->getType()->isPointerTy() &&
"Can't extend pointer!");
1932 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
1937 UniqueSCEVs.InsertNode(S, IP);
1946 const SCEV *
X = ST->getOperand();
1955 if (
auto *SA = dyn_cast<SCEVAddExpr>(
Op)) {
1957 if (SA->hasNoSignedWrap()) {
1961 for (
const auto *
Op : SA->operands())
1975 if (
const auto *SC = dyn_cast<SCEVConstant>(SA->getOperand(0))) {
1979 const SCEV *SResidual =
1993 if (AR->isAffine()) {
1994 const SCEV *Start = AR->getStart();
1995 const SCEV *Step = AR->getStepRecurrence(*
this);
1997 const Loop *L = AR->getLoop();
2001 if (AR->hasNoSignedWrap()) {
2003 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2017 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
2023 const SCEV *CastedMaxBECount =
2027 if (MaxBECount == RecastedMaxBECount) {
2037 const SCEV *WideMaxBECount =
2039 const SCEV *OperandExtendedAdd =
2045 if (SAdd == OperandExtendedAdd) {
2049 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
2056 OperandExtendedAdd =
2062 if (SAdd == OperandExtendedAdd) {
2074 Start = getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
2082 auto NewFlags = proveNoSignedWrapViaInduction(AR);
2084 if (AR->hasNoSignedWrap()) {
2090 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2098 if (
const auto *SC = dyn_cast<SCEVConstant>(Start)) {
2099 const APInt &
C = SC->getAPInt();
2103 const SCEV *SResidual =
2112 if (proveNoWrapByVaryingStart<SCEVSignExtendExpr>(Start, Step, L)) {
2115 getExtendAddRecStart<SCEVSignExtendExpr>(AR, Ty,
this,
Depth + 1);
2128 if (isa<SCEVSMinExpr>(
Op) || isa<SCEVSMaxExpr>(
Op)) {
2129 auto *
MinMax = cast<SCEVMinMaxExpr>(
Op);
2131 for (
auto *Operand :
MinMax->operands())
2133 if (isa<SCEVSMinExpr>(
MinMax))
2140 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
2143 UniqueSCEVs.InsertNode(S, IP);
2169 "This is not an extending conversion!");
2171 "This is not a conversion to a SCEVable type!");
2176 if (SC->getAPInt().isNegative())
2181 const SCEV *NewOp =
T->getOperand();
2189 if (!isa<SCEVZeroExtendExpr>(ZExt))
2194 if (!isa<SCEVSignExtendExpr>(SExt))
2200 for (
const SCEV *
Op : AR->operands())
2206 if (isa<SCEVSMaxExpr>(
Op))
2239 APInt &AccumulatedConstant,
2242 bool Interesting =
false;
2246 while (
const SCEVConstant *
C = dyn_cast<SCEVConstant>(Ops[i])) {
2249 if (Scale != 1 || AccumulatedConstant != 0 ||
C->getValue()->isZero())
2251 AccumulatedConstant += Scale *
C->getAPInt();
2256 for (; i != Ops.
size(); ++i) {
2258 if (
Mul && isa<SCEVConstant>(
Mul->getOperand(0))) {
2260 Scale * cast<SCEVConstant>(
Mul->getOperand(0))->getAPInt();
2261 if (
Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(
Mul->getOperand(1))) {
2266 Add->operands(), NewScale, SE);
2272 auto Pair = M.insert({Key, NewScale});
2276 Pair.first->second += NewScale;
2284 std::pair<DenseMap<const SCEV *, APInt>::iterator,
bool> Pair =
2285 M.insert({Ops[i], Scale});
2289 Pair.first->second += Scale;
2308 case Instruction::Add:
2311 case Instruction::Sub:
2314 case Instruction::Mul:
2324 auto *NarrowTy = cast<IntegerType>(
LHS->
getType());
2328 const SCEV *
A = (this->*Extension)(
2330 const SCEV *LHSB = (this->*Extension)(
LHS, WideTy, 0);
2331 const SCEV *RHSB = (this->*Extension)(
RHS, WideTy, 0);
2339 if (BinOp == Instruction::Mul)
2341 auto *RHSC = dyn_cast<SCEVConstant>(
RHS);
2345 APInt C = RHSC->getAPInt();
2346 unsigned NumBits =
C.getBitWidth();
2347 bool IsSub = (BinOp == Instruction::Sub);
2348 bool IsNegativeConst = (
Signed &&
C.isNegative());
2350 bool OverflowDown = IsSub ^ IsNegativeConst;
2352 if (IsNegativeConst) {
2365 APInt Limit = Min + Magnitude;
2371 APInt Limit = Max - Magnitude;
2376std::optional<SCEV::NoWrapFlags>
2381 return std::nullopt;
2390 bool Deduced =
false;
2392 if (OBO->
getOpcode() != Instruction::Add &&
2395 return std::nullopt;
2404 false,
LHS,
RHS, CtxI)) {
2418 return std::nullopt;
2428 using namespace std::placeholders;
2435 assert(CanAnalyze &&
"don't call from other places!");
2442 auto IsKnownNonNegative = [&](
const SCEV *S) {
2452 if (SignOrUnsignWrap != SignOrUnsignMask &&
2454 isa<SCEVConstant>(Ops[0])) {
2459 return Instruction::Add;
2461 return Instruction::Mul;
2467 const APInt &
C = cast<SCEVConstant>(Ops[0])->getAPInt();
2472 Opcode,
C, OBO::NoSignedWrap);
2480 Opcode,
C, OBO::NoUnsignedWrap);
2490 Ops[0]->isZero() && IsKnownNonNegative(Ops[1]))
2496 if (
auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[0]))
2497 if (UDiv->getOperand(1) == Ops[1])
2499 if (
auto *UDiv = dyn_cast<SCEVUDivExpr>(Ops[1]))
2500 if (UDiv->getOperand(1) == Ops[0])
2516 "only nuw or nsw allowed");
2518 if (Ops.
size() == 1)
return Ops[0];
2521 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i)
2523 "SCEVAddExpr operand types don't match!");
2526 assert(NumPtrs <= 1 &&
"add has at most one pointer operand");
2534 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2539 Ops[0] =
getConstant(LHSC->getAPInt() + RHSC->getAPInt());
2540 if (Ops.
size() == 2)
return Ops[0];
2542 LHSC = cast<SCEVConstant>(Ops[0]);
2546 if (LHSC->getValue()->isZero()) {
2551 if (Ops.
size() == 1)
return Ops[0];
2561 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2566 if (
Add->getNoWrapFlags(OrigFlags) != OrigFlags)
2567 Add->setNoWrapFlags(ComputeFlags(Ops));
2574 Type *Ty = Ops[0]->getType();
2575 bool FoundMatch =
false;
2576 for (
unsigned i = 0, e = Ops.
size(); i != e-1; ++i)
2577 if (Ops[i] == Ops[i+1]) {
2580 while (i+Count != e && Ops[i+Count] == Ops[i])
2585 if (Ops.
size() == Count)
2589 --i; e -= Count - 1;
2599 auto FindTruncSrcType = [&]() ->
Type * {
2604 if (
auto *
T = dyn_cast<SCEVTruncateExpr>(Ops[
Idx]))
2605 return T->getOperand()->getType();
2606 if (
const auto *
Mul = dyn_cast<SCEVMulExpr>(Ops[
Idx])) {
2607 const auto *LastOp =
Mul->getOperand(
Mul->getNumOperands() - 1);
2608 if (
const auto *
T = dyn_cast<SCEVTruncateExpr>(LastOp))
2609 return T->getOperand()->getType();
2613 if (
auto *SrcType = FindTruncSrcType()) {
2618 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i) {
2620 if (
T->getOperand()->getType() != SrcType) {
2625 }
else if (
const SCEVConstant *
C = dyn_cast<SCEVConstant>(Ops[i])) {
2627 }
else if (
const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
2629 for (
unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
2631 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
2632 if (
T->getOperand()->getType() != SrcType) {
2637 }
else if (
const auto *
C = dyn_cast<SCEVConstant>(M->getOperand(j))) {
2655 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
2660 if (Ops.
size() == 2) {
2664 const SCEV *
A = Ops[0];
2665 const SCEV *
B = Ops[1];
2666 auto *AddExpr = dyn_cast<SCEVAddExpr>(
B);
2667 auto *
C = dyn_cast<SCEVConstant>(
A);
2668 if (AddExpr &&
C && isa<SCEVConstant>(AddExpr->getOperand(0))) {
2669 auto C1 = cast<SCEVConstant>(AddExpr->getOperand(0))->getAPInt();
2670 auto C2 =
C->getAPInt();
2673 APInt ConstAdd = C1 + C2;
2674 auto AddFlags = AddExpr->getNoWrapFlags();
2686 ConstAdd.
abs().
ule(C1.abs())) {
2700 if (Ops.
size() == 2) {
2702 if (
Mul &&
Mul->getNumOperands() == 2 &&
2703 Mul->getOperand(0)->isAllOnesValue()) {
2706 if (matchURem(
Mul->getOperand(1),
X,
Y) &&
X == Ops[1]) {
2718 bool DeletedAdd =
false;
2732 CommonFlags =
maskFlags(CommonFlags,
Add->getNoWrapFlags());
2748 if (
Idx < Ops.
size() && isa<SCEVMulExpr>(Ops[
Idx])) {
2755 struct APIntCompare {
2764 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
2765 for (
const SCEV *NewOp : NewOps)
2766 MulOpLists[M.find(NewOp)->second].push_back(NewOp);
2769 if (AccumulatedConstant != 0)
2771 for (
auto &MulOp : MulOpLists) {
2772 if (MulOp.first == 1) {
2774 }
else if (MulOp.first != 0) {
2783 if (Ops.
size() == 1)
2792 for (;
Idx < Ops.
size() && isa<SCEVMulExpr>(Ops[
Idx]); ++
Idx) {
2794 for (
unsigned MulOp = 0, e =
Mul->getNumOperands(); MulOp != e; ++MulOp) {
2795 const SCEV *MulOpSCEV =
Mul->getOperand(MulOp);
2796 if (isa<SCEVConstant>(MulOpSCEV))
2798 for (
unsigned AddOp = 0, e = Ops.
size(); AddOp != e; ++AddOp)
2799 if (MulOpSCEV == Ops[AddOp]) {
2801 const SCEV *InnerMul =
Mul->getOperand(MulOp == 0);
2802 if (
Mul->getNumOperands() != 2) {
2806 Mul->operands().take_front(MulOp));
2814 if (Ops.
size() == 2)
return OuterMul;
2827 for (
unsigned OtherMulIdx =
Idx+1;
2828 OtherMulIdx < Ops.
size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
2830 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
2834 OMulOp != e; ++OMulOp)
2835 if (OtherMul->
getOperand(OMulOp) == MulOpSCEV) {
2837 const SCEV *InnerMul1 =
Mul->getOperand(MulOp == 0);
2838 if (
Mul->getNumOperands() != 2) {
2840 Mul->operands().take_front(MulOp));
2847 OtherMul->
operands().take_front(OMulOp));
2852 const SCEV *InnerMulSum =
2856 if (Ops.
size() == 2)
return OuterMul;
2873 for (;
Idx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[
Idx]); ++
Idx) {
2879 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
2887 if (!LIOps.
empty()) {
2912 auto *DefI = getDefiningScopeBound(LIOps);
2914 if (!isGuaranteedToTransferExecutionTo(DefI, ReachI))
2926 if (Ops.
size() == 1)
return NewRec;
2929 for (
unsigned i = 0;; ++i)
2930 if (Ops[i] == AddRec) {
2940 for (
unsigned OtherIdx =
Idx+1;
2941 OtherIdx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2946 cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()->getHeader(),
2948 "AddRecExprs are not sorted in reverse dominance order?");
2949 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2952 for (; OtherIdx != Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2954 const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
2955 if (OtherAddRec->getLoop() == AddRecLoop) {
2956 for (
unsigned i = 0, e = OtherAddRec->getNumOperands();
2958 if (i >= AddRecOps.
size()) {
2959 append_range(AddRecOps, OtherAddRec->operands().drop_front(i));
2963 AddRecOps[i], OtherAddRec->getOperand(i)};
2966 Ops.
erase(Ops.
begin() + OtherIdx); --OtherIdx;
2981 return getOrCreateAddExpr(Ops, ComputeFlags(Ops));
2989 for (
const SCEV *
Op : Ops)
2993 static_cast<SCEVAddExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
2996 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2997 S =
new (SCEVAllocator)
2999 UniqueSCEVs.InsertNode(S, IP);
3011 for (
const SCEV *
Op : Ops)
3019 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3020 S =
new (SCEVAllocator)
3022 UniqueSCEVs.InsertNode(S, IP);
3023 LoopUsers[
L].push_back(S);
3035 for (
const SCEV *
Op : Ops)
3039 static_cast<SCEVMulExpr *
>(UniqueSCEVs.FindNodeOrInsertPos(
ID, IP));
3042 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
3043 S =
new (SCEVAllocator)
SCEVMulExpr(
ID.Intern(SCEVAllocator),
3045 UniqueSCEVs.InsertNode(S, IP);
3054 if (j > 1 && k / j != i) Overflow =
true;
3070 if (n == 0 || n == k)
return 1;
3071 if (k > n)
return 0;
3077 for (
uint64_t i = 1; i <= k; ++i) {
3078 r =
umul_ov(r, n-(i-1), Overflow);
3087 struct FindConstantInAddMulChain {
3088 bool FoundConstant =
false;
3090 bool follow(
const SCEV *S) {
3091 FoundConstant |= isa<SCEVConstant>(S);
3092 return isa<SCEVAddExpr>(S) || isa<SCEVMulExpr>(S);
3095 bool isDone()
const {
3096 return FoundConstant;
3100 FindConstantInAddMulChain
F;
3102 ST.visitAll(StartExpr);
3103 return F.FoundConstant;
3111 "only nuw or nsw allowed");
3113 if (Ops.
size() == 1)
return Ops[0];
3115 Type *ETy = Ops[0]->getType();
3117 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i)
3119 "SCEVMulExpr operand types don't match!");
3127 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3132 Ops[0] =
getConstant(LHSC->getAPInt() * RHSC->getAPInt());
3133 if (Ops.
size() == 2)
return Ops[0];
3135 LHSC = cast<SCEVConstant>(Ops[0]);
3139 if (LHSC->getValue()->isZero())
3143 if (LHSC->getValue()->isOne()) {
3148 if (Ops.
size() == 1)
3159 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3164 if (
Mul->getNoWrapFlags(OrigFlags) != OrigFlags)
3165 Mul->setNoWrapFlags(ComputeFlags(Ops));
3169 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3170 if (Ops.
size() == 2) {
3187 if (Ops[0]->isAllOnesValue()) {
3192 bool AnyFolded =
false;
3193 for (
const SCEV *AddOp :
Add->operands()) {
3196 if (!isa<SCEVMulExpr>(
Mul)) AnyFolded =
true;
3201 }
else if (
const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
3220 AddRec->getNoWrapFlags(FlagsMask));
3232 bool DeletedMul =
false;
3257 for (;
Idx < Ops.
size() && isa<SCEVAddRecExpr>(Ops[
Idx]); ++
Idx) {
3262 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
3270 if (!LIOps.
empty()) {
3283 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
3299 if (Ops.
size() == 1)
return NewRec;
3302 for (
unsigned i = 0;; ++i)
3303 if (Ops[i] == AddRec) {
3324 bool OpsModified =
false;
3325 for (
unsigned OtherIdx =
Idx+1;
3326 OtherIdx != Ops.
size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
3329 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]);
3339 bool Overflow =
false;
3345 SmallVector <const SCEV *, 7> SumOps;
3346 for (
int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
3350 z < ze && !Overflow; ++z) {
3353 if (LargerThan64Bits)
3354 Coeff =
umul_ov(Coeff1, Coeff2, Overflow);
3356 Coeff = Coeff1*Coeff2;
3371 if (Ops.
size() == 2)
return NewAddRec;
3372 Ops[
Idx] = NewAddRec;
3373 Ops.
erase(Ops.
begin() + OtherIdx); --OtherIdx;
3375 AddRec = dyn_cast<SCEVAddRecExpr>(NewAddRec);
3389 return getOrCreateMulExpr(Ops, ComputeFlags(Ops));
3397 "SCEVURemExpr operand types don't match!");
3402 if (RHSC->getValue()->isOne())
3406 if (RHSC->getAPInt().isPowerOf2()) {
3425 "SCEVUDivExpr operand can't be pointer!");
3427 "SCEVUDivExpr operand types don't match!");
3434 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3439 if (LHSC->getValue()->isZero())
3443 if (RHSC->getValue()->isOne())
3448 if (!RHSC->getValue()->isZero()) {
3453 unsigned LZ = RHSC->getAPInt().countl_zero();
3457 if (!RHSC->getAPInt().isPowerOf2())
3463 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*
this))) {
3465 const APInt &StepInt = Step->getAPInt();
3466 const APInt &DivInt = RHSC->getAPInt();
3467 if (!StepInt.
urem(DivInt) &&
3473 for (
const SCEV *
Op : AR->operands())
3480 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
3481 if (StartC && !DivInt.
urem(StepInt) &&
3487 const APInt &StartRem = StartInt.
urem(StepInt);
3488 if (StartRem != 0) {
3489 const SCEV *NewLHS =
3492 if (
LHS != NewLHS) {
3502 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
3511 for (
const SCEV *
Op : M->operands())
3515 for (
unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
3516 const SCEV *
Op = M->getOperand(i);
3518 if (!isa<SCEVUDivExpr>(Div) &&
getMulExpr(Div, RHSC) ==
Op) {
3528 if (
auto *DivisorConstant =
3529 dyn_cast<SCEVConstant>(OtherDiv->getRHS())) {
3530 bool Overflow =
false;
3532 DivisorConstant->getAPInt().
umul_ov(RHSC->getAPInt(), Overflow);
3543 for (
const SCEV *
Op :
A->operands())
3547 for (
unsigned i = 0, e =
A->getNumOperands(); i != e; ++i) {
3549 if (isa<SCEVUDivExpr>(
Op) ||
3554 if (
Operands.size() ==
A->getNumOperands())
3561 return getConstant(LHSC->getAPInt().udiv(RHSC->getAPInt()));
3568 if (
const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP))
return S;
3571 UniqueSCEVs.InsertNode(S, IP);
3601 if (!
Mul || !
Mul->hasNoUnsignedWrap())
3607 if (
const auto *LHSCst = dyn_cast<SCEVConstant>(
Mul->getOperand(0))) {
3608 if (LHSCst == RHSCst) {
3616 APInt Factor =
gcd(LHSCst, RHSCst);
3619 cast<SCEVConstant>(
getConstant(LHSCst->getAPInt().udiv(Factor)));
3621 cast<SCEVConstant>(
getConstant(RHSCst->getAPInt().udiv(Factor)));
3627 Mul = dyn_cast<SCEVMulExpr>(
LHS);
3634 for (
int i = 0, e =
Mul->getNumOperands(); i != e; ++i) {
3635 if (
Mul->getOperand(i) ==
RHS) {
3653 if (
const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
3654 if (StepChrec->getLoop() == L) {
3671 for (
unsigned i = 1, e =
Operands.size(); i != e; ++i) {
3673 "SCEVAddRecExpr operand types don't match!");
3676 for (
unsigned i = 0, e =
Operands.size(); i != e; ++i)
3678 "SCEVAddRecExpr operand is not available at loop entry!");
3696 const Loop *NestedLoop = NestedAR->getLoop();
3697 if (L->contains(NestedLoop)
3702 Operands[0] = NestedAR->getStart();
3706 bool AllInvariant =
all_of(
3718 AllInvariant =
all_of(NestedOperands, [&](
const SCEV *
Op) {
3729 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
3739 return getOrCreateAddRecExpr(
Operands, L, Flags);
3749 const bool AssumeInBoundsFlags = [&]() {
3750 if (!
GEP->isInBounds())
3756 auto *GEPI = dyn_cast<Instruction>(
GEP);
3759 return GEPI && isSCEVExprNeverPoison(GEPI);
3766 bool FirstIter =
true;
3768 for (
const SCEV *IndexExpr : IndexExprs) {
3770 if (
StructType *STy = dyn_cast<StructType>(CurTy)) {
3775 Offsets.push_back(FieldOffset);
3778 CurTy = STy->getTypeAtIndex(
Index);
3782 assert(isa<PointerType>(CurTy) &&
3783 "The first index of a GEP indexes a pointer");
3784 CurTy =
GEP->getSourceElementType();
3795 const SCEV *LocalOffset =
getMulExpr(IndexExpr, ElementSize, OffsetWrap);
3796 Offsets.push_back(LocalOffset);
3801 if (Offsets.empty())
3813 "GEP should not change type mid-flight.");
3817SCEV *ScalarEvolution::findExistingSCEVInCache(
SCEVTypes SCEVType,
3820 ID.AddInteger(SCEVType);
3821 for (
const SCEV *
Op : Ops)
3824 return UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3834 assert(SCEVMinMaxExpr::isMinMaxType(Kind) &&
"Not a SCEVMinMaxExpr!");
3835 assert(!Ops.
empty() &&
"Cannot get empty (u|s)(min|max)!");
3836 if (Ops.
size() == 1)
return Ops[0];
3839 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i) {
3841 "Operand types don't match!");
3844 "min/max should be consistently pointerish");
3855 if (
const SCEV *S = findExistingSCEVInCache(Kind, Ops)) {
3861 if (
const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
3882 getContext(), FoldOp(LHSC->getAPInt(), RHSC->getAPInt()));
3885 if (Ops.
size() == 1)
return Ops[0];
3886 LHSC = cast<SCEVConstant>(Ops[0]);
3889 bool IsMinV = LHSC->getValue()->isMinValue(IsSigned);
3890 bool IsMaxV = LHSC->getValue()->isMaxValue(IsSigned);
3892 if (IsMax ? IsMinV : IsMaxV) {
3896 }
else if (IsMax ? IsMaxV : IsMinV) {
3902 if (Ops.
size() == 1)
return Ops[0];
3906 while (
Idx < Ops.
size() && Ops[
Idx]->getSCEVType() < Kind)
3912 bool DeletedAny =
false;
3913 while (Ops[
Idx]->getSCEVType() == Kind) {
3933 for (
unsigned i = 0, e = Ops.
size() - 1; i != e; ++i) {
3934 if (Ops[i] == Ops[i + 1] ||
3935 isKnownViaNonRecursiveReasoning(FirstPred, Ops[i], Ops[i + 1])) {
3941 }
else if (isKnownViaNonRecursiveReasoning(SecondPred, Ops[i],
3950 if (Ops.
size() == 1)
return Ops[0];
3952 assert(!Ops.
empty() &&
"Reduced smax down to nothing!");
3957 ID.AddInteger(Kind);
3958 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
3959 ID.AddPointer(Ops[i]);
3961 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
3963 return ExistingSCEV;
3965 std::uninitialized_copy(Ops.
begin(), Ops.
end(), O);
3966 SCEV *S =
new (SCEVAllocator)
3969 UniqueSCEVs.InsertNode(S, IP);
3976class SCEVSequentialMinMaxDeduplicatingVisitor final
3977 :
public SCEVVisitor<SCEVSequentialMinMaxDeduplicatingVisitor,
3978 std::optional<const SCEV *>> {
3979 using RetVal = std::optional<const SCEV *>;
3987 bool canRecurseInto(
SCEVTypes Kind)
const {
3990 return RootKind == Kind || NonSequentialRootKind == Kind;
3993 RetVal visitAnyMinMaxExpr(
const SCEV *S) {
3994 assert((isa<SCEVMinMaxExpr>(S) || isa<SCEVSequentialMinMaxExpr>(S)) &&
3995 "Only for min/max expressions.");
3998 if (!canRecurseInto(Kind))
4001 auto *NAry = cast<SCEVNAryExpr>(S);
4003 bool Changed = visit(Kind, NAry->operands(), NewOps);
4008 return std::nullopt;
4010 return isa<SCEVSequentialMinMaxExpr>(S)
4015 RetVal visit(
const SCEV *S) {
4017 if (!SeenOps.
insert(S).second)
4018 return std::nullopt;
4019 return Base::visit(S);
4025 : SE(SE), RootKind(RootKind),
4026 NonSequentialRootKind(
4032 bool Changed =
false;
4036 for (
const SCEV *
Op : OrigOps) {
4037 RetVal NewOp = visit(
Op);
4045 NewOps = std::move(Ops);
4051 RetVal visitVScale(
const SCEVVScale *VScale) {
return VScale; }
4061 RetVal visitAddExpr(
const SCEVAddExpr *Expr) {
return Expr; }
4063 RetVal visitMulExpr(
const SCEVMulExpr *Expr) {
return Expr; }
4065 RetVal visitUDivExpr(
const SCEVUDivExpr *Expr) {
return Expr; }
4067 RetVal visitAddRecExpr(
const SCEVAddRecExpr *Expr) {
return Expr; }
4070 return visitAnyMinMaxExpr(Expr);
4074 return visitAnyMinMaxExpr(Expr);
4078 return visitAnyMinMaxExpr(Expr);
4082 return visitAnyMinMaxExpr(Expr);
4086 return visitAnyMinMaxExpr(Expr);
4089 RetVal visitUnknown(
const SCEVUnknown *Expr) {
return Expr; }
4133struct SCEVPoisonCollector {
4134 bool LookThroughMaybePoisonBlocking;
4136 SCEVPoisonCollector(
bool LookThroughMaybePoisonBlocking)
4137 : LookThroughMaybePoisonBlocking(LookThroughMaybePoisonBlocking) {}
4139 bool follow(
const SCEV *S) {
4140 if (!LookThroughMaybePoisonBlocking &&
4144 if (
auto *SU = dyn_cast<SCEVUnknown>(S)) {
4150 bool isDone()
const {
return false; }
4160 SCEVPoisonCollector PC1(
true);
4165 if (PC1.MaybePoison.empty())
4171 SCEVPoisonCollector PC2(
false);
4177 return PC2.MaybePoison.contains(S);
4183 SCEVPoisonCollector PC(
false);
4206 while (!Worklist.
empty()) {
4208 if (!Visited.
insert(V).second)
4212 if (Visited.
size() > 16)
4220 auto *
I = dyn_cast<Instruction>(V);
4227 if (
auto *PDI = dyn_cast<PossiblyDisjointInst>(
I))
4228 if (PDI->isDisjoint())
4234 if (
auto *II = dyn_cast<IntrinsicInst>(
I);
4235 II && II->getIntrinsicID() == Intrinsic::vscale)
4242 if (
I->hasPoisonGeneratingAnnotations())
4254 assert(SCEVSequentialMinMaxExpr::isSequentialMinMaxType(Kind) &&
4255 "Not a SCEVSequentialMinMaxExpr!");
4256 assert(!Ops.
empty() &&
"Cannot get empty (u|s)(min|max)!");
4257 if (Ops.
size() == 1)
4261 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i) {
4263 "Operand types don't match!");
4266 "min/max should be consistently pointerish");
4274 if (
const SCEV *S = findExistingSCEVInCache(Kind, Ops))
4281 SCEVSequentialMinMaxDeduplicatingVisitor Deduplicator(*
this, Kind);
4282 bool Changed = Deduplicator.visit(Kind, Ops, Ops);
4291 bool DeletedAny =
false;
4293 if (Ops[
Idx]->getSCEVType() != Kind) {
4297 const auto *SMME = cast<SCEVSequentialMinMaxExpr>(Ops[
Idx]);
4300 SMME->operands().end());
4308 const SCEV *SaturationPoint;
4319 for (
unsigned i = 1, e = Ops.
size(); i != e; ++i) {
4335 if (isKnownViaNonRecursiveReasoning(Pred, Ops[i - 1], Ops[i])) {
4344 ID.AddInteger(Kind);
4345 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i)
4346 ID.AddPointer(Ops[i]);
4348 const SCEV *ExistingSCEV = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP);
4350 return ExistingSCEV;
4353 std::uninitialized_copy(Ops.
begin(), Ops.
end(), O);
4354 SCEV *S =
new (SCEVAllocator)
4357 UniqueSCEVs.InsertNode(S, IP);
4405 if (
Size.isScalable())
4426 "Cannot get offset for structure containing scalable vector types");
4440 if (
SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(
ID, IP)) {
4441 assert(cast<SCEVUnknown>(S)->getValue() == V &&
4442 "Stale SCEVUnknown in uniquing map!");
4447 FirstUnknown = cast<SCEVUnknown>(S);
4448 UniqueSCEVs.InsertNode(S, IP);
4496 bool PreciseA, PreciseB;
4497 auto *ScopeA = getDefiningScopeBound({
A}, PreciseA);
4498 auto *ScopeB = getDefiningScopeBound({
B}, PreciseB);
4499 if (!PreciseA || !PreciseB)
4502 return (ScopeA == ScopeB) || DT.
dominates(ScopeA, ScopeB) ||
4507 return CouldNotCompute.get();
4510bool ScalarEvolution::checkValidity(
const SCEV *S)
const {
4512 auto *SU = dyn_cast<SCEVUnknown>(S);
4513 return SU && SU->getValue() ==
nullptr;
4516 return !ContainsNulls;
4521 if (
I != HasRecMap.
end())
4526 HasRecMap.
insert({S, FoundAddRec});
4534 if (SI == ExprValueMap.
end())
4535 return std::nullopt;
4536 return SI->second.getArrayRef();
4542void ScalarEvolution::eraseValueFromMap(
Value *V) {
4544 if (
I != ValueExprMap.
end()) {
4545 auto EVIt = ExprValueMap.
find(
I->second);
4546 bool Removed = EVIt->second.remove(V);
4548 assert(Removed &&
"Value not in ExprValueMap?");
4553void ScalarEvolution::insertValueToMap(
Value *V,
const SCEV *S) {
4557 auto It = ValueExprMap.
find_as(V);
4558 if (It == ValueExprMap.
end()) {
4560 ExprValueMap[S].
insert(V);
4571 return createSCEVIter(V);
4578 if (
I != ValueExprMap.
end()) {
4579 const SCEV *S =
I->second;
4580 assert(checkValidity(S) &&
4581 "existing SCEV has not been properly invalidated");
4590 if (
const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
4594 Type *Ty = V->getType();
4602 if (!
Add ||
Add->getNumOperands() != 2 ||
4603 !
Add->getOperand(0)->isAllOnesValue())
4606 const SCEVMulExpr *AddRHS = dyn_cast<SCEVMulExpr>(
Add->getOperand(1));
4616 assert(!V->getType()->isPointerTy() &&
"Can't negate pointer");
4618 if (
const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
4629 return (
const SCEV *)
nullptr;
4635 if (
const SCEV *Replaced = MatchMinMaxNegation(MME))
4639 Type *Ty = V->getType();
4645 assert(
P->getType()->isPointerTy());
4647 if (
auto *AddRec = dyn_cast<SCEVAddRecExpr>(
P)) {
4655 if (
auto *
Add = dyn_cast<SCEVAddExpr>(
P)) {
4658 const SCEV **PtrOp =
nullptr;
4659 for (
const SCEV *&AddOp : Ops) {
4660 if (AddOp->getType()->isPointerTy()) {
4661 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4695 const bool RHSIsNotMinSigned =
4726 Type *SrcTy = V->getType();
4728 "Cannot truncate or zero extend with non-integer arguments!");
4738 Type *SrcTy = V->getType();
4740 "Cannot truncate or zero extend with non-integer arguments!");
4750 Type *SrcTy = V->getType();
4752 "Cannot noop or zero extend with non-integer arguments!");
4754 "getNoopOrZeroExtend cannot truncate!");
4762 Type *SrcTy = V->getType();
4764 "Cannot noop or sign extend with non-integer arguments!");
4766 "getNoopOrSignExtend cannot truncate!");
4774 Type *SrcTy = V->getType();
4776 "Cannot noop or any extend with non-integer arguments!");
4778 "getNoopOrAnyExtend cannot truncate!");
4786 Type *SrcTy = V->getType();
4788 "Cannot truncate or noop with non-integer arguments!");
4790 "getTruncateOrNoop cannot extend!");
4798 const SCEV *PromotedLHS =
LHS;
4799 const SCEV *PromotedRHS =
RHS;
4819 assert(!Ops.
empty() &&
"At least one operand must be!");
4821 if (Ops.
size() == 1)
4825 Type *MaxType =
nullptr;
4826 for (
const auto *S : Ops)
4831 assert(MaxType &&
"Failed to find maximum type!");
4835 for (
const auto *S : Ops)
4844 if (!V->getType()->isPointerTy())
4848 if (
auto *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
4849 V = AddRec->getStart();
4850 }
else if (
auto *
Add = dyn_cast<SCEVAddExpr>(V)) {
4851 const SCEV *PtrOp =
nullptr;
4852 for (
const SCEV *AddOp :
Add->operands()) {
4853 if (AddOp->getType()->isPointerTy()) {
4854 assert(!PtrOp &&
"Cannot have multiple pointer ops");
4858 assert(PtrOp &&
"Must have pointer op");
4870 for (
User *U :
I->users()) {
4871 auto *UserInsn = cast<Instruction>(U);
4872 if (Visited.
insert(UserInsn).second)
4887 bool IgnoreOtherLoops =
true) {
4890 if (
Rewriter.hasSeenLoopVariantSCEVUnknown())
4892 return Rewriter.hasSeenOtherLoops() && !IgnoreOtherLoops
4899 SeenLoopVariantSCEVUnknown =
true;
4907 SeenOtherLoops =
true;
4911 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
4913 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
4920 bool SeenLoopVariantSCEVUnknown =
false;
4921 bool SeenOtherLoops =
false;
4931 SCEVPostIncRewriter
Rewriter(L, SE);
4933 return Rewriter.hasSeenLoopVariantSCEVUnknown()
4940 SeenLoopVariantSCEVUnknown =
true;
4948 SeenOtherLoops =
true;
4952 bool hasSeenLoopVariantSCEVUnknown() {
return SeenLoopVariantSCEVUnknown; }
4954 bool hasSeenOtherLoops() {
return SeenOtherLoops; }
4961 bool SeenLoopVariantSCEVUnknown =
false;
4962 bool SeenOtherLoops =
false;
4968class SCEVBackedgeConditionFolder
4973 bool IsPosBECond =
false;
4974 Value *BECond =
nullptr;
4976 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
4979 "Both outgoing branches should not target same header!");
4986 SCEVBackedgeConditionFolder
Rewriter(L, BECond, IsPosBECond, SE);
4996 switch (
I->getOpcode()) {
4997 case Instruction::Select: {
4999 std::optional<const SCEV *> Res =
5000 compareWithBackedgeCondition(
SI->getCondition());
5002 bool IsOne = cast<SCEVConstant>(*Res)->getValue()->isOne();
5008 std::optional<const SCEV *> Res = compareWithBackedgeCondition(
I);
5019 explicit SCEVBackedgeConditionFolder(
const Loop *L,
Value *BECond,
5022 IsPositiveBECond(IsPosBECond) {}
5024 std::optional<const SCEV *> compareWithBackedgeCondition(
Value *IC);
5028 Value *BackedgeCond =
nullptr;
5030 bool IsPositiveBECond;
5033std::optional<const SCEV *>
5034SCEVBackedgeConditionFolder::compareWithBackedgeCondition(
Value *IC) {
5039 if (BackedgeCond == IC)
5042 return std::nullopt;
5068 bool isValid() {
return Valid; }
5081ScalarEvolution::proveNoWrapViaConstantRanges(
const SCEVAddRecExpr *AR) {
5091 if (
const SCEVConstant *BECountMax = dyn_cast<SCEVConstant>(BECount)) {
5093 const APInt &BECountAP = BECountMax->getAPInt();
5094 unsigned NoOverflowBitWidth =
5106 Instruction::Add, IncRange, OBO::NoSignedWrap);
5107 if (NSWRegion.contains(AddRecRange))
5116 Instruction::Add, IncRange, OBO::NoUnsignedWrap);
5117 if (NUWRegion.contains(AddRecRange))
5125ScalarEvolution::proveNoSignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5135 if (!SignedWrapViaInductionTried.insert(AR).second)
5159 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
5168 const SCEV *OverflowLimit =
5170 if (OverflowLimit &&
5178ScalarEvolution::proveNoUnsignedWrapViaInduction(
const SCEVAddRecExpr *AR) {
5188 if (!UnsignedWrapViaInductionTried.insert(AR).second)
5213 if (isa<SCEVCouldNotCompute>(MaxBECount) && !HasGuards &&
5252 if (
auto *OBO = dyn_cast<OverflowingBinaryOperator>(
Op)) {
5253 IsNSW = OBO->hasNoSignedWrap();
5254 IsNUW = OBO->hasNoUnsignedWrap();
5258 explicit BinaryOp(
unsigned Opcode,
Value *
LHS,
Value *
RHS,
bool IsNSW =
false,
5260 : Opcode(Opcode),
LHS(
LHS),
RHS(
RHS), IsNSW(IsNSW), IsNUW(IsNUW) {}
5270 auto *
Op = dyn_cast<Operator>(V);
5272 return std::nullopt;
5278 switch (
Op->getOpcode()) {
5279 case Instruction::Add:
5280 case Instruction::Sub:
5281 case Instruction::Mul:
5282 case Instruction::UDiv:
5283 case Instruction::URem:
5284 case Instruction::And:
5285 case Instruction::AShr:
5286 case Instruction::Shl:
5287 return BinaryOp(
Op);
5289 case Instruction::Or: {
5291 if (cast<PossiblyDisjointInst>(
Op)->isDisjoint())
5292 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1),
5294 return BinaryOp(
Op);
5297 case Instruction::Xor:
5298 if (
auto *RHSC = dyn_cast<ConstantInt>(
Op->getOperand(1)))
5301 if (RHSC->getValue().isSignMask())
5302 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5304 if (V->getType()->isIntegerTy(1))
5305 return BinaryOp(Instruction::Add,
Op->getOperand(0),
Op->getOperand(1));
5306 return BinaryOp(
Op);
5308 case Instruction::LShr:
5310 if (
ConstantInt *SA = dyn_cast<ConstantInt>(
Op->getOperand(1))) {
5317 if (SA->getValue().ult(
BitWidth)) {
5319 ConstantInt::get(SA->getContext(),
5321 return BinaryOp(Instruction::UDiv,
Op->getOperand(0),
X);
5324 return BinaryOp(
Op);
5326 case Instruction::ExtractValue: {
5327 auto *EVI = cast<ExtractValueInst>(
Op);
5328 if (EVI->getNumIndices() != 1 || EVI->getIndices()[0] != 0)
5331 auto *WO = dyn_cast<WithOverflowInst>(EVI->getAggregateOperand());
5336 bool Signed = WO->isSigned();
5339 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS());
5344 return BinaryOp(BinOp, WO->getLHS(), WO->getRHS(),
5354 if (
auto *II = dyn_cast<IntrinsicInst>(V))
5355 if (II->getIntrinsicID() == Intrinsic::loop_decrement_reg)
5356 return BinaryOp(Instruction::Sub, II->getOperand(0), II->getOperand(1));
5358 return std::nullopt;
5384 if (
Op == SymbolicPHI)
5389 if (SourceBits != NewBits)
5397 SExt ? dyn_cast<SCEVTruncateExpr>(SExt->
getOperand())
5398 : dyn_cast<SCEVTruncateExpr>(ZExt->
getOperand());
5402 if (
X != SymbolicPHI)
5404 Signed = SExt !=
nullptr;
5412 if (!L || L->getHeader() != PN->
getParent())
5470std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5471ScalarEvolution::createAddRecFromPHIWithCastsImpl(
const SCEVUnknown *SymbolicPHI) {
5477 auto *PN = cast<PHINode>(SymbolicPHI->
getValue());
5479 assert(L &&
"Expecting an integer loop header phi");
5484 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5485 for (
unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
5486 Value *
V = PN->getIncomingValue(i);
5487 if (
L->contains(PN->getIncomingBlock(i))) {
5490 }
else if (BEValueV != V) {
5494 }
else if (!StartValueV) {
5496 }
else if (StartValueV != V) {
5497 StartValueV =
nullptr;
5501 if (!BEValueV || !StartValueV)
5502 return std::nullopt;
5509 const auto *
Add = dyn_cast<SCEVAddExpr>(BEValue);
5511 return std::nullopt;
5515 unsigned FoundIndex =
Add->getNumOperands();
5516 Type *TruncTy =
nullptr;
5518 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5521 if (FoundIndex == e) {
5526 if (FoundIndex ==
Add->getNumOperands())
5527 return std::nullopt;
5531 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5532 if (i != FoundIndex)
5539 return std::nullopt;
5593 const SCEV *PHISCEV =
5603 if (
const auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
5620 auto getExtendedExpr = [&](
const SCEV *Expr,
5621 bool CreateSignExtend) ->
const SCEV * {
5624 const SCEV *ExtendedExpr =
5627 return ExtendedExpr;
5635 auto PredIsKnownFalse = [&](
const SCEV *Expr,
5636 const SCEV *ExtendedExpr) ->
bool {
5637 return Expr != ExtendedExpr &&
5641 const SCEV *StartExtended = getExtendedExpr(StartVal,
Signed);
5642 if (PredIsKnownFalse(StartVal, StartExtended)) {
5644 return std::nullopt;
5649 const SCEV *AccumExtended = getExtendedExpr(Accum,
true);
5650 if (PredIsKnownFalse(Accum, AccumExtended)) {
5652 return std::nullopt;
5655 auto AppendPredicate = [&](
const SCEV *Expr,
5656 const SCEV *ExtendedExpr) ->
void {
5657 if (Expr != ExtendedExpr &&
5665 AppendPredicate(StartVal, StartExtended);
5666 AppendPredicate(Accum, AccumExtended);
5674 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
5675 std::make_pair(NewAR, Predicates);
5677 PredicatedSCEVRewrites[{SymbolicPHI,
L}] = PredRewrite;
5681std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5683 auto *PN = cast<PHINode>(SymbolicPHI->
getValue());
5686 return std::nullopt;
5689 auto I = PredicatedSCEVRewrites.find({SymbolicPHI, L});
5690 if (
I != PredicatedSCEVRewrites.end()) {
5691 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Rewrite =
5694 if (Rewrite.first == SymbolicPHI)
5695 return std::nullopt;
5698 assert(isa<SCEVAddRecExpr>(Rewrite.first) &&
"Expected an AddRec");
5699 assert(!(Rewrite.second).empty() &&
"Expected to find Predicates");
5703 std::optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
5704 Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
5709 PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, Predicates};
5710 return std::nullopt;
5727 auto areExprsEqual = [&](
const SCEV *Expr1,
const SCEV *Expr2) ->
bool {
5746const SCEV *ScalarEvolution::createSimpleAffineAddRec(
PHINode *PN,
5748 Value *StartValueV) {
5751 assert(BEValueV && StartValueV);
5757 if (BO->Opcode != Instruction::Add)
5760 const SCEV *Accum =
nullptr;
5761 if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
5763 else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
5777 insertValueToMap(PN, PHISCEV);
5779 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
5782 proveNoWrapViaConstantRanges(AR)));
5788 if (
auto *BEInst = dyn_cast<Instruction>(BEValueV)) {
5790 "Accum is defined outside L, but is not invariant?");
5791 if (isAddRecNeverPoison(BEInst, L))
5798const SCEV *ScalarEvolution::createAddRecFromPHI(
PHINode *PN) {
5806 Value *BEValueV =
nullptr, *StartValueV =
nullptr;
5812 }
else if (BEValueV != V) {
5816 }
else if (!StartValueV) {
5818 }
else if (StartValueV != V) {
5819 StartValueV =
nullptr;
5823 if (!BEValueV || !StartValueV)
5827 "PHI node already processed?");
5831 if (
auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
5836 insertValueToMap(PN, SymbolicName);
5850 unsigned FoundIndex =
Add->getNumOperands();
5851 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5852 if (
Add->getOperand(i) == SymbolicName)
5853 if (FoundIndex == e) {
5858 if (FoundIndex !=
Add->getNumOperands()) {
5861 for (
unsigned i = 0, e =
Add->getNumOperands(); i != e; ++i)
5862 if (i != FoundIndex)
5863 Ops.
push_back(SCEVBackedgeConditionFolder::rewrite(
Add->getOperand(i),
5870 (isa<SCEVAddRecExpr>(Accum) &&
5871 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
5875 if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
5888 if (
GEP->isInBounds() &&
GEP->getOperand(0) == PN) {
5905 forgetMemoizedResults(SymbolicName);
5906 insertValueToMap(PN, PHISCEV);
5908 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(PHISCEV)) {
5911 proveNoWrapViaConstantRanges(AR)));
5917 if (
auto *BEInst = dyn_cast<Instruction>(BEValueV))
5934 const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *
this);
5935 const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *
this,
false);
5939 if (Start == StartVal) {
5943 forgetMemoizedResults(SymbolicName);
5944 insertValueToMap(PN, Shifted);
5954 eraseValueFromMap(PN);
5974 Use &LeftUse =
Merge->getOperandUse(0);
5975 Use &RightUse =
Merge->getOperandUse(1);
5992const SCEV *ScalarEvolution::createNodeFromSelectLikePHI(
PHINode *PN) {
6009 assert(IDom &&
"At least the entry block should dominate PN");
6018 return createNodeForSelectOrPHI(PN,
Cond, LHS, RHS);
6024const SCEV *ScalarEvolution::createNodeForPHI(
PHINode *PN) {
6025 if (
const SCEV *S = createAddRecFromPHI(PN))
6031 if (
const SCEV *S = createNodeFromSelectLikePHI(PN))
6040 struct FindClosure {
6041 const SCEV *OperandToFind;
6047 bool canRecurseInto(
SCEVTypes Kind)
const {
6050 return RootKind == Kind || NonSequentialRootKind == Kind ||
6055 : OperandToFind(OperandToFind), RootKind(RootKind),
6056 NonSequentialRootKind(
6060 bool follow(
const SCEV *S) {
6061 Found = S == OperandToFind;
6063 return !isDone() && canRecurseInto(S->
getSCEVType());
6066 bool isDone()
const {
return Found; }
6069 FindClosure FC(OperandToFind, RootKind);
6074std::optional<const SCEV *>
6075ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
Type *Ty,
6085 switch (ICI->getPredicate()) {
6099 bool Signed = ICI->isSigned();
6108 if (LA == LS &&
RA == RS)
6110 if (LA == RS &&
RA == LS)
6113 auto CoerceOperand = [&](
const SCEV *
Op) ->
const SCEV * {
6114 if (
Op->getType()->isPointerTy()) {
6116 if (isa<SCEVCouldNotCompute>(
Op))
6125 LS = CoerceOperand(LS);
6126 RS = CoerceOperand(RS);
6127 if (isa<SCEVCouldNotCompute>(LS) || isa<SCEVCouldNotCompute>(RS))
6148 isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->
isZero()) {
6154 if (isa<SCEVConstant>(
C) && cast<SCEVConstant>(
C)->getAPInt().ule(1))
6161 if (isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->
isZero() &&
6162 isa<ConstantInt>(TrueVal) && cast<ConstantInt>(TrueVal)->
isZero()) {
6164 while (
auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(
X))
6165 X = ZExt->getOperand();
6178 return std::nullopt;
6181static std::optional<const SCEV *>
6183 const SCEV *TrueExpr,
const SCEV *FalseExpr) {
6187 "Unexpected operands of a select.");
6198 if (!isa<SCEVConstant>(TrueExpr) && !isa<SCEVConstant>(FalseExpr))
6199 return std::nullopt;
6202 if (isa<SCEVConstant>(TrueExpr)) {
6214static std::optional<const SCEV *>
6217 if (!isa<ConstantInt>(TrueVal) && !isa<ConstantInt>(FalseVal))
6218 return std::nullopt;
6221 const auto *SETrue = SE->
getSCEV(TrueVal);
6222 const auto *SEFalse = SE->
getSCEV(FalseVal);
6226const SCEV *ScalarEvolution::createNodeForSelectOrPHIViaUMinSeq(
6228 assert(
Cond->getType()->isIntegerTy(1) &&
"Select condition is not an i1?");
6230 V->getType() ==
TrueVal->getType() &&
6231 "Types of select hands and of the result must match.");
6234 if (!
V->getType()->isIntegerTy(1))
6237 if (std::optional<const SCEV *> S =
6249 if (
auto *CI = dyn_cast<ConstantInt>(
Cond))
6250 return getSCEV(CI->isOne() ? TrueVal : FalseVal);
6252 if (
auto *
I = dyn_cast<Instruction>(V)) {
6253 if (
auto *ICI = dyn_cast<ICmpInst>(
Cond)) {
6254 if (std::optional<const SCEV *> S =
6255 createNodeForSelectOrPHIInstWithICmpInstCond(
I->getType(), ICI,
6261 return createNodeForSelectOrPHIViaUMinSeq(V,
Cond, TrueVal, FalseVal);
6267 assert(
GEP->getSourceElementType()->isSized() &&
6268 "GEP source element type must be sized");
6276APInt ScalarEvolution::getConstantMultipleImpl(
const SCEV *S) {
6286 for (
unsigned I = 1, E =
N->getNumOperands();
I < E && Res != 1; ++
I)
6294 return cast<SCEVConstant>(S)->getAPInt();
6304 return GetShiftedByZeros(TZ);
6316 if (
M->hasNoUnsignedWrap()) {
6319 for (
const SCEV *Operand :
M->operands().drop_front())
6327 for (
const SCEV *Operand :
M->operands())
6329 return GetShiftedByZeros(TZ);
6334 if (
N->hasNoUnsignedWrap())
6335 return GetGCDMultiple(
N);
6338 for (
const SCEV *Operand :
N->operands().drop_front())
6340 return GetShiftedByZeros(TZ);
6347 return GetGCDMultiple(cast<SCEVNAryExpr>(S));
6353 .countMinTrailingZeros();
6354 return GetShiftedByZeros(Known);
6363 auto I = ConstantMultipleCache.find(S);
6364 if (
I != ConstantMultipleCache.end())
6367 APInt Result = getConstantMultipleImpl(S);
6368 auto InsertPair = ConstantMultipleCache.insert({S, Result});
6369 assert(InsertPair.second &&
"Should insert a new key");
6370 return InsertPair.first->second;
6386 if (
MDNode *MD =
I->getMetadata(LLVMContext::MD_range))
6388 if (
const auto *CB = dyn_cast<CallBase>(V))
6389 if (std::optional<ConstantRange> Range = CB->getRange())
6392 if (
auto *
A = dyn_cast<Argument>(V))
6393 if (std::optional<ConstantRange> Range =
A->getRange())
6396 return std::nullopt;
6403 UnsignedRanges.erase(AddRec);
6404 SignedRanges.erase(AddRec);
6405 ConstantMultipleCache.erase(AddRec);
6410getRangeForUnknownRecurrence(
const SCEVUnknown *U) {
6424 auto *
P = dyn_cast<PHINode>(U->getValue());
6436 Value *Start, *Step;
6443 assert(L && L->getHeader() ==
P->getParent());
6456 case Instruction::AShr:
6457 case Instruction::LShr:
6458 case Instruction::Shl:
6473 KnownStep.getBitWidth() ==
BitWidth);
6476 auto MaxShiftAmt = KnownStep.getMaxValue();
6478 bool Overflow =
false;
6479 auto TotalShift = MaxShiftAmt.umul_ov(TCAP, Overflow);
6486 case Instruction::AShr: {
6494 if (KnownStart.isNonNegative())
6497 KnownStart.getMaxValue() + 1);
6498 if (KnownStart.isNegative())
6501 KnownEnd.getMaxValue() + 1);
6504 case Instruction::LShr: {
6513 KnownStart.getMaxValue() + 1);
6515 case Instruction::Shl: {
6519 if (TotalShift.ult(KnownStart.countMinLeadingZeros()))
6521 KnownEnd.getMaxValue() + 1);
6529ScalarEvolution::getRangeRefIter(
const SCEV *S,
6530 ScalarEvolution::RangeSignHint SignHint) {
6532 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6539 auto AddToWorklist = [&WorkList, &Seen, &Cache](
const SCEV *Expr) {
6540 if (!Seen.
insert(Expr).second)
6546 if (!isa<PHINode>(cast<SCEVUnknown>(Expr)->getValue()))
6573 for (
unsigned I = 0;
I != WorkList.
size(); ++
I) {
6574 const SCEV *
P = WorkList[
I];
6575 auto *UnknownS = dyn_cast<SCEVUnknown>(
P);
6578 for (
const SCEV *
Op :
P->operands())
6583 if (
const PHINode *
P = dyn_cast<PHINode>(UnknownS->getValue())) {
6584 if (!PendingPhiRangesIter.insert(
P).second)
6591 if (!WorkList.
empty()) {
6596 getRangeRef(
P, SignHint);
6598 if (
auto *UnknownS = dyn_cast<SCEVUnknown>(
P))
6599 if (
const PHINode *
P = dyn_cast<PHINode>(UnknownS->getValue()))
6600 PendingPhiRangesIter.erase(
P);
6604 return getRangeRef(S, SignHint, 0);
6611 const SCEV *S, ScalarEvolution::RangeSignHint SignHint,
unsigned Depth) {
6613 SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED ? UnsignedRanges
6621 if (
I != Cache.
end())
6630 return getRangeRefIter(S, SignHint);
6638 if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
6642 ConservativeResult =
6665 ConservativeResult.intersectWith(
X.truncate(
BitWidth), RangeType));
6672 ConservativeResult.intersectWith(
X.zeroExtend(
BitWidth), RangeType));
6679 ConservativeResult.intersectWith(
X.signExtend(
BitWidth), RangeType));
6684 return setRange(PtrToInt, SignHint,
X);
6689 unsigned WrapType = OBO::AnyWrap;
6690 if (
Add->hasNoSignedWrap())
6691 WrapType |= OBO::NoSignedWrap;
6692 if (
Add->hasNoUnsignedWrap())
6693 WrapType |= OBO::NoUnsignedWrap;
6694 for (
unsigned i = 1, e =
Add->getNumOperands(); i != e; ++i)
6695 X =
X.addWithNoWrap(getRangeRef(
Add->getOperand(i), SignHint,
Depth + 1),
6696 WrapType, RangeType);
6697 return setRange(
Add, SignHint,
6698 ConservativeResult.intersectWith(
X, RangeType));
6703 for (
unsigned i = 1, e =
Mul->getNumOperands(); i != e; ++i)
6704 X =
X.multiply(getRangeRef(
Mul->getOperand(i), SignHint,
Depth + 1));
6705 return setRange(
Mul, SignHint,
6706 ConservativeResult.intersectWith(
X, RangeType));
6712 return setRange(UDiv, SignHint,
6713 ConservativeResult.intersectWith(
X.udiv(
Y), RangeType));
6721 if (!UnsignedMinValue.
isZero())
6722 ConservativeResult = ConservativeResult.intersectWith(
6732 bool AllNonNeg =
true;
6733 bool AllNonPos =
true;
6734 for (
unsigned i = 1, e = AddRec->
getNumOperands(); i != e; ++i) {
6741 ConservativeResult = ConservativeResult.intersectWith(
6746 ConservativeResult = ConservativeResult.intersectWith(
6755 const SCEV *MaxBEScev =
6757 if (!isa<SCEVCouldNotCompute>(MaxBEScev)) {
6758 APInt MaxBECount = cast<SCEVConstant>(MaxBEScev)->getAPInt();
6769 auto RangeFromAffine = getRangeForAffineAR(
6771 ConservativeResult =
6772 ConservativeResult.intersectWith(RangeFromAffine, RangeType);
6774 auto RangeFromFactoring = getRangeViaFactoring(
6776 ConservativeResult =
6777 ConservativeResult.intersectWith(RangeFromFactoring, RangeType);
6783 const SCEV *SymbolicMaxBECount =
6785 if (!isa<SCEVCouldNotCompute>(SymbolicMaxBECount) &&
6788 auto RangeFromAffineNew = getRangeForAffineNoSelfWrappingAR(
6789 AddRec, SymbolicMaxBECount,
BitWidth, SignHint);
6790 ConservativeResult =
6791 ConservativeResult.intersectWith(RangeFromAffineNew, RangeType);
6796 return setRange(AddRec, SignHint, std::move(ConservativeResult));
6806 ID = Intrinsic::umax;
6809 ID = Intrinsic::smax;
6813 ID = Intrinsic::umin;
6816 ID = Intrinsic::smin;
6822 const auto *NAry = cast<SCEVNAryExpr>(S);
6824 for (
unsigned i = 1, e = NAry->getNumOperands(); i != e; ++i)
6826 ID, {
X, getRangeRef(NAry->getOperand(i), SignHint,
Depth + 1)});
6827 return setRange(S, SignHint,
6828 ConservativeResult.intersectWith(
X, RangeType));
6837 ConservativeResult =
6838 ConservativeResult.intersectWith(*MDRange, RangeType);
6843 auto CR = getRangeForUnknownRecurrence(U);
6844 ConservativeResult = ConservativeResult.intersectWith(CR);
6855 if (
U->getType()->isPointerTy()) {
6858 unsigned ptrSize =
DL.getPointerTypeSizeInBits(
U->getType());
6859 int ptrIdxDiff = ptrSize -
BitWidth;
6860 if (ptrIdxDiff > 0 && ptrSize >
BitWidth && NS > (
unsigned)ptrIdxDiff)
6873 ConservativeResult = ConservativeResult.intersectWith(
6877 ConservativeResult = ConservativeResult.intersectWith(
6882 if (
U->getType()->isPointerTy() && SignHint == HINT_RANGE_UNSIGNED) {
6889 if ((isa<GlobalVariable>(V) || isa<AllocaInst>(V) ||
6905 ConservativeResult = ConservativeResult.intersectWith(
6911 if (
PHINode *Phi = dyn_cast<PHINode>(V)) {
6913 if (PendingPhiRanges.insert(Phi).second) {
6916 for (
const auto &
Op :
Phi->operands()) {
6918 RangeFromOps = RangeFromOps.unionWith(OpRange);
6920 if (RangeFromOps.isFullSet())
6923 ConservativeResult =
6924 ConservativeResult.intersectWith(RangeFromOps, RangeType);
6925 bool Erased = PendingPhiRanges.erase(Phi);
6926 assert(Erased &&
"Failed to erase Phi properly?");
6932 if (
const auto *II = dyn_cast<IntrinsicInst>(V))
6933 if (II->getIntrinsicID() == Intrinsic::vscale) {
6935 ConservativeResult = ConservativeResult.difference(Disallowed);
6938 return setRange(U, SignHint, std::move(ConservativeResult));
6944 return setRange(S, SignHint, std::move(ConservativeResult));
6953 const APInt &MaxBECount,
6960 if (Step == 0 || MaxBECount == 0)
6967 return ConstantRange::getFull(
BitWidth);
6983 return ConstantRange::getFull(
BitWidth);
6995 APInt MovedBoundary = Descending ? (StartLower - std::move(
Offset))
6996 : (StartUpper + std::move(
Offset));
7001 if (StartRange.
contains(MovedBoundary))
7002 return ConstantRange::getFull(
BitWidth);
7005 Descending ? std::move(MovedBoundary) : std::move(StartLower);
7007 Descending ? std::move(StartUpper) : std::move(MovedBoundary);
7016 const APInt &MaxBECount) {
7020 "mismatched bit widths");
7029 StepSRange.
getSignedMin(), StartSRange, MaxBECount,
true);
7031 StartSRange, MaxBECount,
7043ConstantRange ScalarEvolution::getRangeForAffineNoSelfWrappingAR(
7045 ScalarEvolution::RangeSignHint SignHint) {
7046 assert(AddRec->
isAffine() &&
"Non-affine AddRecs are not suppored!\n");
7048 "This only works for non-self-wrapping AddRecs!");
7049 const bool IsSigned = SignHint == HINT_RANGE_SIGNED;
7052 if (!isa<SCEVConstant>(Step))
7053 return ConstantRange::getFull(
BitWidth);
7061 return ConstantRange::getFull(
BitWidth);
7067 MaxItersWithoutWrap))
7068 return ConstantRange::getFull(
BitWidth);
7095 return RangeBetween;
7100 return ConstantRange::getFull(
BitWidth);
7103 isKnownPredicateViaConstantRanges(LEPred, Start,
End))
7104 return RangeBetween;
7106 isKnownPredicateViaConstantRanges(GEPred, Start,
End))
7107 return RangeBetween;
7108 return ConstantRange::getFull(
BitWidth);
7113 const APInt &MaxBECount) {
7120 "mismatched bit widths");
7122 struct SelectPattern {
7123 Value *Condition =
nullptr;
7129 std::optional<unsigned> CastOp;
7136 if (
auto *SA = dyn_cast<SCEVAddExpr>(S)) {
7139 if (SA->getNumOperands() != 2 || !isa<SCEVConstant>(SA->getOperand(0)))
7142 Offset = cast<SCEVConstant>(SA->getOperand(0))->getAPInt();
7143 S = SA->getOperand(1);
7147 if (
auto *SCast = dyn_cast<SCEVIntegralCastExpr>(S)) {
7149 S = SCast->getOperand();
7154 auto *SU = dyn_cast<SCEVUnknown>(S);
7159 Condition =
nullptr;
7191 bool isRecognized() {
return Condition !=
nullptr; }
7194 SelectPattern StartPattern(*
this,
BitWidth, Start);
7195 if (!StartPattern.isRecognized())
7196 return ConstantRange::getFull(
BitWidth);
7198 SelectPattern StepPattern(*
this,
BitWidth, Step);
7199 if (!StepPattern.isRecognized())
7200 return ConstantRange::getFull(
BitWidth);
7202 if (StartPattern.Condition != StepPattern.Condition) {
7206 return ConstantRange::getFull(
BitWidth);
7223 this->getRangeForAffineAR(TrueStart, TrueStep, MaxBECount);
7225 this->getRangeForAffineAR(FalseStart, FalseStep, MaxBECount);
7247ScalarEvolution::getNonTrivialDefiningScopeBound(
const SCEV *S) {
7248 if (
auto *AddRec = dyn_cast<SCEVAddRecExpr>(S))
7250 if (
auto *U = dyn_cast<SCEVUnknown>(S))
7251 if (
auto *
I = dyn_cast<Instruction>(
U->getValue()))
7263 auto pushOp = [&](
const SCEV *S) {
7264 if (!Visited.
insert(S).second)
7267 if (Visited.
size() > 30) {
7274 for (
const auto *S : Ops)
7278 while (!Worklist.
empty()) {
7280 if (
auto *DefI = getNonTrivialDefiningScopeBound(S)) {
7281 if (!Bound || DT.
dominates(Bound, DefI))
7294 return getDefiningScopeBound(Ops, Discard);
7297bool ScalarEvolution::isGuaranteedToTransferExecutionTo(
const Instruction *
A,
7299 if (
A->getParent() ==
B->getParent() &&
7305 if (BLoop && BLoop->getHeader() ==
B->getParent() &&
7306 BLoop->getLoopPreheader() ==
A->getParent() &&
7308 A->getParent()->end()) &&
7316bool ScalarEvolution::isSCEVExprNeverPoison(
const Instruction *
I) {
7333 for (
const Use &
Op :
I->operands()) {
7339 auto *DefI = getDefiningScopeBound(SCEVOps);
7340 return isGuaranteedToTransferExecutionTo(DefI,
I);
7343bool ScalarEvolution::isAddRecNeverPoison(
const Instruction *
I,
const Loop *L) {
7345 if (isSCEVExprNeverPoison(
I))
7356 auto *ExitingBB =
L->getExitingBlock();
7369 while (!Worklist.
empty()) {
7372 for (
const Use &U : Poison->
uses()) {
7373 const Instruction *PoisonUser = cast<Instruction>(
U.getUser());
7379 if (KnownPoison.
insert(PoisonUser).second)
7387ScalarEvolution::LoopProperties
7388ScalarEvolution::getLoopProperties(
const Loop *L) {
7389 using LoopProperties = ScalarEvolution::LoopProperties;
7391 auto Itr = LoopPropertiesCache.find(L);
7392 if (Itr == LoopPropertiesCache.end()) {
7394 if (
auto *SI = dyn_cast<StoreInst>(
I))
7395 return !
SI->isSimple();
7397 return I->mayThrow() ||
I->mayWriteToMemory();
7400 LoopProperties LP = {
true,
7403 for (
auto *BB :
L->getBlocks())
7404 for (
auto &
I : *BB) {
7406 LP.HasNoAbnormalExits =
false;
7407 if (HasSideEffects(&
I))
7408 LP.HasNoSideEffects =
false;
7409 if (!LP.HasNoAbnormalExits && !LP.HasNoSideEffects)
7413 auto InsertPair = LoopPropertiesCache.insert({
L, LP});
7414 assert(InsertPair.second &&
"We just checked!");
7415 Itr = InsertPair.first;
7428const SCEV *ScalarEvolution::createSCEVIter(
Value *V) {
7434 Stack.emplace_back(V,
true);
7435 Stack.emplace_back(V,
false);
7436 while (!Stack.empty()) {
7437 auto E = Stack.pop_back_val();
7438 Value *CurV = E.getPointer();
7444 const SCEV *CreatedSCEV =
nullptr;
7447 CreatedSCEV = createSCEV(CurV);
7452 CreatedSCEV = getOperandsToCreate(CurV, Ops);
7456 insertValueToMap(CurV, CreatedSCEV);
7460 Stack.emplace_back(CurV,
true);
7462 Stack.emplace_back(
Op,
false);
7481 }
else if (
ConstantInt *CI = dyn_cast<ConstantInt>(V))
7483 else if (isa<GlobalAlias>(V))
7485 else if (!isa<ConstantExpr>(V))
7491 bool IsConstArg = isa<ConstantInt>(BO->RHS);
7492 switch (BO->Opcode) {
7493 case Instruction::Add:
7494 case Instruction::Mul: {
7507 dyn_cast<Instruction>(V));
7509 (BO->Opcode == Instruction::Add &&
7510 (NewBO->Opcode != Instruction::Add &&
7511 NewBO->Opcode != Instruction::Sub)) ||
7512 (BO->Opcode == Instruction::Mul &&
7513 NewBO->Opcode != Instruction::Mul)) {
7519 if (BO->
Op && (BO->IsNSW || BO->IsNUW)) {
7520 auto *
I = dyn_cast<Instruction>(BO->
Op);
7530 case Instruction::Sub:
7531 case Instruction::UDiv:
7532 case Instruction::URem:
7534 case Instruction::AShr:
7535 case Instruction::Shl:
7536 case Instruction::Xor:
7540 case Instruction::And:
7541 case Instruction::Or:
7545 case Instruction::LShr:
7557 switch (
U->getOpcode()) {
7558 case Instruction::Trunc:
7559 case Instruction::ZExt:
7560 case Instruction::SExt:
7561 case Instruction::PtrToInt:
7565 case Instruction::BitCast:
7572 case Instruction::SDiv:
7573 case Instruction::SRem:
7578 case Instruction::GetElementPtr:
7579 assert(cast<GEPOperator>(U)->getSourceElementType()->isSized() &&
7580 "GEP source element type must be sized");
7585 case Instruction::IntToPtr:
7588 case Instruction::PHI:
7592 case Instruction::Select: {
7594 auto CanSimplifyToUnknown = [
this,
U]() {
7595 if (
U->getType()->isIntegerTy(1) || isa<ConstantInt>(
U->getOperand(0)))
7598 auto *ICI = dyn_cast<ICmpInst>(
U->getOperand(0));
7605 if (!(isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->
isZero()))
7612 if (CanSimplifyToUnknown())
7615 for (
Value *Inc :
U->operands())
7620 case Instruction::Call:
7621 case Instruction::Invoke:
7622 if (
Value *RV = cast<CallBase>(U)->getReturnedArgOperand()) {
7627 if (
auto *II = dyn_cast<IntrinsicInst>(U)) {
7628 switch (II->getIntrinsicID()) {
7629 case Intrinsic::abs:
7632 case Intrinsic::umax:
7633 case Intrinsic::umin:
7634 case Intrinsic::smax:
7635 case Intrinsic::smin:
7636 case Intrinsic::usub_sat:
7637 case Intrinsic::uadd_sat:
7641 case Intrinsic::start_loop_iterations:
7642 case Intrinsic::annotation:
7643 case Intrinsic::ptr_annotation:
7656const SCEV *ScalarEvolution::createSCEV(
Value *V) {
7667 }
else if (
ConstantInt *CI = dyn_cast<ConstantInt>(V))
7669 else if (isa<GlobalAlias>(V))
7671 else if (!isa<ConstantExpr>(V))
7680 switch (BO->Opcode) {
7681 case Instruction::Add: {
7707 if (BO->Opcode == Instruction::Sub)
7715 if (BO->Opcode == Instruction::Sub)
7721 dyn_cast<Instruction>(V));
7722 if (!NewBO || (NewBO->Opcode != Instruction::Add &&
7723 NewBO->Opcode != Instruction::Sub)) {
7733 case Instruction::Mul: {
7753 dyn_cast<Instruction>(V));
7754 if (!NewBO || NewBO->Opcode != Instruction::Mul) {
7763 case Instruction::UDiv:
7767 case Instruction::URem:
7771 case Instruction::Sub: {
7774 Flags = getNoWrapFlagsFromUB(BO->
Op);
7779 case Instruction::And:
7782 if (
ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
7785 if (CI->isMinusOne())
7787 const APInt &
A = CI->getValue();
7793 unsigned LZ =
A.countl_zero();
7794 unsigned TZ =
A.countr_zero();
7798 0, &AC,
nullptr, &DT);
7800 APInt EffectiveMask =
7802 if ((LZ != 0 || TZ != 0) && !((~
A & ~Known.
Zero) & EffectiveMask)) {
7805 const SCEV *ShiftedLHS =
nullptr;
7806 if (
auto *LHSMul = dyn_cast<SCEVMulExpr>(LHS)) {
7807 if (
auto *OpC = dyn_cast<SCEVConstant>(LHSMul->getOperand(0))) {
7809 unsigned MulZeros = OpC->getAPInt().countr_zero();
7810 unsigned GCD = std::min(MulZeros, TZ);
7815 auto *NewMul =
getMulExpr(MulOps, LHSMul->getNoWrapFlags());
7837 case Instruction::Or:
7846 case Instruction::Xor:
7847 if (
ConstantInt *CI = dyn_cast<ConstantInt>(BO->RHS)) {
7849 if (CI->isMinusOne())
7856 if (
auto *LBO = dyn_cast<BinaryOperator>(BO->LHS))
7857 if (
ConstantInt *LCI = dyn_cast<ConstantInt>(LBO->getOperand(1)))
7858 if (LBO->getOpcode() == Instruction::And &&
7859 LCI->getValue() == CI->getValue())
7861 dyn_cast<SCEVZeroExtendExpr>(
getSCEV(BO->LHS))) {
7863 const SCEV *Z0 =
Z->getOperand();
7870 if (CI->getValue().isMask(Z0TySize))
7876 APInt Trunc = CI->getValue().
trunc(Z0TySize);
7885 case Instruction::Shl:
7887 if (
ConstantInt *SA = dyn_cast<ConstantInt>(BO->RHS)) {
7903 auto MulFlags = getNoWrapFlagsFromUB(BO->
Op);
7917 case Instruction::AShr:
7938 Operator *
L = dyn_cast<Operator>(BO->LHS);
7939 const SCEV *AddTruncateExpr =
nullptr;
7941 const SCEV *AddConstant =
nullptr;
7943 if (L &&
L->getOpcode() == Instruction::Add) {
7949 Operator *LShift = dyn_cast<Operator>(
L->getOperand(0));
7950 ConstantInt *AddOperandCI = dyn_cast<ConstantInt>(
L->getOperand(1));
7951 if (LShift && LShift->
getOpcode() == Instruction::Shl) {
7954 ShlAmtCI = dyn_cast<ConstantInt>(LShift->
getOperand(1));
7966 }
else if (L &&
L->getOpcode() == Instruction::Shl) {
7972 ShlAmtCI = dyn_cast<ConstantInt>(
L->getOperand(1));
7976 if (AddTruncateExpr && ShlAmtCI) {
7992 const SCEV *CompositeExpr =
7994 if (
L->getOpcode() != Instruction::Shl)
7995 CompositeExpr =
getAddExpr(CompositeExpr, AddConstant);
8004 switch (
U->getOpcode()) {
8005 case Instruction::Trunc:
8008 case Instruction::ZExt:
8011 case Instruction::SExt:
8013 dyn_cast<Instruction>(V))) {
8021 if (BO->Opcode == Instruction::Sub && BO->IsNSW) {
8022 Type *Ty =
U->getType();
8030 case Instruction::BitCast:
8036 case Instruction::PtrToInt: {
8039 Type *DstIntTy =
U->getType();
8043 if (isa<SCEVCouldNotCompute>(IntOp))
8047 case Instruction::IntToPtr:
8051 case Instruction::SDiv:
8058 case Instruction::SRem:
8065 case Instruction::GetElementPtr:
8066 return createNodeForGEP(cast<GEPOperator>(U));
8068 case Instruction::PHI:
8069 return createNodeForPHI(cast<PHINode>(U));
8071 case Instruction::Select:
8072 return createNodeForSelectOrPHI(U,
U->getOperand(0),
U->getOperand(1),
8075 case Instruction::Call:
8076 case Instruction::Invoke:
8077 if (
Value *RV = cast<CallBase>(U)->getReturnedArgOperand())
8080 if (
auto *II = dyn_cast<IntrinsicInst>(U)) {
8081 switch (II->getIntrinsicID()) {
8082 case Intrinsic::abs:
8084 getSCEV(II->getArgOperand(0)),
8085 cast<ConstantInt>(II->getArgOperand(1))->
isOne());
8086 case Intrinsic::umax:
8090 case Intrinsic::umin:
8094 case Intrinsic::smax:
8098 case Intrinsic::smin:
8102 case Intrinsic::usub_sat: {
8108 case Intrinsic::uadd_sat: {
8114 case Intrinsic::start_loop_iterations:
8115 case Intrinsic::annotation:
8116 case Intrinsic::ptr_annotation:
8119 return getSCEV(II->getArgOperand(0));
8120 case Intrinsic::vscale:
8137 if (isa<SCEVCouldNotCompute>(ExitCount))
8140 auto *ExitCountType = ExitCount->
getType();
8141 assert(ExitCountType->isIntegerTy());
8143 1 + ExitCountType->getScalarSizeInBits());
8150 if (isa<SCEVCouldNotCompute>(ExitCount))
8156 auto CanAddOneWithoutOverflow = [&]() {
8158 getRangeRef(ExitCount, RangeSignHint::HINT_RANGE_UNSIGNED);
8169 if (EvalSize > ExitCountSize && CanAddOneWithoutOverflow())
8199 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8200 assert(L->isLoopExiting(ExitingBlock) &&
8201 "Exiting block must actually branch out of the loop!");
8208 const auto *MaxExitCount =
8215 L->getExitingBlocks(ExitingBlocks);
8217 std::optional<unsigned> Res;
8218 for (
auto *ExitingBB : ExitingBlocks) {
8222 Res = (
unsigned)std::gcd(*Res, Multiple);
8224 return Res.value_or(1);
8228 const SCEV *ExitCount) {
8258 assert(ExitingBlock &&
"Must pass a non-null exiting block!");
8259 assert(L->isLoopExiting(ExitingBlock) &&
8260 "Exiting block must actually branch out of the loop!");
8270 return getBackedgeTakenInfo(L).getExact(ExitingBlock,
this);
8272 return getBackedgeTakenInfo(L).getSymbolicMax(ExitingBlock,
this);
8274 return getBackedgeTakenInfo(L).getConstantMax(ExitingBlock,
this);
8282 return getPredicatedBackedgeTakenInfo(L).getExact(L,
this, &Preds);
8289 return getBackedgeTakenInfo(L).getExact(L,
this);
8291 return getBackedgeTakenInfo(L).getConstantMax(
this);
8293 return getBackedgeTakenInfo(L).getSymbolicMax(L,
this);
8299 return getBackedgeTakenInfo(L).isConstantMaxOrZero(
this);
8309 for (
PHINode &PN : Header->phis())
8310 if (Visited.
insert(&PN).second)
8314const ScalarEvolution::BackedgeTakenInfo &
8315ScalarEvolution::getPredicatedBackedgeTakenInfo(
const Loop *L) {
8316 auto &BTI = getBackedgeTakenInfo(L);
8317 if (BTI.hasFullInfo())
8320 auto Pair = PredicatedBackedgeTakenCounts.insert({
L, BackedgeTakenInfo()});
8323 return Pair.first->second;
8325 BackedgeTakenInfo
Result =
8326 computeBackedgeTakenCount(L,
true);
8328 return PredicatedBackedgeTakenCounts.find(L)->second = std::move(Result);
8331ScalarEvolution::BackedgeTakenInfo &
8332ScalarEvolution::getBackedgeTakenInfo(
const Loop *L) {
8338 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator,
bool> Pair =
8339 BackedgeTakenCounts.insert({
L, BackedgeTakenInfo()});
8341 return Pair.first->second;
8346 BackedgeTakenInfo
Result = computeBackedgeTakenCount(L);
8353 if (
Result.hasAnyInfo()) {
8356 auto LoopUsersIt = LoopUsers.find(L);
8357 if (LoopUsersIt != LoopUsers.end())
8359 forgetMemoizedResults(ToForget);
8362 for (
PHINode &PN :
L->getHeader()->phis())
8363 ConstantEvolutionLoopExitValue.erase(&PN);
8371 return BackedgeTakenCounts.find(L)->second = std::move(Result);
8380 BackedgeTakenCounts.clear();
8381 PredicatedBackedgeTakenCounts.clear();
8382 BECountUsers.clear();
8383 LoopPropertiesCache.clear();
8384 ConstantEvolutionLoopExitValue.clear();
8385 ValueExprMap.
clear();
8386 ValuesAtScopes.clear();
8387 ValuesAtScopesUsers.clear();
8388 LoopDispositions.clear();
8389 BlockDispositions.clear();
8390 UnsignedRanges.clear();
8391 SignedRanges.clear();
8392 ExprValueMap.
clear();
8394 ConstantMultipleCache.clear();
8395 PredicatedSCEVRewrites.clear();
8397 FoldCacheUser.clear();
8399void ScalarEvolution::visitAndClearUsers(
8403 while (!Worklist.
empty()) {
8410 if (It != ValueExprMap.
end()) {
8411 eraseValueFromMap(It->first);
8413 if (
PHINode *PN = dyn_cast<PHINode>(
I))
8414 ConstantEvolutionLoopExitValue.erase(PN);
8428 while (!LoopWorklist.
empty()) {
8432 forgetBackedgeTakenCounts(CurrL,
false);
8433 forgetBackedgeTakenCounts(CurrL,
true);
8436 for (
auto I = PredicatedSCEVRewrites.begin();
8437 I != PredicatedSCEVRewrites.end();) {
8438 std::pair<const SCEV *, const Loop *> Entry =
I->first;
8439 if (Entry.second == CurrL)
8440 PredicatedSCEVRewrites.erase(
I++);
8445 auto LoopUsersItr = LoopUsers.find(CurrL);
8446 if (LoopUsersItr != LoopUsers.end()) {
8447 ToForget.
insert(ToForget.
end(), LoopUsersItr->second.begin(),
8448 LoopUsersItr->second.end());
8453 visitAndClearUsers(Worklist, Visited, ToForget);
8455 LoopPropertiesCache.erase(CurrL);
8458 LoopWorklist.
append(CurrL->begin(), CurrL->end());
8460 forgetMemoizedResults(ToForget);
8477 visitAndClearUsers(Worklist, Visited, ToForget);
8479 forgetMemoizedResults(ToForget);
8491 struct InvalidationRootCollector {
8495 InvalidationRootCollector(
Loop *L) : L(L) {}
8497 bool follow(
const SCEV *S) {
8498 if (
auto *SU = dyn_cast<SCEVUnknown>(S)) {
8499 if (
auto *
I = dyn_cast<Instruction>(SU->getValue()))
8502 }
else if (
auto *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
8503 if (L->contains(AddRec->
getLoop()))
8508 bool isDone()
const {
return false; }
8511 InvalidationRootCollector
C(L);
8513 forgetMemoizedResults(
C.Roots);
8526 BlockDispositions.clear();
8527 LoopDispositions.clear();
8544 while (!Worklist.
empty()) {
8546 bool LoopDispoRemoved = LoopDispositions.erase(Curr);
8547 bool BlockDispoRemoved = BlockDispositions.erase(Curr);
8548 if (!LoopDispoRemoved && !BlockDispoRemoved)
8550 auto Users = SCEVUsers.find(Curr);
8551 if (
Users != SCEVUsers.end())
8568 if (!isComplete() || ExitNotTaken.empty())
8579 for (
const auto &ENT : ExitNotTaken) {
8580 const SCEV *BECount = ENT.ExactNotTaken;
8583 "We should only have known counts for exiting blocks that dominate "
8589 for (
const auto *
P : ENT.Predicates)
8592 assert((Preds || ENT.hasAlwaysTruePredicate()) &&
8593 "Predicate should be always true!");
8604ScalarEvolution::BackedgeTakenInfo::getExact(
const BasicBlock *ExitingBlock,
8606 for (
const auto &ENT : ExitNotTaken)
8607 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
8608 return ENT.ExactNotTaken;
8613const SCEV *ScalarEvolution::BackedgeTakenInfo::getConstantMax(
8615 for (
const auto &ENT : ExitNotTaken)
8616 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
8617 return ENT.ConstantMaxNotTaken;
8622const SCEV *ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
8624 for (
const auto &ENT : ExitNotTaken)
8625 if (ENT.ExitingBlock == ExitingBlock && ENT.hasAlwaysTruePredicate())
8626 return ENT.SymbolicMaxNotTaken;
8633ScalarEvolution::BackedgeTakenInfo::getConstantMax(
ScalarEvolution *SE)
const {
8634 auto PredicateNotAlwaysTrue = [](
const ExitNotTakenInfo &ENT) {
8635 return !ENT.hasAlwaysTruePredicate();
8638 if (!getConstantMax() ||
any_of(ExitNotTaken, PredicateNotAlwaysTrue))
8641 assert((isa<SCEVCouldNotCompute>(getConstantMax()) ||
8642 isa<SCEVConstant>(getConstantMax())) &&
8643 "No point in having a non-constant max backedge taken count!");
8644 return getConstantMax();
8648ScalarEvolution::BackedgeTakenInfo::getSymbolicMax(
const Loop *L,
8651 SymbolicMax = SE->computeSymbolicMaxBackedgeTakenCount(L);
8655bool ScalarEvolution::BackedgeTakenInfo::isConstantMaxOrZero(
8657 auto PredicateNotAlwaysTrue = [](
const ExitNotTakenInfo &ENT) {
8658 return !ENT.hasAlwaysTruePredicate();
8660 return MaxOrZero && !
any_of(ExitNotTaken, PredicateNotAlwaysTrue);
8667 const SCEV *E,
const SCEV *ConstantMaxNotTaken,
8668 const SCEV *SymbolicMaxNotTaken,
bool MaxOrZero,
8670 : ExactNotTaken(E), ConstantMaxNotTaken(ConstantMaxNotTaken),
8671 SymbolicMaxNotTaken(SymbolicMaxNotTaken), MaxOrZero(MaxOrZero) {
8682 "Exact is not allowed to be less precise than Constant Max");
8685 "Exact is not allowed to be less precise than Symbolic Max");
8688 "Symbolic Max is not allowed to be less precise than Constant Max");
8691 "No point in having a non-constant max backedge taken count!");
8692 for (
const auto *PredSet : PredSetList)
8693 for (
const auto *
P : *PredSet)
8696 "Backedge count should be int");
8699 "Max backedge count should be int");
8703 const SCEV *E,
const SCEV *ConstantMaxNotTaken,
8704 const SCEV *SymbolicMaxNotTaken,
bool MaxOrZero,
8706 :
ExitLimit(E, ConstantMaxNotTaken, SymbolicMaxNotTaken, MaxOrZero,
8711ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
8713 bool IsComplete,
const SCEV *ConstantMax,
bool MaxOrZero)
8714 : ConstantMax(ConstantMax), IsComplete(IsComplete), MaxOrZero(MaxOrZero) {
8715 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8717 ExitNotTaken.reserve(ExitCounts.
size());
8718 std::transform(ExitCounts.
begin(), ExitCounts.
end(),
8719 std::back_inserter(ExitNotTaken),
8720 [&](
const EdgeExitInfo &EEI) {
8721 BasicBlock *ExitBB = EEI.first;
8722 const ExitLimit &EL = EEI.second;
8723 return ExitNotTakenInfo(ExitBB, EL.ExactNotTaken,
8724 EL.ConstantMaxNotTaken, EL.SymbolicMaxNotTaken,
8727 assert((isa<SCEVCouldNotCompute>(ConstantMax) ||
8728 isa<SCEVConstant>(ConstantMax)) &&
8729 "No point in having a non-constant max backedge taken count!");
8733ScalarEvolution::BackedgeTakenInfo
8734ScalarEvolution::computeBackedgeTakenCount(
const Loop *L,
8735 bool AllowPredicates) {
8737 L->getExitingBlocks(ExitingBlocks);
8739 using EdgeExitInfo = ScalarEvolution::BackedgeTakenInfo::EdgeExitInfo;
8742 bool CouldComputeBECount =
true;
8744 const SCEV *MustExitMaxBECount =
nullptr;
8745 const SCEV *MayExitMaxBECount =
nullptr;
8746 bool MustExitMaxOrZero =
false;
8751 for (
unsigned i = 0, e = ExitingBlocks.
size(); i != e; ++i) {
8757 if (
auto *BI = dyn_cast<BranchInst>(ExitBB->
getTerminator()))
8758 if (
auto *CI = dyn_cast<ConstantInt>(BI->
getCondition())) {
8760 if (ExitIfTrue == CI->
isZero())
8764 ExitLimit EL = computeExitLimit(L, ExitBB, AllowPredicates);
8766 assert((AllowPredicates || EL.Predicates.empty()) &&
8767 "Predicated exit limit when predicates are not allowed!");
8772 ++NumExitCountsComputed;
8776 CouldComputeBECount =
false;
8783 "Exact is known but symbolic isn't?");
8784 ++NumExitCountsNotComputed;
8800 if (!MustExitMaxBECount) {
8801 MustExitMaxBECount = EL.ConstantMaxNotTaken;
8802 MustExitMaxOrZero = EL.MaxOrZero;
8805 EL.ConstantMaxNotTaken);
8809 MayExitMaxBECount = EL.ConstantMaxNotTaken;
8812 EL.ConstantMaxNotTaken);
8816 const SCEV *MaxBECount = MustExitMaxBECount ? MustExitMaxBECount :
8820 bool MaxOrZero = (MustExitMaxOrZero && ExitingBlocks.
size() == 1);
8826 for (
const auto &Pair : ExitCounts) {
8827 if (!isa<SCEVConstant>(Pair.second.ExactNotTaken))
8828 BECountUsers[Pair.second.ExactNotTaken].insert({L, AllowPredicates});
8829 if (!isa<SCEVConstant>(Pair.second.SymbolicMaxNotTaken))
8830 BECountUsers[Pair.second.SymbolicMaxNotTaken].insert(
8831 {L, AllowPredicates});
8833 return BackedgeTakenInfo(std::move(ExitCounts), CouldComputeBECount,
8834 MaxBECount, MaxOrZero);
8838ScalarEvolution::computeExitLimit(
const Loop *L,
BasicBlock *ExitingBlock,
8839 bool AllowPredicates) {
8840 assert(
L->contains(ExitingBlock) &&
"Exit count for non-loop block?");
8844 if (!Latch || !DT.
dominates(ExitingBlock, Latch))
8847 bool IsOnlyExit = (
L->getExitingBlock() !=
nullptr);
8849 if (
BranchInst *BI = dyn_cast<BranchInst>(Term)) {
8853 "It should have one successor in loop and one exit block!");
8860 if (
SwitchInst *SI = dyn_cast<SwitchInst>(Term)) {
8864 if (!
L->contains(SBB)) {
8869 assert(Exit &&
"Exiting block must have at least one exit");
8870 return computeExitLimitFromSingleExitSwitch(
8879 const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
8880 bool AllowPredicates) {
8881 ScalarEvolution::ExitLimitCacheTy Cache(L, ExitIfTrue, AllowPredicates);
8882 return computeExitLimitFromCondCached(Cache, L, ExitCond, ExitIfTrue,
8883 ControlsOnlyExit, AllowPredicates);
8886std::optional<ScalarEvolution::ExitLimit>
8887ScalarEvolution::ExitLimitCache::find(
const Loop *L,
Value *ExitCond,
8888 bool ExitIfTrue,
bool ControlsOnlyExit,
8889 bool AllowPredicates) {
8891 (void)this->ExitIfTrue;
8892 (void)this->AllowPredicates;
8894 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
8895 this->AllowPredicates == AllowPredicates &&
8896 "Variance in assumed invariant key components!");
8897 auto Itr = TripCountMap.find({ExitCond, ControlsOnlyExit});
8898 if (Itr == TripCountMap.end())
8899 return std::nullopt;
8903void ScalarEvolution::ExitLimitCache::insert(
const Loop *L,
Value *ExitCond,
8905 bool ControlsOnlyExit,
8906 bool AllowPredicates,
8907 const ExitLimit &EL) {
8908 assert(this->L == L && this->ExitIfTrue == ExitIfTrue &&
8909 this->AllowPredicates == AllowPredicates &&
8910 "Variance in assumed invariant key components!");
8912 auto InsertResult = TripCountMap.insert({{ExitCond, ControlsOnlyExit}, EL});
8913 assert(InsertResult.second &&
"Expected successful insertion!");
8919 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
8920 bool ControlsOnlyExit,
bool AllowPredicates) {
8922 if (
auto MaybeEL = Cache.find(L, ExitCond, ExitIfTrue, ControlsOnlyExit,
8926 ExitLimit EL = computeExitLimitFromCondImpl(
8927 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates);
8928 Cache.insert(L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates, EL);
8933 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
8934 bool ControlsOnlyExit,
bool AllowPredicates) {
8936 if (
auto LimitFromBinOp = computeExitLimitFromCondFromBinOp(
8937 Cache, L, ExitCond, ExitIfTrue, ControlsOnlyExit, AllowPredicates))
8938 return *LimitFromBinOp;
8942 if (
ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond)) {
8944 computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue, ControlsOnlyExit);
8945 if (EL.hasFullInfo() || !AllowPredicates)
8949 return computeExitLimitFromICmp(L, ExitCondICmp, ExitIfTrue,
8958 if (
ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
8984 auto EL = computeExitLimitFromICmp(L, Pred, LHS,
getConstant(NewRHSC),
8985 ControlsOnlyExit, AllowPredicates);
8986 if (EL.hasAnyInfo())
8991 return computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
8994std::optional<ScalarEvolution::ExitLimit>
8995ScalarEvolution::computeExitLimitFromCondFromBinOp(
8996 ExitLimitCacheTy &Cache,
const Loop *L,
Value *ExitCond,
bool ExitIfTrue,
8997 bool ControlsOnlyExit,
bool AllowPredicates) {
9006 return std::nullopt;
9011 bool EitherMayExit = IsAnd ^ ExitIfTrue;
9012 ExitLimit EL0 = computeExitLimitFromCondCached(
9013 Cache, L, Op0, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9015 ExitLimit EL1 = computeExitLimitFromCondCached(
9016 Cache, L, Op1, ExitIfTrue, ControlsOnlyExit && !EitherMayExit,
9020 const Constant *NeutralElement = ConstantInt::get(ExitCond->
getType(), IsAnd);
9021 if (isa<ConstantInt>(Op1))
9022 return Op1 == NeutralElement ? EL0 : EL1;
9023 if (isa<ConstantInt>(Op0))
9024 return Op0 == NeutralElement ? EL1 : EL0;
9029 if (EitherMayExit) {
9030 bool UseSequentialUMin = !isa<BinaryOperator>(ExitCond);
9039 ConstantMaxBECount = EL1.ConstantMaxNotTaken;
9041 ConstantMaxBECount = EL0.ConstantMaxNotTaken;
9044 EL1.ConstantMaxNotTaken);
9046 SymbolicMaxBECount = EL1.SymbolicMaxNotTaken;
9048 SymbolicMaxBECount = EL0.SymbolicMaxNotTaken;
9051 EL0.SymbolicMaxNotTaken, EL1.SymbolicMaxNotTaken, UseSequentialUMin);
9055 if (EL0.ExactNotTaken == EL1.ExactNotTaken)
9056 BECount = EL0.ExactNotTaken;
9065 if (isa<SCEVCouldNotCompute>(ConstantMaxBECount) &&
9066 !isa<SCEVCouldNotCompute>(BECount))
9068 if (isa<SCEVCouldNotCompute>(SymbolicMaxBECount))
9069 SymbolicMaxBECount =
9070 isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
9071 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
9072 { &EL0.Predicates, &EL1.Predicates });
9076 const Loop *L,
ICmpInst *ExitCond,
bool ExitIfTrue,
bool ControlsOnlyExit,
9077 bool AllowPredicates) {
9089 ExitLimit EL = computeExitLimitFromICmp(L, Pred, LHS, RHS, ControlsOnlyExit,
9091 if (EL.hasAnyInfo())
9094 auto *ExhaustiveCount =
9095 computeExitCountExhaustively(L, ExitCond, ExitIfTrue);
9097 if (!isa<SCEVCouldNotCompute>(ExhaustiveCount))
9098 return ExhaustiveCount;
9100 return computeShiftCompareExitLimit(ExitCond->
getOperand(0),
9105 bool ControlsOnlyExit,
bool AllowPredicates) {
9126 if (
const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
9127 if (
const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
9134 if (!isa<SCEVCouldNotCompute>(Ret))
return Ret;
9145 auto *InnerLHS =
LHS;
9146 if (
auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS))
9147 InnerLHS = ZExt->getOperand();
9148 if (
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(InnerLHS)) {
9151 StrideC && StrideC->getAPInt().isPowerOf2()) {
9166 if (isa<SCEVCouldNotCompute>(LHS))
9171 if (isa<SCEVCouldNotCompute>(RHS))
9174 ExitLimit EL = howFarToZero(
getMinusSCEV(LHS, RHS), L, ControlsOnlyExit,
9176 if (EL.hasAnyInfo())
9184 if (isa<SCEVCouldNotCompute>(LHS))
9189 if (isa<SCEVCouldNotCompute>(RHS))
9192 ExitLimit EL = howFarToNonZero(
getMinusSCEV(LHS, RHS), L);
9193 if (EL.hasAnyInfo())
return EL;
9208 ExitLimit EL = howManyLessThans(LHS, RHS, L, IsSigned, ControlsOnlyExit,
9210 if (EL.hasAnyInfo())
9226 ExitLimit EL = howManyGreaterThans(LHS, RHS, L, IsSigned, ControlsOnlyExit,
9228 if (EL.hasAnyInfo())
9240ScalarEvolution::computeExitLimitFromSingleExitSwitch(
const Loop *L,
9243 bool ControlsOnlyExit) {
9244 assert(!
L->contains(ExitingBlock) &&
"Not an exiting block!");
9247 if (
Switch->getDefaultDest() == ExitingBlock)
9251 "Default case must not exit the loop!");
9256 ExitLimit EL = howFarToZero(
getMinusSCEV(LHS, RHS), L, ControlsOnlyExit);
9257 if (EL.hasAnyInfo())
9268 assert(isa<SCEVConstant>(Val) &&
9269 "Evaluation of SCEV at constant didn't fold correctly?");
9270 return cast<SCEVConstant>(Val)->getValue();
9283 const BasicBlock *Predecessor =
L->getLoopPredecessor();
9289 auto MatchPositiveShift =
9292 using namespace PatternMatch;
9296 OutOpCode = Instruction::LShr;
9298 OutOpCode = Instruction::AShr;
9300 OutOpCode = Instruction::Shl;
9315 auto MatchShiftRecurrence =
9317 std::optional<Instruction::BinaryOps> PostShiftOpCode;
9332 if (MatchPositiveShift(LHS, V, OpC)) {
9333 PostShiftOpCode = OpC;
9338 PNOut = dyn_cast<PHINode>(LHS);
9339 if (!PNOut || PNOut->getParent() !=
L->getHeader())
9342 Value *BEValue = PNOut->getIncomingValueForBlock(Latch);
9348 MatchPositiveShift(BEValue, OpLHS, OpCodeOut) &&
9355 (!PostShiftOpCode || *PostShiftOpCode == OpCodeOut);
9360 if (!MatchShiftRecurrence(LHS, PN, OpCode))
9377 case Instruction::AShr: {
9383 auto *Ty = cast<IntegerType>(
RHS->
getType());
9385 StableValue = ConstantInt::get(Ty, 0);
9387 StableValue = ConstantInt::get(Ty, -1,
true);
9393 case Instruction::LShr:
9394 case Instruction::Shl:
9397 StableValue = ConstantInt::get(cast<IntegerType>(
RHS->
getType()), 0);
9404 "Otherwise cannot be an operand to a branch instruction");
9406 if (
Result->isZeroValue()) {
9408 const SCEV *UpperBound =
9419 if (isa<BinaryOperator>(
I) || isa<CmpInst>(
I) ||
9420 isa<SelectInst>(
I) || isa<CastInst>(
I) || isa<GetElementPtrInst>(
I) ||
9421 isa<LoadInst>(
I) || isa<ExtractValueInst>(
I))
9424 if (
const CallInst *CI = dyn_cast<CallInst>(
I))
9425 if (
const Function *F = CI->getCalledFunction())
9434 if (!L->contains(
I))
return false;
9436 if (isa<PHINode>(
I)) {
9439 return L->getHeader() ==
I->getParent();
9460 if (isa<Constant>(
Op))
continue;
9465 PHINode *
P = dyn_cast<PHINode>(OpInst);
9496 if (
PHINode *PN = dyn_cast<PHINode>(
I))
9513 if (
Constant *
C = dyn_cast<Constant>(V))
return C;
9515 if (!
I)
return nullptr;
9526 if (isa<PHINode>(
I))
return nullptr;
9528 std::vector<Constant*>
Operands(
I->getNumOperands());
9530 for (
unsigned i = 0, e =
I->getNumOperands(); i != e; ++i) {
9531 Instruction *Operand = dyn_cast<Instruction>(
I->getOperand(i));
9533 Operands[i] = dyn_cast<Constant>(
I->getOperand(i));
9539 if (!
C)
return nullptr;
9560 if (IncomingVal != CurrentVal) {
9563 IncomingVal = CurrentVal;
9575ScalarEvolution::getConstantEvolutionLoopExitValue(
PHINode *PN,
9578 auto I = ConstantEvolutionLoopExitValue.find(PN);
9579 if (
I != ConstantEvolutionLoopExitValue.end())
9583 return ConstantEvolutionLoopExitValue[PN] =
nullptr;
9585 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
9589 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9597 CurrentIterVals[&
PHI] = StartCST;
9599 if (!CurrentIterVals.
count(PN))
9600 return RetVal =
nullptr;
9606 "BEs is <= MaxBruteForceIterations which is an 'unsigned'!");
9609 unsigned IterationNum = 0;
9611 for (; ; ++IterationNum) {
9612 if (IterationNum == NumIterations)
9613 return RetVal = CurrentIterVals[PN];
9622 NextIterVals[PN] = NextPHI;
9624 bool StoppedEvolving = NextPHI == CurrentIterVals[PN];
9630 for (
const auto &
I : CurrentIterVals) {
9632 if (!
PHI ||
PHI == PN ||
PHI->getParent() != Header)
continue;
9637 for (
const auto &
I : PHIsToCompute) {
9641 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
9644 if (NextPHI !=
I.second)
9645 StoppedEvolving =
false;
9650 if (StoppedEvolving)
9651 return RetVal = CurrentIterVals[PN];
9653 CurrentIterVals.swap(NextIterVals);
9657const SCEV *ScalarEvolution::computeExitCountExhaustively(
const Loop *L,
9669 assert(PN->
getParent() == Header &&
"Can't evaluate PHI not in loop header!");
9672 assert(Latch &&
"Should follow from NumIncomingValues == 2!");
9676 CurrentIterVals[&
PHI] = StartCST;
9678 if (!CurrentIterVals.
count(PN))
9686 for (
unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
9687 auto *CondVal = dyn_cast_or_null<ConstantInt>(
9693 if (CondVal->getValue() ==
uint64_t(ExitWhen)) {
9694 ++NumBruteForceTripCountsComputed;
9705 for (
const auto &
I : CurrentIterVals) {
9707 if (!
PHI ||
PHI->getParent() != Header)
continue;
9712 if (NextPHI)
continue;
9714 Value *BEValue =
PHI->getIncomingValueForBlock(Latch);
9717 CurrentIterVals.swap(NextIterVals);
9728 for (
auto &LS : Values)
9730 return LS.second ? LS.second : V;
9735 const SCEV *
C = computeSCEVAtScope(V, L);
9736 for (
auto &LS :
reverse(ValuesAtScopes[V]))
9737 if (LS.first == L) {
9739 if (!isa<SCEVConstant>(
C))
9740 ValuesAtScopesUsers[
C].push_back({L, V});
9751 switch (V->getSCEVType()) {
9757 return cast<SCEVConstant>(V)->getValue();
9759 return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
9784 assert(!
C->getType()->isPointerTy() &&
9785 "Can only have one pointer, and it must be last");
9812ScalarEvolution::getWithOperands(
const SCEV *S,
9821 auto *AddRec = cast<SCEVAddRecExpr>(S);
9825 return getAddExpr(NewOps, cast<SCEVAddExpr>(S)->getNoWrapFlags());
9827 return getMulExpr(NewOps, cast<SCEVMulExpr>(S)->getNoWrapFlags());
9847const SCEV *ScalarEvolution::computeSCEVAtScope(
const SCEV *V,
const Loop *L) {
9848 switch (
V->getSCEVType()) {
9859 for (
unsigned i = 0, e = AddRec->
getNumOperands(); i != e; ++i) {
9870 for (++i; i !=
e; ++i)
9875 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
9914 for (
unsigned i = 0, e = Ops.
size(); i != e; ++i) {
9916 if (OpAtScope != Ops[i]) {
9924 for (++i; i !=
e; ++i) {
9929 return getWithOperands(V, NewOps);
9943 if (
PHINode *PN = dyn_cast<PHINode>(
I)) {
9944 const Loop *CurrLoop = this->LI[
I->getParent()];
9955 if (BackedgeTakenCount->
isZero()) {
9956 Value *InitValue =
nullptr;
9957 bool MultipleInitValues =
false;
9963 MultipleInitValues =
true;
9968 if (!MultipleInitValues && InitValue)
9973 if (!isa<SCEVCouldNotCompute>(BackedgeTakenCount) &&
9977 unsigned InLoopPred =
9983 if (
auto *BTCC = dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
9988 getConstantEvolutionLoopExitValue(PN, BTCC->getAPInt(), CurrLoop);
10004 bool MadeImprovement =
false;
10019 MadeImprovement |= OrigV != OpV;
10024 assert(
C->getType() ==
Op->getType() &&
"Type mismatch");
10029 if (!MadeImprovement)
10049const SCEV *ScalarEvolution::stripInjectiveFunctions(
const SCEV *S)
const {
10051 return stripInjectiveFunctions(ZExt->getOperand());
10053 return stripInjectiveFunctions(SExt->getOperand());
10069 assert(
A != 0 &&
"A must be non-zero.");
10091 APInt AD =
A.lshr(Mult2).trunc(BW - Mult2);
10111static std::optional<std::tuple<APInt, APInt, APInt, APInt, unsigned>>
10117 LLVM_DEBUG(
dbgs() << __func__ <<
": analyzing quadratic addrec: "
10118 << *AddRec <<
'\n');
10121 if (!LC || !MC || !
NC) {
10122 LLVM_DEBUG(
dbgs() << __func__ <<
": coefficients are not constant\n");
10123 return std::nullopt;
10129 assert(!
N.isZero() &&
"This is not a quadratic addrec");
10137 N =
N.sext(NewWidth);
10138 M = M.sext(NewWidth);
10139 L = L.sext(NewWidth);
10156 <<
"x + " <<
C <<
", coeff bw: " << NewWidth
10157 <<
", multiplied by " <<
T <<
'\n');
10166 std::optional<APInt>
Y) {
10168 unsigned W = std::max(
X->getBitWidth(),
Y->getBitWidth());
10171 return XW.
slt(YW) ? *
X : *
Y;
10174 return std::nullopt;
10175 return X ? *
X : *
Y;
10192 return std::nullopt;
10193 unsigned W =
X->getBitWidth();
10213static std::optional<APInt>
10219 return std::nullopt;
10222 LLVM_DEBUG(
dbgs() << __func__ <<
": solving for unsigned overflow\n");
10223 std::optional<APInt>
X =
10226 return std::nullopt;
10231 return std::nullopt;
10246static std::optional<APInt>
10250 "Starting value of addrec should be 0");
10251 LLVM_DEBUG(
dbgs() << __func__ <<
": solving boundary crossing for range "
10252 << Range <<
", addrec " << *AddRec <<
'\n');
10256 "Addrec's initial value should be in range");
10262 return std::nullopt;
10272 auto SolveForBoundary =
10273 [&](
APInt Bound) -> std::pair<std::optional<APInt>,
bool> {
10276 LLVM_DEBUG(
dbgs() <<
"SolveQuadraticAddRecRange: checking boundary "
10277 << Bound <<
" (before multiplying by " << M <<
")\n");
10280 std::optional<APInt> SO;
10283 "signed overflow\n");
10287 "unsigned overflow\n");
10288 std::optional<APInt> UO =
10291 auto LeavesRange = [&] (
const APInt &
X) {
10294 if (Range.contains(V0->
getValue()))
10299 if (Range.contains(V1->
getValue()))
10308 return {std::nullopt,
false};
10313 if (LeavesRange(*Min))
10314 return { Min,
true };
10315 std::optional<APInt> Max = Min == SO ? UO : SO;
10316 if (LeavesRange(*Max))
10317 return { Max,
true };
10320 return {std::nullopt,
true};
10325 APInt Lower = Range.getLower().sext(
A.getBitWidth()) - 1;
10326 APInt Upper = Range.getUpper().sext(
A.getBitWidth());
10327 auto SL = SolveForBoundary(
Lower);
10328 auto SU = SolveForBoundary(
Upper);
10331 if (!SL.second || !SU.second)
10332 return std::nullopt;
10377 bool ControlsOnlyExit,
10378 bool AllowPredicates) {
10389 if (
C->getValue()->isZero())
return C;
10394 dyn_cast<SCEVAddRecExpr>(stripInjectiveFunctions(V));
10396 if (!AddRec && AllowPredicates)
10402 if (!AddRec || AddRec->
getLoop() != L)
10413 return ExitLimit(R, R, R,
false, Predicates);
10443 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
10478 return ExitLimit(Distance,
getConstant(MaxBECount), Distance,
false,
10497 const SCEV *SymbolicMax =
10498 isa<SCEVCouldNotCompute>(
Exact) ? ConstantMax :
Exact;
10499 return ExitLimit(
Exact, ConstantMax, SymbolicMax,
false, Predicates);
10511 auto *S = isa<SCEVCouldNotCompute>(E) ?
M : E;
10512 return ExitLimit(E, M, S,
false, Predicates);
10516ScalarEvolution::howFarToNonZero(
const SCEV *V,
const Loop *L) {
10524 if (!
C->getValue()->isZero())
10534std::pair<const BasicBlock *, const BasicBlock *>
10535ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(
const BasicBlock *BB)
10547 return {
L->getLoopPredecessor(),
L->getHeader()};
10549 return {
nullptr,
nullptr};
10558 if (
A ==
B)
return true;
10564 return A->isIdenticalTo(
B) && (isa<BinaryOperator>(
A) || isa<GetElementPtrInst>(
A));
10571 if (
const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
10572 if (
const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
10573 if (ComputesEqualValues(AI, BI))
10582 if (!
Add ||
Add->getNumOperands() != 2)
10584 if (
auto *ME = dyn_cast<SCEVMulExpr>(
Add->getOperand(0));
10585 ME && ME->getNumOperands() == 2 && ME->getOperand(0)->isAllOnesValue()) {
10586 LHS =
Add->getOperand(1);
10587 RHS = ME->getOperand(1);
10590 if (
auto *ME = dyn_cast<SCEVMulExpr>(
Add->getOperand(1));
10591 ME && ME->getNumOperands() == 2 && ME->getOperand(0)->isAllOnesValue()) {
10592 LHS =
Add->getOperand(0);
10593 RHS = ME->getOperand(1);
10600 const SCEV *&LHS,
const SCEV *&RHS,
10602 bool Changed =
false;
10605 auto TrivialCase = [&](
bool TriviallyTrue) {
10619 return TrivialCase(
false);
10620 return TrivialCase(
true);
10643 const APInt &
RA = RC->getAPInt();
10645 bool SimplifiedByConstantRange =
false;
10650 return TrivialCase(
true);
10652 return TrivialCase(
false);
10661 Changed = SimplifiedByConstantRange =
true;
10665 if (!SimplifiedByConstantRange) {
10682 assert(!
RA.isMinValue() &&
"Should have been caught earlier!");
10688 assert(!
RA.isMaxValue() &&
"Should have been caught earlier!");
10694 assert(!
RA.isMinSignedValue() &&
"Should have been caught earlier!");
10700 assert(!
RA.isMaxSignedValue() &&
"Should have been caught earlier!");
10712 return TrivialCase(
true);
10714 return TrivialCase(
false);
10803 if (
const auto *SExt = dyn_cast<SCEVSignExtendExpr>(S))
10808std::pair<const SCEV *, const SCEV *>
10811 const SCEV *Start = SCEVInitRewriter::rewrite(S, L, *
this);
10813 return { Start, Start };
10815 const SCEV *
PostInc = SCEVPostIncRewriter::rewrite(S, L, *
this);
10821 const SCEV *LHS,
const SCEV *RHS) {
10824 getUsedLoops(
LHS, LoopsUsed);
10825 getUsedLoops(
RHS, LoopsUsed);
10827 if (LoopsUsed.
empty())
10832 for (
const auto *L1 : LoopsUsed)
10833 for (
const auto *L2 : LoopsUsed)
10835 DT.
dominates(L2->getHeader(), L1->getHeader())) &&
10836 "Domination relationship is not a linear order");
10866 SplitRHS.second) &&
10871 const SCEV *LHS,
const SCEV *RHS) {
10878 if (isKnownPredicateViaSplitting(Pred,
LHS,
RHS))
10882 return isKnownViaNonRecursiveReasoning(Pred,
LHS,
RHS);
10892 return std::nullopt;
10907 if (KnownWithoutContext)
10908 return KnownWithoutContext;
10916 return std::nullopt;
10922 const Loop *L =
LHS->getLoop();
10927std::optional<ScalarEvolution::MonotonicPredicateType>
10930 auto Result = getMonotonicPredicateTypeImpl(
LHS, Pred);
10936 auto ResultSwapped =
10939 assert(*ResultSwapped != *Result &&
10940 "monotonicity should flip as we flip the predicate");
10947std::optional<ScalarEvolution::MonotonicPredicateType>
10948ScalarEvolution::getMonotonicPredicateTypeImpl(
const SCEVAddRecExpr *LHS,
10962 return std::nullopt;
10966 "Should be greater or less!");
10970 if (!
LHS->hasNoUnsignedWrap())
10971 return std::nullopt;
10975 "Relational predicate is either signed or unsigned!");
10976 if (!
LHS->hasNoSignedWrap())
10977 return std::nullopt;
10979 const SCEV *Step =
LHS->getStepRecurrence(*
this);
10987 return std::nullopt;
10990std::optional<ScalarEvolution::LoopInvariantPredicate>
10998 return std::nullopt;
11005 if (!ArLHS || ArLHS->
getLoop() != L)
11006 return std::nullopt;
11009 if (!MonotonicType)
11010 return std::nullopt;
11036 return std::nullopt;
11073 return std::nullopt;
11076std::optional<ScalarEvolution::LoopInvariantPredicate>
11081 Pred,
LHS,
RHS, L, CtxI, MaxIter))
11083 if (
auto *
UMin = dyn_cast<SCEVUMinExpr>(MaxIter))
11089 for (
auto *
Op :
UMin->operands())
11093 return std::nullopt;
11096std::optional<ScalarEvolution::LoopInvariantPredicate>
11111 return std::nullopt;
11117 auto *AR = dyn_cast<SCEVAddRecExpr>(
LHS);
11118 if (!AR || AR->
getLoop() != L)
11119 return std::nullopt;
11123 return std::nullopt;
11129 if (Step != One && Step != MinusOne)
11130 return std::nullopt;
11136 return std::nullopt;
11142 return std::nullopt;
11150 if (Step == MinusOne)
11154 return std::nullopt;
11160bool ScalarEvolution::isKnownPredicateViaConstantRanges(
11170 return RangeLHS.
icmp(Pred, RangeRHS);
11181 if (CheckRanges(SL, SR))
11185 if (CheckRanges(UL, UR))
11194 return CheckRanges(SL, SR);
11199 return CheckRanges(UL, UR);
11209 auto MatchBinaryAddToConst = [
this](
const SCEV *
X,
const SCEV *
Y,
11212 const SCEV *XNonConstOp, *XConstOp;
11213 const SCEV *YNonConstOp, *YConstOp;
11217 if (!splitBinaryAdd(
X, XConstOp, XNonConstOp, XFlagsPresent)) {
11220 XFlagsPresent = ExpectedFlags;
11222 if (!isa<SCEVConstant>(XConstOp) ||
11223 (XFlagsPresent & ExpectedFlags) != ExpectedFlags)
11226 if (!splitBinaryAdd(
Y, YConstOp, YNonConstOp, YFlagsPresent)) {
11229 YFlagsPresent = ExpectedFlags;
11232 if (!isa<SCEVConstant>(YConstOp) ||
11233 (YFlagsPresent & ExpectedFlags) != ExpectedFlags)
11236 if (YNonConstOp != XNonConstOp)
11239 OutC1 = cast<SCEVConstant>(XConstOp)->getAPInt();
11240 OutC2 = cast<SCEVConstant>(YConstOp)->getAPInt();
11317bool ScalarEvolution::isImpliedViaGuard(
const BasicBlock *BB,
11319 const SCEV *LHS,
const SCEV *RHS) {
11328 return match(&
I, m_Intrinsic<Intrinsic::experimental_guard>(
11330 isImpliedCond(Pred, LHS, RHS, Condition,
false);
11340 const SCEV *LHS,
const SCEV *RHS) {
11349 "This cannot be done on broken IR!");
11352 if (isKnownViaNonRecursiveReasoning(Pred,
LHS,
RHS))
11361 if (LoopContinuePredicate && LoopContinuePredicate->
isConditional() &&
11362 isImpliedCond(Pred,
LHS,
RHS,
11364 LoopContinuePredicate->
getSuccessor(0) != L->getHeader()))
11369 if (WalkingBEDominatingConds)
11375 const auto &BETakenInfo = getBackedgeTakenInfo(L);
11376 const SCEV *LatchBECount = BETakenInfo.getExact(Latch,
this);
11383 const SCEV *LoopCounter =
11394 auto *CI = cast<CallInst>(AssumeVH);
11398 if (isImpliedCond(Pred,
LHS,
RHS, CI->getArgOperand(0),
false))
11402 if (isImpliedViaGuard(Latch, Pred,
LHS,
RHS))
11405 for (
DomTreeNode *DTN = DT[Latch], *HeaderDTN = DT[L->getHeader()];
11406 DTN != HeaderDTN; DTN = DTN->getIDom()) {
11407 assert(DTN &&
"should reach the loop header before reaching the root!");
11410 if (isImpliedViaGuard(BB, Pred,
LHS,
RHS))
11418 if (!ContinuePredicate || !ContinuePredicate->
isConditional())
11434 if (isImpliedCond(Pred,
LHS,
RHS, Condition,
11452 "This cannot be done on broken IR!");
11460 const bool ProvingStrictComparison = (Pred != NonStrictPredicate);
11461 bool ProvedNonStrictComparison =
false;
11462 bool ProvedNonEquality =
false;
11464 auto SplitAndProve =
11466 if (!ProvedNonStrictComparison)
11467 ProvedNonStrictComparison = Fn(NonStrictPredicate);
11468 if (!ProvedNonEquality)
11470 if (ProvedNonStrictComparison && ProvedNonEquality)
11475 if (ProvingStrictComparison) {
11477 return isKnownViaNonRecursiveReasoning(
P,
LHS,
RHS);
11479 if (SplitAndProve(ProofFn))
11484 auto ProveViaCond = [&](
const Value *Condition,
bool Inverse) {
11486 if (isImpliedCond(Pred,
LHS,
RHS, Condition,
Inverse, CtxI))
11488 if (ProvingStrictComparison) {
11492 if (SplitAndProve(ProofFn))
11503 if (ContainingLoop && ContainingLoop->
getHeader() == BB)
11507 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(PredBB, BB);
11508 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
11510 dyn_cast<BranchInst>(Pair.first->getTerminator());
11523 auto *CI = cast<CallInst>(AssumeVH);
11527 if (ProveViaCond(CI->getArgOperand(0),
false))
11535 for (
const auto *GU : GuardDecl->users())
11536 if (
const auto *Guard = dyn_cast<IntrinsicInst>(GU))
11538 if (ProveViaCond(Guard->getArgOperand(0),
false))
11554 "LHS is not available at Loop Entry");
11556 "RHS is not available at Loop Entry");
11558 if (isKnownViaNonRecursiveReasoning(Pred,
LHS,
RHS))
11569 if (FoundCondValue ==
11573 if (!PendingLoopPredicates.insert(FoundCondValue).second)
11577 make_scope_exit([&]() { PendingLoopPredicates.erase(FoundCondValue); });
11580 const Value *Op0, *Op1;
11583 return isImpliedCond(Pred, LHS, RHS, Op0,
Inverse, CtxI) ||
11584 isImpliedCond(Pred, LHS, RHS, Op1,
Inverse, CtxI);
11587 return isImpliedCond(Pred, LHS, RHS, Op0,
Inverse, CtxI) ||
11588 isImpliedCond(Pred, LHS, RHS, Op1,
Inverse, CtxI);
11591 const ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
11592 if (!ICI)
return false;
11605 return isImpliedCond(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS, CtxI);
11611 const SCEV *FoundLHS,
const SCEV *FoundRHS,
11622 auto *WideType = FoundLHS->
getType();
11632 if (isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, TruncFoundLHS,
11633 TruncFoundRHS, CtxI))
11659 return isImpliedCondBalancedTypes(Pred, LHS, RHS, FoundPred, FoundLHS,
11663bool ScalarEvolution::isImpliedCondBalancedTypes(
11669 "Types should be balanced!");
11676 if (FoundLHS == FoundRHS)
11680 if (LHS == FoundRHS || RHS == FoundLHS) {
11681 if (isa<SCEVConstant>(RHS)) {
11691 if (FoundPred == Pred)
11692 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI);
11706 if (!isa<SCEVConstant>(RHS) && !isa<SCEVAddRecExpr>(LHS))
11707 return isImpliedCondOperands(FoundPred, RHS, LHS, FoundLHS, FoundRHS,
11709 if (!isa<SCEVConstant>(FoundRHS) && !isa<SCEVAddRecExpr>(FoundLHS))
11710 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS, CtxI);
11717 FoundLHS, FoundRHS, CtxI))
11722 isImpliedCondOperands(Pred, LHS, RHS,
getNotSCEV(FoundLHS),
11731 assert(P1 != P2 &&
"Handled earlier!");
11735 if (IsSignFlippedPredicate(Pred, FoundPred)) {
11740 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI);
11744 const SCEV *CanonicalLHS =
LHS, *CanonicalRHS =
RHS,
11745 *CanonicalFoundLHS = FoundLHS, *CanonicalFoundRHS = FoundRHS;
11750 std::swap(CanonicalFoundLHS, CanonicalFoundRHS);
11761 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
11762 CanonicalRHS, CanonicalFoundLHS,
11763 CanonicalFoundRHS);
11768 return isImpliedCondOperands(CanonicalFoundPred, CanonicalLHS,
11769 CanonicalRHS, CanonicalFoundLHS,
11770 CanonicalFoundRHS);
11775 (isa<SCEVConstant>(FoundLHS) || isa<SCEVConstant>(FoundRHS))) {
11778 const SCEV *
V =
nullptr;
11780 if (isa<SCEVConstant>(FoundLHS)) {
11781 C = cast<SCEVConstant>(FoundLHS);
11784 C = cast<SCEVConstant>(FoundRHS);
11796 if (Min ==
C->getAPInt()) {
11801 APInt SharperMin = Min + 1;
11808 if (isImpliedCondOperands(Pred, LHS, RHS, V,
getConstant(SharperMin),
11824 if (isImpliedCondOperands(Pred, LHS, RHS, V,
getConstant(Min), CtxI))
11853 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS, CtxI))
11857 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS, CtxI))
11860 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, FoundPred, FoundLHS, FoundRHS))
11867bool ScalarEvolution::splitBinaryAdd(
const SCEV *Expr,
11870 const auto *AE = dyn_cast<SCEVAddExpr>(Expr);
11871 if (!AE || AE->getNumOperands() != 2)
11874 L = AE->getOperand(0);
11875 R = AE->getOperand(1);
11876 Flags = AE->getNoWrapFlags();
11880std::optional<APInt>
11889 if (isa<SCEVAddRecExpr>(
Less) && isa<SCEVAddRecExpr>(More)) {
11890 const auto *LAR = cast<SCEVAddRecExpr>(
Less);
11891 const auto *MAR = cast<SCEVAddRecExpr>(More);
11893 if (LAR->getLoop() != MAR->getLoop())
11894 return std::nullopt;
11898 if (!LAR->isAffine() || !MAR->isAffine())
11899 return std::nullopt;
11901 if (LAR->getStepRecurrence(*
this) != MAR->getStepRecurrence(*
this))
11902 return std::nullopt;
11904 Less = LAR->getStart();
11905 More = MAR->getStart();
11910 if (isa<SCEVConstant>(
Less) && isa<SCEVConstant>(More)) {
11911 const auto &M = cast<SCEVConstant>(More)->getAPInt();
11912 const auto &L = cast<SCEVConstant>(
Less)->getAPInt();
11917 const SCEV *LLess =
nullptr, *RLess =
nullptr;
11918 const SCEV *LMore =
nullptr, *RMore =
nullptr;
11921 if (splitBinaryAdd(
Less, LLess, RLess, Flags))
11922 if ((C1 = dyn_cast<SCEVConstant>(LLess)))
11927 if (splitBinaryAdd(More, LMore, RMore, Flags))
11928 if ((C2 = dyn_cast<SCEVConstant>(LMore)))
11930 return C2->getAPInt();
11933 if (C1 && C2 && RLess == RMore)
11934 return C2->getAPInt() - C1->
getAPInt();
11936 return std::nullopt;
11939bool ScalarEvolution::isImpliedCondOperandsViaAddRecStart(
11959 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(FoundLHS)) {
11963 if (!L->contains(ContextBB) || !DT.
dominates(ContextBB, L->getLoopLatch()))
11967 return isImpliedCondOperands(Pred, LHS, RHS, AR->
getStart(), FoundRHS);
11970 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(FoundRHS)) {
11974 if (!L->contains(ContextBB) || !DT.
dominates(ContextBB, L->getLoopLatch()))
11978 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, AR->
getStart());
11984bool ScalarEvolution::isImpliedCondOperandsViaNoOverflow(
11986 const SCEV *FoundLHS,
const SCEV *FoundRHS) {
11990 const auto *AddRecLHS = dyn_cast<SCEVAddRecExpr>(LHS);
11994 const auto *AddRecFoundLHS = dyn_cast<SCEVAddRecExpr>(FoundLHS);
11995 if (!AddRecFoundLHS)
12002 const Loop *
L = AddRecFoundLHS->getLoop();
12003 if (L != AddRecLHS->getLoop())
12040 if (!LDiff || !RDiff || *LDiff != *RDiff)
12043 if (LDiff->isMinValue())
12046 APInt FoundRHSLimit;
12049 FoundRHSLimit = -(*RDiff);
12063 const SCEV *FoundLHS,
12064 const SCEV *FoundRHS,
unsigned Depth) {
12065 const PHINode *LPhi =
nullptr, *RPhi =
nullptr;
12069 bool Erased = PendingMerges.erase(LPhi);
12070 assert(Erased &&
"Failed to erase LPhi!");
12074 bool Erased = PendingMerges.erase(RPhi);
12075 assert(Erased &&
"Failed to erase RPhi!");
12081 if (
const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS))
12082 if (
auto *Phi = dyn_cast<PHINode>(LU->getValue())) {
12083 if (!PendingMerges.insert(Phi).second)
12087 if (
const SCEVUnknown *RU = dyn_cast<SCEVUnknown>(RHS))
12088 if (
auto *Phi = dyn_cast<PHINode>(RU->getValue())) {
12097 if (!PendingMerges.insert(Phi).second)
12103 if (!LPhi && !RPhi)
12114 assert(LPhi &&
"LPhi should definitely be a SCEVUnknown Phi!");
12118 auto ProvedEasily = [&](
const SCEV *
S1,
const SCEV *S2) {
12119 return isKnownViaNonRecursiveReasoning(Pred,
S1, S2) ||
12120 isImpliedCondOperandsViaRanges(Pred,
S1, S2, Pred, FoundLHS, FoundRHS) ||
12121 isImpliedViaOperations(Pred,
S1, S2, FoundLHS, FoundRHS,
Depth);
12124 if (RPhi && RPhi->getParent() == LBB) {
12131 const SCEV *
R =
getSCEV(RPhi->getIncomingValueForBlock(IncBB));
12132 if (!ProvedEasily(L, R))
12143 auto *RLoop = RAR->
getLoop();
12144 auto *Predecessor = RLoop->getLoopPredecessor();
12145 assert(Predecessor &&
"Loop with AddRec with no predecessor?");
12147 if (!ProvedEasily(L1, RAR->
getStart()))
12149 auto *Latch = RLoop->getLoopLatch();
12150 assert(Latch &&
"Loop with AddRec with no latch?");
12168 if (!ProvedEasily(L, RHS))
12178 const SCEV *FoundLHS,
12179 const SCEV *FoundRHS) {
12182 if (RHS == FoundRHS) {
12187 if (LHS != FoundLHS)
12190 auto *SUFoundRHS = dyn_cast<SCEVUnknown>(FoundRHS);
12194 Value *Shiftee, *ShiftValue;
12196 using namespace PatternMatch;
12197 if (
match(SUFoundRHS->getValue(),
12199 auto *ShifteeS =
getSCEV(Shiftee);
12219 const SCEV *FoundLHS,
12220 const SCEV *FoundRHS,
12222 if (isImpliedCondOperandsViaRanges(Pred, LHS, RHS, Pred, FoundLHS, FoundRHS))
12225 if (isImpliedCondOperandsViaNoOverflow(Pred, LHS, RHS, FoundLHS, FoundRHS))
12228 if (isImpliedCondOperandsViaShift(Pred, LHS, RHS, FoundLHS, FoundRHS))
12231 if (isImpliedCondOperandsViaAddRecStart(Pred, LHS, RHS, FoundLHS, FoundRHS,
12235 return isImpliedCondOperandsHelper(Pred, LHS, RHS,
12236 FoundLHS, FoundRHS);
12240template <
typename MinMaxExprType>
12242 const SCEV *Candidate) {
12243 const MinMaxExprType *MinMaxExpr = dyn_cast<MinMaxExprType>(MaybeMinMaxExpr);
12247 return is_contained(MinMaxExpr->operands(), Candidate);
12252 const SCEV *LHS,
const SCEV *RHS) {
12286 const SCEV *LHS,
const SCEV *RHS) {
12297 IsMinMaxConsistingOf<SCEVSMinExpr>(
LHS,
RHS) ||
12299 IsMinMaxConsistingOf<SCEVSMaxExpr>(
RHS,
LHS);
12308 IsMinMaxConsistingOf<SCEVUMinExpr>(
LHS,
RHS) ||
12310 IsMinMaxConsistingOf<SCEVUMaxExpr>(
RHS,
LHS);
12318 const SCEV *FoundLHS,
12319 const SCEV *FoundRHS,
12323 "LHS and RHS have different sizes?");
12326 "FoundLHS and FoundRHS have different sizes?");
12358 auto GetOpFromSExt = [&](
const SCEV *S) {
12359 if (
auto *Ext = dyn_cast<SCEVSignExtendExpr>(S))
12360 return Ext->getOperand();
12367 auto *OrigLHS =
LHS;
12368 auto *OrigFoundLHS = FoundLHS;
12369 LHS = GetOpFromSExt(LHS);
12370 FoundLHS = GetOpFromSExt(FoundLHS);
12373 auto IsSGTViaContext = [&](
const SCEV *
S1,
const SCEV *S2) {
12376 FoundRHS,
Depth + 1);
12379 if (
auto *LHSAddExpr = dyn_cast<SCEVAddExpr>(LHS)) {
12389 if (!LHSAddExpr->hasNoSignedWrap())
12392 auto *LL = LHSAddExpr->getOperand(0);
12393 auto *LR = LHSAddExpr->getOperand(1);
12397 auto IsSumGreaterThanRHS = [&](
const SCEV *
S1,
const SCEV *S2) {
12398 return IsSGTViaContext(
S1, MinusOne) && IsSGTViaContext(S2, RHS);
12403 if (IsSumGreaterThanRHS(LL, LR) || IsSumGreaterThanRHS(LR, LL))
12405 }
else if (
auto *LHSUnknownExpr = dyn_cast<SCEVUnknown>(LHS)) {
12420 if (!isa<ConstantInt>(LR))
12423 auto *Denominator = cast<SCEVConstant>(
getSCEV(LR));
12428 if (!Numerator || Numerator->getType() != FoundLHS->
getType())
12436 auto *DTy = Denominator->getType();
12437 auto *FRHSTy = FoundRHS->
getType();
12438 if (DTy->isPointerTy() != FRHSTy->isPointerTy())
12457 IsSGTViaContext(FoundRHSExt, DenomMinusTwo))
12468 auto *NegDenomMinusOne =
getMinusSCEV(MinusOne, DenominatorExt);
12470 IsSGTViaContext(FoundRHSExt, NegDenomMinusOne))
12478 if (isImpliedViaMerge(Pred, OrigLHS, RHS, OrigFoundLHS, FoundRHS,
Depth + 1))
12485 const SCEV *LHS,
const SCEV *RHS) {
12518 const SCEV *LHS,
const SCEV *RHS) {
12520 isKnownPredicateViaConstantRanges(Pred, LHS, RHS) ||
12523 isKnownPredicateViaNoOverflow(Pred, LHS, RHS);
12529 const SCEV *FoundLHS,
12530 const SCEV *FoundRHS) {
12565 if (isImpliedViaOperations(Pred, LHS, RHS, FoundLHS, FoundRHS))
12575 const SCEV *FoundLHS,
12576 const SCEV *FoundRHS) {
12577 if (!isa<SCEVConstant>(RHS) || !isa<SCEVConstant>(FoundRHS))
12586 const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
12598 const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
12601 return LHSRange.
icmp(Pred, ConstRHS);
12604bool ScalarEvolution::canIVOverflowOnLT(
const SCEV *RHS,
const SCEV *Stride,
12617 return (std::move(MaxValue) - MaxStrideMinusOne).slt(MaxRHS);
12625 return (std::move(MaxValue) - MaxStrideMinusOne).ult(MaxRHS);
12628bool ScalarEvolution::canIVOverflowOnGT(
const SCEV *RHS,
const SCEV *Stride,
12640 return (std::move(MinValue) + MaxStrideMinusOne).sgt(MinRHS);
12648 return (std::move(MinValue) + MaxStrideMinusOne).ugt(MinRHS);
12660const SCEV *ScalarEvolution::computeMaxBECountForLT(
const SCEV *Start,
12661 const SCEV *Stride,
12688 : APIntOps::umax(One, MinStride);
12692 APInt Limit = MaxValue - (StrideForMaxBECount - 1);
12703 : APIntOps::umax(MaxEnd, MinStart);
12710ScalarEvolution::howManyLessThans(
const SCEV *LHS,
const SCEV *RHS,
12711 const Loop *L,
bool IsSigned,
12712 bool ControlsOnlyExit,
bool AllowPredicates) {
12716 bool PredicatedIV =
false;
12737 if (!StrideC || !StrideC->getAPInt().isPowerOf2())
12747 if (
auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(LHS)) {
12748 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ZExt->getOperand());
12750 auto canProveNUW = [&]() {
12753 if (!ControlsOnlyExit)
12774 Limit = Limit.
zext(OuterBitWidth);
12786 Type *Ty = ZExt->getType();
12788 getExtendAddRecStart<SCEVZeroExtendExpr>(AR, Ty,
this, 0),
12790 IV = dyn_cast<SCEVAddRecExpr>(S);
12797 if (!
IV && AllowPredicates) {
12802 PredicatedIV =
true;
12806 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
12820 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
12823 const SCEV *Stride =
IV->getStepRecurrence(*
this);
12828 if (!PositiveStride) {
12880 auto wouldZeroStrideBeUB = [&]() {
12892 if (!wouldZeroStrideBeUB()) {
12896 }
else if (!Stride->
isOne() && !NoWrap) {
12897 auto isUBOnWrap = [&]() {
12903 return canAssumeNoSelfWrap(
IV);
12910 if (canIVOverflowOnLT(RHS, Stride, IsSigned) && !isUBOnWrap())
12923 const SCEV *Start =
IV->getStart();
12929 const SCEV *OrigStart = Start;
12931 if (Start->getType()->isPointerTy()) {
12933 if (isa<SCEVCouldNotCompute>(Start))
12938 if (isa<SCEVCouldNotCompute>(RHS))
12948 const SCEV *MaxBECount = computeMaxBECountForLT(
12951 MaxBECount,
false , Predicates);
12958 const SCEV *BECount =
nullptr;
12959 auto *OrigStartMinusStride =
getMinusSCEV(OrigStart, Stride);
12985 const SCEV *Numerator =
12990 const SCEV *BECountIfBackedgeTaken =
nullptr;
12992 auto canProveRHSGreaterThanEqualStart = [&]() {
13019 if (canProveRHSGreaterThanEqualStart()) {
13049 bool MayAddOverflow = [&] {
13050 if (
auto *StrideC = dyn_cast<SCEVConstant>(Stride)) {
13051 if (StrideC->getAPInt().isPowerOf2()) {
13096 if (Start == Stride || Start ==
getMinusSCEV(Stride, One)) {
13108 if (!MayAddOverflow) {
13118 const SCEV *ConstantMaxBECount;
13119 bool MaxOrZero =
false;
13120 if (isa<SCEVConstant>(BECount)) {
13121 ConstantMaxBECount = BECount;
13122 }
else if (BECountIfBackedgeTaken &&
13123 isa<SCEVConstant>(BECountIfBackedgeTaken)) {
13127 ConstantMaxBECount = BECountIfBackedgeTaken;
13130 ConstantMaxBECount = computeMaxBECountForLT(
13134 if (isa<SCEVCouldNotCompute>(ConstantMaxBECount) &&
13135 !isa<SCEVCouldNotCompute>(BECount))
13138 const SCEV *SymbolicMaxBECount =
13139 isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
13140 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount, MaxOrZero,
13145 const SCEV *LHS,
const SCEV *RHS,
const Loop *L,
bool IsSigned,
13146 bool ControlsOnlyExit,
bool AllowPredicates) {
13153 if (!
IV && AllowPredicates)
13160 if (!
IV ||
IV->getLoop() != L || !
IV->isAffine())
13164 bool NoWrap = ControlsOnlyExit &&
IV->getNoWrapFlags(WrapType);
13177 if (!Stride->
isOne() && !NoWrap)
13178 if (canIVOverflowOnGT(RHS, Stride, IsSigned))
13181 const SCEV *Start =
IV->getStart();
13193 if (Start->getType()->isPointerTy()) {
13195 if (isa<SCEVCouldNotCompute>(Start))
13198 if (
End->getType()->isPointerTy()) {
13200 if (isa<SCEVCouldNotCompute>(
End))
13228 const SCEV *ConstantMaxBECount =
13229 isa<SCEVConstant>(BECount)
13234 if (isa<SCEVCouldNotCompute>(ConstantMaxBECount))
13235 ConstantMaxBECount = BECount;
13236 const SCEV *SymbolicMaxBECount =
13237 isa<SCEVCouldNotCompute>(BECount) ? ConstantMaxBECount : BECount;
13239 return ExitLimit(BECount, ConstantMaxBECount, SymbolicMaxBECount,
false,
13245 if (Range.isFullSet())
13249 if (
const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
13250 if (!SC->getValue()->isZero()) {
13254 getNoWrapFlags(FlagNW));
13255 if (
const auto *ShiftedAddRec = dyn_cast<SCEVAddRecExpr>(Shifted))
13256 return ShiftedAddRec->getNumIterationsInRange(
13257 Range.subtract(SC->getAPInt()), SE);
13264 if (
any_of(operands(), [](
const SCEV *
Op) {
return !isa<SCEVConstant>(
Op); }))
13284 APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
13285 APInt End =
A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();
13295 if (Range.contains(Val->
getValue()))
13302 "Linear scev computation is off in a bad way!");
13306 if (isQuadratic()) {
13316 assert(getNumOperands() > 1 &&
"AddRec with zero step?");
13326 for (
unsigned i = 0, e = getNumOperands() - 1; i < e; ++i)
13332 const SCEV *
Last = getOperand(getNumOperands() - 1);
13333 assert(!
Last->isZero() &&
"Recurrency with zero step?");
13335 return cast<SCEVAddRecExpr>(SE.
getAddRecExpr(Ops, getLoop(),
13342 if (
const auto *SU = dyn_cast<SCEVUnknown>(S))
13343 return isa<UndefValue>(SU->
getValue());
13351 if (
const auto *SU = dyn_cast<SCEVUnknown>(S))
13360 if (
StoreInst *Store = dyn_cast<StoreInst>(Inst))
13361 Ty = Store->getValueOperand()->getType();
13362 else if (
LoadInst *Load = dyn_cast<LoadInst>(Inst))
13363 Ty = Load->getType();
13375void ScalarEvolution::SCEVCallbackVH::deleted() {
13376 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13377 if (
PHINode *PN = dyn_cast<PHINode>(getValPtr()))
13378 SE->ConstantEvolutionLoopExitValue.erase(PN);
13379 SE->eraseValueFromMap(getValPtr());
13383void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(
Value *V) {
13384 assert(SE &&
"SCEVCallbackVH called with a null ScalarEvolution!");
13403 :
F(
F), TLI(TLI), AC(AC), DT(DT), LI(LI),
13405 LoopDispositions(64), BlockDispositions(64) {
13416 auto *GuardDecl =
F.getParent()->getFunction(
13418 HasGuards = GuardDecl && !GuardDecl->use_empty();
13422 :
F(Arg.
F), HasGuards(Arg.HasGuards), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
13423 LI(Arg.LI), CouldNotCompute(
std::
move(Arg.CouldNotCompute)),
13424 ValueExprMap(
std::
move(Arg.ValueExprMap)),
13425 PendingLoopPredicates(
std::
move(Arg.PendingLoopPredicates)),
13426 PendingPhiRanges(
std::
move(Arg.PendingPhiRanges)),
13427 PendingMerges(
std::
move(Arg.PendingMerges)),
13428 ConstantMultipleCache(
std::
move(Arg.ConstantMultipleCache)),
13429 BackedgeTakenCounts(
std::
move(Arg.BackedgeTakenCounts)),
13430 PredicatedBackedgeTakenCounts(
13431 std::
move(Arg.PredicatedBackedgeTakenCounts)),
13432 BECountUsers(
std::
move(Arg.BECountUsers)),
13433 ConstantEvolutionLoopExitValue(
13434 std::
move(Arg.ConstantEvolutionLoopExitValue)),
13435 ValuesAtScopes(
std::
move(Arg.ValuesAtScopes)),
13436 ValuesAtScopesUsers(
std::
move(Arg.ValuesAtScopesUsers)),
13437 LoopDispositions(
std::
move(Arg.LoopDispositions)),
13438 LoopPropertiesCache(
std::
move(Arg.LoopPropertiesCache)),
13439 BlockDispositions(
std::
move(Arg.BlockDispositions)),
13440 SCEVUsers(
std::
move(Arg.SCEVUsers)),
13441 UnsignedRanges(
std::
move(Arg.UnsignedRanges)),
13442 SignedRanges(
std::
move(Arg.SignedRanges)),
13443 UniqueSCEVs(
std::
move(Arg.UniqueSCEVs)),
13444 UniquePreds(
std::
move(Arg.UniquePreds)),
13445 SCEVAllocator(
std::
move(Arg.SCEVAllocator)),
13446 LoopUsers(
std::
move(Arg.LoopUsers)),
13447 PredicatedSCEVRewrites(
std::
move(Arg.PredicatedSCEVRewrites)),
13448 FirstUnknown(Arg.FirstUnknown) {
13449 Arg.FirstUnknown =
nullptr;
13458 Tmp->~SCEVUnknown();
13460 FirstUnknown =
nullptr;
13462 ExprValueMap.
clear();
13463 ValueExprMap.
clear();
13465 BackedgeTakenCounts.clear();
13466 PredicatedBackedgeTakenCounts.clear();
13468 assert(PendingLoopPredicates.empty() &&
"isImpliedCond garbage");
13469 assert(PendingPhiRanges.empty() &&
"getRangeRef garbage");
13470 assert(PendingMerges.empty() &&
"isImpliedViaMerge garbage");
13471 assert(!WalkingBEDominatingConds &&
"isLoopBackedgeGuardedByCond garbage!");
13472 assert(!ProvingSplitPredicate &&
"ProvingSplitPredicate garbage!");
13482 if (isa<SCEVConstant>(S))
13494 L->getHeader()->printAsOperand(
OS,
false);
13498 L->getExitingBlocks(ExitingBlocks);
13499 if (ExitingBlocks.
size() != 1)
13500 OS <<
"<multiple exits> ";
13503 if (!isa<SCEVCouldNotCompute>(BTC)) {
13504 OS <<
"backedge-taken count is ";
13507 OS <<
"Unpredictable backedge-taken count.";
13510 if (ExitingBlocks.
size() > 1)
13511 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
13512 OS <<
" exit count for " << ExitingBlock->
getName() <<
": ";
13518 L->getHeader()->printAsOperand(
OS,
false);
13522 if (!isa<SCEVCouldNotCompute>(ConstantBTC)) {
13523 OS <<
"constant max backedge-taken count is ";
13526 OS <<
", actual taken count either this or zero.";
13528 OS <<
"Unpredictable constant max backedge-taken count. ";
13533 L->getHeader()->printAsOperand(
OS,
false);
13537 if (!isa<SCEVCouldNotCompute>(SymbolicBTC)) {
13538 OS <<
"symbolic max backedge-taken count is ";
13541 OS <<
", actual taken count either this or zero.";
13543 OS <<
"Unpredictable symbolic max backedge-taken count. ";
13547 if (ExitingBlocks.
size() > 1)
13548 for (
BasicBlock *ExitingBlock : ExitingBlocks) {
13549 OS <<
" symbolic max exit count for " << ExitingBlock->
getName() <<
": ";
13558 if (PBT != BTC || !Preds.
empty()) {
13560 L->getHeader()->printAsOperand(
OS,
false);
13562 if (!isa<SCEVCouldNotCompute>(PBT)) {
13563 OS <<
"Predicated backedge-taken count is ";
13566 OS <<
"Unpredictable predicated backedge-taken count.";
13568 OS <<
" Predicates:\n";
13569 for (
const auto *
P : Preds)
13575 L->getHeader()->printAsOperand(
OS,
false);
13591 OS <<
"Computable";
13600 OS <<
"DoesNotDominate";
13606 OS <<
"ProperlyDominates";
13623 OS <<
"Classifying expressions for: ";
13632 if (!isa<SCEVCouldNotCompute>(SV)) {
13645 if (!isa<SCEVCouldNotCompute>(AtUse)) {
13654 OS <<
"\t\t" "Exits: ";
13657 OS <<
"<<Unknown>>";
13663 for (
const auto *Iter = L; Iter; Iter = Iter->getParentLoop()) {
13665 OS <<
"\t\t" "LoopDispositions: { ";
13671 Iter->getHeader()->printAsOperand(
OS,
false);
13679 OS <<
"\t\t" "LoopDispositions: { ";
13685 InnerL->getHeader()->printAsOperand(
OS,
false);
13696 OS <<
"Determining loop execution counts for: ";
13705 auto &Values = LoopDispositions[S];
13706 for (
auto &V : Values) {
13707 if (V.getPointer() == L)
13712 auto &Values2 = LoopDispositions[S];
13714 if (V.getPointer() == L) {
13723ScalarEvolution::computeLoopDisposition(
const SCEV *S,
const Loop *L) {
13742 assert(!L->contains(AR->
getLoop()) &&
"Containing loop's header does not"
13743 " dominate the contained loop's header?");
13770 bool HasVarying =
false;
13785 if (
auto *
I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
13804 auto &Values = BlockDispositions[S];
13805 for (
auto &V : Values) {
13806 if (V.getPointer() == BB)
13811 auto &Values2 = BlockDispositions[S];
13813 if (V.getPointer() == BB) {
13822ScalarEvolution::computeBlockDisposition(
const SCEV *S,
const BasicBlock *BB) {
13851 bool Proper =
true;
13863 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
13864 if (
I->getParent() == BB)
13889void ScalarEvolution::forgetBackedgeTakenCounts(
const Loop *L,
13892 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
13893 auto It = BECounts.find(L);
13894 if (It != BECounts.end()) {
13895 for (
const ExitNotTakenInfo &ENT : It->second.ExitNotTaken) {
13896 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
13897 if (!isa<SCEVConstant>(S)) {
13898 auto UserIt = BECountUsers.find(S);
13899 assert(UserIt != BECountUsers.end());
13900 UserIt->second.erase({L, Predicated});
13904 BECounts.erase(It);
13912 while (!Worklist.
empty()) {
13914 auto Users = SCEVUsers.find(Curr);
13915 if (
Users != SCEVUsers.end())
13921 for (
const auto *S : ToForget)
13922 forgetMemoizedResultsImpl(S);
13924 for (
auto I = PredicatedSCEVRewrites.begin();
13925 I != PredicatedSCEVRewrites.end();) {
13926 std::pair<const SCEV *, const Loop *> Entry =
I->first;
13927 if (ToForget.count(Entry.first))
13928 PredicatedSCEVRewrites.erase(
I++);
13934void ScalarEvolution::forgetMemoizedResultsImpl(
const SCEV *S) {
13935 LoopDispositions.erase(S);
13936 BlockDispositions.erase(S);
13937 UnsignedRanges.erase(S);
13938 SignedRanges.erase(S);
13939 HasRecMap.
erase(S);
13940 ConstantMultipleCache.erase(S);
13942 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(S)) {
13943 UnsignedWrapViaInductionTried.erase(AR);
13944 SignedWrapViaInductionTried.erase(AR);
13947 auto ExprIt = ExprValueMap.
find(S);
13948 if (ExprIt != ExprValueMap.
end()) {
13949 for (
Value *V : ExprIt->second) {
13950 auto ValueIt = ValueExprMap.
find_as(V);
13951 if (ValueIt != ValueExprMap.
end())
13952 ValueExprMap.
erase(ValueIt);
13954 ExprValueMap.
erase(ExprIt);
13957 auto ScopeIt = ValuesAtScopes.find(S);
13958 if (ScopeIt != ValuesAtScopes.end()) {
13959 for (
const auto &Pair : ScopeIt->second)
13960 if (!isa_and_nonnull<SCEVConstant>(Pair.second))
13962 std::make_pair(Pair.first, S));
13963 ValuesAtScopes.erase(ScopeIt);
13966 auto ScopeUserIt = ValuesAtScopesUsers.find(S);
13967 if (ScopeUserIt != ValuesAtScopesUsers.end()) {
13968 for (
const auto &Pair : ScopeUserIt->second)
13969 llvm::erase(ValuesAtScopes[Pair.second], std::make_pair(Pair.first, S));
13970 ValuesAtScopesUsers.erase(ScopeUserIt);
13973 auto BEUsersIt = BECountUsers.find(S);
13974 if (BEUsersIt != BECountUsers.end()) {
13976 auto Copy = BEUsersIt->second;
13977 for (
const auto &Pair : Copy)
13978 forgetBackedgeTakenCounts(Pair.getPointer(), Pair.getInt());
13979 BECountUsers.erase(BEUsersIt);
13982 auto FoldUser = FoldCacheUser.find(S);
13983 if (FoldUser != FoldCacheUser.end())
13984 for (
auto &KV : FoldUser->second)
13985 FoldCache.erase(KV);
13986 FoldCacheUser.erase(S);
13990ScalarEvolution::getUsedLoops(
const SCEV *S,
13992 struct FindUsedLoops {
13994 : LoopsUsed(LoopsUsed) {}
13996 bool follow(
const SCEV *S) {
13997 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(S))
14002 bool isDone()
const {
return false; }
14005 FindUsedLoops F(LoopsUsed);
14009void ScalarEvolution::getReachableBlocks(
14013 while (!Worklist.
empty()) {
14015 if (!Reachable.
insert(BB).second)
14022 if (
auto *
C = dyn_cast<ConstantInt>(
Cond)) {
14023 Worklist.
push_back(
C->isOne() ? TrueBB : FalseBB);
14027 if (
auto *Cmp = dyn_cast<ICmpInst>(
Cond)) {
14030 if (isKnownPredicateViaConstantRanges(
Cmp->getPredicate(), L, R)) {
14034 if (isKnownPredicateViaConstantRanges(
Cmp->getInversePredicate(), L,
14069 SCEVMapper SCM(SE2);
14071 SE2.getReachableBlocks(ReachableBlocks,
F);
14073 auto GetDelta = [&](
const SCEV *Old,
const SCEV *New) ->
const SCEV * {
14091 while (!LoopStack.
empty()) {
14097 if (!ReachableBlocks.
contains(L->getHeader()))
14102 auto It = BackedgeTakenCounts.find(L);
14103 if (It == BackedgeTakenCounts.end())
14107 SCM.visit(It->second.getExact(L,
const_cast<ScalarEvolution *
>(
this)));
14127 const SCEV *Delta = GetDelta(CurBECount, NewBECount);
14128 if (Delta && !Delta->
isZero()) {
14129 dbgs() <<
"Trip Count for " << *L <<
" Changed!\n";
14130 dbgs() <<
"Old: " << *CurBECount <<
"\n";
14131 dbgs() <<
"New: " << *NewBECount <<
"\n";
14132 dbgs() <<
"Delta: " << *Delta <<
"\n";
14140 while (!Worklist.
empty()) {
14142 if (ValidLoops.
insert(L).second)
14143 Worklist.
append(L->begin(), L->end());
14145 for (
const auto &KV : ValueExprMap) {
14148 if (
auto *AR = dyn_cast<SCEVAddRecExpr>(KV.second)) {
14150 "AddRec references invalid loop");
14155 auto It = ExprValueMap.
find(KV.second);
14156 if (It == ExprValueMap.
end() || !It->second.contains(KV.first)) {
14157 dbgs() <<
"Value " << *KV.first
14158 <<
" is in ValueExprMap but not in ExprValueMap\n";
14162 if (
auto *
I = dyn_cast<Instruction>(&*KV.first)) {
14163 if (!ReachableBlocks.
contains(
I->getParent()))
14165 const SCEV *OldSCEV = SCM.visit(KV.second);
14167 const SCEV *Delta = GetDelta(OldSCEV, NewSCEV);
14168 if (Delta && !Delta->
isZero()) {
14169 dbgs() <<
"SCEV for value " << *
I <<
" changed!\n"
14170 <<
"Old: " << *OldSCEV <<
"\n"
14171 <<
"New: " << *NewSCEV <<
"\n"
14172 <<
"Delta: " << *Delta <<
"\n";
14178 for (
const auto &KV : ExprValueMap) {
14179 for (
Value *V : KV.second) {
14180 auto It = ValueExprMap.find_as(V);
14181 if (It == ValueExprMap.end()) {
14182 dbgs() <<
"Value " << *V
14183 <<
" is in ExprValueMap but not in ValueExprMap\n";
14186 if (It->second != KV.first) {
14187 dbgs() <<
"Value " << *V <<
" mapped to " << *It->second
14188 <<
" rather than " << *KV.first <<
"\n";
14195 for (
const auto &S : UniqueSCEVs) {
14198 if (isa<SCEVConstant>(
Op))
14200 auto It = SCEVUsers.find(
Op);
14201 if (It != SCEVUsers.end() && It->second.count(&S))
14203 dbgs() <<
"Use of operand " << *
Op <<
" by user " << S
14204 <<
" is not being tracked!\n";
14210 for (
const auto &ValueAndVec : ValuesAtScopes) {
14212 for (
const auto &LoopAndValueAtScope : ValueAndVec.second) {
14213 const Loop *L = LoopAndValueAtScope.first;
14214 const SCEV *ValueAtScope = LoopAndValueAtScope.second;
14215 if (!isa<SCEVConstant>(ValueAtScope)) {
14216 auto It = ValuesAtScopesUsers.find(ValueAtScope);
14217 if (It != ValuesAtScopesUsers.end() &&
14220 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14221 << *ValueAtScope <<
" missing in ValuesAtScopesUsers\n";
14227 for (
const auto &ValueAtScopeAndVec : ValuesAtScopesUsers) {
14228 const SCEV *ValueAtScope = ValueAtScopeAndVec.first;
14229 for (
const auto &LoopAndValue : ValueAtScopeAndVec.second) {
14230 const Loop *L = LoopAndValue.first;
14231 const SCEV *
Value = LoopAndValue.second;
14233 auto It = ValuesAtScopes.find(
Value);
14234 if (It != ValuesAtScopes.end() &&
14235 is_contained(It->second, std::make_pair(L, ValueAtScope)))
14237 dbgs() <<
"Value: " << *
Value <<
", Loop: " << *L <<
", ValueAtScope: "
14238 << *ValueAtScope <<
" missing in ValuesAtScopes\n";
14244 auto VerifyBECountUsers = [&](
bool Predicated) {
14246 Predicated ? PredicatedBackedgeTakenCounts : BackedgeTakenCounts;
14247 for (
const auto &LoopAndBEInfo : BECounts) {
14248 for (
const ExitNotTakenInfo &ENT : LoopAndBEInfo.second.ExitNotTaken) {
14249 for (
const SCEV *S : {ENT.ExactNotTaken, ENT.SymbolicMaxNotTaken}) {
14250 if (!isa<SCEVConstant>(S)) {
14251 auto UserIt = BECountUsers.find(S);
14252 if (UserIt != BECountUsers.end() &&
14253 UserIt->second.contains({ LoopAndBEInfo.first, Predicated }))
14255 dbgs() <<
"Value " << *S <<
" for loop " << *LoopAndBEInfo.first
14256 <<
" missing from BECountUsers\n";
14263 VerifyBECountUsers(
false);
14264 VerifyBECountUsers(
true);
14267 for (
auto &[S, Values] : LoopDispositions) {
14268 for (
auto [
Loop, CachedDisposition] : Values) {
14270 if (CachedDisposition != RecomputedDisposition) {
14271 dbgs() <<
"Cached disposition of " << *S <<
" for loop " << *
Loop
14272 <<
" is incorrect: cached " << CachedDisposition <<
", actual "
14273 << RecomputedDisposition <<
"\n";
14280 for (
auto &[S, Values] : BlockDispositions) {
14281 for (
auto [BB, CachedDisposition] : Values) {
14283 if (CachedDisposition != RecomputedDisposition) {
14284 dbgs() <<
"Cached disposition of " << *S <<
" for block %"
14285 << BB->
getName() <<
" is incorrect: cached " << CachedDisposition
14286 <<
", actual " << RecomputedDisposition <<
"\n";
14293 for (
auto [
FoldID, Expr] : FoldCache) {
14294 auto I = FoldCacheUser.find(Expr);
14295 if (
I == FoldCacheUser.end()) {
14296 dbgs() <<
"Missing entry in FoldCacheUser for cached expression " << *Expr
14301 dbgs() <<
"Missing FoldID in cached users of " << *Expr <<
"!\n";
14305 for (
auto [Expr, IDs] : FoldCacheUser) {
14306 for (
auto &
FoldID : IDs) {
14307 auto I = FoldCache.find(
FoldID);
14308 if (
I == FoldCache.end()) {
14309 dbgs() <<
"Missing entry in FoldCache for expression " << *Expr
14313 if (
I->second != Expr) {
14314 dbgs() <<
"Entry in FoldCache doesn't match FoldCacheUser: "
14315 << *
I->second <<
" != " << *Expr <<
"!\n";
14326 for (
auto [S, Multiple] : ConstantMultipleCache) {
14328 if ((Multiple != 0 && RecomputedMultiple != 0 &&
14329 Multiple.
urem(RecomputedMultiple) != 0 &&
14330 RecomputedMultiple.
urem(Multiple) != 0)) {
14331 dbgs() <<
"Incorrect cached computation in ConstantMultipleCache for "
14332 << *S <<
" : Computed " << RecomputedMultiple
14333 <<
" but cache contains " << Multiple <<
"!\n";
14373 OS <<
"Printing analysis 'Scalar Evolution Analysis' for function '"
14374 <<
F.getName() <<
"':\n";
14380 "Scalar Evolution Analysis",
false,
true)
14396 F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
F),
14397 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
F),
14398 getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
14399 getAnalysis<LoopInfoWrapperPass>().getLoopInfo()));
14431 const SCEV *LHS,
const SCEV *RHS) {
14434 "Type mismatch between LHS and RHS");
14437 ID.AddInteger(Pred);
14438 ID.AddPointer(
LHS);
14439 ID.AddPointer(
RHS);
14440 void *IP =
nullptr;
14441 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
14445 UniquePreds.InsertNode(Eq, IP);
14456 ID.AddInteger(AddedFlags);
14457 void *IP =
nullptr;
14458 if (
const auto *S = UniquePreds.FindNodeOrInsertPos(
ID, IP))
14460 auto *OF =
new (SCEVAllocator)
14462 UniquePreds.InsertNode(OF, IP);
14482 SCEVPredicateRewriter
Rewriter(L, SE, NewPreds, Pred);
14488 if (
auto *U = dyn_cast<SCEVUnionPredicate>(Pred)) {
14489 for (
const auto *Pred : U->getPredicates())
14490 if (
const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred))
14491 if (IPred->getLHS() == Expr &&
14492 IPred->getPredicate() == ICmpInst::ICMP_EQ)
14493 return IPred->getRHS();
14494 }
else if (
const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred)) {
14495 if (IPred->getLHS() == Expr &&
14496 IPred->getPredicate() == ICmpInst::ICMP_EQ)
14497 return IPred->getRHS();
14500 return convertToAddRecWithPreds(Expr);
14553 return addOverflowAssumption(
A);
14563 if (!isa<PHINode>(Expr->
getValue()))
14566 std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
14568 if (!PredicatedRewrite)
14570 for (
const auto *
P : PredicatedRewrite->second){
14572 if (
auto *WP = dyn_cast<const SCEVWrapPredicate>(
P)) {
14573 if (L != WP->getExpr()->getLoop())
14576 if (!addOverflowAssumption(
P))
14579 return PredicatedRewrite->first;
14592 return SCEVPredicateRewriter::rewrite(S, L, *
this,
nullptr, &Preds);
14599 S = SCEVPredicateRewriter::rewrite(S, L, *
this, &TransformPreds,
nullptr);
14600 auto *AddRec = dyn_cast<SCEVAddRecExpr>(S);
14607 for (
const auto *
P : TransformPreds)
14616 : FastID(
ID), Kind(Kind) {}
14623 assert(
LHS !=
RHS &&
"LHS and RHS are the same SCEV");
14627 const auto *
Op = dyn_cast<SCEVComparePredicate>(
N);
14657 const auto *
Op = dyn_cast<SCEVWrapPredicate>(
N);
14659 return Op &&
Op->AR == AR &&
setFlags(Flags,
Op->Flags) == Flags;
14695 if (Step->getValue()->getValue().isNonNegative())
14699 return ImpliedFlags;
14705 for (
const auto *
P : Preds)
14715 if (
const auto *Set = dyn_cast<SCEVUnionPredicate>(
N))
14716 return all_of(Set->Preds,
14724 for (
const auto *Pred : Preds)
14729 if (
const auto *Set = dyn_cast<SCEVUnionPredicate>(
N)) {
14730 for (
const auto *Pred : Set->Preds)
14742 Preds = std::make_unique<SCEVUnionPredicate>(
Empty);
14747 for (
const auto *
Op : Ops)
14751 if (!isa<SCEVConstant>(
Op))
14752 SCEVUsers[
Op].insert(
User);
14757 RewriteEntry &Entry = RewriteMap[Expr];
14760 if (Entry.second && Generation == Entry.first)
14761 return Entry.second;
14766 Expr = Entry.second;
14769 Entry = {Generation, NewSCEV};
14775 if (!BackedgeCount) {
14778 for (
const auto *
P : Preds)
14781 return BackedgeCount;
14785 if (Preds->implies(&Pred))
14788 auto &OldPreds = Preds->getPredicates();
14791 Preds = std::make_unique<SCEVUnionPredicate>(NewPreds);
14792 updateGeneration();
14799void PredicatedScalarEvolution::updateGeneration() {
14801 if (++Generation == 0) {
14802 for (
auto &II : RewriteMap) {
14803 const SCEV *Rewritten = II.second.second;
14812 const auto *AR = cast<SCEVAddRecExpr>(Expr);
14820 auto II = FlagsMap.insert({V, Flags});
14828 const auto *AR = cast<SCEVAddRecExpr>(Expr);
14833 auto II = FlagsMap.find(V);
14835 if (II != FlagsMap.end())
14849 for (
const auto *
P : NewPreds)
14852 RewriteMap[SE.
getSCEV(V)] = {Generation, New};
14858 : RewriteMap(
Init.RewriteMap), SE(
Init.SE), L(
Init.L),
14860 Generation(
Init.Generation), BackedgeCount(
Init.BackedgeCount) {
14861 for (
auto I :
Init.FlagsMap)
14862 FlagsMap.insert(
I);
14867 for (
auto *BB : L.getBlocks())
14868 for (
auto &
I : *BB) {
14873 auto II = RewriteMap.find(Expr);
14875 if (II == RewriteMap.end())
14879 if (II->second.second == Expr)
14884 OS.
indent(
Depth + 2) <<
"--> " << *II->second.second <<
"\n";
14893bool ScalarEvolution::matchURem(
const SCEV *Expr,
const SCEV *&LHS,
14894 const SCEV *&RHS) {
14898 if (
const auto *ZExt = dyn_cast<SCEVZeroExtendExpr>(Expr))
14899 if (
const auto *Trunc = dyn_cast<SCEVTruncateExpr>(ZExt->getOperand(0))) {
14900 LHS = Trunc->getOperand();
14912 const auto *
Add = dyn_cast<SCEVAddExpr>(Expr);
14913 if (
Add ==
nullptr ||
Add->getNumOperands() != 2)
14916 const SCEV *
A =
Add->getOperand(1);
14917 const auto *
Mul = dyn_cast<SCEVMulExpr>(
Add->getOperand(0));
14919 if (
Mul ==
nullptr)
14922 const auto MatchURemWithDivisor = [&](
const SCEV *
B) {
14933 if (
Mul->getNumOperands() == 3 && isa<SCEVConstant>(
Mul->getOperand(0)))
14934 return MatchURemWithDivisor(
Mul->getOperand(1)) ||
14935 MatchURemWithDivisor(
Mul->getOperand(2));
14938 if (
Mul->getNumOperands() == 2)
14939 return MatchURemWithDivisor(
Mul->getOperand(1)) ||
14940 MatchURemWithDivisor(
Mul->getOperand(0)) ||
14947ScalarEvolution::computeSymbolicMaxBackedgeTakenCount(
const Loop *L) {
14949 L->getExitingBlocks(ExitingBlocks);
14955 for (
BasicBlock *ExitingBB : ExitingBlocks) {
14956 const SCEV *ExitCount =
14958 if (!isa<SCEVCouldNotCompute>(ExitCount)) {
14960 "We should only have known counts for exiting blocks that "
14961 "dominate latch!");
14965 if (ExitCounts.
empty())
14984 auto I = Map.find(Expr);
14985 if (
I == Map.end())
14991 auto I = Map.find(Expr);
14992 if (
I == Map.end()) {
14998 while (Bitwidth % 8 == 0 && Bitwidth >= 8 &&
14999 Bitwidth >
Op->getType()->getScalarSizeInBits()) {
15002 auto I = Map.find(NarrowExt);
15003 if (
I != Map.end())
15005 Bitwidth = Bitwidth / 2;
15015 auto I = Map.find(Expr);
15016 if (
I == Map.end())
15023 auto I = Map.find(Expr);
15024 if (
I == Map.end())
15030 auto I = Map.find(Expr);
15031 if (
I == Map.end())
15049 if (isa<SCEVConstant>(
LHS)) {
15057 auto MatchRangeCheckIdiom = [
this, Predicate,
LHS,
RHS, &RewriteMap,
15058 &ExprsToRewrite]() {
15059 auto *AddExpr = dyn_cast<SCEVAddExpr>(
LHS);
15060 if (!AddExpr || AddExpr->getNumOperands() != 2)
15063 auto *C1 = dyn_cast<SCEVConstant>(AddExpr->getOperand(0));
15064 auto *LHSUnknown = dyn_cast<SCEVUnknown>(AddExpr->getOperand(1));
15065 auto *C2 = dyn_cast<SCEVConstant>(
RHS);
15066 if (!C1 || !C2 || !LHSUnknown)
15071 .
sub(C1->getAPInt());
15074 if (ExactRegion.isWrappedSet() || ExactRegion.isFullSet())
15076 auto I = RewriteMap.find(LHSUnknown);
15077 const SCEV *RewrittenLHS =
I != RewriteMap.end() ?
I->second : LHSUnknown;
15084 if (MatchRangeCheckIdiom())
15090 auto IsMinMaxSCEVWithNonNegativeConstant =
15093 if (
auto *
MinMax = dyn_cast<SCEVMinMaxExpr>(Expr)) {
15094 if (
MinMax->getNumOperands() != 2)
15096 if (
auto *
C = dyn_cast<SCEVConstant>(
MinMax->getOperand(0))) {
15097 if (
C->getAPInt().isNegative())
15099 SCTy =
MinMax->getSCEVType();
15110 auto GetNonNegExprAndPosDivisor = [&](
const SCEV *Expr,
const SCEV *Divisor,
15112 auto *ConstExpr = dyn_cast<SCEVConstant>(Expr);
15113 auto *ConstDivisor = dyn_cast<SCEVConstant>(Divisor);
15114 if (!ConstExpr || !ConstDivisor)
15116 ExprVal = ConstExpr->getAPInt();
15117 DivisorVal = ConstDivisor->getAPInt();
15118 return ExprVal.isNonNegative() && !DivisorVal.isNonPositive();
15124 auto GetNextSCEVDividesByDivisor = [&](
const SCEV *Expr,
15125 const SCEV *Divisor) {
15128 if (!GetNonNegExprAndPosDivisor(Expr, Divisor, ExprVal, DivisorVal))
15140 auto GetPreviousSCEVDividesByDivisor = [&](
const SCEV *Expr,
15141 const SCEV *Divisor) {
15144 if (!GetNonNegExprAndPosDivisor(Expr, Divisor, ExprVal, DivisorVal))
15154 std::function<
const SCEV *(
const SCEV *,
const SCEV *)>
15155 ApplyDivisibiltyOnMinMaxExpr = [&](
const SCEV *MinMaxExpr,
15156 const SCEV *Divisor) {
15157 const SCEV *MinMaxLHS =
nullptr, *MinMaxRHS =
nullptr;
15159 if (!IsMinMaxSCEVWithNonNegativeConstant(MinMaxExpr, SCTy, MinMaxLHS,
15163 isa<SCEVSMinExpr>(MinMaxExpr) || isa<SCEVUMinExpr>(MinMaxExpr);
15165 "Expected non-negative operand!");
15166 auto *DivisibleExpr =
15167 IsMin ? GetPreviousSCEVDividesByDivisor(MinMaxLHS, Divisor)
15168 : GetNextSCEVDividesByDivisor(MinMaxLHS, Divisor);
15170 ApplyDivisibiltyOnMinMaxExpr(MinMaxRHS, Divisor), DivisibleExpr};
15181 const SCEV *URemLHS =
nullptr;
15182 const SCEV *URemRHS =
nullptr;
15183 if (matchURem(
LHS, URemLHS, URemRHS)) {
15184 if (
const SCEVUnknown *LHSUnknown = dyn_cast<SCEVUnknown>(URemLHS)) {
15185 auto I = RewriteMap.find(LHSUnknown);
15186 const SCEV *RewrittenLHS =
15187 I != RewriteMap.end() ?
I->second : LHSUnknown;
15188 RewrittenLHS = ApplyDivisibiltyOnMinMaxExpr(RewrittenLHS, URemRHS);
15189 const auto *Multiple =
15191 RewriteMap[LHSUnknown] = Multiple;
15203 if (!isa<SCEVUnknown>(
LHS) && isa<SCEVUnknown>(
RHS)) {
15212 auto AddRewrite = [&](
const SCEV *
From,
const SCEV *FromRewritten,
15214 if (
From == FromRewritten)
15216 RewriteMap[
From] = To;
15222 auto GetMaybeRewritten = [&](
const SCEV *S) {
15223 auto I = RewriteMap.find(S);
15224 return I != RewriteMap.end() ?
I->second : S;
15234 std::function<
bool(
const SCEV *,
const SCEV *&)> HasDivisibiltyInfo =
15235 [&](
const SCEV *Expr,
const SCEV *&DividesBy) {
15236 if (
auto *
Mul = dyn_cast<SCEVMulExpr>(Expr)) {
15237 if (
Mul->getNumOperands() != 2)
15239 auto *MulLHS =
Mul->getOperand(0);
15240 auto *MulRHS =
Mul->getOperand(1);
15241 if (isa<SCEVConstant>(MulLHS))
15243 if (
auto *Div = dyn_cast<SCEVUDivExpr>(MulLHS))
15244 if (Div->getOperand(1) == MulRHS) {
15245 DividesBy = MulRHS;
15249 if (
auto *
MinMax = dyn_cast<SCEVMinMaxExpr>(Expr))
15250 return HasDivisibiltyInfo(
MinMax->getOperand(0), DividesBy) ||
15251 HasDivisibiltyInfo(
MinMax->getOperand(1), DividesBy);
15256 std::function<
bool(
const SCEV *,
const SCEV *&)> IsKnownToDivideBy =
15257 [&](
const SCEV *Expr,
const SCEV *DividesBy) {
15260 if (
auto *
MinMax = dyn_cast<SCEVMinMaxExpr>(Expr))
15261 return IsKnownToDivideBy(
MinMax->getOperand(0), DividesBy) &&
15262 IsKnownToDivideBy(
MinMax->getOperand(1), DividesBy);
15266 const SCEV *RewrittenLHS = GetMaybeRewritten(
LHS);
15267 const SCEV *DividesBy =
nullptr;
15268 if (HasDivisibiltyInfo(RewrittenLHS, DividesBy))
15271 IsKnownToDivideBy(RewrittenLHS, DividesBy) ? DividesBy :
nullptr;
15285 switch (Predicate) {
15293 RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(
RHS, DividesBy) :
RHS;
15299 RHS = DividesBy ? GetNextSCEVDividesByDivisor(
RHS, DividesBy) :
RHS;
15303 RHS = DividesBy ? GetPreviousSCEVDividesByDivisor(
RHS, DividesBy) :
RHS;
15307 RHS = DividesBy ? GetNextSCEVDividesByDivisor(
RHS, DividesBy) :
RHS;
15316 auto EnqueueOperands = [&Worklist](
const SCEVNAryExpr *S) {
15320 while (!Worklist.
empty()) {
15322 if (isa<SCEVConstant>(
From))
15326 const SCEV *FromRewritten = GetMaybeRewritten(
From);
15327 const SCEV *To =
nullptr;
15329 switch (Predicate) {
15333 if (
auto *
UMax = dyn_cast<SCEVUMaxExpr>(FromRewritten))
15334 EnqueueOperands(
UMax);
15339 if (
auto *
SMax = dyn_cast<SCEVSMaxExpr>(FromRewritten))
15340 EnqueueOperands(
SMax);
15345 if (
auto *
UMin = dyn_cast<SCEVUMinExpr>(FromRewritten))
15346 EnqueueOperands(
UMin);
15351 if (
auto *
SMin = dyn_cast<SCEVSMinExpr>(FromRewritten))
15352 EnqueueOperands(
SMin);
15355 if (isa<SCEVConstant>(
RHS))
15359 if (isa<SCEVConstant>(
RHS) &&
15360 cast<SCEVConstant>(
RHS)->getValue()->isNullValue()) {
15361 const SCEV *OneAlignedUp =
15362 DividesBy ? GetNextSCEVDividesByDivisor(One, DividesBy) : One;
15371 AddRewrite(
From, FromRewritten, To);
15381 auto *AssumeI = cast<CallInst>(AssumeVH);
15388 auto *GuardDecl =
F.getParent()->getFunction(
15391 for (
const auto *GU : GuardDecl->users())
15392 if (
const auto *Guard = dyn_cast<IntrinsicInst>(GU))
15393 if (Guard->getFunction() == Header->getParent() && DT.
dominates(Guard, Header))
15401 for (std::pair<const BasicBlock *, const BasicBlock *> Pair(
15402 L->getLoopPredecessor(), Header);
15403 Pair.first; Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
15406 dyn_cast<BranchInst>(Pair.first->getTerminator());
15419 for (
auto [Term, EnterIfTrue] :
reverse(Terms)) {
15423 while (!Worklist.
empty()) {
15428 if (
auto *Cmp = dyn_cast<ICmpInst>(
Cond)) {
15430 EnterIfTrue ? Cmp->getPredicate() : Cmp->getInversePredicate();
15431 const auto *
LHS =
getSCEV(Cmp->getOperand(0));
15432 const auto *
RHS =
getSCEV(Cmp->getOperand(1));
15433 CollectCondition(Predicate,
LHS,
RHS, RewriteMap);
15446 if (RewriteMap.
empty())
15452 if (ExprsToRewrite.
size() > 1) {
15453 for (
const SCEV *Expr : ExprsToRewrite) {
15454 const SCEV *RewriteTo = RewriteMap[Expr];
15455 RewriteMap.
erase(Expr);
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements a class to represent arbitrary precision integral constant values and operations...
Expand Atomic instructions
block Block Frequency Analysis
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
#define LLVM_DUMP_METHOD
Mark debug helper function definitions like dump() that should not be stripped from debug builds.
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file defines the DenseMap class.
This file builds on the ADT/GraphTraits.h file to build generic depth first graph iterator.
Generic implementation of equivalence classes through the use Tarjan's efficient union-find algorithm...
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
This file defines a hash set that can be used to remove duplication of nodes in a graph.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT, AssumptionCache *AC)
mir Rename Register Operands
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
PowerPC Reduce CR logical Operation
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
static bool rewrite(Function &F)
const SmallVectorImpl< MachineOperand > & Cond
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI optimize exec mask operations pre RA
This file provides utility classes that use RAII to save and restore values.
bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind, SCEVTypes RootKind)
static cl::opt< unsigned > MaxAddRecSize("scalar-evolution-max-add-rec-size", cl::Hidden, cl::desc("Max coefficients in AddRec during evolving"), cl::init(8))
static cl::opt< unsigned > RangeIterThreshold("scev-range-iter-threshold", cl::Hidden, cl::desc("Threshold for switching to iteratively computing SCEV ranges"), cl::init(32))
static const Loop * isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI)
static unsigned getConstantTripCount(const SCEVConstant *ExitCount)
static void PushLoopPHIs(const Loop *L, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push PHI nodes in the header of the given loop onto the given Worklist.
static void insertFoldCacheEntry(const ScalarEvolution::FoldID &ID, const SCEV *S, DenseMap< ScalarEvolution::FoldID, const SCEV * > &FoldCache, DenseMap< const SCEV *, SmallVector< ScalarEvolution::FoldID, 2 > > &FoldCacheUser)
static bool IsKnownPredicateViaMinOrMax(ScalarEvolution &SE, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Is LHS Pred RHS true on the virtue of LHS or RHS being a Min or Max expression?
static cl::opt< bool > ClassifyExpressions("scalar-evolution-classify-expressions", cl::Hidden, cl::init(true), cl::desc("When printing analysis, include information on every instruction"))
static bool CanConstantFold(const Instruction *I)
Return true if we can constant fold an instruction of the specified type, assuming that all operands ...
static cl::opt< unsigned > AddOpsInlineThreshold("scev-addops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining addition operands into a SCEV"), cl::init(500))
static cl::opt< bool > VerifyIR("scev-verify-ir", cl::Hidden, cl::desc("Verify IR correctness when making sensitive SCEV queries (slow)"), cl::init(false))
static bool BrPHIToSelect(DominatorTree &DT, BranchInst *BI, PHINode *Merge, Value *&C, Value *&LHS, Value *&RHS)
static const SCEV * getPreStartForExtend(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static std::optional< APInt > MinOptional(std::optional< APInt > X, std::optional< APInt > Y)
Helper function to compare optional APInts: (a) if X and Y both exist, return min(X,...
static cl::opt< unsigned > MulOpsInlineThreshold("scev-mulops-inline-threshold", cl::Hidden, cl::desc("Threshold for inlining multiplication operands into a SCEV"), cl::init(32))
static void GroupByComplexity(SmallVectorImpl< const SCEV * > &Ops, LoopInfo *LI, DominatorTree &DT)
Given a list of SCEV objects, order them by their complexity, and group objects of the same complexit...
static std::optional< const SCEV * > createNodeForSelectViaUMinSeq(ScalarEvolution *SE, const SCEV *CondExpr, const SCEV *TrueExpr, const SCEV *FalseExpr)
static Constant * BuildConstantFromSCEV(const SCEV *V)
This builds up a Constant using the ConstantExpr interface.
static ConstantInt * EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C, ScalarEvolution &SE)
static const SCEV * BinomialCoefficient(const SCEV *It, unsigned K, ScalarEvolution &SE, Type *ResultTy)
Compute BC(It, K). The result has width W. Assume, K > 0.
static cl::opt< unsigned > MaxCastDepth("scalar-evolution-max-cast-depth", cl::Hidden, cl::desc("Maximum depth of recursive SExt/ZExt/Trunc"), cl::init(8))
static bool IsMinMaxConsistingOf(const SCEV *MaybeMinMaxExpr, const SCEV *Candidate)
Is MaybeMinMaxExpr an (U|S)(Min|Max) of Candidate and some other values?
static PHINode * getConstantEvolvingPHI(Value *V, const Loop *L)
getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node in the loop that V is deri...
static cl::opt< unsigned > MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden, cl::desc("Maximum number of iterations SCEV will " "symbolically execute a constant " "derived loop"), cl::init(100))
static bool MatchBinarySub(const SCEV *S, const SCEV *&LHS, const SCEV *&RHS)
static std::optional< int > CompareSCEVComplexity(EquivalenceClasses< const SCEV * > &EqCacheSCEV, EquivalenceClasses< const Value * > &EqCacheValue, const LoopInfo *const LI, const SCEV *LHS, const SCEV *RHS, DominatorTree &DT, unsigned Depth=0)
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow)
static void PrintSCEVWithTypeHint(raw_ostream &OS, const SCEV *S)
When printing a top-level SCEV for trip counts, it's helpful to include a type for constants which ar...
static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE, const Loop *L)
static bool containsConstantInAddMulChain(const SCEV *StartExpr)
Determine if any of the operands in this SCEV are a constant or if any of the add or multiply express...
static const SCEV * getExtendAddRecStart(const SCEVAddRecExpr *AR, Type *Ty, ScalarEvolution *SE, unsigned Depth)
static bool hasHugeExpression(ArrayRef< const SCEV * > Ops)
Returns true if Ops contains a huge SCEV (the subtree of S contains at least HugeExprThreshold nodes)...
static cl::opt< unsigned > MaxPhiSCCAnalysisSize("scalar-evolution-max-scc-analysis-depth", cl::Hidden, cl::desc("Maximum amount of nodes to process while searching SCEVUnknown " "Phi strongly connected components"), cl::init(8))
static int CompareValueComplexity(EquivalenceClasses< const Value * > &EqCacheValue, const LoopInfo *const LI, Value *LV, Value *RV, unsigned Depth)
Compare the two values LV and RV in terms of their "complexity" where "complexity" is a partial (and ...
static cl::opt< unsigned > MaxSCEVOperationsImplicationDepth("scalar-evolution-max-scev-operations-implication-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV operations implication analysis"), cl::init(2))
static void PushDefUseChildren(Instruction *I, SmallVectorImpl< Instruction * > &Worklist, SmallPtrSetImpl< Instruction * > &Visited)
Push users of the given Instruction onto the given Worklist.
static std::optional< APInt > SolveQuadraticAddRecRange(const SCEVAddRecExpr *AddRec, const ConstantRange &Range, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {0,+,M,+,N} after n iterations.
static cl::opt< bool > UseContextForNoWrapFlagInference("scalar-evolution-use-context-for-no-wrap-flag-strenghening", cl::Hidden, cl::desc("Infer nuw/nsw flags using context where suitable"), cl::init(true))
static cl::opt< bool > EnableFiniteLoopControl("scalar-evolution-finite-loop", cl::Hidden, cl::desc("Handle <= and >= in finite loops"), cl::init(true))
static std::optional< std::tuple< APInt, APInt, APInt, APInt, unsigned > > GetQuadraticEquation(const SCEVAddRecExpr *AddRec)
For a given quadratic addrec, generate coefficients of the corresponding quadratic equation,...
static std::optional< BinaryOp > MatchBinaryOp(Value *V, const DataLayout &DL, AssumptionCache &AC, const DominatorTree &DT, const Instruction *CxtI)
Try to map V into a BinaryOp, and return std::nullopt on failure.
APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2)
static std::optional< APInt > SolveQuadraticAddRecExact(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE)
Let c(n) be the value of the quadratic chrec {L,+,M,+,N} after n iterations.
static std::optional< APInt > TruncIfPossible(std::optional< APInt > X, unsigned BitWidth)
Helper function to truncate an optional APInt to a given BitWidth.
static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
static cl::opt< unsigned > MaxSCEVCompareDepth("scalar-evolution-max-scev-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive SCEV complexity comparisons"), cl::init(32))
static APInt extractConstantWithoutWrapping(ScalarEvolution &SE, const SCEVConstant *ConstantTerm, const SCEVAddExpr *WholeAddExpr)
static cl::opt< unsigned > MaxConstantEvolvingDepth("scalar-evolution-max-constant-evolving-depth", cl::Hidden, cl::desc("Maximum depth of recursive constant evolving"), cl::init(32))
static const SCEV * SolveLinEquationWithOverflow(const APInt &A, const SCEV *B, ScalarEvolution &SE)
Finds the minimum unsigned root of the following equation:
static ConstantRange getRangeForAffineARHelper(APInt Step, const ConstantRange &StartRange, const APInt &MaxBECount, bool Signed)
static std::optional< ConstantRange > GetRangeFromMetadata(Value *V)
Helper method to assign a range to V from metadata present in the IR.
static bool CollectAddOperandsWithScales(DenseMap< const SCEV *, APInt > &M, SmallVectorImpl< const SCEV * > &NewOps, APInt &AccumulatedConstant, ArrayRef< const SCEV * > Ops, const APInt &Scale, ScalarEvolution &SE)
Process the given Ops list, which is a list of operands to be added under the given scale,...
static cl::opt< unsigned > HugeExprThreshold("scalar-evolution-huge-expr-threshold", cl::Hidden, cl::desc("Size of the expression which is considered huge"), cl::init(4096))
static bool isKnownPredicateExtendIdiom(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
static Type * isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI, bool &Signed, ScalarEvolution &SE)
Helper function to createAddRecFromPHIWithCasts.
static Constant * EvaluateExpression(Value *V, const Loop *L, DenseMap< Instruction *, Constant * > &Vals, const DataLayout &DL, const TargetLibraryInfo *TLI)
EvaluateExpression - Given an expression that passes the getConstantEvolvingPHI predicate,...
static const SCEV * MatchNotExpr(const SCEV *Expr)
If Expr computes ~A, return A else return nullptr.
static cl::opt< unsigned > MaxValueCompareDepth("scalar-evolution-max-value-compare-depth", cl::Hidden, cl::desc("Maximum depth of recursive value complexity comparisons"), cl::init(2))
static cl::opt< bool, true > VerifySCEVOpt("verify-scev", cl::Hidden, cl::location(VerifySCEV), cl::desc("Verify ScalarEvolution's backedge taken counts (slow)"))
static const SCEV * getSignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
static SCEV::NoWrapFlags StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type, const ArrayRef< const SCEV * > Ops, SCEV::NoWrapFlags Flags)
static cl::opt< unsigned > MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden, cl::desc("Maximum depth of recursive arithmetics"), cl::init(32))
static bool HasSameValue(const SCEV *A, const SCEV *B)
SCEV structural equivalence is usually sufficient for testing whether two expressions are equal,...
static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow)
Compute the result of "n choose k", the binomial coefficient.
static bool canConstantEvolve(Instruction *I, const Loop *L)
Determine whether this instruction can constant evolve within this loop assuming its operands can all...
static PHINode * getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L, DenseMap< Instruction *, PHINode * > &PHIMap, unsigned Depth)
getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by recursing through each instructi...
static bool scevUnconditionallyPropagatesPoisonFromOperands(SCEVTypes Kind)
static cl::opt< bool > VerifySCEVStrict("verify-scev-strict", cl::Hidden, cl::desc("Enable stricter verification with -verify-scev is passed"))
static Constant * getOtherIncomingValue(PHINode *PN, BasicBlock *BB)
static cl::opt< bool > UseExpensiveRangeSharpening("scalar-evolution-use-expensive-range-sharpening", cl::Hidden, cl::init(false), cl::desc("Use more powerful methods of sharpening expression ranges. May " "be costly in terms of compile time"))
static const SCEV * getUnsignedOverflowLimitForStep(const SCEV *Step, ICmpInst::Predicate *Pred, ScalarEvolution *SE)
This file defines the make_scope_exit function, which executes user-defined cleanup logic at scope ex...
Provides some synthesis utilities to produce sequences of values.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
This defines the Use class.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Virtual Register Rewriter
static const uint32_t IV[8]
A rewriter to replace SCEV expressions in Map with the corresponding entry in the map.
const SCEV * visitAddRecExpr(const SCEVAddRecExpr *Expr)
const SCEV * visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr)
SCEVLoopGuardRewriter(ScalarEvolution &SE, DenseMap< const SCEV *, const SCEV * > &M)
const SCEV * visitSignExtendExpr(const SCEVSignExtendExpr *Expr)
const SCEV * visitUnknown(const SCEVUnknown *Expr)
const SCEV * visitUMinExpr(const SCEVUMinExpr *Expr)
const SCEV * visitSMinExpr(const SCEVSMinExpr *Expr)
Class for arbitrary precision integers.
APInt umul_ov(const APInt &RHS, bool &Overflow) const
APInt udiv(const APInt &RHS) const
Unsigned division operation.
APInt zext(unsigned width) const
Zero extend to a new width.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
uint64_t getZExtValue() const
Get zero extended value.
void setHighBits(unsigned hiBits)
Set the top hiBits bits.
APInt getHiBits(unsigned numBits) const
Compute an APInt containing numBits highbits from this APInt.
APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
unsigned getActiveBits() const
Compute the number of active bits in the value.
APInt trunc(unsigned width) const
Truncate to new width.
static APInt getMaxValue(unsigned numBits)
Gets maximum unsigned value of APInt for specific bit width.
APInt abs() const
Get the absolute value.
bool ugt(const APInt &RHS) const
Unsigned greater than comparison.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
bool isSignMask() const
Check if the APInt's value is returned by getSignMask.
APInt urem(const APInt &RHS) const
Unsigned remainder operation.
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
static APInt getSignedMaxValue(unsigned numBits)
Gets maximum signed value of APInt for a specific bit width.
static APInt getMinValue(unsigned numBits)
Gets minimum unsigned value of APInt for a specific bit width.
bool isNegative() const
Determine sign of this APInt.
bool sle(const APInt &RHS) const
Signed less or equal comparison.
static APInt getSignedMinValue(unsigned numBits)
Gets minimum signed value of APInt for a specific bit width.
unsigned countTrailingZeros() const
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
APInt multiplicativeInverse() const
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
APInt sext(unsigned width) const
Sign extend to a new width.
APInt shl(unsigned shiftAmt) const
Left-shift function.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
bool slt(const APInt &RHS) const
Signed less than comparison.
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
bool isIntN(unsigned N) const
Check if this APInt has an N-bits unsigned integer value.
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
API to communicate dependencies between analyses during invalidation.
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
A container for analyses that lazily runs them and caches their results.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
void setPreservesAll()
Set by analyses that do not transform their input at all.
AnalysisUsage & addRequiredTransitive()
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
ArrayRef< T > take_front(size_t N=1) const
Return a copy of *this with only the first N elements.
size_t size() const
size - Get the array size.
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptions()
Access the list of assumption handles currently tracked for this function.
bool isSingleEdge() const
Check if this is the only edge between Start and End.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
const Instruction & front() const
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const Function * getParent() const
Return the enclosing method, or null if none.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
unsigned getNoWrapKind() const
Returns one of OBO::NoSignedWrap or OBO::NoUnsignedWrap.
Instruction::BinaryOps getBinaryOp() const
Returns the binary operation underlying the intrinsic.
BinaryOps getOpcode() const
Conditional or Unconditional Branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
LLVM_ATTRIBUTE_RETURNS_NONNULL void * Allocate(size_t Size, Align Alignment)
Allocate space at the specified alignment.
This class represents a function call, abstracting a target machine's calling convention.
Value handle with callbacks on RAUW and destruction.
bool isFalseWhenEqual() const
This is just a convenience.
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ ICMP_ULT
unsigned less than
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
bool isTrueWhenEqual() const
This is just a convenience.
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
Predicate getFlippedSignednessPredicate()
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static Constant * getNot(Constant *C)
static Constant * getPtrToInt(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, bool InBounds=false, std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getNeg(Constant *C, bool HasNSW=false)
static Constant * getTrunc(Constant *C, Type *Ty, bool OnlyIfReduced=false)
This is the shared class of boolean and integer constants.
bool isMinusOne() const
This function will return true iff every bit in this constant is set to true.
bool isOne() const
This is just a convenience method to make client code smaller for a common case.
bool isZero() const
This is just a convenience method to make client code smaller for a common code.
static ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
static ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
ConstantRange add(const ConstantRange &Other) const
Return a new range representing the possible values resulting from an addition of a value in this ran...
ConstantRange zextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
PreferredRangeType
If represented precisely, the result of some range operations may consist of multiple disjoint ranges...
bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
const APInt & getLower() const
Return the lower value for this range.
ConstantRange truncate(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly smaller than the current typ...
bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const
Does the predicate Pred hold between ranges this and Other? NOTE: false does not mean that inverse pr...
bool isEmptySet() const
Return true if this set contains no members.
ConstantRange zeroExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
bool isSignWrappedSet() const
Return true if this set wraps around the signed domain.
APInt getSignedMin() const
Return the smallest signed value contained in the ConstantRange.
bool isWrappedSet() const
Return true if this set wraps around the unsigned domain.
void print(raw_ostream &OS) const
Print out the bounds to a stream.
ConstantRange signExtend(uint32_t BitWidth) const
Return a new range in the specified integer type, which must be strictly larger than the current type...
const APInt & getUpper() const
Return the upper value for this range.
ConstantRange unionWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the union of this range with another range.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
APInt getUnsignedMax() const
Return the largest unsigned value contained in the ConstantRange.
ConstantRange intersectWith(const ConstantRange &CR, PreferredRangeType Type=Smallest) const
Return the range that results from the intersection of this range with another range.
APInt getSignedMax() const
Return the largest signed value contained in the ConstantRange.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper)
Create non-empty constant range with the given bounds.
static ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp, const ConstantRange &Other, unsigned NoWrapKind)
Produce the largest range containing all X such that "X BinOp Y" is guaranteed not to wrap (overflow)...
unsigned getMinSignedBits() const
Compute the maximal number of bits needed to represent every value in this signed range.
uint32_t getBitWidth() const
Get the bit width of this ConstantRange.
ConstantRange sub(const ConstantRange &Other) const
Return a new range representing the possible values resulting from a subtraction of a value in this r...
ConstantRange sextOrTrunc(uint32_t BitWidth) const
Make this range have the bit width given by BitWidth.
static ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
This is an important base class in LLVM.
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
const StructLayout * getStructLayout(StructType *Ty) const
Returns a StructLayout object, indicating the alignment of the struct, its size, and the offsets of i...
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
ValueT lookup(const_arg_type_t< KeyT > Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
iterator find(const_arg_type_t< KeyT > Val)
bool erase(const KeyT &Val)
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT > iterator
iterator find_as(const LookupKeyT &Val)
Alternate version of find() which allows a different, and possibly less expensive,...
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
bool contains(const_arg_type_t< KeyT > Val) const
Return true if the specified key is in the map, false otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Analysis pass which computes a DominatorTree.
bool properlyDominates(const DomTreeNodeBase< NodeT > *A, const DomTreeNodeBase< NodeT > *B) const
properlyDominates - Returns true iff A dominates B and A != B.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...
bool isEquivalent(const ElemTy &V1, const ElemTy &V2) const
FoldingSetNodeIDRef - This class describes a reference to an interned FoldingSetNodeID,...
FoldingSetNodeID - This class is used to gather all the unique data bits of a node.
FunctionPass class - This class is used to implement most global optimizations.
const BasicBlock & getEntryBlock() const
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
Module * getParent()
Get the module that this global value is contained inside of...
static bool isPrivateLinkage(LinkageTypes Linkage)
static bool isInternalLinkage(LinkageTypes Linkage)
This instruction compares its operands according to the predicate given to the constructor.
static bool isGE(Predicate P)
Return true if the predicate is SGE or UGE.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
static bool isLT(Predicate P)
Return true if the predicate is SLT or ULT.
static bool isGT(Predicate P)
Return true if the predicate is SGT or UGT.
bool isEquality() const
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
static bool isLE(Predicate P)
Return true if the predicate is SLE or ULE.
bool hasNoUnsignedWrap() const LLVM_READONLY
Determine whether the no unsigned wrap flag is set.
bool hasNoSignedWrap() const LLVM_READONLY
Determine whether the no signed wrap flag is set.
const BasicBlock * getParent() const
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getHeader() const
unsigned getLoopDepth() const
Return the nesting level of this loop.
BlockT * getLoopPredecessor() const
If the given loop's header has exactly one unique predecessor outside the loop, return it.
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
unsigned getLoopDepth(const BlockT *BB) const
Return the loop nesting level of the specified block.
LoopT * getLoopFor(const BlockT *BB) const
Return the inner most loop that BB lives in.
The legacy pass manager's analysis pass to compute loop information.
Represents a single loop in the control flow graph.
bool isLoopInvariant(const Value *V) const
Return true if the specified value is loop invariant.
A Module instance is used to store all the information related to an LLVM module.
Function * getFunction(StringRef Name) const
Look up the specified function in the module symbol table.
This is a utility class that provides an abstraction for the common functionality between Instruction...
unsigned getOpcode() const
Return the opcode for this Instruction or ConstantExpr.
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
iterator_range< const_block_iterator > blocks() const
Value * getIncomingValueForBlock(const BasicBlock *BB) const
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
PointerIntPair - This class implements a pair of a pointer and small integer.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
const SCEVPredicate & getPredicate() const
bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
bool areAddRecsEqualWithPreds(const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const
Check if AR1 and AR2 are equal, while taking into account Equal predicates in Preds.
PredicatedScalarEvolution(ScalarEvolution &SE, Loop &L)
const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
constexpr bool isValid() const
This node represents an addition of some number of SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * getStart() const
const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a recurrence without clearing any previously set flags.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
bool isQuadratic() const
Return true if this represents an expression A + B*x + C*x^2 where A, B and C are loop invariant valu...
const SCEV * getNumIterationsInRange(const ConstantRange &Range, ScalarEvolution &SE) const
Return the number of iterations of this loop that produce values in the specified constant range.
const SCEVAddRecExpr * getPostIncExpr(ScalarEvolution &SE) const
Return an expression representing the value of this expression one iteration of the loop ahead.
const Loop * getLoop() const
This is the base class for unary cast operator classes.
const SCEV * getOperand() const
SCEVCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op, Type *ty)
void setNoWrapFlags(NoWrapFlags Flags)
Set flags for a non-recurrence without clearing previously set flags.
This class represents an assumption that the expression LHS Pred RHS evaluates to true,...
SCEVComparePredicate(const FoldingSetNodeIDRef ID, const ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
bool implies(const SCEVPredicate *N) const override
Implementation of the SCEVPredicate interface.
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
This class represents a constant integer value.
ConstantInt * getValue() const
const APInt & getAPInt() const
This is the base class for unary integral cast operator classes.
SCEVIntegralCastExpr(const FoldingSetNodeIDRef ID, SCEVTypes SCEVTy, const SCEV *op, Type *ty)
This node is the base class min/max selections.
static enum SCEVTypes negate(enum SCEVTypes T)
This node represents multiplication of some number of SCEVs.
This node is a base class providing common functionality for n'ary operators.
bool hasNoUnsignedWrap() const
bool hasNoSelfWrap() const
size_t getNumOperands() const
bool hasNoSignedWrap() const
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
const SCEV * getOperand(unsigned i) const
const SCEV *const * Operands
ArrayRef< const SCEV * > operands() const
This class represents an assumption made using SCEV expressions which can be checked at run-time.
virtual bool implies(const SCEVPredicate *N) const =0
Returns true if this predicate implies N.
SCEVPredicate(const SCEVPredicate &)=default
virtual void print(raw_ostream &OS, unsigned Depth=0) const =0
Prints a textual representation of this predicate with an indentation of Depth.
This class represents a cast from a pointer to a pointer-sized integer value.
This visitor recursively visits a SCEV expression and re-writes it.
const SCEV * visitSignExtendExpr(const SCEVSignExtendExpr *Expr)
const SCEV * visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr)
const SCEV * visitSMinExpr(const SCEVSMinExpr *Expr)
const SCEV * visitUMinExpr(const SCEVUMinExpr *Expr)
This class represents a signed maximum selection.
This class represents a signed minimum selection.
This node is the base class for sequential/in-order min/max selections.
SCEVTypes getEquivalentNonSequentialSCEVType() const
This class represents a sequential/in-order unsigned minimum selection.
This class represents a sign extension of a small integer value to a larger integer value.
Visit all nodes in the expression tree using worklist traversal.
void visitAll(const SCEV *Root)
This class represents a truncation of an integer value to a smaller integer value.
This class represents a binary unsigned division operation.
const SCEV * getLHS() const
const SCEV * getRHS() const
This class represents an unsigned maximum selection.
This class represents an unsigned minimum selection.
This class represents a composition of other SCEV predicates, and is the class that most clients will...
SCEVUnionPredicate(ArrayRef< const SCEVPredicate * > Preds)
Union predicates don't get cached so create a dummy set ID for it.
void print(raw_ostream &OS, unsigned Depth) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool isAlwaysTrue() const override
Implementation of the SCEVPredicate interface.
bool implies(const SCEVPredicate *N) const override
Returns true if this predicate implies N.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents the value of vscale, as used when defining the length of a scalable vector or r...
This class represents an assumption made on an AddRec expression.
IncrementWrapFlags
Similar to SCEV::NoWrapFlags, but with slightly different semantics for FlagNUSW.
SCEVWrapPredicate(const FoldingSetNodeIDRef ID, const SCEVAddRecExpr *AR, IncrementWrapFlags Flags)
bool implies(const SCEVPredicate *N) const override
Returns true if this predicate implies N.
static SCEVWrapPredicate::IncrementWrapFlags setFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OnFlags)
void print(raw_ostream &OS, unsigned Depth=0) const override
Prints a textual representation of this predicate with an indentation of Depth.
bool isAlwaysTrue() const override
Returns true if the predicate is always true.
const SCEVAddRecExpr * getExpr() const
Implementation of the SCEVPredicate interface.
static SCEVWrapPredicate::IncrementWrapFlags clearFlags(SCEVWrapPredicate::IncrementWrapFlags Flags, SCEVWrapPredicate::IncrementWrapFlags OffFlags)
Convenient IncrementWrapFlags manipulation methods.
static SCEVWrapPredicate::IncrementWrapFlags getImpliedFlags(const SCEVAddRecExpr *AR, ScalarEvolution &SE)
Returns the set of SCEVWrapPredicate no wrap flags implied by a SCEVAddRecExpr.
IncrementWrapFlags getFlags() const
Returns the set assumed no overflow flags.
This class represents a zero extension of a small integer value to a larger integer value.
This class represents an analyzed expression in the program.
ArrayRef< const SCEV * > operands() const
Return operands of this SCEV expression.
unsigned short getExpressionSize() const
bool isOne() const
Return true if the expression is a constant one.
bool isZero() const
Return true if the expression is a constant zero.
void dump() const
This method is used for debugging.
bool isAllOnesValue() const
Return true if the expression is a constant all-ones value.
bool isNonConstantNegative() const
Return true if the specified scev is negated, but not a constant.
void print(raw_ostream &OS) const
Print out the internal representation of this scalar to the specified stream.
SCEVTypes getSCEVType() const
Type * getType() const
Return the LLVM type of this SCEV expression.
NoWrapFlags
NoWrapFlags are bitfield indices into SubclassData.
Analysis pass that exposes the ScalarEvolution for a function.
ScalarEvolution run(Function &F, FunctionAnalysisManager &AM)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
void print(raw_ostream &OS, const Module *=nullptr) const override
print - Print out the internal state of the pass.
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
void releaseMemory() override
releaseMemory() - This member can be implemented by a pass if it wants to be able to release its memo...
void verifyAnalysis() const override
verifyAnalysis() - This member can be implemented by an analysis pass to check state of analysis infor...
The main scalar evolution driver.
const SCEV * getConstantMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEVConstant that is greater than or equal to (i.e.
static bool hasFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags TestFlags)
const DataLayout & getDataLayout() const
Return the DataLayout associated with the module this SCEV instance is operating on.
bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
bool isLoopBackedgeGuardedByCond(const Loop *L, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether the backedge of the loop is protected by a conditional between LHS and RHS.
const SCEV * getSMaxExpr(const SCEV *LHS, const SCEV *RHS)
const SCEV * getUDivCeilSCEV(const SCEV *N, const SCEV *D)
Compute ceil(N / D).
const SCEV * getGEPExpr(GEPOperator *GEP, const SmallVectorImpl< const SCEV * > &IndexExprs)
Returns an expression for a GEP.
Type * getWiderType(Type *Ty1, Type *Ty2) const
const SCEV * getAbsExpr(const SCEV *Op, bool IsNSW)
bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
const SCEV * getURemExpr(const SCEV *LHS, const SCEV *RHS)
Represents an unsigned remainder expression based on unsigned division.
bool SimplifyICmpOperands(ICmpInst::Predicate &Pred, const SCEV *&LHS, const SCEV *&RHS, unsigned Depth=0)
Simplify LHS and RHS in a comparison with predicate Pred.
APInt getConstantMultiple(const SCEV *S)
Returns the max constant multiple of S.
bool isKnownNegative(const SCEV *S)
Test if the given expression is known to be negative.
const SCEV * removePointerBase(const SCEV *S)
Compute an expression equivalent to S - getPointerBase(S).
bool isKnownNonZero(const SCEV *S)
Test if the given expression is known to be non-zero.
const SCEV * getSCEVAtScope(const SCEV *S, const Loop *L)
Return a SCEV expression for the specified value at the specified scope in the program.
const SCEV * getSMinExpr(const SCEV *LHS, const SCEV *RHS)
const SCEV * getBackedgeTakenCount(const Loop *L, ExitCountKind Kind=Exact)
If the specified loop has a predictable backedge-taken count, return it, otherwise return a SCEVCould...
const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
void setNoWrapFlags(SCEVAddRecExpr *AddRec, SCEV::NoWrapFlags Flags)
Update no-wrap flags of an AddRec.
const SCEV * getUMaxFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS)
Promote the operands to the wider of the types using zero-extension, and then perform a umax operatio...
const SCEV * getZero(Type *Ty)
Return a SCEV for the constant 0 of a specific type.
bool willNotOverflow(Instruction::BinaryOps BinOp, bool Signed, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI=nullptr)
Check whether the operation BinOp between LHS and RHS provably does not have a signed/unsigned overflow (Signed)?...
ExitLimit computeExitLimitFromCond(const Loop *L, Value *ExitCond, bool ExitIfTrue, bool ControlsOnlyExit, bool AllowPredicates=false)
Compute the number of times the backedge of the specified loop will execute if its exit condition wer...
const SCEV * getZeroExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
unsigned getSmallConstantTripMultiple(const Loop *L, const SCEV *ExitCount)
Returns the largest constant divisor of the trip count as a normal unsigned value,...
uint64_t getTypeSizeInBits(Type *Ty) const
Return the size in bits of the specified type, for which isSCEVable must return true.
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
ConstantRange getSignedRange(const SCEV *S)
Determine the signed range for a particular SCEV.
const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
unsigned getSmallConstantMaxTripCount(const Loop *L)
Returns the upper bound of the loop trip count as a normal unsigned value.
bool loopHasNoAbnormalExits(const Loop *L)
Return true if the loop has no abnormal exits.
const SCEV * getTripCountFromExitCount(const SCEV *ExitCount)
A version of getTripCountFromExitCount below which always picks an evaluation type which can not resu...
ScalarEvolution(Function &F, TargetLibraryInfo &TLI, AssumptionCache &AC, DominatorTree &DT, LoopInfo &LI)
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
const SCEV * getTruncateOrNoop(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getCastExpr(SCEVTypes Kind, const SCEV *Op, Type *Ty)
const SCEV * getSequentialMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< const SCEV * > &Operands)
const SCEV * getLosslessPtrToIntExpr(const SCEV *Op, unsigned Depth=0)
bool isKnownViaInduction(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
We'd like to check the predicate on every iteration of the most dominated loop between loops used in ...
std::optional< bool > evaluatePredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Check whether the condition described by Pred, LHS, and RHS is true or false.
bool isKnownPredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
bool isBackedgeTakenCountMaxOrZero(const Loop *L)
Return true if the backedge taken count is either the value returned by getConstantMaxBackedgeTakenCo...
void forgetLoop(const Loop *L)
This method should be called by the client when it has changed a loop in a way that may effect Scalar...
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
APInt getUnsignedRangeMin(const SCEV *S)
Determine the min of the unsigned range for a particular SCEV.
bool isKnownPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test if the given expression is known to satisfy the condition described by Pred, LHS,...
const SCEV * getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo)
Return an expression for offsetof on the given field with type IntTy.
LoopDisposition getLoopDisposition(const SCEV *S, const Loop *L)
Return the "disposition" of the given SCEV with respect to the given loop.
bool containsAddRecurrence(const SCEV *S)
Return true if the SCEV is a scAddRecExpr or it contains scAddRecExpr.
const SCEV * getSignExtendExprImpl(const SCEV *Op, Type *Ty, unsigned Depth=0)
const SCEV * getAddRecExpr(const SCEV *Start, const SCEV *Step, const Loop *L, SCEV::NoWrapFlags Flags)
Get an add recurrence expression for the specified loop.
bool isBasicBlockEntryGuardedByCond(const BasicBlock *BB, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the basic block is protected by a conditional between LHS and RHS.
bool isKnownOnEveryIteration(ICmpInst::Predicate Pred, const SCEVAddRecExpr *LHS, const SCEV *RHS)
Test if the condition described by Pred, LHS, RHS is known to be true on every iteration of the loop ...
bool hasOperand(const SCEV *S, const SCEV *Op) const
Test whether the given SCEV has Op as a direct or indirect operand.
const SCEV * getUDivExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
const SCEVPredicate * getComparePredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
const SCEV * getNotSCEV(const SCEV *V)
Return the SCEV object corresponding to ~V.
std::optional< LoopInvariantPredicate > getLoopInvariantPredicate(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI=nullptr)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L, return a LoopInvaria...
bool instructionCouldExistWithOperands(const SCEV *A, const SCEV *B)
Return true if there exists a point in the program at which both A and B could be operands to the sam...
std::optional< bool > evaluatePredicateAt(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Instruction *CtxI)
Check whether the condition described by Pred, LHS, and RHS is true or false in the given Context.
ConstantRange getUnsignedRange(const SCEV *S)
Determine the unsigned range for a particular SCEV.
uint32_t getMinTrailingZeros(const SCEV *S)
Determine the minimum number of zero bits that S is guaranteed to end in (at every loop iteration).
void print(raw_ostream &OS) const
const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
const SCEV * getPredicatedBackedgeTakenCount(const Loop *L, SmallVector< const SCEVPredicate *, 4 > &Predicates)
Similar to getBackedgeTakenCount, except it will add a set of SCEV predicates to Predicates that are ...
static SCEV::NoWrapFlags clearFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OffFlags)
void forgetTopmostLoop(const Loop *L)
void forgetValue(Value *V)
This method should be called by the client when it has changed a value in a way that may effect its v...
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
const SCEV * getNoopOrAnyExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
void forgetBlockAndLoopDispositions(Value *V=nullptr)
Called when the client has changed the disposition of values in a loop or block.
const SCEV * getTruncateExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
@ MonotonicallyDecreasing
@ MonotonicallyIncreasing
const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
const SCEVPredicate * getWrapPredicate(const SCEVAddRecExpr *AR, SCEVWrapPredicate::IncrementWrapFlags AddedFlags)
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
APInt getNonZeroConstantMultiple(const SCEV *S)
const SCEV * getMinusOne(Type *Ty)
Return a SCEV for the constant -1 of a specific type.
static SCEV::NoWrapFlags setFlags(SCEV::NoWrapFlags Flags, SCEV::NoWrapFlags OnFlags)
bool hasLoopInvariantBackedgeTakenCount(const Loop *L)
Return true if the specified loop has an analyzable loop-invariant backedge-taken count.
BlockDisposition getBlockDisposition(const SCEV *S, const BasicBlock *BB)
Return the "disposition" of the given SCEV with respect to the given block.
const SCEV * getNoopOrZeroExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
const SCEV * getUMinFromMismatchedTypes(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
Promote the operands to the wider of the types using zero-extension, and then perform a umin operatio...
bool loopIsFiniteByAssumption(const Loop *L)
Return true if this loop is finite by assumption.
const SCEV * getExistingSCEV(Value *V)
Return an existing SCEV for V if there is one, otherwise return nullptr.
LoopDisposition
An enum describing the relationship between a SCEV and a loop.
@ LoopComputable
The SCEV varies predictably with the loop.
@ LoopVariant
The SCEV is loop-variant (unknown).
@ LoopInvariant
The SCEV is loop-invariant.
friend class SCEVCallbackVH
const SCEV * getAnyExtendExpr(const SCEV *Op, Type *Ty)
getAnyExtendExpr - Return a SCEV for the given operand extended with unspecified bits out to the give...
const SCEVAddRecExpr * convertSCEVToAddRecWithPredicates(const SCEV *S, const Loop *L, SmallPtrSetImpl< const SCEVPredicate * > &Preds)
Tries to convert the S expression to an AddRec expression, adding additional predicates to Preds as r...
std::optional< SCEV::NoWrapFlags > getStrengthenedNoWrapFlagsFromBinOp(const OverflowingBinaryOperator *OBO)
Parse NSW/NUW flags from add/sub/mul IR binary operation Op into SCEV no-wrap flags,...
void forgetLcssaPhiWithNewPredecessor(Loop *L, PHINode *V)
Forget LCSSA phi node V of loop L to which a new predecessor was added, such that it may no longer be...
bool containsUndefs(const SCEV *S) const
Return true if the SCEV expression contains an undef value.
std::optional< MonotonicPredicateType > getMonotonicPredicateType(const SCEVAddRecExpr *LHS, ICmpInst::Predicate Pred)
If, for all loop invariant X, the predicate "LHS `Pred` X" is monotonically increasing or decreasing,...
const SCEV * getCouldNotCompute()
bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L)
Determine if the SCEV can be evaluated at loop's entry.
BlockDisposition
An enum describing the relationship between a SCEV and a basic block.
@ DominatesBlock
The SCEV dominates the block.
@ ProperlyDominatesBlock
The SCEV properly dominates the block.
@ DoesNotDominateBlock
The SCEV does not dominate the block.
std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterationsImpl(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
const SCEV * getExitCount(const Loop *L, const BasicBlock *ExitingBlock, ExitCountKind Kind=Exact)
Return the number of times the backedge executes before the given exit would be taken; if not exactly...
const SCEV * getSignExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
void getPoisonGeneratingValues(SmallPtrSetImpl< const Value * > &Result, const SCEV *S)
Return the set of Values that, if poison, will definitively result in S being poison as well.
void forgetLoopDispositions()
Called when the client has changed the disposition of values in this loop.
const SCEV * getVScale(Type *Ty)
unsigned getSmallConstantTripCount(const Loop *L)
Returns the exact trip count of the loop if we can compute it, and the result is a small constant.
bool hasComputableLoopEvolution(const SCEV *S, const Loop *L)
Return true if the given SCEV changes value in a known way in the specified loop.
const SCEV * getPointerBase(const SCEV *V)
Transitively follow the chain of pointer-type operands until reaching a SCEV that does not have a sin...
const SCEV * getMinMaxExpr(SCEVTypes Kind, SmallVectorImpl< const SCEV * > &Operands)
bool dominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV dominate the specified basic block.
APInt getUnsignedRangeMax(const SCEV *S)
Determine the max of the unsigned range for a particular SCEV.
ExitCountKind
The terms "backedge taken count" and "exit count" are used interchangeably to refer to the number of ...
@ SymbolicMaximum
An expression which provides an upper bound on the exact trip count.
@ ConstantMaximum
A constant which provides an upper bound on the exact trip count.
@ Exact
An expression exactly describing the number of times the backedge has executed when a loop is exited.
std::optional< LoopInvariantPredicate > getLoopInvariantExitCondDuringFirstIterations(ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS, const Loop *L, const Instruction *CtxI, const SCEV *MaxIter)
If the result of the predicate LHS Pred RHS is loop invariant with respect to L at given Context duri...
const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
const SCEV * getElementSize(Instruction *Inst)
Return the size of an element read or written by Inst.
const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
const SCEV * getUnknown(Value *V)
std::optional< std::pair< const SCEV *, SmallVector< const SCEVPredicate *, 3 > > > createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI)
Checks if SymbolicPHI can be rewritten as an AddRecExpr under some Predicates.
const SCEV * getTruncateOrZeroExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool isLoopEntryGuardedByCond(const Loop *L, ICmpInst::Predicate Pred, const SCEV *LHS, const SCEV *RHS)
Test whether entry to the loop is protected by a conditional between LHS and RHS.
const SCEV * getElementCount(Type *Ty, ElementCount EC)
static SCEV::NoWrapFlags maskFlags(SCEV::NoWrapFlags Flags, int Mask)
Convenient NoWrapFlags manipulation that hides enum casts and is visible in the ScalarEvolution name ...
std::optional< APInt > computeConstantDifference(const SCEV *LHS, const SCEV *RHS)
Compute LHS - RHS and returns the result as an APInt if it is a constant, and std::nullopt if it isn'...
bool properlyDominates(const SCEV *S, const BasicBlock *BB)
Return true if elements that makes up the given SCEV properly dominate the specified basic block.
const SCEV * rewriteUsingPredicate(const SCEV *S, const Loop *L, const SCEVPredicate &A)
Re-writes the SCEV according to the Predicates in A.
std::pair< const SCEV *, const SCEV * > SplitIntoInitAndPostInc(const Loop *L, const SCEV *S)
Splits SCEV expression S into two SCEVs.
bool canReuseInstruction(const SCEV *S, Instruction *I, SmallVectorImpl< Instruction * > &DropPoisonGeneratingInsts)
Check whether it is poison-safe to represent the expression S using the instruction I.
const SCEV * getUDivExactExpr(const SCEV *LHS, const SCEV *RHS)
Get a canonical unsigned division expression, or something simpler if possible.
void registerUser(const SCEV *User, ArrayRef< const SCEV * > Ops)
Notify this ScalarEvolution that User directly uses SCEVs in Ops.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
bool containsErasedValue(const SCEV *S) const
Return true if the SCEV expression contains a Value that has been optimized out and is now a nullptr.
const SCEV * getSymbolicMaxBackedgeTakenCount(const Loop *L)
When successful, this returns a SCEV that is greater than or equal to (i.e.
APInt getSignedRangeMax(const SCEV *S)
Determine the max of the signed range for a particular SCEV.
LLVMContext & getContext() const
This class represents the LLVM 'select' instruction.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
Used to lazily calculate structure layout information for a target machine, based on the DataLayout s...
TypeSize getElementOffset(unsigned Idx) const
TypeSize getSizeInBits() const
Class to represent struct types.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isPointerTy() const
True if this is an instance of PointerType.
static IntegerType * getInt1Ty(LLVMContext &C)
static IntegerType * getIntNTy(LLVMContext &C, unsigned N)
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
static IntegerType * getInt8Ty(LLVMContext &C)
bool isIntOrPtrTy() const
Return true if this is an integer type or a pointer type.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
unsigned getValueID() const
Return an ID for the concrete type of this object.
void printAsOperand(raw_ostream &O, bool PrintType=true, const Module *M=nullptr) const
Print the name of this Value out to the specified raw_ostream.
LLVMContext & getContext() const
All values hold a context through their type.
iterator_range< use_iterator > uses()
StringRef getName() const
Return a constant reference to the value's name.
Represents an op.with.overflow intrinsic.
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
This class implements an extremely fast bulk output stream that can only output to a stream.
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
const APInt & smin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be signed.
const APInt & smax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be signed.
const APInt & umin(const APInt &A, const APInt &B)
Determine the smaller of two APInts considered to be unsigned.
std::optional< APInt > SolveQuadraticEquationWrap(APInt A, APInt B, APInt C, unsigned RangeWidth)
Let q(n) = An^2 + Bn + C, and BW = bit width of the value range (e.g.
const APInt & umax(const APInt &A, const APInt &B)
Determine the larger of two APInts considered to be unsigned.
APInt GreatestCommonDivisor(APInt A, APInt B)
Compute GCD of two unsigned APInt values.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
StringRef getName(ID id)
Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
bool match(Val *V, const Pattern &P)
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
bind_ty< WithOverflowInst > m_WithOverflowInst(WithOverflowInst *&I)
Match a with overflow intrinsic, capturing it if we match.
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
class_match< BasicBlock > m_BasicBlock()
Match an arbitrary basic block value and ignore it.
initializer< Ty > init(const Ty &Val)
LocationClass< Ty > location(Ty &L)
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
NodeAddr< PhiNode * > Phi
This is an optimization pass for GlobalISel generic memory operations.
void visitAll(const SCEV *Root, SV &Visitor)
Use SCEVTraversal to visit all nodes in the given expression tree.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
bool mustTriggerUB(const Instruction *I, const SmallPtrSetImpl< const Value * > &KnownPoison)
Return true if the given instruction must trigger undefined behavior when I is executed with any oper...
detail::scope_exit< std::decay_t< Callable > > make_scope_exit(Callable &&F)
bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if its even possible to fold a call to the specified function.
bool verifyFunction(const Function &F, raw_ostream *OS=nullptr)
Check a function for errors, useful for use when debugging a pass.
auto successors(const MachineBasicBlock *BB)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
unsigned short computeExpressionSize(ArrayRef< const SCEV * > Args)
Printable print(const GCNRegPressure &RP, const GCNSubtarget *ST=nullptr)
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
bool isOverflowIntrinsicNoWrap(const WithOverflowInst *WO, const DominatorTree &DT)
Returns true if the arithmetic part of the WO 's result is used only along the paths control dependen...
bool matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, Value *&Start, Value *&Step)
Attempt to match a simple first order recurrence cycle of the form: iv = phi Ty [Start,...
void erase(Container &C, ValueType V)
Wrapper function to remove a value from a container:
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
void initializeScalarEvolutionWrapperPassPass(PassRegistry &)
auto reverse(ContainerTy &&C)
bool isMustProgress(const Loop *L)
Return true if this loop can be assumed to make progress.
bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
bool isFinite(const Loop *L)
Return true if this loop can be assumed to run for a finite number of iterations.
bool programUndefinedIfPoison(const Instruction *Inst)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
bool isPointerTy(const Type *T)
ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
bool propagatesPoison(const Use &PoisonOp)
Return true if PoisonOp's user yields poison or raises UB if its operand PoisonOp is poison.
@ UMin
Unsigned integer min implemented in terms of select(cmp()).
@ Mul
Product of integers.
@ SMax
Signed integer max implemented in terms of select(cmp()).
@ SMin
Signed integer min implemented in terms of select(cmp()).
@ UMax
Unsigned integer max implemented in terms of select(cmp()).
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOn...
DWARFExpression::Operation Op
auto max_element(R &&Range)
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
auto count_if(R &&Range, UnaryPredicate P)
Wrapper function around std::count_if to count the number of times an element satisfying a given pred...
auto predecessors(const MachineBasicBlock *BB)
bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates or reallocates memory (eith...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, unsigned Depth=0, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true)
Return the number of times the sign bit of the register is replicated into the other bits.
iterator_range< df_iterator< T > > depth_first(const T &G)
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
bool SCEVExprContains(const SCEV *Root, PredTy Pred)
Return true if any node in Root satisfies the predicate Pred.
Implement std::hash so that hash_code can be used in STL containers.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
A special type used by analysis passes to provide an address that identifies that particular analysis...
static KnownBits makeConstant(const APInt &C)
Create known bits from a known constant.
bool isNonNegative() const
Returns true if this value is known to be non-negative.
static KnownBits ashr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for ashr(LHS, RHS).
unsigned getBitWidth() const
Get the bit width of this value.
static KnownBits lshr(const KnownBits &LHS, const KnownBits &RHS, bool ShAmtNonZero=false, bool Exact=false)
Compute known bits for lshr(LHS, RHS).
KnownBits zextOrTrunc(unsigned BitWidth) const
Return known bits for a zero extension or truncation of the value we're tracking.
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
bool isNegative() const
Returns true if this value is known to be negative.
static KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
Various options to control the behavior of getObjectSize.
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.
bool RoundToAlign
Whether to round the result up to the alignment of allocas, byval arguments, and global variables.
An object of this class is returned by queries that could not be answered.
static bool classof(const SCEV *S)
Methods for support type inquiry through isa, cast, and dyn_cast:
This class defines a simple visitor class that may be used for various SCEV analysis purposes.
A utility class that uses RAII to save and restore the value of a variable.
Information about the number of loop iterations for which a loop exit's branch condition evaluates to...
ExitLimit(const SCEV *E)
Construct either an exact exit limit from a constant, or an unknown one from a SCEVCouldNotCompute.
const SCEV * ExactNotTaken
const SCEV * SymbolicMaxNotTaken
void addPredicate(const SCEVPredicate *P)
const SCEV * ConstantMaxNotTaken