llvm.org GIT mirror: llvm, commit ddb3eaf
Commit message: "Don't attempt to analyze values which are obviously undef. This fixes some assertion failures in extreme cases." git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@102042 91177308-0d34-0410-b5e6-96231b3b80d8 — Dan Gohman, 10 years ago.
Diffstat: 2 changed file(s), 144 addition(s) and 78 deletion(s). (Page controls: Raw diff / Collapse all / Expand all)
18451845 if (const SCEVConstant *RHSC = dyn_cast(RHS)) {
18461846 if (RHSC->getValue()->equalsInt(1))
18471847 return LHS; // X udiv 1 --> x
1848 if (RHSC->getValue()->isZero())
1849 return getIntegerSCEV(0, LHS->getType()); // value is undefined
1850
1851 // Determine if the division can be folded into the operands of
1852 // its operands.
1853 // TODO: Generalize this to non-constants by using known-bits information.
1854 const Type *Ty = LHS->getType();
1855 unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1856 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
1857 // For non-power-of-two values, effectively round the value up to the
1858 // nearest power of two.
1859 if (!RHSC->getValue()->getValue().isPowerOf2())
1860 ++MaxShiftAmt;
1861 const IntegerType *ExtTy =
1862 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
1863 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
1864 if (const SCEVAddRecExpr *AR = dyn_cast(LHS))
1865 if (const SCEVConstant *Step =
1866 dyn_cast(AR->getStepRecurrence(*this)))
1867 if (!Step->getValue()->getValue()
1868 .urem(RHSC->getValue()->getValue()) &&
1869 getZeroExtendExpr(AR, ExtTy) ==
1870 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1871 getZeroExtendExpr(Step, ExtTy),
1872 AR->getLoop())) {
1873 SmallVector Operands;
1874 for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1875 Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1876 return getAddRecExpr(Operands, AR->getLoop());
1848 // If the denominator is zero, the result of the udiv is undefined. Don't
1849 // try to analyze it, because the resolution chosen here may differ from
1850 // the resolution chosen in other parts of the compiler.
1851 if (!RHSC->getValue()->isZero()) {
1852 // Determine if the division can be folded into the operands of
1853 // its operands.
1854 // TODO: Generalize this to non-constants by using known-bits information.
1855 const Type *Ty = LHS->getType();
1856 unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1857 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
1858 // For non-power-of-two values, effectively round the value up to the
1859 // nearest power of two.
1860 if (!RHSC->getValue()->getValue().isPowerOf2())
1861 ++MaxShiftAmt;
1862 const IntegerType *ExtTy =
1863 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
1864 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
1865 if (const SCEVAddRecExpr *AR = dyn_cast(LHS))
1866 if (const SCEVConstant *Step =
1867 dyn_cast(AR->getStepRecurrence(*this)))
1868 if (!Step->getValue()->getValue()
1869 .urem(RHSC->getValue()->getValue()) &&
1870 getZeroExtendExpr(AR, ExtTy) ==
1871 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1872 getZeroExtendExpr(Step, ExtTy),
1873 AR->getLoop())) {
1874 SmallVector Operands;
1875 for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1876 Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1877 return getAddRecExpr(Operands, AR->getLoop());
1878 }
1879 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
1880 if (const SCEVMulExpr *M = dyn_cast(LHS)) {
1881 SmallVector Operands;
1882 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1883 Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1884 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1885 // Find an operand that's safely divisible.
1886 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1887 const SCEV *Op = M->getOperand(i);
1888 const SCEV *Div = getUDivExpr(Op, RHSC);
1889 if (!isa(Div) && getMulExpr(Div, RHSC) == Op) {
1890 Operands = SmallVector(M->op_begin(),
1891 M->op_end());
1892 Operands[i] = Div;
1893 return getMulExpr(Operands);
1894 }
1895 }
1896 }
1897 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1898 if (const SCEVAddRecExpr *A = dyn_cast(LHS)) {
1899 SmallVector Operands;
1900 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1901 Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1902 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1903 Operands.clear();
1904 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1905 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
1906 if (isa(Op) ||
1907 getMulExpr(Op, RHS) != A->getOperand(i))
1908 break;
1909 Operands.push_back(Op);
1910 }
1911 if (Operands.size() == A->getNumOperands())
1912 return getAddExpr(Operands);
18771913 }
1878 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
1879 if (const SCEVMulExpr *M = dyn_cast(LHS)) {
1880 SmallVector Operands;
1881 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1882 Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1883 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1884 // Find an operand that's safely divisible.
1885 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1886 const SCEV *Op = M->getOperand(i);
1887 const SCEV *Div = getUDivExpr(Op, RHSC);
1888 if (!isa(Div) && getMulExpr(Div, RHSC) == Op) {
1889 Operands = SmallVector(M->op_begin(), M->op_end());
1890 Operands[i] = Div;
1891 return getMulExpr(Operands);
1892 }
1893 }
1894 }
1895 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1896 if (const SCEVAddRecExpr *A = dyn_cast(LHS)) {
1897 SmallVector Operands;
1898 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1899 Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1900 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1901 Operands.clear();
1902 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1903 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
1904 if (isa(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
1905 break;
1906 Operands.push_back(Op);
1907 }
1908 if (Operands.size() == A->getNumOperands())
1909 return getAddExpr(Operands);
1910 }
1911 }
1912
1913 // Fold if both operands are constant.
1914 if (const SCEVConstant *LHSC = dyn_cast(LHS)) {
1915 Constant *LHSCV = LHSC->getValue();
1916 Constant *RHSCV = RHSC->getValue();
1917 return getConstant(cast(ConstantExpr::getUDiv(LHSCV,
1918 RHSCV)));
1914 }
1915
1916 // Fold if both operands are constant.
1917 if (const SCEVConstant *LHSC = dyn_cast(LHS)) {
1918 Constant *LHSCV = LHSC->getValue();
1919 Constant *RHSCV = RHSC->getValue();
1920 return getConstant(cast(ConstantExpr::getUDiv(LHSCV,
1921 RHSCV)));
1922 }
19191923 }
19201924 }
19211925
33183322 // Turn shift left of a constant amount into a multiply.
33193323 if (ConstantInt *SA = dyn_cast(U->getOperand(1))) {
33203324 uint32_t BitWidth = cast(U->getType())->getBitWidth();
3325
3326 // If the shift count is not less than the bitwidth, the result of
3327 // the shift is undefined. Don't try to analyze it, because the
3328 // resolution chosen here may differ from the resolution chosen in
3329 // other parts of the compiler.
3330 if (SA->getValue().uge(BitWidth))
3331 break;
3332
33213333 Constant *X = ConstantInt::get(getContext(),
3322 APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
3334 APInt(BitWidth, 1).shl(SA->getZExtValue()));
33233335 return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
33243336 }
33253337 break;
33283340 // Turn logical shift right of a constant into a unsigned divide.
33293341 if (ConstantInt *SA = dyn_cast(U->getOperand(1))) {
33303342 uint32_t BitWidth = cast(U->getType())->getBitWidth();
3343
3344 // If the shift count is not less than the bitwidth, the result of
3345 // the shift is undefined. Don't try to analyze it, because the
3346 // resolution chosen here may differ from the resolution chosen in
3347 // other parts of the compiler.
3348 if (SA->getValue().uge(BitWidth))
3349 break;
3350
33313351 Constant *X = ConstantInt::get(getContext(),
3332 APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
3352 APInt(BitWidth, 1).shl(SA->getZExtValue()));
33333353 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
33343354 }
33353355 break;
33373357 case Instruction::AShr:
33383358 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
33393359 if (ConstantInt *CI = dyn_cast(U->getOperand(1)))
3340 if (Instruction *L = dyn_cast>(U->getOperand(0)))
3360 if (Operator *L = dyn_cast>(U->getOperand(0)))
33413361 if (L->getOpcode() == Instruction::Shl &&
33423362 L->getOperand(1) == U->getOperand(1)) {
3343 unsigned BitWidth = getTypeSizeInBits(U->getType());
3363 uint64_t BitWidth = getTypeSizeInBits(U->getType());
3364
3365 // If the shift count is not less than the bitwidth, the result of
3366 // the shift is undefined. Don't try to analyze it, because the
3367 // resolution chosen here may differ from the resolution chosen in
3368 // other parts of the compiler.
3369 if (CI->getValue().uge(BitWidth))
3370 break;
3371
33443372 uint64_t Amt = BitWidth - CI->getZExtValue();
33453373 if (Amt == BitWidth)
33463374 return getSCEV(L->getOperand(0)); // shift by zero --> noop
3347 if (Amt > BitWidth)
3348 return getIntegerSCEV(0, U->getType()); // value is undefined
33493375 return
33503376 getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
3351 IntegerType::get(getContext(), Amt)),
3352 U->getType());
3377 IntegerType::get(getContext(),
3378 Amt)),
3379 U->getType());
33533380 }
33543381 break;
33553382
0 ; RUN: opt -analyze -scalar-evolution < %s | FileCheck %s
1
2 ; ScalarEvolution shouldn't attempt to interpret expressions which have
3 ; undefined results.
4
; Each CHECK line below pins the SCEV printed for the instruction above it.
; Where the operation's result is undefined (udiv by zero, or a shift whose
; count is >= the 64-bit width, including -1 which zero-extends to a huge
; count), ScalarEvolution must leave the value unanalyzed — the CHECK expects
; the raw instruction name (e.g. "%B") rather than a folded expression, so the
; resolution chosen here cannot diverge from other parts of the compiler.
5 define void @foo(i64 %x) {
6
; udiv by zero: kept as an opaque unsigned-divide expression, not folded to 0.
7 %a = udiv i64 %x, 0
8 ; CHECK: --> (%x /u 0)
9
; shl/ashr/lshr with count == bit width (64): undefined, left unanalyzed.
10 %B = shl i64 %x, 64
11 ; CHECK: --> %B
12
13 %b = ashr i64 %B, 64
14 ; CHECK: --> %b
15
16 %c = lshr i64 %x, 64
17 ; CHECK: --> %c
18
19 %d = shl i64 %x, 64
20 ; CHECK: --> %d
21
; count of -1 is i64 0xFFFF...FF, also >= bit width: same treatment.
22 %E = shl i64 %x, -1
23 ; CHECK: --> %E
24
25 %e = ashr i64 %E, -1
26 ; CHECK: --> %e
27
28 %f = lshr i64 %x, -1
29 ; CHECK: --> %f
30
31 %g = shl i64 %x, -1
32 ; CHECK: --> %g
33
; A bitcast of undef stays undef in SCEV output.
34 %h = bitcast i64 undef to i64
35 ; CHECK: --> undef
36
37 ret void
38 }