108#define DEBUG_TYPE "instcombine"
116 "Number of instruction combining iterations performed");
117STATISTIC(NumOneIteration,
"Number of functions with one iteration");
118STATISTIC(NumTwoIterations,
"Number of functions with two iterations");
119STATISTIC(NumThreeIterations,
"Number of functions with three iterations");
121 "Number of functions with four or more iterations");
125STATISTIC(NumDeadInst ,
"Number of dead inst eliminated");
131 "Controls which instructions are visited");
138 "instcombine-max-sink-users",
cl::init(32),
139 cl::desc(
"Maximum number of undroppable users for instruction sinking"));
143 cl::desc(
"Maximum array size considered when doing a combine"));
155std::optional<Instruction *>
166 bool &KnownBitsComputed) {
183 *
this, II, DemandedElts, PoisonElts, PoisonElts2, PoisonElts3,
198 auto *Inst = dyn_cast<Instruction>(
GEP);
205 if (Inst && !
GEP->hasOneUse() && !
GEP->hasAllConstantIndices() &&
206 !
GEP->getSourceElementType()->isIntegerTy(8)) {
220bool InstCombinerImpl::isDesirableIntType(
unsigned BitWidth)
const {
239bool InstCombinerImpl::shouldChangeType(
unsigned FromWidth,
240 unsigned ToWidth)
const {
246 if (ToWidth < FromWidth && isDesirableIntType(ToWidth))
251 if ((FromLegal || isDesirableIntType(FromWidth)) && !ToLegal)
256 if (!FromLegal && !ToLegal && ToWidth > FromWidth)
267bool InstCombinerImpl::shouldChangeType(
Type *
From,
Type *To)
const {
273 unsigned FromWidth =
From->getPrimitiveSizeInBits();
275 return shouldChangeType(FromWidth, ToWidth);
284 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
285 if (!OBO || !OBO->hasNoSignedWrap())
290 if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
293 const APInt *BVal, *CVal;
297 bool Overflow =
false;
298 if (Opcode == Instruction::Add)
299 (void)BVal->
sadd_ov(*CVal, Overflow);
301 (
void)BVal->
ssub_ov(*CVal, Overflow);
307 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
308 return OBO && OBO->hasNoUnsignedWrap();
312 auto *OBO = dyn_cast<OverflowingBinaryOperator>(&
I);
313 return OBO && OBO->hasNoSignedWrap();
322 I.clearSubclassOptionalData();
327 I.clearSubclassOptionalData();
328 I.setFastMathFlags(FMF);
337 auto *Cast = dyn_cast<CastInst>(BinOp1->
getOperand(0));
338 if (!Cast || !Cast->hasOneUse())
342 auto CastOpcode = Cast->getOpcode();
343 if (CastOpcode != Instruction::ZExt)
351 auto *BinOp2 = dyn_cast<BinaryOperator>(Cast->getOperand(0));
352 if (!BinOp2 || !BinOp2->hasOneUse() || BinOp2->getOpcode() != AssocOpcode)
378 Cast->dropPoisonGeneratingFlags();
384Value *InstCombinerImpl::simplifyIntToPtrRoundTripCast(
Value *Val) {
385 auto *IntToPtr = dyn_cast<IntToPtrInst>(Val);
388 auto *PtrToInt = dyn_cast<PtrToIntInst>(IntToPtr->getOperand(0));
389 Type *CastTy = IntToPtr->getDestTy();
392 PtrToInt->getSrcTy()->getPointerAddressSpace() &&
395 return PtrToInt->getOperand(0);
422 bool Changed =
false;
430 Changed = !
I.swapOperands();
432 if (
I.isCommutative()) {
433 if (
auto Pair = matchSymmetricPair(
I.getOperand(0),
I.getOperand(1))) {
443 if (
I.isAssociative()) {
466 I.setHasNoUnsignedWrap(
true);
469 I.setHasNoSignedWrap(
true);
498 if (
I.isAssociative() &&
I.isCommutative()) {
561 if (isa<FPMathOperator>(NewBO)) {
575 I.setHasNoUnsignedWrap(
true);
593 if (LOp == Instruction::And)
594 return ROp == Instruction::Or || ROp == Instruction::Xor;
597 if (LOp == Instruction::Or)
598 return ROp == Instruction::And;
602 if (LOp == Instruction::Mul)
603 return ROp == Instruction::Add || ROp == Instruction::Sub;
626 if (isa<Constant>(V))
640 assert(
Op &&
"Expected a binary operator");
641 LHS =
Op->getOperand(0);
642 RHS =
Op->getOperand(1);
643 if (TopOpcode == Instruction::Add || TopOpcode == Instruction::Sub) {
648 return Instruction::Mul;
653 if (OtherOp && OtherOp->
getOpcode() == Instruction::AShr &&
656 return Instruction::AShr;
659 return Op->getOpcode();
668 assert(
A &&
B &&
C &&
D &&
"All values must be provided");
671 Value *RetVal =
nullptr;
682 if (
A ==
C || (InnerCommutative &&
A ==
D)) {
702 if (
B ==
D || (InnerCommutative &&
B ==
C)) {
725 if (isa<OverflowingBinaryOperator>(RetVal)) {
728 if (isa<OverflowingBinaryOperator>(&
I)) {
729 HasNSW =
I.hasNoSignedWrap();
730 HasNUW =
I.hasNoUnsignedWrap();
732 if (
auto *LOBO = dyn_cast<OverflowingBinaryOperator>(
LHS)) {
733 HasNSW &= LOBO->hasNoSignedWrap();
734 HasNUW &= LOBO->hasNoUnsignedWrap();
737 if (
auto *ROBO = dyn_cast<OverflowingBinaryOperator>(
RHS)) {
738 HasNSW &= ROBO->hasNoSignedWrap();
739 HasNUW &= ROBO->hasNoUnsignedWrap();
742 if (TopLevelOpcode == Instruction::Add && InnerOpcode == Instruction::Mul) {
752 cast<Instruction>(RetVal)->setHasNoSignedWrap(HasNSW);
755 cast<Instruction>(RetVal)->setHasNoUnsignedWrap(HasNUW);
770 unsigned Opc =
I->getOpcode();
771 unsigned ConstIdx = 1;
778 case Instruction::Sub:
781 case Instruction::ICmp:
788 case Instruction::Or:
792 case Instruction::Add:
798 if (!
match(
I->getOperand(1 - ConstIdx),
811 if (Opc == Instruction::ICmp && !cast<ICmpInst>(
I)->isEquality()) {
814 if (!Cmp || !Cmp->isZeroValue())
819 bool Consumes =
false;
823 assert(NotOp !=
nullptr &&
824 "Desync between isFreeToInvert and getFreelyInverted");
833 case Instruction::Sub:
836 case Instruction::Or:
837 case Instruction::Add:
840 case Instruction::ICmp:
876 auto IsValidBinOpc = [](
unsigned Opc) {
880 case Instruction::And:
881 case Instruction::Or:
882 case Instruction::Xor:
883 case Instruction::Add:
892 auto IsCompletelyDistributable = [](
unsigned BinOpc1,
unsigned BinOpc2,
894 assert(ShOpc != Instruction::AShr);
895 return (BinOpc1 != Instruction::Add && BinOpc2 != Instruction::Add) ||
896 ShOpc == Instruction::Shl;
899 auto GetInvShift = [](
unsigned ShOpc) {
900 assert(ShOpc != Instruction::AShr);
901 return ShOpc == Instruction::LShr ? Instruction::Shl : Instruction::LShr;
904 auto CanDistributeBinops = [&](
unsigned BinOpc1,
unsigned BinOpc2,
908 if (BinOpc1 == Instruction::And)
913 if (!IsCompletelyDistributable(BinOpc1, BinOpc2, ShOpc))
919 if (BinOpc2 == Instruction::And)
930 auto MatchBinOp = [&](
unsigned ShOpnum) ->
Instruction * {
932 Value *
X, *
Y, *ShiftedX, *Mask, *Shift;
933 if (!
match(
I.getOperand(ShOpnum),
936 if (!
match(
I.getOperand(1 - ShOpnum),
944 auto *IY = dyn_cast<Instruction>(
I.getOperand(ShOpnum));
945 auto *IX = dyn_cast<Instruction>(ShiftedX);
950 unsigned ShOpc = IY->getOpcode();
951 if (ShOpc != IX->getOpcode())
955 auto *BO2 = dyn_cast<Instruction>(
I.getOperand(1 - ShOpnum));
959 unsigned BinOpc = BO2->getOpcode();
961 if (!IsValidBinOpc(
I.getOpcode()) || !IsValidBinOpc(BinOpc))
964 if (ShOpc == Instruction::AShr) {
978 if (BinOpc ==
I.getOpcode() &&
979 IsCompletelyDistributable(
I.getOpcode(), BinOpc, ShOpc)) {
994 if (!CanDistributeBinops(
I.getOpcode(), BinOpc, ShOpc, CMask, CShift))
1008 return MatchBinOp(1);
1026 Value *
A, *CondVal, *TrueVal, *FalseVal;
1029 auto MatchSelectAndCast = [&](
Value *CastOp,
Value *SelectOp) {
1031 A->getType()->getScalarSizeInBits() == 1 &&
1038 if (MatchSelectAndCast(
LHS,
RHS))
1040 else if (MatchSelectAndCast(
RHS,
LHS))
1045 auto NewFoldedConst = [&](
bool IsTrueArm,
Value *V) {
1046 bool IsCastOpRHS = (CastOp ==
RHS);
1047 bool IsZExt = isa<ZExtInst>(CastOp);
1052 }
else if (IsZExt) {
1053 unsigned BitWidth = V->getType()->getScalarSizeInBits();
1066 Value *NewTrueVal = NewFoldedConst(
false, TrueVal);
1068 NewFoldedConst(
true, FalseVal));
1072 Value *NewTrueVal = NewFoldedConst(
true, TrueVal);
1074 NewFoldedConst(
false, FalseVal));
1095 if (Op0 && Op1 && LHSOpcode == RHSOpcode)
1215static std::optional<std::pair<Value *, Value *>>
1217 if (
LHS->getParent() !=
RHS->getParent())
1218 return std::nullopt;
1220 if (
LHS->getNumIncomingValues() < 2)
1221 return std::nullopt;
1224 return std::nullopt;
1226 Value *L0 =
LHS->getIncomingValue(0);
1227 Value *R0 =
RHS->getIncomingValue(0);
1229 for (
unsigned I = 1, E =
LHS->getNumIncomingValues();
I != E; ++
I) {
1233 if ((L0 == L1 && R0 == R1) || (L0 == R1 && R0 == L1))
1236 return std::nullopt;
1239 return std::optional(std::pair(L0, R0));
1242std::optional<std::pair<Value *, Value *>>
1243InstCombinerImpl::matchSymmetricPair(
Value *LHS,
Value *RHS) {
1244 Instruction *LHSInst = dyn_cast<Instruction>(LHS);
1245 Instruction *RHSInst = dyn_cast<Instruction>(RHS);
1247 return std::nullopt;
1249 case Instruction::PHI:
1251 case Instruction::Select: {
1257 return std::pair(TrueVal, FalseVal);
1258 return std::nullopt;
1260 case Instruction::Call: {
1264 if (LHSMinMax && RHSMinMax &&
1271 return std::pair(LHSMinMax->
getLHS(), LHSMinMax->
getRHS());
1272 return std::nullopt;
1275 return std::nullopt;
1285 if (!LHSIsSelect && !RHSIsSelect)
1290 if (isa<FPMathOperator>(&
I)) {
1291 FMF =
I.getFastMathFlags();
1298 Value *
Cond, *True =
nullptr, *False =
nullptr;
1306 if (Opcode != Instruction::Add || (!True && !False) || (True && False))
1321 if (LHSIsSelect && RHSIsSelect &&
A ==
D) {
1330 else if (True && !False)
1338 if (
Value *NewSel = foldAddNegate(
B,
C,
RHS))
1345 if (
Value *NewSel = foldAddNegate(E,
F,
LHS))
1349 if (!True || !False)
1360 assert(!isa<Constant>(
I) &&
"Shouldn't invert users of constant");
1362 if (U == IgnoredUser)
1364 switch (cast<Instruction>(U)->
getOpcode()) {
1365 case Instruction::Select: {
1366 auto *SI = cast<SelectInst>(U);
1368 SI->swapProfMetadata();
1371 case Instruction::Br: {
1378 case Instruction::Xor:
1385 "canFreelyInvertAllUsersOf() ?");
1392Value *InstCombinerImpl::dyn_castNegVal(
Value *V)
const {
1402 if (
C->getType()->getElementType()->isIntegerTy())
1406 for (
unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1411 if (isa<UndefValue>(Elt))
1414 if (!isa<ConstantInt>(Elt))
1421 if (
auto *CV = dyn_cast<Constant>(V))
1422 if (CV->getType()->isVectorTy() &&
1423 CV->getType()->getScalarType()->isIntegerTy() && CV->getSplatValue())
1436Instruction *InstCombinerImpl::foldFBinOpOfIntCastsFromSign(
1437 BinaryOperator &BO,
bool OpsFromSigned, std::array<Value *, 2> IntOps,
1441 Type *IntTy = IntOps[0]->getType();
1446 unsigned MaxRepresentableBits =
1451 unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
1455 auto IsNonZero = [&](
unsigned OpNo) ->
bool {
1456 if (OpsKnown[OpNo].hasKnownBits() &&
1457 OpsKnown[OpNo].getKnownBits(
SQ).isNonZero())
1462 auto IsNonNeg = [&](
unsigned OpNo) ->
bool {
1466 return OpsKnown[OpNo].getKnownBits(
SQ).isNonNegative();
1470 auto IsValidPromotion = [&](
unsigned OpNo) ->
bool {
1472 if (OpsFromSigned != isa<SIToFPInst>(BO.
getOperand(OpNo)) &&
1481 if (MaxRepresentableBits < IntSz) {
1491 NumUsedLeadingBits[OpNo] =
1492 IntSz - OpsKnown[OpNo].getKnownBits(
SQ).countMinLeadingZeros();
1500 if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
1503 return !OpsFromSigned || BO.
getOpcode() != Instruction::FMul ||
1508 if (Op1FpC !=
nullptr) {
1510 if (OpsFromSigned && BO.
getOpcode() == Instruction::FMul &&
1515 OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
1517 if (Op1IntC ==
nullptr)
1520 : Instruction::UIToFP,
1521 Op1IntC, FPTy,
DL) != Op1FpC)
1525 IntOps[1] = Op1IntC;
1529 if (IntTy != IntOps[1]->
getType())
1532 if (Op1FpC ==
nullptr) {
1533 if (!IsValidPromotion(1))
1536 if (!IsValidPromotion(0))
1542 bool NeedsOverflowCheck =
true;
1545 unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
1546 unsigned OverflowMaxCurBits =
1547 std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
1548 bool OutputSigned = OpsFromSigned;
1550 case Instruction::FAdd:
1551 IntOpc = Instruction::Add;
1552 OverflowMaxOutputBits += OverflowMaxCurBits;
1554 case Instruction::FSub:
1555 IntOpc = Instruction::Sub;
1556 OverflowMaxOutputBits += OverflowMaxCurBits;
1558 case Instruction::FMul:
1559 IntOpc = Instruction::Mul;
1560 OverflowMaxOutputBits += OverflowMaxCurBits * 2;
1566 if (OverflowMaxOutputBits < IntSz) {
1567 NeedsOverflowCheck =
false;
1570 if (IntOpc == Instruction::Sub)
1571 OutputSigned =
true;
1577 if (NeedsOverflowCheck &&
1578 !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
1582 if (
auto *IntBO = dyn_cast<BinaryOperator>(IntBinOp)) {
1583 IntBO->setHasNoSignedWrap(OutputSigned);
1584 IntBO->setHasNoUnsignedWrap(!OutputSigned);
1597 std::array<Value *, 2> IntOps = {
nullptr,
nullptr};
1617 if (
Instruction *R = foldFBinOpOfIntCastsFromSign(BO,
false,
1618 IntOps, Op1FpC, OpsKnown))
1620 return foldFBinOpOfIntCastsFromSign(BO,
true, IntOps,
1636 !
X->getType()->isIntOrIntVectorTy(1))
1655 C = dyn_cast<Constant>(IsTrueArm ? SI->getTrueValue()
1656 : SI->getFalseValue());
1657 }
else if (
match(SI->getCondition(),
1663 C = dyn_cast<Constant>(
Op);
1684 bool FoldWithMultiUse) {
1686 if (!SI->hasOneUse() && !FoldWithMultiUse)
1689 Value *TV = SI->getTrueValue();
1690 Value *FV = SI->getFalseValue();
1691 if (!(isa<Constant>(TV) || isa<Constant>(FV)))
1695 if (SI->getType()->isIntOrIntVectorTy(1))
1705 if (
auto *CI = dyn_cast<FCmpInst>(SI->getCondition())) {
1706 if (CI->hasOneUse()) {
1707 Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
1708 if ((TV == Op0 && FV == Op1) || (FV == Op0 && TV == Op1))
1716 if (!NewTV && !NewFV)
1754 const ICmpInst *ICmp = dyn_cast<ICmpInst>(&
I);
1758 std::optional<bool> ImpliedCond =
1760 Ops[0], Ops[1],
DL, LHSIsTrue);
1770 if (NumPHIValues == 0)
1780 if (UI != &
I && !
I.isIdenticalTo(UI))
1791 Value *NonSimplifiedInVal =
nullptr;
1792 for (
unsigned i = 0; i != NumPHIValues; ++i) {
1801 if (NonSimplifiedBB)
return nullptr;
1803 NonSimplifiedBB = InBB;
1804 NonSimplifiedInVal = InVal;
1809 if (isa<InvokeInst>(InVal))
1810 if (cast<Instruction>(InVal)->
getParent() == NonSimplifiedBB)
1827 if (NonSimplifiedBB !=
nullptr) {
1843 if (NonSimplifiedBB) {
1847 U = NonSimplifiedInVal;
1849 U = U->DoPHITranslation(PN->
getParent(), NonSimplifiedBB);
1854 for (
unsigned i = 0; i != NumPHIValues; ++i) {
1855 if (NewPhiValues[i])
1863 if (
User == &
I)
continue;
1869 const_cast<PHINode &
>(*NewPN),
1878 auto *Phi0 = dyn_cast<PHINode>(BO.
getOperand(0));
1879 auto *Phi1 = dyn_cast<PHINode>(BO.
getOperand(1));
1880 if (!Phi0 || !Phi1 || !Phi0->hasOneUse() || !Phi1->hasOneUse() ||
1881 Phi0->getNumOperands() != Phi1->getNumOperands())
1902 auto CanFoldIncomingValuePair = [&](std::tuple<Use &, Use &>
T) {
1903 auto &Phi0Use = std::get<0>(
T);
1904 auto &Phi1Use = std::get<1>(
T);
1905 if (Phi0->getIncomingBlock(Phi0Use) != Phi1->getIncomingBlock(Phi1Use))
1907 Value *Phi0UseV = Phi0Use.get();
1908 Value *Phi1UseV = Phi1Use.get();
1911 else if (Phi1UseV ==
C)
1918 if (
all_of(
zip(Phi0->operands(), Phi1->operands()),
1919 CanFoldIncomingValuePair)) {
1922 assert(NewIncomingValues.
size() == Phi0->getNumOperands() &&
1923 "The number of collected incoming values should equal the number "
1924 "of the original PHINode operands!");
1925 for (
unsigned I = 0;
I < Phi0->getNumOperands();
I++)
1926 NewPhi->
addIncoming(NewIncomingValues[
I], Phi0->getIncomingBlock(
I));
1931 if (Phi0->getNumOperands() != 2 || Phi1->getNumOperands() != 2)
1938 ConstBB = Phi0->getIncomingBlock(0);
1939 OtherBB = Phi0->getIncomingBlock(1);
1941 ConstBB = Phi0->getIncomingBlock(1);
1942 OtherBB = Phi0->getIncomingBlock(0);
1952 auto *PredBlockBranch = dyn_cast<BranchInst>(OtherBB->
getTerminator());
1953 if (!PredBlockBranch || PredBlockBranch->isConditional() ||
1960 for (
auto BBIter = BO.
getParent()->
begin(); &*BBIter != &BO; ++BBIter)
1973 Phi0->getIncomingValueForBlock(OtherBB),
1974 Phi1->getIncomingValueForBlock(OtherBB));
1975 if (
auto *NotFoldedNewBO = dyn_cast<BinaryOperator>(NewBO))
1976 NotFoldedNewBO->copyIRFlags(&BO);
1986 if (!isa<Constant>(
I.getOperand(1)))
1989 if (
auto *Sel = dyn_cast<SelectInst>(
I.getOperand(0))) {
1992 }
else if (
auto *PN = dyn_cast<PHINode>(
I.getOperand(0))) {
2003 if (
GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
2010 if (!isa<VectorType>(Inst.
getType()))
2016 cast<VectorType>(Inst.
getType())->getElementCount());
2018 cast<VectorType>(Inst.
getType())->getElementCount());
2023 Value *L0, *L1, *R0, *R1;
2028 cast<ShuffleVectorInst>(
LHS)->isConcat() &&
2029 cast<ShuffleVectorInst>(
RHS)->isConcat()) {
2036 if (
auto *BO = dyn_cast<BinaryOperator>(NewBO0))
2039 if (
auto *BO = dyn_cast<BinaryOperator>(NewBO1))
2046 if (
auto *BO = dyn_cast<BinaryOperator>(V))
2063 return createBinOpReverse(V1, V2);
2067 return createBinOpReverse(V1,
RHS);
2071 return createBinOpReverse(
LHS, V2);
2081 if (
auto *BO = dyn_cast<BinaryOperator>(XY))
2090 V1->
getType() == V2->getType() &&
2093 return createBinOpShuffle(V1, V2, Mask);
2102 auto *LShuf = cast<ShuffleVectorInst>(
LHS);
2103 auto *RShuf = cast<ShuffleVectorInst>(
RHS);
2108 if (LShuf->isSelect() &&
2110 RShuf->isSelect() &&
2128 auto *InstVTy = dyn_cast<FixedVectorType>(Inst.
getType());
2133 cast<FixedVectorType>(V1->
getType())->getNumElements() <=
2134 InstVTy->getNumElements()) {
2136 "Shuffle should not change scalar type");
2143 bool ConstOp1 = isa<Constant>(
RHS);
2145 unsigned SrcVecNumElts =
2146 cast<FixedVectorType>(V1->
getType())->getNumElements();
2149 bool MayChange =
true;
2150 unsigned NumElts = InstVTy->getNumElements();
2151 for (
unsigned I = 0;
I < NumElts; ++
I) {
2153 if (ShMask[
I] >= 0) {
2154 assert(ShMask[
I] < (
int)NumElts &&
"Not expecting narrowing shuffle");
2162 if (!CElt || (!isa<PoisonValue>(NewCElt) && NewCElt != CElt) ||
2163 I >= SrcVecNumElts) {
2167 NewVecC[ShMask[
I]] = CElt;
2178 if (
I >= SrcVecNumElts || ShMask[
I] < 0) {
2183 if (!MaybePoison || !isa<PoisonValue>(MaybePoison)) {
2200 Value *NewLHS = ConstOp1 ? V1 : NewC;
2201 Value *NewRHS = ConstOp1 ? NewC : V1;
2202 return createBinOpShuffle(NewLHS, NewRHS, Mask);
2209 if (isa<ShuffleVectorInst>(
RHS))
2242 if (isa<FPMathOperator>(R)) {
2243 R->copyFastMathFlags(&Inst);
2246 if (
auto *NewInstBO = dyn_cast<BinaryOperator>(NewBO))
2247 NewInstBO->copyIRFlags(R);
2276 cast<Operator>(Op1)->getOpcode() == CastOpc &&
2277 (Op0->
hasOneUse() || Op1->hasOneUse()))) {
2295 if (!willNotOverflow(BO.
getOpcode(),
X,
Y, BO, IsSext))
2301 if (
auto *NewBinOp = dyn_cast<BinaryOperator>(NarrowBO)) {
2303 NewBinOp->setHasNoSignedWrap();
2305 NewBinOp->setHasNoUnsignedWrap();
2323 if (!
GEP.hasAllConstantIndices())
2338 bool IsInBounds =
GEP.isInBounds();
2339 Type *Ty =
GEP.getSourceElementType();
2340 Value *NewTrueC = Builder.
CreateGEP(Ty, TrueC, IndexC,
"", IsInBounds);
2341 Value *NewFalseC = Builder.
CreateGEP(Ty, FalseC, IndexC,
"", IsInBounds);
2351 if (
GEP.getNumIndices() != 1)
2360 Type *PtrTy = Src->getType()->getScalarType();
2361 unsigned IndexSizeInBits =
DL.getIndexTypeSizeInBits(PtrTy);
2368 if (isa<ScalableVectorType>(
BaseType))
2372 if (NewOffset.
isZero() ||
2373 (Src->hasOneUse() &&
GEP.getOperand(1)->hasOneUse())) {
2394 Type *PtrTy = Src->getType()->getScalarType();
2395 if (
GEP.hasAllConstantIndices() &&
2396 (Src->hasOneUse() || Src->hasAllConstantIndices())) {
2400 bool IsFirstType =
true;
2401 unsigned NumVarIndices = 0;
2402 for (
auto Pair :
enumerate(Src->indices())) {
2403 if (!isa<ConstantInt>(Pair.value())) {
2405 IsFirstType =
false;
2406 NumVarIndices = Pair.index() + 1;
2413 if (NumVarIndices != Src->getNumIndices()) {
2434 if (!
Offset.isZero() || (!IsFirstType && !ConstIndices[0].isZero())) {
2437 if (Src->hasAllConstantIndices())
2449 Src->getNumIndices() - NumVarIndices));
2456 IsInBounds &=
Idx.isNonNegative() == ConstIndices[0].isNonNegative();
2461 Indices,
"", IsInBounds));
2464 if (Src->getResultElementType() !=
GEP.getSourceElementType())
2470 bool EndsWithSequential =
false;
2473 EndsWithSequential =
I.isSequential();
2476 if (EndsWithSequential) {
2479 Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
2497 if (Src->getNumOperands() == 2) {
2503 Indices.
append(Src->op_begin()+1, Src->op_end()-1);
2506 }
else if (isa<Constant>(*
GEP.idx_begin()) &&
2507 cast<Constant>(*
GEP.idx_begin())->isNullValue() &&
2508 Src->getNumOperands() != 1) {
2510 Indices.
append(Src->op_begin()+1, Src->op_end());
2514 if (!Indices.
empty())
2517 Src->getSourceElementType(), Src->getOperand(0), Indices,
"",
2525 bool &DoesConsume,
unsigned Depth) {
2526 static Value *
const NonNull =
reinterpret_cast<Value *
>(uintptr_t(1));
2544 if (!WillInvertAllUses)
2549 if (
auto *
I = dyn_cast<CmpInst>(V)) {
2560 DoesConsume,
Depth))
2563 DoesConsume,
Depth))
2572 DoesConsume,
Depth))
2575 DoesConsume,
Depth))
2584 DoesConsume,
Depth))
2593 DoesConsume,
Depth))
2605 bool LocalDoesConsume = DoesConsume;
2607 LocalDoesConsume,
Depth))
2610 LocalDoesConsume,
Depth)) {
2611 DoesConsume = LocalDoesConsume;
2614 DoesConsume,
Depth);
2615 assert(NotB !=
nullptr &&
2616 "Unable to build inverted value for known freely invertable op");
2617 if (
auto *II = dyn_cast<IntrinsicInst>(V))
2626 if (
PHINode *PN = dyn_cast<PHINode>(V)) {
2627 bool LocalDoesConsume = DoesConsume;
2629 for (
Use &U : PN->operands()) {
2630 BasicBlock *IncomingBlock = PN->getIncomingBlock(U);
2634 if (NewIncomingVal ==
nullptr)
2637 if (NewIncomingVal == V)
2640 IncomingValues.
emplace_back(NewIncomingVal, IncomingBlock);
2643 DoesConsume = LocalDoesConsume;
2649 for (
auto [Val, Pred] : IncomingValues)
2658 DoesConsume,
Depth))
2665 DoesConsume,
Depth))
2674 bool IsLogical,
Value *
A,
2676 bool LocalDoesConsume = DoesConsume;
2678 LocalDoesConsume,
Depth))
2681 LocalDoesConsume,
Depth)) {
2683 LocalDoesConsume,
Depth);
2684 DoesConsume = LocalDoesConsume;
2694 return TryInvertAndOrUsingDeMorgan(Instruction::And,
false,
A,
2698 return TryInvertAndOrUsingDeMorgan(Instruction::Or,
false,
A,
2702 return TryInvertAndOrUsingDeMorgan(Instruction::And,
true,
A,
2706 return TryInvertAndOrUsingDeMorgan(Instruction::Or,
true,
A,
2715 Type *GEPType =
GEP.getType();
2716 Type *GEPEltType =
GEP.getSourceElementType();
2724 if (
auto *GEPFVTy = dyn_cast<FixedVectorType>(GEPType)) {
2725 auto VWidth = GEPFVTy->getNumElements();
2726 APInt PoisonElts(VWidth, 0);
2742 bool MadeChange =
false;
2746 Type *NewScalarIndexTy =
2756 Type *IndexTy = (*I)->getType();
2757 Type *NewIndexType =
2760 cast<VectorType>(IndexTy)->getElementCount())
2772 if (IndexTy != NewIndexType) {
2784 if (!GEPEltType->
isIntegerTy(8) &&
GEP.hasAllConstantIndices()) {
2801 if (
auto *PN = dyn_cast<PHINode>(PtrOp)) {
2802 auto *Op1 = dyn_cast<GetElementPtrInst>(PN->getOperand(0));
2817 for (
auto I = PN->op_begin()+1, E = PN->op_end();
I !=E; ++
I) {
2818 auto *Op2 = dyn_cast<GetElementPtrInst>(*
I);
2819 if (!Op2 || Op1->getNumOperands() != Op2->getNumOperands() ||
2820 Op1->getSourceElementType() != Op2->getSourceElementType())
2828 Type *CurTy =
nullptr;
2830 for (
unsigned J = 0,
F = Op1->getNumOperands(); J !=
F; ++J) {
2831 if (Op1->getOperand(J)->getType() != Op2->getOperand(J)->getType())
2834 if (Op1->getOperand(J) != Op2->getOperand(J)) {
2843 assert(CurTy &&
"No current type?");
2863 CurTy = Op1->getSourceElementType();
2875 if (DI != -1 && !PN->hasOneUse())
2878 auto *NewGEP = cast<GetElementPtrInst>(Op1->clone());
2891 PN->getNumOperands());
2894 for (
auto &
I : PN->operands())
2895 NewPN->
addIncoming(cast<GEPOperator>(
I)->getOperand(DI),
2896 PN->getIncomingBlock(
I));
2898 NewGEP->setOperand(DI, NewPN);
2901 NewGEP->insertBefore(*
GEP.getParent(),
GEP.getParent()->getFirstInsertionPt());
2905 if (
auto *Src = dyn_cast<GEPOperator>(PtrOp))
2909 if (
GEP.getNumIndices() == 1) {
2910 unsigned AS =
GEP.getPointerAddressSpace();
2911 if (
GEP.getOperand(1)->getType()->getScalarSizeInBits() ==
2915 if (TyAllocSize == 1) {
2924 GEPType ==
Y->getType()) {
2925 bool HasSameUnderlyingObject =
2927 bool Changed =
false;
2928 GEP.replaceUsesWithIf(
Y, [&](
Use &U) {
2929 bool ShouldReplace = HasSameUnderlyingObject ||
2930 isa<ICmpInst>(U.getUser()) ||
2931 isa<PtrToIntInst>(U.getUser());
2932 Changed |= ShouldReplace;
2933 return ShouldReplace;
2935 return Changed ? &
GEP :
nullptr;
2958 if (
GEP.getNumIndices() == 1) {
2961 auto CanPreserveInBounds = [&](
bool AddIsNSW,
Value *Idx1,
Value *Idx2) {
2976 bool IsInBounds = CanPreserveInBounds(
2977 cast<OverflowingBinaryOperator>(
GEP.getOperand(1))->hasNoSignedWrap(),
2981 Idx1,
"", IsInBounds);
2995 bool IsInBounds = CanPreserveInBounds(
2998 GEP.getResultElementType(),
GEP.getPointerOperand(),
3009 if (!
GEP.isInBounds()) {
3012 APInt BasePtrOffset(IdxWidth, 0);
3013 Value *UnderlyingPtrOp =
3016 bool CanBeNull, CanBeFreed;
3018 DL, CanBeNull, CanBeFreed);
3019 if (!CanBeNull && !CanBeFreed && DerefBytes != 0) {
3020 if (
GEP.accumulateConstantOffset(
DL, BasePtrOffset) &&
3022 APInt AllocSize(IdxWidth, DerefBytes);
3023 if (BasePtrOffset.
ule(AllocSize)) {
3025 GEP.getSourceElementType(), PtrOp, Indices,
GEP.getName());
3039 if (isa<ConstantPointerNull>(V))
3041 if (
auto *LI = dyn_cast<LoadInst>(V))
3042 return isa<GlobalVariable>(LI->getPointerOperand());
3066 return Dest && Dest->Ptr == UsedV;
3080 switch (
I->getOpcode()) {
3085 case Instruction::AddrSpaceCast:
3086 case Instruction::BitCast:
3087 case Instruction::GetElementPtr:
3092 case Instruction::ICmp: {
3099 unsigned OtherIndex = (ICI->
getOperand(0) == PI) ? 1 : 0;
3106 auto AlignmentAndSizeKnownValid = [](
CallBase *CB) {
3110 const APInt *Alignment;
3112 return match(CB->getArgOperand(0),
m_APInt(Alignment)) &&
3116 auto *CB = dyn_cast<CallBase>(AI);
3118 if (CB && TLI.
getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
3119 TLI.
has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
3120 !AlignmentAndSizeKnownValid(CB))
3126 case Instruction::Call:
3133 case Intrinsic::memmove:
3134 case Intrinsic::memcpy:
3135 case Intrinsic::memset: {
3137 if (
MI->isVolatile() ||
MI->getRawDest() != PI)
3141 case Intrinsic::assume:
3142 case Intrinsic::invariant_start:
3143 case Intrinsic::invariant_end:
3144 case Intrinsic::lifetime_start:
3145 case Intrinsic::lifetime_end:
3146 case Intrinsic::objectsize:
3149 case Intrinsic::launder_invariant_group:
3150 case Intrinsic::strip_invariant_group:
3179 case Instruction::Store: {
3181 if (SI->isVolatile() || SI->getPointerOperand() != PI)
3189 }
while (!Worklist.
empty());
3212 std::unique_ptr<DIBuilder> DIB;
3213 if (isa<AllocaInst>(
MI)) {
3219 for (
unsigned i = 0, e =
Users.size(); i != e; ++i) {
3231 II,
DL, &
TLI,
AA,
true, &InsertedInstructions);
3232 for (
Instruction *Inserted : InsertedInstructions)
3240 for (
unsigned i = 0, e =
Users.size(); i != e; ++i) {
3249 C->isFalseWhenEqual()));
3250 }
else if (
auto *SI = dyn_cast<StoreInst>(
I)) {
3251 for (
auto *DVI : DVIs)
3252 if (DVI->isAddressOfVariable())
3254 for (
auto *DVR : DVRs)
3255 if (DVR->isAddressOfVariable())
3298 for (
auto *DVI : DVIs)
3299 if (DVI->isAddressOfVariable() || DVI->getExpression()->startsWithDeref())
3300 DVI->eraseFromParent();
3301 for (
auto *DVR : DVRs)
3302 if (DVR->isAddressOfVariable() || DVR->getExpression()->startsWithDeref())
3303 DVR->eraseFromParent();
3349 if (FreeInstrBB->
size() != 2) {
3351 if (&Inst == &FI || &Inst == FreeInstrBBTerminator)
3353 auto *Cast = dyn_cast<CastInst>(&Inst);
3354 if (!Cast || !Cast->isNoopCast(
DL))
3375 "Broken CFG: missing edge from predecessor to successor");
3380 if (&Instr == FreeInstrBBTerminator)
3382 Instr.moveBeforePreserving(TI);
3385 "Only the branch instruction should remain");
3396 Attrs = Attrs.removeParamAttribute(FI.
getContext(), 0, Attribute::NonNull);
3397 Attribute Dereferenceable = Attrs.getParamAttr(0, Attribute::Dereferenceable);
3398 if (Dereferenceable.
isValid()) {
3400 Attrs = Attrs.removeParamAttribute(FI.
getContext(), 0,
3401 Attribute::Dereferenceable);
3402 Attrs = Attrs.addDereferenceableOrNullParamAttr(FI.
getContext(), 0, Bytes);
3411 if (isa<UndefValue>(
Op)) {
3419 if (isa<ConstantPointerNull>(
Op))
3455 FPClassTest ReturnClass =
F->getAttributes().getRetNoFPClass();
3456 if (ReturnClass ==
fcNone)
3473 bool Changed =
false;
3474 while (
Instruction *Prev =
I.getPrevNonDebugInstruction()) {
3479 if (Prev->isEHPad())
3510 return BBI->isDebugOrPseudoInst() ||
3511 (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy());
3516 if (BBI != FirstInstr)
3518 }
while (BBI != FirstInstr && IsNoopInstrForStoreMerging(BBI));
3520 return dyn_cast<StoreInst>(BBI);
3532 if (!
DeadEdges.insert({From, To}).second)
3537 for (
Use &U : PN.incoming_values())
3538 if (PN.getIncomingBlock(U) ==
From && !isa<PoisonValue>(U)) {
3554 std::next(
I->getReverseIterator())))) {
3555 if (!Inst.use_empty() && !Inst.getType()->isTokenTy()) {
3559 if (Inst.isEHPad() || Inst.getType()->isTokenTy())
3562 Inst.dropDbgRecords();
3570 for (
Value *V : Changed)
3597 if (Succ == LiveSucc)
3625 if (isa<SelectInst>(
Cond) &&
3646 auto *Cmp = cast<CmpInst>(
Cond);
3655 if (isa<UndefValue>(
Cond)) {
3659 if (
auto *CI = dyn_cast<ConstantInt>(
Cond)) {
3675 unsigned CstOpIdx = IsTrueArm ? 1 : 2;
3676 auto *
C = dyn_cast<ConstantInt>(
Select->getOperand(CstOpIdx));
3680 BasicBlock *CstBB = SI.findCaseValue(
C)->getCaseSuccessor();
3681 if (CstBB != SI.getDefaultDest())
3694 for (
auto Case : SI.cases())
3695 if (!CR.
contains(Case.getCaseValue()->getValue()))
3707 for (
auto Case : SI.cases()) {
3709 assert(isa<ConstantInt>(NewCase) &&
3710 "Result of expression should be constant");
3711 Case.setValue(cast<ConstantInt>(NewCase));
3719 for (
auto Case : SI.cases()) {
3721 assert(isa<ConstantInt>(NewCase) &&
3722 "Result of expression should be constant");
3723 Case.setValue(cast<ConstantInt>(NewCase));
3731 all_of(SI.cases(), [&](
const auto &Case) {
3732 return Case.getCaseValue()->getValue().countr_zero() >= ShiftAmt;
3738 Value *NewCond = Op0;
3745 for (
auto Case : SI.cases()) {
3746 const APInt &CaseVal = Case.getCaseValue()->getValue();
3748 : CaseVal.
lshr(ShiftAmt);
3749 Case.setValue(ConstantInt::get(SI.getContext(), ShiftedCase));
3757 bool IsZExt = isa<ZExtInst>(
Cond);
3761 if (
all_of(SI.cases(), [&](
const auto &Case) {
3762 const APInt &CaseVal = Case.getCaseValue()->getValue();
3763 return IsZExt ? CaseVal.isIntN(NewWidth)
3764 : CaseVal.isSignedIntN(NewWidth);
3766 for (
auto &Case : SI.cases()) {
3767 APInt TruncatedCase = Case.getCaseValue()->getValue().
trunc(NewWidth);
3768 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3775 if (
auto *
Select = dyn_cast<SelectInst>(
Cond)) {
3790 for (
const auto &
C : SI.cases()) {
3792 std::min(LeadingKnownZeros,
C.getCaseValue()->getValue().countl_zero());
3794 std::min(LeadingKnownOnes,
C.getCaseValue()->getValue().countl_one());
3797 unsigned NewWidth = Known.
getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
3803 if (NewWidth > 0 && NewWidth < Known.
getBitWidth() &&
3804 shouldChangeType(Known.
getBitWidth(), NewWidth)) {
3809 for (
auto Case : SI.cases()) {
3810 APInt TruncatedCase = Case.getCaseValue()->getValue().
trunc(NewWidth);
3811 Case.setValue(ConstantInt::get(SI.getContext(), TruncatedCase));
3816 if (isa<UndefValue>(
Cond)) {
3820 if (
auto *CI = dyn_cast<ConstantInt>(
Cond)) {
3822 SI.findCaseValue(CI)->getCaseSuccessor());
3836 const APInt *
C =
nullptr;
3838 if (*EV.
idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
3839 OvID == Intrinsic::umul_with_overflow)) {
3844 if (
C->isPowerOf2()) {
3845 return BinaryOperator::CreateShl(
3847 ConstantInt::get(WO->getLHS()->getType(),
C->logBase2()));
3855 if (!WO->hasOneUse())
3869 assert(*EV.
idx_begin() == 1 &&
"Unexpected extract index for overflow inst");
3872 if (OvID == Intrinsic::usub_with_overflow)
3877 if (OvID == Intrinsic::smul_with_overflow &&
3878 WO->getLHS()->getType()->isIntOrIntVectorTy(1))
3879 return BinaryOperator::CreateAnd(WO->getLHS(), WO->getRHS());
3882 if (OvID == Intrinsic::umul_with_overflow && WO->getLHS() == WO->getRHS()) {
3883 unsigned BitWidth = WO->getLHS()->getType()->getScalarSizeInBits();
3888 ConstantInt::get(WO->getLHS()->getType(),
3899 WO->getBinaryOp(), *
C, WO->getNoWrapKind());
3904 auto *OpTy = WO->getRHS()->getType();
3905 auto *NewLHS = WO->getLHS();
3909 ConstantInt::get(OpTy, NewRHSC));
3927 const unsigned *exti, *exte, *insi, *inse;
3928 for (exti = EV.
idx_begin(), insi =
IV->idx_begin(),
3929 exte = EV.
idx_end(), inse =
IV->idx_end();
3930 exti != exte && insi != inse;
3944 if (exti == exte && insi == inse)
3977 if (
Instruction *R = foldExtractOfOverflowIntrinsic(EV))
3980 if (
LoadInst *L = dyn_cast<LoadInst>(Agg)) {
3982 if (
auto *STy = dyn_cast<StructType>(Agg->
getType());
3983 STy && STy->containsScalableVectorType())
3991 if (L->isSimple() && L->hasOneUse()) {
4003 L->getPointerOperand(), Indices);
4007 NL->setAAMetadata(L->getAAMetadata());
4014 if (
auto *PN = dyn_cast<PHINode>(Agg))
4020 if (
auto *SI = dyn_cast<SelectInst>(Agg))
4037 switch (Personality) {
4067 cast<ArrayType>(
LHS->
getType())->getNumElements()
4069 cast<ArrayType>(
RHS->
getType())->getNumElements();
4081 bool MakeNewInstruction =
false;
4083 bool CleanupFlag =
LI.isCleanup();
4086 for (
unsigned i = 0, e =
LI.getNumClauses(); i != e; ++i) {
4087 bool isLastClause = i + 1 == e;
4088 if (
LI.isCatch(i)) {
4095 if (AlreadyCaught.
insert(TypeInfo).second) {
4100 MakeNewInstruction =
true;
4107 MakeNewInstruction =
true;
4108 CleanupFlag =
false;
4119 assert(
LI.isFilter(i) &&
"Unsupported landingpad clause!");
4127 if (!NumTypeInfos) {
4130 MakeNewInstruction =
true;
4131 CleanupFlag =
false;
4135 bool MakeNewFilter =
false;
4137 if (isa<ConstantAggregateZero>(FilterClause)) {
4139 assert(NumTypeInfos > 0 &&
"Should have handled empty filter already!");
4145 MakeNewInstruction =
true;
4152 if (NumTypeInfos > 1)
4153 MakeNewFilter =
true;
4157 NewFilterElts.
reserve(NumTypeInfos);
4162 bool SawCatchAll =
false;
4163 for (
unsigned j = 0; j != NumTypeInfos; ++j) {
4191 if (SeenInFilter.
insert(TypeInfo).second)
4192 NewFilterElts.
push_back(cast<Constant>(Elt));
4197 MakeNewInstruction =
true;
4202 if (NewFilterElts.
size() < NumTypeInfos)
4203 MakeNewFilter =
true;
4205 if (MakeNewFilter) {
4207 NewFilterElts.
size());
4209 MakeNewInstruction =
true;
4218 if (MakeNewFilter && !NewFilterElts.
size()) {
4219 assert(MakeNewInstruction &&
"New filter but not a new instruction!");
4220 CleanupFlag =
false;
4231 for (
unsigned i = 0, e = NewClauses.
size(); i + 1 < e; ) {
4234 for (j = i; j != e; ++j)
4235 if (!isa<ArrayType>(NewClauses[j]->
getType()))
4241 for (
unsigned k = i; k + 1 < j; ++k)
4245 std::stable_sort(NewClauses.
begin() + i, NewClauses.
begin() + j,
4247 MakeNewInstruction =
true;
4266 for (
unsigned i = 0; i + 1 < NewClauses.
size(); ++i) {
4276 for (
unsigned j = NewClauses.
size() - 1; j != i; --j) {
4277 Value *LFilter = NewClauses[j];
4288 NewClauses.
erase(J);
4289 MakeNewInstruction =
true;
4299 if (isa<ConstantAggregateZero>(LFilter)) {
4302 if (isa<ConstantAggregateZero>(
Filter)) {
4303 assert(FElts <= LElts &&
"Should have handled this case earlier!");
4305 NewClauses.
erase(J);
4306 MakeNewInstruction =
true;
4312 if (isa<ConstantAggregateZero>(
Filter)) {
4315 assert(FElts > 0 &&
"Should have eliminated the empty filter earlier!");
4316 for (
unsigned l = 0; l != LElts; ++l)
4319 NewClauses.
erase(J);
4320 MakeNewInstruction =
true;
4331 bool AllFound =
true;
4332 for (
unsigned f = 0; f != FElts; ++f) {
4335 for (
unsigned l = 0; l != LElts; ++l) {
4337 if (LTypeInfo == FTypeInfo) {
4347 NewClauses.
erase(J);
4348 MakeNewInstruction =
true;
4356 if (MakeNewInstruction) {
4359 for (
unsigned i = 0, e = NewClauses.
size(); i != e; ++i)
4364 if (NewClauses.
empty())
4372 if (
LI.isCleanup() != CleanupFlag) {
4373 assert(!CleanupFlag &&
"Adding a cleanup, not removing one?!");
4374 LI.setCleanup(CleanupFlag);
4398 auto *OrigOpInst = dyn_cast<Instruction>(OrigOp);
4403 if (!OrigOpInst || !OrigOpInst->hasOneUse() || isa<PHINode>(OrigOp))
4417 Use *MaybePoisonOperand =
nullptr;
4418 for (
Use &U : OrigOpInst->operands()) {
4419 if (isa<MetadataAsValue>(U.get()) ||
4422 if (!MaybePoisonOperand)
4423 MaybePoisonOperand = &U;
4428 OrigOpInst->dropPoisonGeneratingAnnotations();
4431 if (!MaybePoisonOperand)
4436 MaybePoisonOperand->get(), MaybePoisonOperand->get()->
getName() +
".fr");
4438 replaceUse(*MaybePoisonOperand, FrozenMaybePoisonOperand);
4449 Use *StartU =
nullptr;
4467 Value *StartV = StartU->get();
4479 if (!Visited.
insert(V).second)
4482 if (Visited.
size() > 32)
4499 I->dropPoisonGeneratingAnnotations();
4501 if (StartNeedsFreeze) {
4513 if (isa<Constant>(
Op) ||
Op->hasOneUse())
4522 if (isa<Argument>(
Op)) {
4526 auto MoveBeforeOpt = cast<Instruction>(
Op)->getInsertionPointAfterDef();
4529 MoveBefore = *MoveBeforeOpt;
4533 if (isa<DbgInfoIntrinsic>(MoveBefore))
4534 MoveBefore = MoveBefore->getNextNonDebugInstruction()->getIterator();
4537 MoveBefore.setHeadBit(
false);
4539 bool Changed =
false;
4540 if (&FI != &*MoveBefore) {
4541 FI.
moveBefore(*MoveBefore->getParent(), MoveBefore);
4545 Op->replaceUsesWithIf(&FI, [&](
Use &U) ->
bool {
4547 Changed |= Dominates;
4556 for (
auto *U : V->users()) {
4557 if (isa<ShuffleVectorInst>(U))
4566 Value *Op0 =
I.getOperand(0);
4572 if (
auto *PN = dyn_cast<PHINode>(Op0)) {
4595 auto getUndefReplacement = [&
I](
Type *Ty) {
4598 for (
const auto *U :
I.users()) {
4607 else if (BestValue !=
C)
4608 BestValue = NullValue;
4610 assert(BestValue &&
"Must have at least one use");
4625 Constant *ReplaceC = getUndefReplacement(
I.getType()->getScalarType());
4640 auto *CB = dyn_cast<CallBase>(
I);
4659 for (
const User *U :
I.users()) {
4660 if (Visited.
insert(U).second)
4665 while (!AllocaUsers.
empty()) {
4666 auto *UserI = cast<Instruction>(AllocaUsers.
pop_back_val());
4667 if (isa<BitCastInst>(UserI) || isa<GetElementPtrInst>(UserI) ||
4668 isa<AddrSpaceCastInst>(UserI)) {
4689 if (isa<PHINode>(
I) ||
I->isEHPad() ||
I->mayThrow() || !
I->willReturn() ||
4697 if (isa<AllocaInst>(
I))
4705 if (
auto *CI = dyn_cast<CallInst>(
I)) {
4706 if (CI->isConvergent())
4712 if (
I->mayWriteToMemory()) {
4719 if (
I->mayReadFromMemory()) {
4726 E =
I->getParent()->end();
4728 if (Scan->mayWriteToMemory())
4732 I->dropDroppableUses([&](
const Use *U) {
4733 auto *
I = dyn_cast<Instruction>(U->getUser());
4734 if (
I &&
I->getParent() != DestBlock) {
4744 I->moveBefore(*DestBlock, InsertPos);
4755 if (!DbgUsers.
empty())
4757 if (!DbgVariableRecords.
empty())
4759 DbgVariableRecords);
4779 for (
auto &DbgUser : DbgUsers)
4780 if (DbgUser->getParent() != DestBlock)
4787 if (DVI->getParent() == SrcBlock)
4790 [](
auto *
A,
auto *
B) {
return B->comesBefore(
A); });
4794 for (
auto *
User : DbgUsersToSink) {
4799 if (isa<DbgDeclareInst>(
User))
4804 User->getDebugLoc()->getInlinedAt());
4806 if (!SunkVariables.
insert(DbgUserVariable).second)
4811 if (isa<DbgAssignIntrinsic>(
User))
4814 DIIClones.emplace_back(cast<DbgVariableIntrinsic>(
User->clone()));
4815 if (isa<DbgDeclareInst>(
User) && isa<CastInst>(
I))
4816 DIIClones.back()->replaceVariableLocationOp(
I,
I->getOperand(0));
4821 if (!DIIClones.empty()) {
4826 DIIClone->insertBefore(&*InsertPos);
4841 for (
auto &DVR : DbgVariableRecords)
4842 if (DVR->getParent() != DestBlock)
4843 DbgVariableRecordsToSalvage.
push_back(DVR);
4849 if (DVR->getParent() == SrcBlock)
4850 DbgVariableRecordsToSink.
push_back(DVR);
4857 return B->getInstruction()->comesBefore(
A->getInstruction());
4864 using InstVarPair = std::pair<const Instruction *, DebugVariable>;
4866 if (DbgVariableRecordsToSink.
size() > 1) {
4872 DVR->getDebugLoc()->getInlinedAt());
4873 CountMap[std::make_pair(DVR->getInstruction(), DbgUserVariable)] += 1;
4879 for (
auto It : CountMap) {
4880 if (It.second > 1) {
4881 FilterOutMap[It.first] =
nullptr;
4882 DupSet.
insert(It.first.first);
4893 DVR.getDebugLoc()->getInlinedAt());
4895 FilterOutMap.
find(std::make_pair(Inst, DbgUserVariable));
4896 if (FilterIt == FilterOutMap.
end())
4898 if (FilterIt->second !=
nullptr)
4900 FilterIt->second = &DVR;
4915 DVR->getDebugLoc()->getInlinedAt());
4919 if (!FilterOutMap.
empty()) {
4920 InstVarPair IVP = std::make_pair(DVR->getInstruction(), DbgUserVariable);
4921 auto It = FilterOutMap.
find(IVP);
4924 if (It != FilterOutMap.
end() && It->second != DVR)
4928 if (!SunkVariables.
insert(DbgUserVariable).second)
4931 if (DVR->isDbgAssign())
4939 if (DVRClones.
empty())
4953 assert(InsertPos.getHeadBit());
4955 InsertPos->getParent()->insertDbgRecordBefore(DVRClone, InsertPos);
4979 if (
I ==
nullptr)
continue;
4994 auto getOptionalSinkBlockForInst =
4995 [
this](
Instruction *
I) -> std::optional<BasicBlock *> {
4997 return std::nullopt;
5001 unsigned NumUsers = 0;
5003 for (
auto *U :
I->users()) {
5004 if (U->isDroppable())
5007 return std::nullopt;
5011 if (
PHINode *PN = dyn_cast<PHINode>(UserInst)) {
5012 for (
unsigned i = 0; i < PN->getNumIncomingValues(); i++) {
5013 if (PN->getIncomingValue(i) ==
I) {
5017 if (UserParent && UserParent != PN->getIncomingBlock(i))
5018 return std::nullopt;
5019 UserParent = PN->getIncomingBlock(i);
5022 assert(UserParent &&
"expected to find user block!");
5024 if (UserParent && UserParent != UserInst->
getParent())
5025 return std::nullopt;
5031 if (NumUsers == 0) {
5035 return std::nullopt;
5047 return std::nullopt;
5057 return std::nullopt;
5062 auto OptBB = getOptionalSinkBlockForInst(
I);
5064 auto *UserParent = *OptBB;
5072 for (
Use &U :
I->operands())
5073 if (
Instruction *OpI = dyn_cast<Instruction>(U.get()))
5081 I, {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5094 <<
" New = " << *Result <<
'\n');
5096 Result->copyMetadata(*
I,
5097 {LLVMContext::MD_dbg, LLVMContext::MD_annotation});
5099 I->replaceAllUsesWith(Result);
5102 Result->takeName(
I);
5109 if (isa<PHINode>(Result) != isa<PHINode>(
I)) {
5111 if (isa<PHINode>(
I))
5117 Result->insertInto(InstParent, InsertPos);
5126 <<
" New = " << *
I <<
'\n');
5158 if (!
I->hasMetadataOtherThanDebugLoc())
5161 auto Track = [](
Metadata *ScopeList,
auto &Container) {
5162 const auto *MDScopeList = dyn_cast_or_null<MDNode>(ScopeList);
5163 if (!MDScopeList || !Container.insert(MDScopeList).second)
5165 for (
const auto &
MDOperand : MDScopeList->operands())
5166 if (
auto *MDScope = dyn_cast<MDNode>(
MDOperand))
5167 Container.insert(MDScope);
5170 Track(
I->getMetadata(LLVMContext::MD_alias_scope), UsedAliasScopesAndLists);
5171 Track(
I->getMetadata(LLVMContext::MD_noalias), UsedNoAliasScopesAndLists);
5180 "llvm.experimental.noalias.scope.decl in use ?");
5183 "llvm.experimental.noalias.scope should refer to a single scope");
5185 if (
auto *MD = dyn_cast<MDNode>(
MDOperand))
5186 return !UsedAliasScopesAndLists.
contains(MD) ||
5187 !UsedNoAliasScopesAndLists.
contains(MD);
5212 if (Succ != LiveSucc &&
DeadEdges.insert({BB, Succ}).second)
5213 for (
PHINode &PN : Succ->phis())
5214 for (
Use &U : PN.incoming_values())
5215 if (PN.getIncomingBlock(U) == BB && !isa<PoisonValue>(U)) {
5225 HandleOnlyLiveSuccessor(BB,
nullptr);
5232 if (!Inst.use_empty() &&
5233 (Inst.getNumOperands() == 0 || isa<Constant>(Inst.getOperand(0))))
5237 Inst.replaceAllUsesWith(
C);
5240 Inst.eraseFromParent();
5246 for (
Use &U : Inst.operands()) {
5247 if (!isa<ConstantVector>(U) && !isa<ConstantExpr>(U))
5250 auto *
C = cast<Constant>(U);
5251 Constant *&FoldRes = FoldedConstants[
C];
5257 <<
"\n Old = " << *
C
5258 <<
"\n New = " << *FoldRes <<
'\n');
5267 if (!Inst.isDebugOrPseudoInst()) {
5268 InstrsForInstructionWorklist.
push_back(&Inst);
5269 SeenAliasScopes.
analyse(&Inst);
5277 if (isa<UndefValue>(BI->getCondition())) {
5279 HandleOnlyLiveSuccessor(BB,
nullptr);
5282 if (
auto *
Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
5283 bool CondVal =
Cond->getZExtValue();
5284 HandleOnlyLiveSuccessor(BB, BI->getSuccessor(!CondVal));
5287 }
else if (
SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
5288 if (isa<UndefValue>(SI->getCondition())) {
5290 HandleOnlyLiveSuccessor(BB,
nullptr);
5293 if (
auto *
Cond = dyn_cast<ConstantInt>(SI->getCondition())) {
5294 HandleOnlyLiveSuccessor(BB,
5295 SI->findCaseValue(
Cond)->getCaseSuccessor());
5305 if (LiveBlocks.
count(&BB))
5308 unsigned NumDeadInstInBB;
5309 unsigned NumDeadDbgInstInBB;
5310 std::tie(NumDeadInstInBB, NumDeadDbgInstInBB) =
5313 MadeIRChange |= NumDeadInstInBB + NumDeadDbgInstInBB > 0;
5314 NumDeadInst += NumDeadInstInBB;
5331 Inst->eraseFromParent();
5348 auto &
DL =
F.getParent()->getDataLayout();
5356 if (
auto *Assume = dyn_cast<AssumeInst>(
I))
5364 bool MadeIRChange =
false;
5369 unsigned Iteration = 0;
5375 <<
" on " <<
F.getName()
5376 <<
" reached; stopping without verifying fixpoint\n");
5380 ++NumWorklistIterations;
5381 LLVM_DEBUG(
dbgs() <<
"\n\nINSTCOMBINE ITERATION #" << Iteration <<
" on "
5382 <<
F.getName() <<
"\n");
5385 ORE, BFI, BPI, PSI,
DL, LI);
5388 MadeChangeInThisIteration |= IC.
run();
5389 if (!MadeChangeInThisIteration)
5392 MadeIRChange =
true;
5395 "Instruction Combining did not reach a fixpoint after " +
5403 else if (Iteration == 2)
5405 else if (Iteration == 3)
5406 ++NumThreeIterations;
5408 ++NumFourOrMoreIterations;
5410 return MadeIRChange;
5418 OS, MapClassName2PassName);
5421 OS << (Options.
UseLoopInfo ?
"" :
"no-") <<
"use-loop-info;";
5444 auto *BFI = (PSI && PSI->hasProfileSummary()) ?
5449 BFI, BPI, PSI, LI, Options))
5480 auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
5481 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
F);
5482 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
F);
5483 auto &
TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
F);
5484 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
5485 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
5488 auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
5489 auto *LI = LIWP ? &LIWP->getLoopInfo() :
nullptr;
5491 &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
5494 &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
5497 if (
auto *WrapperPass =
5498 getAnalysisIfAvailable<BranchProbabilityInfoWrapperPass>())
5499 BPI = &WrapperPass->getBPI();
5513 "Combine redundant instructions",
false,
false)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
amdgpu AMDGPU Register Bank Select
This file implements a class to represent arbitrary precision integral constant values and operations...
Expand Atomic instructions
static const Function * getParent(const Value *V)
This is the interface for LLVM's primary stateless and local alias analysis.
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file provides an implementation of debug counters.
#define DEBUG_COUNTER(VARNAME, COUNTERNAME, DESC)
This file defines the DenseMap class.
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
static bool isSigned(unsigned int Opcode)
This is the interface for a simple mod/ref and alias analysis over globals.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
iv Induction Variable Users
This file provides internal interfaces used to implement the InstCombine.
This file provides the primary interface to the instcombine pass.
static Value * simplifySwitchOnSelectUsingRanges(SwitchInst &SI, SelectInst *Select, bool IsTrueArm)
static bool isUsedWithinShuffleVector(Value *V)
static bool isNeverEqualToUnescapedAlloc(Value *V, const TargetLibraryInfo &TLI, Instruction *AI)
static bool combineInstructionsOverFunction(Function &F, InstructionWorklist &Worklist, AliasAnalysis *AA, AssumptionCache &AC, TargetLibraryInfo &TLI, TargetTransformInfo &TTI, DominatorTree &DT, OptimizationRemarkEmitter &ORE, BlockFrequencyInfo *BFI, BranchProbabilityInfo *BPI, ProfileSummaryInfo *PSI, LoopInfo *LI, const InstCombineOptions &Opts)
static bool shorter_filter(const Value *LHS, const Value *RHS)
static Instruction * foldSelectGEP(GetElementPtrInst &GEP, InstCombiner::BuilderTy &Builder)
Thread a GEP operation with constant indices through the constant true/false arms of a select.
static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src)
static cl::opt< unsigned > MaxArraySize("instcombine-maxarray-size", cl::init(1024), cl::desc("Maximum array size considered when doing a combine"))
static cl::opt< unsigned > ShouldLowerDbgDeclare("instcombine-lower-dbg-declare", cl::Hidden, cl::init(true))
static bool hasNoSignedWrap(BinaryOperator &I)
static bool simplifyAssocCastAssoc(BinaryOperator *BinOp1, InstCombinerImpl &IC)
Combine constant operands of associative operations either before or after a cast to eliminate one of...
static Value * simplifyInstructionWithPHI(Instruction &I, PHINode *PN, Value *InValue, BasicBlock *InBB, const DataLayout &DL, const SimplifyQuery SQ)
static void ClearSubclassDataAfterReassociation(BinaryOperator &I)
Conservatively clears subclassOptionalData after a reassociation or commutation.
static bool isAllocSiteRemovable(Instruction *AI, SmallVectorImpl< WeakTrackingVH > &Users, const TargetLibraryInfo &TLI)
static Value * getIdentityValue(Instruction::BinaryOps Opcode, Value *V)
This function returns identity value for given opcode, which can be used to factor patterns like (X *...
static bool leftDistributesOverRight(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "X LOp (Y ROp Z)" is always equal to "(X LOp Y) ROp (X LOp Z)".
static std::optional< std::pair< Value *, Value * > > matchSymmetricPhiNodesPair(PHINode *LHS, PHINode *RHS)
static Value * foldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, Value *NewOp, InstCombiner &IC)
static Instruction * canonicalizeGEPOfConstGEPI8(GetElementPtrInst &GEP, GEPOperator *Src, InstCombinerImpl &IC)
static Instruction * tryToMoveFreeBeforeNullTest(CallInst &FI, const DataLayout &DL)
Move the call to free before a NULL test.
static bool rightDistributesOverLeft(Instruction::BinaryOps LOp, Instruction::BinaryOps ROp)
Return whether "(X LOp Y) ROp Z" is always equal to "(X ROp Z) LOp (Y ROp Z)".
static Value * tryFactorization(BinaryOperator &I, const SimplifyQuery &SQ, InstCombiner::BuilderTy &Builder, Instruction::BinaryOps InnerOpcode, Value *A, Value *B, Value *C, Value *D)
This tries to simplify binary operations by factorizing out common terms (e.
static bool isRemovableWrite(CallBase &CB, Value *UsedV, const TargetLibraryInfo &TLI)
Given a call CB which uses an address UsedV, return true if we can prove the call's only possible eff...
static Instruction::BinaryOps getBinOpsForFactorization(Instruction::BinaryOps TopOpcode, BinaryOperator *Op, Value *&LHS, Value *&RHS, BinaryOperator *OtherOp)
This function predicates factorization using distributive laws.
static bool hasNoUnsignedWrap(BinaryOperator &I)
static bool SoleWriteToDeadLocal(Instruction *I, TargetLibraryInfo &TLI)
Check for case where the call writes to an otherwise dead alloca.
static cl::opt< unsigned > MaxSinkNumUsers("instcombine-max-sink-users", cl::init(32), cl::desc("Maximum number of undroppable users for instruction sinking"))
static Constant * constantFoldOperationIntoSelectOperand(Instruction &I, SelectInst *SI, bool IsTrueArm)
static bool isCatchAll(EHPersonality Personality, Constant *TypeInfo)
Return 'true' if the given typeinfo will match anything.
static bool isMergedGEPInBounds(GEPOperator &GEP1, GEPOperator &GEP2)
static cl::opt< bool > EnableCodeSinking("instcombine-code-sinking", cl::desc("Enable code sinking"), cl::init(true))
static bool maintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C)
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static bool IsSelect(MachineInstr &MI)
This header defines various interfaces for pass management in LLVM.
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static SymbolRef::Type getType(const Symbol *Sym)
This defines the Use class.
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static const uint32_t IV[8]
bool isNoAliasScopeDeclDead(Instruction *Inst)
void analyse(Instruction *I)
A manager for alias analyses.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
bool isMinSignedValue() const
Determine if this is the smallest signed value.
APInt trunc(unsigned width) const
Truncate to new width.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
unsigned getBitWidth() const
Return the number of bits in the APInt.
APInt sadd_ov(const APInt &RHS, bool &Overflow) const
APInt ashr(unsigned ShiftAmt) const
Arithmetic right-shift function.
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
APInt ssub_ov(const APInt &RHS, bool &Overflow) const
APInt lshr(unsigned shiftAmt) const
Logical right-shift function.
A container for analyses that lazily runs them and caches their results.
PassT::Result * getCachedResult(IRUnitT &IR) const
Get the cached result of an analysis pass for a given IR unit.
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
void setPreservesCFG()
This function should be called by the pass, iff they do not:
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
Class to represent array types.
uint64_t getNumElements() const
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Type * getElementType() const
A function analysis which provides an AssumptionCache.
An immutable pass that tracks lazily created AssumptionCache objects.
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
uint64_t getDereferenceableBytes() const
Returns the number of dereferenceable bytes from the dereferenceable attribute.
bool isValid() const
Return true if the attribute is any kind of attribute.
Legacy wrapper pass to provide the BasicAAResult object.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const_iterator getFirstInsertionPt() const
Returns an iterator to the first instruction in this block that is suitable for inserting a non-PHI i...
iterator_range< filter_iterator< BasicBlock::const_iterator, std::function< bool(const Instruction &)> > > instructionsWithoutDebug(bool SkipPseudoOp=true) const
Return a const iterator range over the instructions in the block, skipping any debug instructions.
InstListType::const_iterator getFirstNonPHIIt() const
Iterator returning form of getFirstNonPHI.
const Instruction & front() const
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
const BasicBlock * getSinglePredecessor() const
Return the predecessor of this block if it has a single predecessor block.
const BasicBlock * getUniquePredecessor() const
Return the predecessor of this block if it has a unique predecessor block.
const Function * getParent() const
Return the enclosing method, or null if none.
InstListType::iterator iterator
Instruction iterators...
const_iterator getFirstNonPHIOrDbgOrAlloca() const
Returns an iterator to the first instruction in this block that is not a PHINode, a debug intrinsic,...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a binary instruction, given the opcode and the two operands.
BinaryOps getOpcode() const
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
static BinaryOperator * CreateNUW(BinaryOps Opc, Value *V1, Value *V2, const Twine &Name="")
Analysis pass which computes BlockFrequencyInfo.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Conditional or Unconditional Branch instruction.
void swapSuccessors()
Swap the successors of this branch instruction.
bool isConditional() const
BasicBlock * getSuccessor(unsigned i) const
bool isUnconditional() const
Value * getCondition() const
Analysis pass which computes BranchProbabilityInfo.
Analysis providing branch probability information.
void swapSuccEdgesProbabilities(const BasicBlock *Src)
Swap outgoing edges probabilities for Src with branch terminator.
Represents analyses that only rely on functions' control flow.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
AttributeList getAttributes() const
Return the parameter attributes for this call.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ ICMP_UGT
unsigned greater than
@ ICMP_ULT
unsigned less than
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Predicate getPredicate() const
Return the predicate for this instruction.
ConstantArray - Constant Array Declarations.
static Constant * get(ArrayType *T, ArrayRef< Constant * > V)
A vector constant whose element type is a simple 1/2/4/8-byte integer or float/double,...
static Constant * getSub(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getNot(Constant *C)
static Constant * getShl(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getAdd(Constant *C1, Constant *C2, bool HasNUW=false, bool HasNSW=false)
static Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
static Constant * getNeg(Constant *C, bool HasNSW=false)
This is the shared class of boolean and integer constants.
static ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getFalse(LLVMContext &Context)
static ConstantInt * getBool(LLVMContext &Context, bool V)
This class represents a range of values.
bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const
Set up Pred and RHS such that ConstantRange::makeExactICmpRegion(Pred, RHS) == *this.
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with a...
bool contains(const APInt &Val) const
Return true if the specified value is in the set.
static ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp, const APInt &Other, unsigned NoWrapKind)
Produce the range that contains X if and only if "X BinOp Other" does not wrap.
Constant Vector Declarations.
static Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
static Constant * getIntegerValue(Type *Ty, const APInt &V)
Return the value for an integer or pointer constant, or a vector thereof, with the given scalar value...
static Constant * replaceUndefsWith(Constant *C, Constant *Replacement)
Try to replace undefined constant C or undefined elements in C with Replacement.
static Constant * getAllOnesValue(Type *Ty)
const Constant * stripPointerCasts() const
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if...
bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
SmallVector< APInt > getGEPIndicesForOffset(Type *&ElemTy, APInt &Offset) const
Get GEP indices to access Offset inside ElemTy.
bool isLegalInteger(uint64_t Width) const
Returns true if the specified type is known to be a native integer type supported by the CPU.
unsigned getIndexTypeSizeInBits(Type *Ty) const
Layout size of the index used in GEP calculation.
IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
unsigned getIndexSizeInBits(unsigned AS) const
Size in bits of index used for address calculation in getelementptr.
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef< Value * > Indices) const
Returns the offset from the beginning of the type for the specified indices.
This is the common base class for debug info intrinsics for variables.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
static bool shouldExecute(unsigned CounterName)
Identifies a unique instance of a variable.
iterator find(const_arg_type_t< KeyT > Val)
void registerBranch(BranchInst *BI)
Add a branch condition to the cache.
Analysis pass which computes a DominatorTree.
Legacy analysis pass which computes a DominatorTree.
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
bool isReachableFromEntry(const Use &U) const
Provide an overload for a Use.
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Utility class for floating point operations which can have information about relaxed accuracy require...
Convenience struct for specifying and reasoning about fast-math flags.
This class represents a freeze function that returns random concrete value if an operand is either a ...
FunctionPass class - This class is used to implement most global optimizations.
bool skipFunction(const Function &F) const
Optional passes call this function to check whether the pass should be skipped.
const BasicBlock & getEntryBlock() const
static bool isTargetIntrinsic(Intrinsic::ID IID)
isTargetIntrinsic - Returns true if IID is an intrinsic specific to a certain target.
bool isInBounds() const
Test whether this is an inbounds GEP, as defined by LangRef.html.
bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static GetElementPtrInst * CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Create an "inbounds" getelementptr.
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
Legacy wrapper pass to provide the GlobalsAAResult object.
This instruction compares its operands according to the predicate given to the constructor.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
Value * CreatePtrAdd(Value *Ptr, Value *Offset, const Twine &Name="", bool IsInBounds=false)
Value * CreateLogicalOp(Instruction::BinaryOps Opc, Value *Cond1, Value *Cond2, const Twine &Name="")
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, Instruction *FMFSource=nullptr, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
Value * CreateSExt(Value *V, Type *DestTy, const Twine &Name="")
Value * CreateFreeze(Value *V, const Twine &Name="")
void setFastMathFlags(FastMathFlags NewFMF)
Set the fast-math flags to be used with generated fp-math operators.
Value * CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="")
void CollectMetadataToCopy(Instruction *Src, ArrayRef< unsigned > MetadataKinds)
Collect metadata with IDs MetadataKinds from Src which should be added to all created instructions.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Value * CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
PHINode * CreatePHI(Type *Ty, unsigned NumReservedValues, const Twine &Name="")
Value * CreateNot(Value *V, const Twine &Name="")
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
LoadInst * CreateLoad(Type *Ty, Value *Ptr, const char *Name)
Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of converting the string to 'bool...
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Value * CreateAnd(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateAdd(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
Value * CreateAShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
Value * CreateXor(Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateGEP(Type *Ty, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &Name="", bool IsInBounds=false)
Value * CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name="")
Value * CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name="")
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
ConstantInt * getInt(const APInt &AI)
Get a constant integer value.
Provides an 'InsertHelper' that calls a user-provided callback after performing the default insertion...
This instruction inserts a struct field or array element value into an aggregate value.
static InsertValueInst * Create(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &NameStr, BasicBlock::iterator InsertBefore)
InstCombinePass(InstCombineOptions Opts={})
void printPipeline(raw_ostream &OS, function_ref< StringRef(StringRef)> MapClassName2PassName)
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM)
Instruction * FoldOpIntoSelect(Instruction &Op, SelectInst *SI, bool FoldWithMultiUse=false)
Given an instruction with a select as one operand and a constant as the other operand,...
Instruction * foldBinOpOfSelectAndCastOfSelectCondition(BinaryOperator &I)
Tries to simplify binops of select and cast of the select condition.
Instruction * foldBinOpIntoSelectOrPhi(BinaryOperator &I)
This is a convenience wrapper function for the above two functions.
bool SimplifyAssociativeOrCommutative(BinaryOperator &I)
Performs a few simplifications for operators which are associative or commutative.
Instruction * visitGEPOfGEP(GetElementPtrInst &GEP, GEPOperator *Src)
Value * foldUsingDistributiveLaws(BinaryOperator &I)
Tries to simplify binary operations which some other binary operation distributes over.
Instruction * foldBinOpShiftWithShift(BinaryOperator &I)
Instruction * visitUnreachableInst(UnreachableInst &I)
Instruction * foldOpIntoPhi(Instruction &I, PHINode *PN)
Given a binary operator, cast instruction, or select which has a PHI node as operand #0,...
void handleUnreachableFrom(Instruction *I, SmallVectorImpl< BasicBlock * > &Worklist)
Value * SimplifyDemandedVectorElts(Value *V, APInt DemandedElts, APInt &PoisonElts, unsigned Depth=0, bool AllowMultipleUsers=false) override
The specified value produces a vector with any number of elements.
Instruction * visitFreeze(FreezeInst &I)
void handlePotentiallyDeadBlocks(SmallVectorImpl< BasicBlock * > &Worklist)
Instruction * visitFree(CallInst &FI, Value *FreedOp)
Instruction * visitExtractValueInst(ExtractValueInst &EV)
void handlePotentiallyDeadSuccessors(BasicBlock *BB, BasicBlock *LiveSucc)
Instruction * visitUnconditionalBranchInst(BranchInst &BI)
Instruction * eraseInstFromFunction(Instruction &I) override
Combiner aware instruction erasure.
Instruction * visitLandingPadInst(LandingPadInst &LI)
bool prepareWorklist(Function &F, ReversePostOrderTraversal< BasicBlock * > &RPOT)
Perform early cleanup and prepare the InstCombine worklist.
Instruction * visitReturnInst(ReturnInst &RI)
Instruction * visitSwitchInst(SwitchInst &SI)
Instruction * foldBinopWithPhiOperands(BinaryOperator &BO)
For a binary operator with 2 phi operands, try to hoist the binary operation before the phi.
Constant * getLosslessTrunc(Constant *C, Type *TruncTy, unsigned ExtOp)
Value * SimplifyDemandedUseFPClass(Value *V, FPClassTest DemandedMask, KnownFPClass &Known, unsigned Depth, Instruction *CxtI)
Attempts to replace V with a simpler value based on the demanded floating-point classes.
bool mergeStoreIntoSuccessor(StoreInst &SI)
Try to transform: if () { *P = v1; } else { *P = v2 } or: *P = v1; if () { *P = v2; } into a phi node...
Instruction * tryFoldInstWithCtpopWithNot(Instruction *I)
void tryToSinkInstructionDbgValues(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableIntrinsic * > &DbgUsers)
void CreateNonTerminatorUnreachable(Instruction *InsertAt)
Create and insert the idiom we use to indicate a block is unreachable without having to rewrite the C...
Value * pushFreezeToPreventPoisonFromPropagating(FreezeInst &FI)
bool run()
Run the combiner over the entire worklist until it is empty.
Instruction * foldVectorBinop(BinaryOperator &Inst)
Canonicalize the position of binops relative to shufflevector.
bool removeInstructionsBeforeUnreachable(Instruction &I)
Value * SimplifySelectsFeedingBinaryOp(BinaryOperator &I, Value *LHS, Value *RHS)
void tryToSinkInstructionDbgVariableRecords(Instruction *I, BasicBlock::iterator InsertPos, BasicBlock *SrcBlock, BasicBlock *DestBlock, SmallVectorImpl< DbgVariableRecord * > &DPUsers)
void addDeadEdge(BasicBlock *From, BasicBlock *To, SmallVectorImpl< BasicBlock * > &Worklist)
Instruction * visitAllocSite(Instruction &FI)
Instruction * visitGetElementPtrInst(GetElementPtrInst &GEP)
Instruction * visitBranchInst(BranchInst &BI)
Value * tryFactorizationFolds(BinaryOperator &I)
This tries to simplify binary operations by factorizing out common terms (e.
Instruction * foldFreezeIntoRecurrence(FreezeInst &I, PHINode *PN)
bool tryToSinkInstruction(Instruction *I, BasicBlock *DestBlock)
Try to move the specified instruction from its current block into the beginning of DestBlock,...
bool freezeOtherUses(FreezeInst &FI)
void freelyInvertAllUsersOf(Value *V, Value *IgnoredUser=nullptr)
Freely adapt every user of V as-if V was changed to !V.
The core instruction combiner logic.
const DataLayout & getDataLayout() const
static bool isCanonicalPredicate(CmpInst::Predicate Pred)
Predicate canonicalization reduces the number of patterns that need to be matched by other transforms...
bool isFreeToInvert(Value *V, bool WillInvertAllUses, bool &DoesConsume)
Return true if the specified value is free to invert (apply ~ to).
static unsigned getComplexity(Value *V)
Assign a complexity or rank value to LLVM Values.
Instruction * InsertNewInstBefore(Instruction *New, BasicBlock::iterator Old)
Inserts an instruction New before instruction Old.
Instruction * replaceInstUsesWith(Instruction &I, Value *V)
A combiner-aware RAUW-like routine.
uint64_t MaxArraySizeForCombine
Maximum size of array considered when transforming.
static bool shouldAvoidAbsorbingNotIntoSelect(const SelectInst &SI)
void replaceUse(Use &U, Value *NewValue)
Replace use and add the previously used value to the worklist.
InstructionWorklist & Worklist
A worklist of the instructions that need to be simplified.
Instruction * InsertNewInstWith(Instruction *New, BasicBlock::iterator Old)
Same as InsertNewInstBefore, but also sets the debug loc.
BranchProbabilityInfo * BPI
unsigned ComputeNumSignBits(const Value *Op, unsigned Depth=0, const Instruction *CxtI=nullptr) const
std::optional< Instruction * > targetInstCombineIntrinsic(IntrinsicInst &II)
void addToWorklist(Instruction *I)
Value * getFreelyInvertedImpl(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume, unsigned Depth)
Return nonnull value if V is free to invert under the condition of WillInvertAllUses.
std::optional< Value * > targetSimplifyDemandedVectorEltsIntrinsic(IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, APInt &UndefElts2, APInt &UndefElts3, std::function< void(Instruction *, unsigned, APInt, APInt &)> SimplifyAndSetOp)
Instruction * replaceOperand(Instruction &I, unsigned OpNum, Value *V)
Replace operand of instruction and add old operand to the worklist.
static Constant * getSafeVectorConstantForBinop(BinaryOperator::BinaryOps Opcode, Constant *In, bool IsRHSConstant)
Some binary operators require special handling to avoid poison and undefined behavior.
SmallDenseSet< std::pair< BasicBlock *, BasicBlock * >, 8 > DeadEdges
Edges that are known to never be taken.
std::optional< Value * > targetSimplifyDemandedUseBitsIntrinsic(IntrinsicInst &II, APInt DemandedMask, KnownBits &Known, bool &KnownBitsComputed)
void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, const Instruction *CxtI) const
bool isValidAddrSpaceCast(unsigned FromAS, unsigned ToAS) const
Value * getFreelyInverted(Value *V, bool WillInvertAllUses, BuilderTy *Builder, bool &DoesConsume)
void visit(Iterator Start, Iterator End)
The legacy pass manager's instcombine pass.
InstructionCombiningPass()
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overridden by passes that need analysis information to do t...
bool runOnFunction(Function &F) override
runOnFunction - Virtual method overridden by subclasses to do the per-function processing of the pass.
InstructionWorklist - This is the worklist management logic for InstCombine and other simplification ...
Instruction * removeOne()
void pushUsersToWorkList(Instruction &I)
When an instruction is simplified, add all users of the instruction to the work lists because they mi...
void add(Instruction *I)
Add instruction to the worklist.
void push(Instruction *I)
Push the instruction onto the worklist stack.
Instruction * popDeferred()
void zap()
Check that the worklist is empty and nuke the backing store for the map.
void reserve(size_t Size)
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr if the function does not...
bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
const BasicBlock * getParent() const
const Function * getFunction() const
Return the function this instruction belongs to.
bool isTerminator() const
void dropUBImplyingAttrsAndMetadata()
Drop any attributes or metadata that can cause immediate undefined behavior.
FastMathFlags getFastMathFlags() const LLVM_READONLY
Convenience function for getting all the fast-math flags, which must be an operator which supports th...
bool willReturn() const LLVM_READONLY
Return true if the instruction will return (unwinding is considered as a form of returning control fl...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
bool isBitwiseLogicOp() const
Return true if this is and/or/xor.
void dropPoisonGeneratingFlags()
Drops flags that may cause this instruction to evaluate to poison despite having non-poison inputs.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
void moveBefore(Instruction *MovePos)
Unlink this instruction from its current basic block and insert it into the basic block that MovePos ...
Class to represent integer types.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
A wrapper class for inspecting calls to intrinsic functions.
Intrinsic::ID getIntrinsicID() const
Return the intrinsic ID of this intrinsic.
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, BasicBlock::iterator InsertBefore)
The landingpad instruction holds all of the information necessary to generate correct exception handl...
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
This is an alternative analysis pass to BlockFrequencyInfoWrapperPass.
static void getLazyBFIAnalysisUsage(AnalysisUsage &AU)
Helper for client passes to set up the analysis usage on behalf of this pass.
An instruction for reading from memory.
Analysis pass that exposes the LoopInfo for a function.
const MDOperand & getOperand(unsigned I) const
unsigned getNumOperands() const
Return number of MDNode operands.
Tracking metadata reference owned by Metadata.
This is the common base class for memset/memcpy/memmove.
static MemoryLocation getForDest(const MemIntrinsic *MI)
Return a location representing the destination of a memory set or transfer.
This class represents min/max intrinsics.
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
A Module instance is used to store all the information related to an LLVM module.
MDNode * getScopeList() const
An analysis over an "inner" IR unit that provides access to an analysis manager over a "outer" IR uni...
Utility class for integer operators which may exhibit overflow - Add, Sub, Mul, and Shl.
bool hasNoSignedWrap() const
Test whether this operation is known to never undergo signed overflow, aka the nsw property.
bool hasNoUnsignedWrap() const
Test whether this operation is known to never undergo unsigned overflow, aka the nuw property.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
op_range incoming_values()
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
In order to facilitate speculative execution, many instructions do not invoke immediate undefined beh...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
A set of analyses that are preserved following a run of a transformation pass.
static PreservedAnalyses all()
Construct a special preserved set that preserves all passes.
void preserveSet()
Mark an analysis set as preserved.
An analysis pass based on the new PM to deliver ProfileSummaryInfo.
An analysis pass based on legacy pass manager to deliver ProfileSummaryInfo.
Analysis providing profile information.
bool hasProfileSummary() const
Returns true if profile summary is available.
A global registry used in conjunction with static constructors to make pluggable components (like tar...
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
static ReturnInst * Create(LLVMContext &C, Value *retVal, BasicBlock::iterator InsertBefore)
This class represents a cast from signed integer to floating point.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr, BasicBlock::iterator InsertBefore, Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
iterator erase(const_iterator CI)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
typename SuperClass::iterator iterator
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
StringRef - Represent a constant reference to a string, i.e.
TargetFolder - Create constants with target dependent folding.
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
bool has(LibFunc F) const
Tests whether a library function is available.
bool getLibFunc(StringRef funcName, LibFunc &F) const
Searches for a particular function name.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
The instances of the Type class are immutable: once they are created, they are never changed.
const fltSemantics & getFltSemantics() const
bool isVectorTy() const
True if this is an instance of VectorType.
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isStructTy() const
True if this is an instance of StructType.
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
bool isScalableTy() const
Return true if this is a type whose size is a known multiple of vscale.
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
This class represents a cast unsigned integer to floating point.
This function has undefined behavior.
A Use represents the edge between a Value definition and its users.
bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
bool hasOneUse() const
Return true if there is exactly one use of this value.
iterator_range< user_iterator > users()
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
const Value * stripPointerCasts() const
Strip off pointer casts, all-zero GEPs and address space casts.
LLVMContext & getContext() const
All values hold a context through their type.
uint64_t getPointerDereferenceableBytes(const DataLayout &DL, bool &CanBeNull, bool &CanBeFreed) const
Returns the number of bytes known to be dereferenceable for the pointer value.
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
constexpr ScalarTy getFixedValue() const
constexpr bool isZero() const
An efficient, type-erasing, non-owning reference to a callable.
Type * getIndexedType() const
reverse_self_iterator getReverseIterator()
self_iterator getIterator()
This class implements an extremely fast bulk output stream that can only output to a stream.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
bool isNoFPClassCompatibleType(Type *Ty)
Returns true if this is a type legal for the 'nofpclass' attribute.
@ C
The default llvm calling convention, compatible with C.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
class_match< PoisonValue > m_Poison()
Match an arbitrary poison constant.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
br_match m_UnconditionalBr(BasicBlock *&Succ)
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
bind_ty< Instruction > m_Instruction(Instruction *&I)
Match an instruction, capturing it if we match.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
DisjointOr_match< LHS, RHS > m_DisjointOr(const LHS &L, const RHS &R)
constantexpr_match m_ConstantExpr()
Match a constant expression or a constant that contains a constant expression.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
cst_pred_ty< is_nonnegative > m_NonNegative()
Match an integer or vector of non-negative values.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
CmpClass_match< LHS, RHS, FCmpInst, FCmpInst::Predicate > m_FCmp(FCmpInst::Predicate &Pred, const LHS &L, const RHS &R)
CastOperator_match< OpTy, Instruction::Trunc > m_Trunc(const OpTy &Op)
Matches Trunc.
apint_match m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
CmpClass_match< LHS, RHS, ICmpInst, ICmpInst::Predicate > m_ICmp(ICmpInst::Predicate &Pred, const LHS &L, const RHS &R)
OneUse_match< T > m_OneUse(const T &SubPattern)
auto m_LogicalOr()
Matches L || R where L and R are arbitrary values.
BinaryOp_match< cst_pred_ty< is_zero_int >, ValTy, Instruction::Sub > m_Neg(const ValTy &V)
Matches a 'Neg' as 'sub 0, V'.
TwoOps_match< V1_t, V2_t, Instruction::ShuffleVector > m_Shuffle(const V1_t &v1, const V2_t &v2)
Matches ShuffleVectorInst independently of mask value.
match_combine_and< class_match< Constant >, match_unless< constantexpr_match > > m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
brc_match< Cond_t, bind_ty< BasicBlock >, bind_ty< BasicBlock > > m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F)
match_combine_or< BinaryOp_match< LHS, RHS, Instruction::Add >, DisjointOr_match< LHS, RHS > > m_AddLike(const LHS &L, const RHS &R)
Match either "add" or "or disjoint".
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
CastOperator_match< OpTy, Instruction::BitCast > m_BitCast(const OpTy &Op)
Matches BitCast.
match_combine_or< CastInst_match< OpTy, SExtInst >, NNegZExt_match< OpTy > > m_SExtLike(const OpTy &Op)
Match either "sext" or "zext nneg".
apint_match m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
AnyBinaryOp_match< LHS, RHS, true > m_c_BinOp(const LHS &L, const RHS &R)
Matches a BinaryOperator with LHS and RHS in either order.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
match_combine_or< CastInst_match< OpTy, ZExtInst >, CastInst_match< OpTy, SExtInst > > m_ZExtOrSExt(const OpTy &Op)
Exact_match< T > m_Exact(const T &SubPattern)
BinOpPred_match< LHS, RHS, is_shift_op > m_Shift(const LHS &L, const RHS &R)
Matches shift operations.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
cstfp_pred_ty< is_non_zero_fp > m_NonZeroFP()
Match a floating-point non-zero.
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
auto m_LogicalAnd()
Matches L && R where L and R are arbitrary values.
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
initializer< Ty > init(const Ty &Val)
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
detail::zippy< detail::zip_shortest, T, U, Args... > zip(T &&t, U &&u, Args &&...args)
zip iterator for two or more iteratable types.
void stable_sort(R &&Range)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool succ_empty(const Instruction *I)
Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
FunctionPass * createInstructionCombiningPass()
std::pair< unsigned, unsigned > removeAllNonTerminatorAndEHPadInstructions(BasicBlock *BB)
Remove all instructions from a basic block other than its terminator and any present EH pad instructi...
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A,...
void salvageDebugInfoForDbgValues(Instruction &I, ArrayRef< DbgVariableIntrinsic * > Insns, ArrayRef< DbgVariableRecord * > DPInsns)
Implementation of salvageDebugInfo, applying only to instructions in Insns, rather than all debug use...
void findDbgUsers(SmallVectorImpl< DbgVariableIntrinsic * > &DbgInsts, Value *V, SmallVectorImpl< DbgVariableRecord * > *DbgVariableRecords=nullptr)
Finds the debug info intrinsics describing a value.
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
auto successors(const MachineBasicBlock *BB)
bool isRemovableAlloc(const CallBase *V, const TargetLibraryInfo *TLI)
Return true if this is a call to an allocation function that does not have side effects that we are r...
std::optional< StringRef > getAllocationFamily(const Value *I, const TargetLibraryInfo *TLI)
If a function is part of an allocation family (e.g.
Value * lowerObjectSizeCall(IntrinsicInst *ObjectSize, const DataLayout &DL, const TargetLibraryInfo *TLI, bool MustSucceed)
Try to turn a call to @llvm.objectsize into an integer value of the given Type.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
gep_type_iterator gep_type_end(const User *GEP)
Value * getReallocatedOperand(const CallBase *CB)
If this is a call to a realloc function, return the reallocated operand.
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
bool handleUnreachableTerminator(Instruction *I, SmallVectorImpl< Value * > &PoisonedValues)
If a terminator in an unreachable basic block has an operand of type Instruction, transform it into p...
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
constexpr bool has_single_bit(T Value) noexcept
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=nullptr)
Return true if the result produced by the instruction is not used, and the instruction will return.
bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned el...
Value * emitGEPOffset(IRBuilderBase *Builder, const DataLayout &DL, User *GEP, bool NoAssumptions=false)
Given a getelementptr instruction/constantexpr, emit the code necessary to compute the offset from th...
constexpr unsigned MaxAnalysisRecursionDepth
auto reverse(ContainerTy &&C)
Constant * ConstantFoldInstOperands(Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
void sort(IteratorTy Start, IteratorTy End)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool LowerDbgDeclare(Function &F)
Lowers llvm.dbg.declare intrinsics into appropriate set of llvm.dbg.value intrinsics.
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
void ConvertDebugDeclareToDebugValue(DbgVariableIntrinsic *DII, StoreInst *SI, DIBuilder &Builder)
===------------------------------------------------------------------===// Dbg Intrinsic utilities
Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
bool canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
canCreateUndefOrPoison returns true if Op can create undef or poison from non-undef & non-poison oper...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
bool replaceAllDbgUsesWith(Instruction &From, Value &To, Instruction &DomPoint, DominatorTree &DT)
Point debug users of From to To or salvage them.
bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
@ Or
Bitwise or logical OR of integers.
DWARFExpression::Operation Op
Constant * ConstantFoldInstruction(Instruction *I, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldInstruction - Try to constant fold the specified instruction.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
Value * getFreedOperand(const CallBase *CB, const TargetLibraryInfo *TLI)
If this if a call to a free function, return the freed operand.
bool isSafeToSpeculativelyExecute(const Instruction *I, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Return true if the instruction does not have any effects besides calculating the result and does not ...
constexpr unsigned BitWidth
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
gep_type_iterator gep_type_begin(const User *GEP)
auto predecessors(const MachineBasicBlock *BB)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
bool equal(L &&LRange, R &&RRange)
Wrapper function around std::equal to detect if pair-wise elements between two ranges are the same.
Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, bool InBounds, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
bool isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Returns true if the given value is known to be non-negative.
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
void initializeInstCombine(PassRegistry &)
Initialize all passes linked into the InstCombine library.
void initializeInstructionCombiningPassPass(PassRegistry &)
std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
bool isPotentiallyReachable(const Instruction *From, const Instruction *To, const SmallPtrSetImpl< BasicBlock * > *ExclusionSet=nullptr, const DominatorTree *DT=nullptr, const LoopInfo *LI=nullptr)
Determine whether instruction 'To' is reachable from 'From', without passing through any blocks in Ex...
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static unsigned int semanticsPrecision(const fltSemantics &)
unsigned countMinLeadingOnes() const
Returns the minimum number of leading one bits.
unsigned getBitWidth() const
Get the bit width of this value.
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
A CRTP mix-in to automatically provide informational APIs needed for passes.
SimplifyQuery getWithInstruction(const Instruction *I) const
SimplifyQuery getWithoutUndef() const