llvm.org GIT mirror llvm / fe353a0
Merge isKnownNonNull into isKnownNonZero

It now knows the tricks of both functions. Also, fix a bug that considered allocas of non-zero address space to be always non-null.

Differential Revision: https://reviews.llvm.org/D37628
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@312869 91177308-0d34-0410-b5e6-96231b3b80d8
Nuno Lopes, 2 years ago
17 changed file(s) with 142 addition(s) and 149 deletion(s).
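For readers tracking the API change, a minimal caller-side sketch (not part of the patch; the wrapper name is hypothetical) of the migration this commit performs: the context-insensitive isKnownNonNull(V) and the context-sensitive isKnownNonNullAt(V, CtxI, DT) both become calls to isKnownNonZero, which additionally takes the DataLayout, a recursion depth, and an optional AssumptionCache.

// Hypothetical helper, for illustration only.
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
using namespace llvm;

static bool pointerKnownNonNull(const Value *V, const DataLayout &DL,
                                AssumptionCache *AC, const Instruction *CtxI,
                                const DominatorTree *DT) {
  // Before: isKnownNonNull(V) or isKnownNonNullAt(V, CtxI, DT).
  // After: a single entry point; AC/CtxI/DT may be null for a
  // context-insensitive query.
  return isKnownNonZero(V, DL, /*Depth=*/0, AC, CtxI, DT);
}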
365365 /// Instructions which just compute a value based on the values of their
366366 /// operands are not memory dependent.
367367 bool mayBeMemoryDependent(const Instruction &I);
368
369 /// Return true if this pointer couldn't possibly be null by its definition.
370 /// This returns true for allocas, non-extern-weak globals, and byval
371 /// arguments.
372 bool isKnownNonNull(const Value *V);
373
374 /// Return true if this pointer couldn't possibly be null. If the context
375 /// instruction and dominator tree are specified, perform context-sensitive
376 /// analysis and return true if the pointer couldn't possibly be null at the
377 /// specified instruction.
378 bool isKnownNonNullAt(const Value *V,
379 const Instruction *CtxI = nullptr,
380 const DominatorTree *DT = nullptr);
381368
382369 /// Return true if it is valid to use the assumptions provided by an
383370 /// assume intrinsic, I, at the point in the control-flow identified by the
20622062 static Constant *
20632063 computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
20642064 const DominatorTree *DT, CmpInst::Predicate Pred,
2065 const Instruction *CxtI, Value *LHS, Value *RHS) {
2065 AssumptionCache *AC, const Instruction *CxtI,
2066 Value *LHS, Value *RHS) {
20662067 // First, skip past any trivial no-ops.
20672068 LHS = LHS->stripPointerCasts();
20682069 RHS = RHS->stripPointerCasts();
20692070
20702071 // A non-null pointer is not equal to a null pointer.
2071 if (llvm::isKnownNonNull(LHS) && isa<ConstantPointerNull>(RHS) &&
2072 if (llvm::isKnownNonZero(LHS, DL) && isa<ConstantPointerNull>(RHS) &&
20722073 (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
20732074 return ConstantInt::get(GetCompareTy(LHS),
20742075 !CmpInst::isTrueWhenEqual(Pred));
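A concrete illustration of the rule above (hypothetical IR, not one of this patch's tests): an address-space-0 alloca is known non-zero, so an equality compare against null simplifies to false.

define i1 @cmp_alloca_null() {
  %a = alloca i32                  ; address space 0: known non-null
  %c = icmp eq i32* %a, null       ; simplifies to false
  ret i1 %c
}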
22232224 // cannot be elided. We cannot fold malloc comparison to null. Also, the
22242225 // dynamic allocation call could be either of the operands.
22252226 Value *MI = nullptr;
2226 if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonNullAt(RHS, CxtI, DT))
2227 if (isAllocLikeFn(LHS, TLI) &&
2228 llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
22272229 MI = LHS;
2228 else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonNullAt(LHS, CxtI, DT))
2230 else if (isAllocLikeFn(RHS, TLI) &&
2231 llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
22292232 MI = RHS;
22302233 // FIXME: We should also fold the compare when the pointer escapes, but the
22312234 // compare dominates the pointer escape
33123315 // Simplify comparisons of related pointers using a powerful, recursive
33133316 // GEP-walk when we have target data available.
33143317 if (LHS->getType()->isPointerTy())
3315 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.CxtI, LHS, RHS))
3318 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI, LHS,
3319 RHS))
33163320 return C;
33173321 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
33183322 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
33193323 if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
33203324 Q.DL.getTypeSizeInBits(CLHS->getType()) &&
33213325 Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
33223326 Q.DL.getTypeSizeInBits(CRHS->getType()))
3323 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.CxtI,
3327 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI,
33243328 CLHS->getPointerOperand(),
33253329 CRHS->getPointerOperand()))
33263330 return C;
816816 // definition. We could easily extend this to look through geps, bitcasts,
817817 // and the like to prove non-nullness, but it's not clear that's worth it
818818 // compile time wise. The context-insensitive value walk done inside
819 // isKnownNonNull gets most of the profitable cases at much less expense.
819 // isKnownNonZero gets most of the profitable cases at much less expense.
820820 // This does mean that we have a sensitivity to where the defining
821821 // instruction is placed, even if it could legally be hoisted much higher.
822822 // That is unfortunate.
823823 PointerType *PT = dyn_cast<PointerType>(BBI->getType());
824 if (PT && isKnownNonNull(BBI)) {
824 if (PT && isKnownNonZero(BBI, DL)) {
825825 Res = LVILatticeVal::getNot(ConstantPointerNull::get(PT));
826826 return true;
827827 }
900900 // Before giving up, see if we can prove the pointer non-null local to
901901 // this particular block.
902902 if (Val->getType()->isPointerTy() &&
903 (isKnownNonNull(Val) || isObjectDereferencedInBlock(Val, BB))) {
903 (isKnownNonZero(Val, DL) || isObjectDereferencedInBlock(Val, BB))) {
904904 PointerType *PTy = cast<PointerType>(Val->getType());
905905 Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
906906 } else {
18851885 LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
18861886 Instruction *CxtI) {
18871887 // Is or is not NonNull are common predicates being queried. If
1888 // isKnownNonNull can tell us the result of the predicate, we can
1888 // isKnownNonZero can tell us the result of the predicate, we can
18891889 // return it quickly. But this is only a fastpath, and falling
18901890 // through would still be correct.
1891 const DataLayout &DL = CxtI->getModule()->getDataLayout();
18911892 if (V->getType()->isPointerTy() && C->isNullValue() &&
1892 isKnownNonNull(V->stripPointerCasts())) {
1893 isKnownNonZero(V->stripPointerCasts(), DL)) {
18931894 if (Pred == ICmpInst::ICMP_EQ)
18941895 return LazyValueInfo::False;
18951896 else if (Pred == ICmpInst::ICMP_NE)
18961897 return LazyValueInfo::True;
18971898 }
1898 const DataLayout &DL = CxtI->getModule()->getDataLayout();
18991899 LVILatticeVal Result = getImpl(PImpl, AC, &DL, DT).getValueAt(V, CxtI);
19001900 Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI);
19011901 if (Ret != Unknown)
7171 V->getPointerDereferenceableBytes(DL, CheckForNonNull));
7272 if (KnownDerefBytes.getBoolValue()) {
7373 if (KnownDerefBytes.uge(Size))
74 if (!CheckForNonNull || isKnownNonNullAt(V, CtxI, DT))
74 if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT))
7575 return isAligned(V, Align, DL);
7676 }
7777
17481748 return false;
17491749 }
17501750
1751 static bool isKnownNonNullFromDominatingCondition(const Value *V,
1752 const Instruction *CtxI,
1753 const DominatorTree *DT) {
1754 assert(V->getType()->isPointerTy() && "V must be pointer type");
1755 assert(!isa<ConstantPointerNull>(V) && "Did not expect ConstantPointerNull");
1756
1757 if (!CtxI || !DT)
1758 return false;
1759
1760 unsigned NumUsesExplored = 0;
1761 for (auto *U : V->users()) {
1762 // Avoid massive lists
1763 if (NumUsesExplored >= DomConditionsMaxUses)
1764 break;
1765 NumUsesExplored++;
1766
1767 // If the value is used as an argument to a call or invoke, then argument
1768 // attributes may provide an answer about null-ness.
1769 if (auto CS = ImmutableCallSite(U))
1770 if (auto *CalledFunc = CS.getCalledFunction())
1771 for (const Argument &Arg : CalledFunc->args())
1772 if (CS.getArgOperand(Arg.getArgNo()) == V &&
1773 Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
1774 return true;
1775
1776 // Consider only compare instructions uniquely controlling a branch
1777 CmpInst::Predicate Pred;
1778 if (!match(const_cast<User *>(U),
1779 m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
1780 (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
1781 continue;
1782
1783 for (auto *CmpU : U->users()) {
1784 if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
1785 assert(BI->isConditional() && "uses a comparison!");
1786
1787 BasicBlock *NonNullSuccessor =
1788 BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
1789 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
1790 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
1791 return true;
1792 } else if (Pred == ICmpInst::ICMP_NE &&
1793 match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
1794 DT->dominates(cast<Instruction>(CmpU), CtxI)) {
1795 return true;
1796 }
1797 }
1798 }
1799
1800 return false;
1801 }
1802
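A hypothetical IR fragment (not from this patch) showing the pattern the function above recognizes: %p is compared against null, and the branch's non-null edge dominates the later query point, so %p can be treated as non-null there.

define i1 @dom_cond(i32* %p) {
entry:
  %c = icmp eq i32* %p, null
  br i1 %c, label %is_null, label %not_null
not_null:                           ; reached only when %p != null
  %c2 = icmp ne i32* %p, null       ; known true here
  ret i1 %c2
is_null:
  ret i1 false
}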
17511803 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
17521804 /// ensure that the value it's attached to is never Value? 'RangeType'
17531805 /// is the type of the value described by the range.
17931845 return true;
17941846 }
17951847
1796 return false;
1848 // A global variable in address space 0 is non null unless extern weak
1849 // or an absolute symbol reference. Other address spaces may have null as a
1850 // valid address for a global, so we can't assume anything.
1851 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
1852 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
1853 GV->getType()->getAddressSpace() == 0)
1854 return true;
1855 } else
1856 return false;
17971857 }
17981858
17991859 if (auto *I = dyn_cast<Instruction>(V)) {
18081868 }
18091869 }
18101870
1871 // Check for pointer simplifications.
1872 if (V->getType()->isPointerTy()) {
1873 // Alloca never returns null, malloc might.
1874 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
1875 return true;
1876
1877 // A byval, inalloca, or nonnull argument is never null.
1878 if (const Argument *A = dyn_cast<Argument>(V))
1879 if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
1880 return true;
1881
1882 // A Load tagged with nonnull metadata is never null.
1883 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
1884 if (LI->getMetadata(LLVMContext::MD_nonnull))
1885 return true;
1886
1887 if (auto CS = ImmutableCallSite(V))
1888 if (CS.isReturnNonNull())
1889 return true;
1890 }
1891
18111892 // The remaining tests are all recursive, so bail out if we hit the limit.
18121893 if (Depth++ >= MaxDepth)
18131894 return false;
18141895
1815 // Check for pointer simplifications.
1896 // Check for recursive pointer simplifications.
18161897 if (V->getType()->isPointerTy()) {
1817 if (isKnownNonNullAt(V, Q.CxtI, Q.DT))
1898 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
18181899 return true;
1900
18191901 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
18201902 if (isGEPKnownNonNull(GEP, Depth, Q))
18211903 return true;
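A hypothetical test (not part of this commit) for the address-space fix mentioned in the commit message: with a datalayout whose alloca address space is non-zero, the alloca is no longer assumed non-null, so this compare must not be folded to false.

target datalayout = "A5"            ; allocas live in address space 5

define i1 @alloca_as5_cmp() {
  %a = alloca i32, addrspace(5)
  %c = icmp eq i32 addrspace(5)* %a, null   ; may no longer be folded away
  ret i1 %c
}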
34813563 return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
34823564 }
34833565
3484 /// Return true if we know that the specified value is never null.
3485 bool llvm::isKnownNonNull(const Value *V) {
3486 assert(V->getType()->isPointerTy() && "V must be pointer type");
3487
3488 // Alloca never returns null, malloc might.
3489 if (isa<AllocaInst>(V)) return true;
3490
3491 // A byval, inalloca, or nonnull argument is never null.
3492 if (const Argument *A = dyn_cast<Argument>(V))
3493 return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
3494
3495 // A global variable in address space 0 is non null unless extern weak
3496 // or an absolute symbol reference. Other address spaces may have null as a
3497 // valid address for a global, so we can't assume anything.
3498 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
3499 return !GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
3500 GV->getType()->getAddressSpace() == 0;
3501
3502 // A Load tagged with nonnull metadata is never null.
3503 if (const LoadInst *LI = dyn_cast<LoadInst>(V))
3504 return LI->getMetadata(LLVMContext::MD_nonnull);
3505
3506 if (auto CS = ImmutableCallSite(V))
3507 if (CS.isReturnNonNull())
3508 return true;
3509
3510 return false;
3511 }
3512
3513 static bool isKnownNonNullFromDominatingCondition(const Value *V,
3514 const Instruction *CtxI,
3515 const DominatorTree *DT) {
3516 assert(V->getType()->isPointerTy() && "V must be pointer type");
3517 assert(!isa<ConstantPointerNull>(V) && "Did not expect ConstantPointerNull");
3518 assert(CtxI && "Context instruction required for analysis");
3519 assert(DT && "Dominator tree required for analysis");
3520
3521 unsigned NumUsesExplored = 0;
3522 for (auto *U : V->users()) {
3523 // Avoid massive lists
3524 if (NumUsesExplored >= DomConditionsMaxUses)
3525 break;
3526 NumUsesExplored++;
3527
3528 // If the value is used as an argument to a call or invoke, then argument
3529 // attributes may provide an answer about null-ness.
3530 if (auto CS = ImmutableCallSite(U))
3531 if (auto *CalledFunc = CS.getCalledFunction())
3532 for (const Argument &Arg : CalledFunc->args())
3533 if (CS.getArgOperand(Arg.getArgNo()) == V &&
3534 Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
3535 return true;
3536
3537 // Consider only compare instructions uniquely controlling a branch
3538 CmpInst::Predicate Pred;
3539 if (!match(const_cast<User *>(U),
3540 m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
3541 (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
3542 continue;
3543
3544 for (auto *CmpU : U->users()) {
3545 if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
3546 assert(BI->isConditional() && "uses a comparison!");
3547
3548 BasicBlock *NonNullSuccessor =
3549 BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
3550 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
3551 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
3552 return true;
3553 } else if (Pred == ICmpInst::ICMP_NE &&
3554 match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
3555 DT->dominates(cast<Instruction>(CmpU), CtxI)) {
3556 return true;
3557 }
3558 }
3559 }
3560
3561 return false;
3562 }
3563
3564 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
3565 const DominatorTree *DT) {
3566 if (isa<ConstantPointerNull>(V) || isa<UndefValue>(V))
3567 return false;
3568
3569 if (isKnownNonNull(V))
3570 return true;
3571
3572 if (!CtxI || !DT)
3573 return false;
3574
3575 return ::isKnownNonNullFromDominatingCondition(V, CtxI, DT);
3576 }
3577
35783566 OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
35793567 const Value *RHS,
35803568 const DataLayout &DL,
883883 if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
884884 FlowsToReturn.insert(Ret->getReturnValue());
885885
886 auto &DL = F->getParent()->getDataLayout();
887
886888 for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
887889 Value *RetVal = FlowsToReturn[i];
888890
889891 // If this value is locally known to be non-null, we're good
890 if (isKnownNonNull(RetVal))
892 if (isKnownNonZero(RetVal, DL))
891893 continue;
892894
893895 // Otherwise, we need to look upwards since we can't make any local
37423742 return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));
37433743
37443744 // isKnownNonNull -> nonnull attribute
3745 if (isKnownNonNullAt(DerivedPtr, II, &DT))
3745 if (isKnownNonZero(DerivedPtr, DL, 0, &AC, II, &DT))
37463746 II->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
37473747 }
37483748
39313931 for (Value *V : CS.args()) {
39323932 if (V->getType()->isPointerTy() &&
39333933 !CS.paramHasAttr(ArgNo, Attribute::NonNull) &&
3934 isKnownNonNullAt(V, CS.getInstruction(), &DT))
3934 isKnownNonZero(V, DL, 0, &AC, CS.getInstruction(), &DT))
39353935 ArgNos.push_back(ArgNo);
39363936 ArgNo++;
39373937 }
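A hypothetical before/after fragment (not one of the patch's tests) for the loop above: when isKnownNonZero proves a pointer argument non-null at the call site, instcombine now adds the nonnull attribute, which is what the updated CHECK lines in the tests below expect.

declare void @use(i8*)

define void @caller() {
  %p = alloca i8                    ; address-space-0 alloca: non-null
  call void @use(i8* %p)            ; becomes: call void @use(i8* nonnull %p)
  ret void
}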
337337 /// and thus must be phi-ed with undef. We fall back to the standard alloca
338338 /// promotion algorithm in that case.
339339 static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
340 LargeBlockInfo &LBI, DominatorTree &DT,
341 AssumptionCache *AC) {
340 LargeBlockInfo &LBI, const DataLayout &DL,
341 DominatorTree &DT, AssumptionCache *AC) {
342342 StoreInst *OnlyStore = Info.OnlyStore;
343343 bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
344344 BasicBlock *StoreBB = OnlyStore->getParent();
394394 // that information when we erase this Load. So we preserve
395395 // it with an assume.
396396 if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
397 !llvm::isKnownNonNullAt(ReplVal, LI, &DT))
397 !llvm::isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
398398 addAssumeNonNull(AC, LI);
399399
400400 LI->replaceAllUsesWith(ReplVal);
441441 /// }
442442 static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
443443 LargeBlockInfo &LBI,
444 const DataLayout &DL,
444445 DominatorTree &DT,
445446 AssumptionCache *AC) {
446447 // The trickiest case to handle is when we have large blocks. Because of this,
489490 // information when we erase it. So we preserve it with an assume.
490491 Value *ReplVal = std::prev(I)->second->getOperand(0);
491492 if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
492 !llvm::isKnownNonNullAt(ReplVal, LI, &DT))
493 !llvm::isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
493494 addAssumeNonNull(AC, LI);
494495
495496 LI->replaceAllUsesWith(ReplVal);
559560 // If there is only a single store to this value, replace any loads of
560561 // it that are directly dominated by the definition with the value stored.
561562 if (Info.DefiningBlocks.size() == 1) {
562 if (rewriteSingleStoreAlloca(AI, Info, LBI, DT, AC)) {
563 if (rewriteSingleStoreAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
563564 // The alloca has been processed, move on.
564565 RemoveFromAllocasList(AllocaNum);
565566 ++NumSingleStore;
570571 // If the alloca is only read and written in one basic block, just perform a
571572 // linear sweep over the block to eliminate it.
572573 if (Info.OnlyUsedInOneBlock &&
573 promoteSingleBlockAlloca(AI, Info, LBI, DT, AC)) {
574 promoteSingleBlockAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
574575 // The alloca has been processed, move on.
575576 RemoveFromAllocasList(AllocaNum);
576577 continue;
930931 // that information when we erase this Load. So we preserve
931932 // it with an assume.
932933 if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
933 !llvm::isKnownNonNullAt(V, LI, &DT))
934 !llvm::isKnownNonZero(V, SQ.DL, 0, AC, LI, &DT))
934935 addAssumeNonNull(AC, LI);
935936
936937 // Anything using the load now uses the current value.
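A hypothetical example (not from this patch) of the !nonnull handling in the promotion code above: when the tagged load is deleted and replaced by %v, the nonnull fact is preserved with an llvm.assume unless isKnownNonZero already proves %v non-null at the load.

define i8* @promote(i8* %v) {
  %slot = alloca i8*
  store i8* %v, i8** %slot
  %p = load i8*, i8** %slot, !nonnull !0   ; replaced by %v plus an assume
  ret i8* %p
}
!0 = !{}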
1717 %tmp3 = call i64 @llvm.objectsize.i64.p0i8(i8* %tmp2, i1 false, i1 true)
1818 %tmp4 = call i8* @__strncpy_chk(i8* %arg2, i8* %tmp2, i64 1023, i64 %tmp3)
1919 ; CHECK-NOT: call
20 ; CHECK: call i8* @strncpy(i8* %arg2, i8* %tmp2, i64 1023)
20 ; CHECK: call i8* @strncpy(i8* %arg2, i8* nonnull %tmp2, i64 1023)
2121 ; CHECK-NOT: call
2222
2323 ret i8* %tmp4
215215 unreachable
216216 }
217217
218 ; CHECK: define nonnull i32* @gep1(
219 define i32* @gep1(i32* %p) {
220 %q = getelementptr inbounds i32, i32* %p, i32 1
221 ret i32* %q
222 }
223
224 ; CHECK: define i32 addrspace(3)* @gep2(
225 define i32 addrspace(3)* @gep2(i32 addrspace(3)* %p) {
226 %q = getelementptr inbounds i32, i32 addrspace(3)* %p, i32 1
227 ret i32 addrspace(3)* %q
228 }
1616 ; CHECK: bb3:
1717 ; CHECK-NEXT: call void @llvm.dbg.declare
1818 ; CHECK-NEXT: br label %fin
19 ; CHECK: call void @llvm.lifetime.start.p0i8(i64 1, i8* %[[T]])
20 ; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 1, i8* %[[B]])
21 ; CHECK-NEXT: call void @foo(i8* %[[B]], i8* %[[T]])
22 ; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* %[[B]])
23 ; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* %[[T]])
19 ; CHECK: call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %[[T]])
20 ; CHECK-NEXT: call void @llvm.lifetime.start.p0i8(i64 1, i8* nonnull %[[B]])
21 ; CHECK-NEXT: call void @foo(i8* nonnull %[[B]], i8* nonnull %[[T]])
22 ; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %[[B]])
23 ; CHECK-NEXT: call void @llvm.lifetime.end.p0i8(i64 1, i8* nonnull %[[T]])
2424 %text = alloca [1 x i8], align 1
2525 %buff = alloca [1 x i8], align 1
2626 %0 = getelementptr inbounds [1 x i8], [1 x i8]* %text, i64 0, i64 0
4444 ; CHECK: alloca
4545 ; CHECK: call void @llvm.memcpy.p0i8.p2i8.i64
4646 ; CHECK-NOT: addrspacecast
47 ; CHECK: call i32 @foo(i32* %{{.*}})
47 ; CHECK: call i32 @foo(i32* nonnull %{{.*}})
4848 define void @test_call(i32 addrspace(1)* %out, i64 %x) {
4949 entry:
5050 %data = alloca [8 x i32], align 4
6161 ; CHECK: alloca
6262 ; CHECK: call void @llvm.memcpy.p0i8.p2i8.i64
6363 ; CHECK: load i32, i32* %{{.*}}
64 ; CHECK: call i32 @foo(i32* %{{.*}})
64 ; CHECK: call i32 @foo(i32* nonnull %{{.*}})
6565 ; CHECK-NOT: addrspacecast
6666 ; CHECK-NOT: load i32, i32 addrspace(2)*
6767 define void @test_load_and_call(i32 addrspace(1)* %out, i64 %x, i64 %y) {
5959 ; CHECK-NEXT: getelementptr inbounds [124 x i8], [124 x i8]*
6060
6161 ; use @G instead of %A
62 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %{{.*}}, i8* getelementptr inbounds (%T, %T* @G, i64 0, i32 0)
62 ; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull %{{.*}}, i8* getelementptr inbounds (%T, %T* @G, i64 0, i32 0)
6363 call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* bitcast (%T* @G to i8*), i64 124, i32 4, i1 false)
6464 call void @llvm.memcpy.p0i8.p0i8.i64(i8* %b, i8* %a, i64 124, i32 4, i1 false)
6565 call void @bar(i8* %b)
33
44 define void @func(i8* %i) nounwind ssp {
55 ; CHECK-LABEL: @func(
6 ; CHECK: @__strcpy_chk(i8* %arraydecay, i8* %i, i64 32)
6 ; CHECK: @__strcpy_chk(i8* nonnull %arraydecay, i8* %i, i64 32)
77 entry:
88 %s = alloca [32 x i8], align 16
99 %arraydecay = getelementptr inbounds [32 x i8], [32 x i8]* %s, i32 0, i32 0
153153 define i32 @test_no_simplify2(i32 %x) {
154154 ; CHECK-LABEL: @test_no_simplify2(
155155 ; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [7 x i8], [7 x i8]* @null_hello, i32 0, i32 %x
156 ; CHECK-NEXT: [[HELLO_L:%.*]] = call i32 @strlen(i8* [[HELLO_P]])
156 ; CHECK-NEXT: [[HELLO_L:%.*]] = call i32 @strlen(i8* nonnull [[HELLO_P]])
157157 ; CHECK-NEXT: ret i32 [[HELLO_L]]
158158 ;
159159 %hello_p = getelementptr inbounds [7 x i8], [7 x i8]* @null_hello, i32 0, i32 %x
167167 ; CHECK-LABEL: @test_no_simplify3(
168168 ; CHECK-NEXT: [[AND:%.*]] = and i32 %x, 15
169169 ; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [13 x i8], [13 x i8]* @null_hello_mid, i32 0, i32 [[AND]]
170 ; CHECK-NEXT: [[HELLO_L:%.*]] = call i32 @strlen(i8* [[HELLO_P]])
170 ; CHECK-NEXT: [[HELLO_L:%.*]] = call i32 @strlen(i8* nonnull [[HELLO_P]])
171171 ; CHECK-NEXT: ret i32 [[HELLO_L]]
172172 ;
173173 %and = and i32 %x, 15
154154 ; CHECK-LABEL: @test_no_simplify2(
155155 ; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[X:%.*]] to i64
156156 ; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [7 x i32], [7 x i32]* @null_hello, i64 0, i64 [[TMP1]]
157 ; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(i32* [[HELLO_P]])
157 ; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(i32* nonnull [[HELLO_P]])
158158 ; CHECK-NEXT: ret i64 [[HELLO_L]]
159159 ;
160160 %hello_p = getelementptr inbounds [7 x i32], [7 x i32]* @null_hello, i32 0, i32 %x
169169 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 15
170170 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[AND]] to i64
171171 ; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [13 x i32], [13 x i32]* @null_hello_mid, i64 0, i64 [[TMP1]]
172 ; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(i32* [[HELLO_P]])
172 ; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(i32* nonnull [[HELLO_P]])
173173 ; CHECK-NEXT: ret i64 [[HELLO_L]]
174174 ;
175175 %and = and i32 %x, 15
158158 ; CHECK-LABEL: @test_no_simplify2(
159159 ; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[X:%.*]] to i64
160160 ; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [7 x i16], [7 x i16]* @null_hello, i64 0, i64 [[TMP1]]
161 ; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(i16* [[HELLO_P]])
161 ; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(i16* nonnull [[HELLO_P]])
162162 ; CHECK-NEXT: ret i64 [[HELLO_L]]
163163 ;
164164 %hello_p = getelementptr inbounds [7 x i16], [7 x i16]* @null_hello, i16 0, i16 %x
173173 ; CHECK-NEXT: [[AND:%.*]] = and i16 [[X:%.*]], 15
174174 ; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[AND]] to i64
175175 ; CHECK-NEXT: [[HELLO_P:%.*]] = getelementptr inbounds [13 x i16], [13 x i16]* @null_hello_mid, i64 0, i64 [[TMP1]]
176 ; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(i16* [[HELLO_P]])
176 ; CHECK-NEXT: [[HELLO_L:%.*]] = call i64 @wcslen(i16* nonnull [[HELLO_P]])
177177 ; CHECK-NEXT: ret i64 [[HELLO_L]]
178178 ;
179179 %and = and i16 %x, 15