llvm.org GIT mirror llvm / e82e4dd
Remove dead TLI arg of isKnownNonNull and propagate deadness. NFC. This actually uncovered a surprisingly large chain of ultimately unused TLI args. From what I can gather, this argument is a remnant of when isKnownNonNull would look at the TLI directly. The current approach seems to be that InferFunctionAttrs runs early in the pipeline and uses TLI to annotate the TLI-dependent non-null information as return attributes. This also removes the dependence of functionattrs on TLI altogether. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@274455 91177308-0d34-0410-b5e6-96231b3b80d8 Sean Silva 4 years ago
10 changed file(s) with 42 addition(s) and 70 deletion(s). Raw diff Collapse all Expand all
2828 /// specified instruction.
2929 bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
3030 const Instruction *CtxI = nullptr,
31 const DominatorTree *DT = nullptr,
32 const TargetLibraryInfo *TLI = nullptr);
31 const DominatorTree *DT = nullptr);
3332
3433 /// Returns true if V is always a dereferenceable pointer with alignment
3534 /// greater or equal than requested. If the context instruction is specified
3837 bool isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
3938 const DataLayout &DL,
4039 const Instruction *CtxI = nullptr,
41 const DominatorTree *DT = nullptr,
42 const TargetLibraryInfo *TLI = nullptr);
40 const DominatorTree *DT = nullptr);
4341
4442 /// isSafeToLoadUnconditionally - Return true if we know that executing a load
4543 /// from this value cannot trap.
5351 bool isSafeToLoadUnconditionally(Value *V, unsigned Align,
5452 const DataLayout &DL,
5553 Instruction *ScanFrom = nullptr,
56 const DominatorTree *DT = nullptr,
57 const TargetLibraryInfo *TLI = nullptr);
54 const DominatorTree *DT = nullptr);
5855
5956 /// DefMaxInstsToScan - the default number of maximum instructions
6057 /// to scan in the block, used by FindAvailableLoadedValue().
287287 /// for such instructions, moving them may change the resulting value.
288288 bool isSafeToSpeculativelyExecute(const Value *V,
289289 const Instruction *CtxI = nullptr,
290 const DominatorTree *DT = nullptr,
291 const TargetLibraryInfo *TLI = nullptr);
290 const DominatorTree *DT = nullptr);
292291
293292 /// Returns true if the result or effects of the given instructions \p I
294293 /// depend on or influence global memory.
303302 /// Return true if this pointer couldn't possibly be null by its definition.
304303 /// This returns true for allocas, non-extern-weak globals, and byval
305304 /// arguments.
306 bool isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI = nullptr);
305 bool isKnownNonNull(const Value *V);
307306
308307 /// Return true if this pointer couldn't possibly be null. If the context
309308 /// instruction is specified, perform context-sensitive analysis and return
311310 /// instruction.
312311 bool isKnownNonNullAt(const Value *V,
313312 const Instruction *CtxI = nullptr,
314 const DominatorTree *DT = nullptr,
315 const TargetLibraryInfo *TLI = nullptr);
313 const DominatorTree *DT = nullptr);
316314
317315 /// Return true if it is valid to use the assumptions provided by an
318316 /// assume intrinsic, I, at the point in the control-flow identified by the
19741974 RHS = RHS->stripPointerCasts();
19751975
19761976 // A non-null pointer is not equal to a null pointer.
1977 if (llvm::isKnownNonNull(LHS, TLI) && isa<ConstantPointerNull>(RHS) &&
1977 if (llvm::isKnownNonNull(LHS) && isa<ConstantPointerNull>(RHS) &&
19781978 (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
19791979 return ConstantInt::get(GetCompareTy(LHS),
19801980 !CmpInst::isTrueWhenEqual(Pred));
21292129 // cannot be elided. We cannot fold malloc comparison to null. Also, the
21302130 // dynamic allocation call could be either of the operands.
21312131 Value *MI = nullptr;
2132 if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonNullAt(RHS, CxtI, DT, TLI))
2132 if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonNullAt(RHS, CxtI, DT))
21332133 MI = LHS;
2134 else if (isAllocLikeFn(RHS, TLI) &&
2135 llvm::isKnownNonNullAt(LHS, CxtI, DT, TLI))
2134 else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonNullAt(LHS, CxtI, DT))
21362135 MI = RHS;
21372136 // FIXME: We should also fold the compare when the pointer escapes, but the
21382137 // compare dominates the pointer escape
5353 static bool isDereferenceableAndAlignedPointer(
5454 const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
5555 const Instruction *CtxI, const DominatorTree *DT,
56 const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited) {
56 SmallPtrSetImpl<const Value *> &Visited) {
5757 // Note that it is not safe to speculate into a malloc'd region because
5858 // malloc may return null.
5959
6060 // bitcast instructions are no-ops as far as dereferenceability is concerned.
6161 if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
6262 return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
63 DL, CtxI, DT, TLI, Visited);
63 DL, CtxI, DT, Visited);
6464
6565 bool CheckForNonNull = false;
6666 APInt KnownDerefBytes(Size.getBitWidth(),
6767 V->getPointerDereferenceableBytes(DL, CheckForNonNull));
6868 if (KnownDerefBytes.getBoolValue()) {
6969 if (KnownDerefBytes.uge(Size))
70 if (!CheckForNonNull || isKnownNonNullAt(V, CtxI, DT, TLI))
70 if (!CheckForNonNull || isKnownNonNullAt(V, CtxI, DT))
7171 return isAligned(V, Align, DL);
7272 }
7373
8888
8989 return Visited.insert(Base).second &&
9090 isDereferenceableAndAlignedPointer(Base, Align, Offset + Size, DL,
91 CtxI, DT, TLI, Visited);
91 CtxI, DT, Visited);
9292 }
9393
9494 // For gc.relocate, look through relocations
9595 if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
9696 return isDereferenceableAndAlignedPointer(
97 RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, TLI, Visited);
97 RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);
9898
9999 if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
100100 return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
101 DL, CtxI, DT, TLI, Visited);
101 DL, CtxI, DT, Visited);
102102
103103 // If we don't know, assume the worst.
104104 return false;
107107 bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
108108 const DataLayout &DL,
109109 const Instruction *CtxI,
110 const DominatorTree *DT,
111 const TargetLibraryInfo *TLI) {
110 const DominatorTree *DT) {
112111 // When dereferenceability information is provided by a dereferenceable
113112 // attribute, we know exactly how many bytes are dereferenceable. If we can
114113 // determine the exact offset to the attributed variable, we can use that
126125 SmallPtrSet<const Value *, 32> Visited;
127126 return ::isDereferenceableAndAlignedPointer(
128127 V, Align, APInt(DL.getTypeSizeInBits(VTy), DL.getTypeStoreSize(Ty)), DL,
129 CtxI, DT, TLI, Visited);
128 CtxI, DT, Visited);
130129 }
131130
132131 bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
133132 const Instruction *CtxI,
134 const DominatorTree *DT,
135 const TargetLibraryInfo *TLI) {
136 return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT, TLI);
133 const DominatorTree *DT) {
134 return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT);
137135 }
138136
139137 /// \brief Test if A and B will obviously have the same value.
181179 bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align,
182180 const DataLayout &DL,
183181 Instruction *ScanFrom,
184 const DominatorTree *DT,
185 const TargetLibraryInfo *TLI) {
182 const DominatorTree *DT) {
186183 // Zero alignment means that the load has the ABI alignment for the target
187184 if (Align == 0)
188185 Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
190187
191188 // If DT is not specified we can't make context-sensitive query
192189 const Instruction* CtxI = DT ? ScanFrom : nullptr;
193 if (isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI))
190 if (isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT))
194191 return true;
195192
196193 int64_t ByteOffset = 0;
30513051
30523052 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
30533053 const Instruction *CtxI,
3054 const DominatorTree *DT,
3055 const TargetLibraryInfo *TLI) {
3054 const DominatorTree *DT) {
30563055 const Operator *Inst = dyn_cast<Operator>(V);
30573056 if (!Inst)
30583057 return false;
31033102 Attribute::SanitizeAddress))
31043103 return false;
31053104 const DataLayout &DL = LI->getModule()->getDataLayout();
3106 return isDereferenceableAndAlignedPointer(
3107 LI->getPointerOperand(), LI->getAlignment(), DL, CtxI, DT, TLI);
3105 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
3106 LI->getAlignment(), DL, CtxI, DT);
31083107 }
31093108 case Instruction::Call: {
31103109 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
31893188 }
31903189
31913190 /// Return true if we know that the specified value is never null.
3192 bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
3191 bool llvm::isKnownNonNull(const Value *V) {
31933192 assert(V->getType()->isPointerTy() && "V must be pointer type");
31943193
31953194 // Alloca never returns null, malloc might.
32563255 }
32573256
32583257 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
3259 const DominatorTree *DT, const TargetLibraryInfo *TLI) {
3260 if (isKnownNonNull(V, TLI))
3258 const DominatorTree *DT) {
3259 if (isKnownNonNull(V))
32613260 return true;
32623261
32633262 return CtxI ? ::isKnownNonNullFromDominatingCondition(V, CtxI, DT) : false;
781781 /// \p Speculative based on whether the returned conclusion is a speculative
782782 /// conclusion due to SCC calls.
783783 static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
784 const TargetLibraryInfo &TLI, bool &Speculative) {
784 bool &Speculative) {
785785 assert(F->getReturnType()->isPointerTy() &&
786786 "nonnull only meaningful on pointer types");
787787 Speculative = false;
795795 Value *RetVal = FlowsToReturn[i];
796796
797797 // If this value is locally known to be non-null, we're good
798 if (isKnownNonNull(RetVal, &TLI))
798 if (isKnownNonNull(RetVal))
799799 continue;
800800
801801 // Otherwise, we need to look upwards since we can't make any local
844844 }
845845
846846 /// Deduce nonnull attributes for the SCC.
847 static bool addNonNullAttrs(const SCCNodeSet &SCCNodes,
848 const TargetLibraryInfo &TLI) {
847 static bool addNonNullAttrs(const SCCNodeSet &SCCNodes) {
849848 // Speculative that all functions in the SCC return only nonnull
850849 // pointers. We may refute this as we analyze functions.
851850 bool SCCReturnsNonNull = true;
872871 continue;
873872
874873 bool Speculative = false;
875 if (isReturnNonNull(F, SCCNodes, TLI, Speculative)) {
874 if (isReturnNonNull(F, SCCNodes, Speculative)) {
876875 if (!Speculative) {
877876 // Mark the function eagerly since we may discover a function
878877 // which prevents us from speculating about the entire SCC
986985
987986 PreservedAnalyses PostOrderFunctionAttrsPass::run(LazyCallGraph::SCC &C,
988987 CGSCCAnalysisManager &AM) {
989 Module &M = *C.begin()->getFunction().getParent();
990 const ModuleAnalysisManager &MAM =
991 AM.getResult<ModuleAnalysisManagerCGSCCProxy>(C).getManager();
992988 FunctionAnalysisManager &FAM =
993989 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C).getManager();
994
995 // FIXME: Need some way to make it more reasonable to assume that this is
996 // always cached.
997 TargetLibraryInfo &TLI = *MAM.getCachedResult<TargetLibraryAnalysis>(M);
998990
999991 // We pass a lambda into functions to wire them up to the analysis manager
1000992 // for getting function analyses.
10381030 // more precise attributes as well.
10391031 if (!HasUnknownCall) {
10401032 Changed |= addNoAliasAttrs(SCCNodes);
1041 Changed |= addNonNullAttrs(SCCNodes, TLI);
1033 Changed |= addNonNullAttrs(SCCNodes);
10421034 Changed |= removeConvergentAttrs(SCCNodes);
10431035 Changed |= addNoRecurseAttrs(SCCNodes);
10441036 }
10581050 void getAnalysisUsage(AnalysisUsage &AU) const override {
10591051 AU.setPreservesCFG();
10601052 AU.addRequired<AssumptionCacheTracker>();
1061 AU.addRequired<TargetLibraryInfoWrapperPass>();
10621053 getAAResultsAnalysisUsage(AU);
10631054 CallGraphSCCPass::getAnalysisUsage(AU);
10641055 }
1065
1066 private:
1067 TargetLibraryInfo *TLI;
10681056 };
10691057 }
10701058
10731061 "Deduce function attributes", false, false)
10741062 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
10751063 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
1076 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
10771064 INITIALIZE_PASS_END(PostOrderFunctionAttrsLegacyPass, "functionattrs",
10781065 "Deduce function attributes", false, false)
10791066
10821069 bool PostOrderFunctionAttrsLegacyPass::runOnSCC(CallGraphSCC &SCC) {
10831070 if (skipSCC(SCC))
10841071 return false;
1085
1086 TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
10871072 bool Changed = false;
10881073
10891074 // We compute dedicated AA results for each function in the SCC as needed. We
11221107 // more precise attributes as well.
11231108 if (!ExternalNode) {
11241109 Changed |= addNoAliasAttrs(SCCNodes);
1125 Changed |= addNonNullAttrs(SCCNodes, *TLI);
1110 Changed |= addNonNullAttrs(SCCNodes);
11261111 Changed |= removeConvergentAttrs(SCCNodes);
11271112 Changed |= addNoRecurseAttrs(SCCNodes);
11281113 }
23192319 return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));
23202320
23212321 // isKnownNonNull -> nonnull attribute
2322 if (isKnownNonNullAt(DerivedPtr, II, DT, TLI))
2322 if (isKnownNonNullAt(DerivedPtr, II, DT))
23232323 II->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
23242324 }
23252325
24822482 for (Value *V : CS.args()) {
24832483 if (V->getType()->isPointerTy() &&
24842484 !CS.paramHasAttr(ArgNo + 1, Attribute::NonNull) &&
2485 isKnownNonNullAt(V, CS.getInstruction(), DT, TLI))
2485 isKnownNonNullAt(V, CS.getInstruction(), DT))
24862486 Indices.push_back(ArgNo + 1);
24872487 ArgNo++;
24882488 }
8787 const LoopSafetyInfo *SafetyInfo);
8888 static bool isSafeToExecuteUnconditionally(const Instruction &Inst,
8989 const DominatorTree *DT,
90 const TargetLibraryInfo *TLI,
9190 const Loop *CurLoop,
9291 const LoopSafetyInfo *SafetyInfo,
9392 const Instruction *CtxI = nullptr);
364363 if (CurLoop->hasLoopInvariantOperands(&I) &&
365364 canSinkOrHoistInst(I, AA, DT, TLI, CurLoop, CurAST, SafetyInfo) &&
366365 isSafeToExecuteUnconditionally(
367 I, DT, TLI, CurLoop, SafetyInfo,
366 I, DT, CurLoop, SafetyInfo,
368367 CurLoop->getLoopPreheader()->getTerminator()))
369368 Changed |= hoist(I, DT, CurLoop, SafetyInfo);
370369 }
489488 // TODO: Plumb the context instruction through to make hoisting and sinking
490489 // more powerful. Hoisting of loads already works due to the special casing
491490 // above.
492 return isSafeToExecuteUnconditionally(I, DT, TLI, CurLoop, SafetyInfo,
493 nullptr);
491 return isSafeToExecuteUnconditionally(I, DT, CurLoop, SafetyInfo, nullptr);
494492 }
495493
496494 /// Returns true if a PHINode is a trivially replaceable with an
723721 /// or if it is a trapping instruction and is guaranteed to execute.
724722 static bool isSafeToExecuteUnconditionally(const Instruction &Inst,
725723 const DominatorTree *DT,
726 const TargetLibraryInfo *TLI,
727724 const Loop *CurLoop,
728725 const LoopSafetyInfo *SafetyInfo,
729726 const Instruction *CtxI) {
730 if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT, TLI))
727 if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT))
731728 return true;
732729
733730 return isGuaranteedToExecute(Inst, DT, CurLoop, SafetyInfo);
925922
926923 if (!GuaranteedToExecute && !CanSpeculateLoad)
927924 CanSpeculateLoad = isSafeToExecuteUnconditionally(
928 *Load, DT, TLI, CurLoop, SafetyInfo, Preheader->getTerminator());
925 *Load, DT, CurLoop, SafetyInfo, Preheader->getTerminator());
929926 } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
930927 // Stores *of* the pointer are not interesting, only stores *to* the
931928 // pointer.
958955 if (!GuaranteedToExecute && !CanSpeculateLoad) {
959956 CanSpeculateLoad = isDereferenceableAndAlignedPointer(
960957 Store->getPointerOperand(), Store->getAlignment(), MDL,
961 Preheader->getTerminator(), DT, TLI);
958 Preheader->getTerminator(), DT);
962959 }
963960 } else
964961 return Changed; // Not a load or store.
0 ; RUN: opt < %s -basicaa -functionattrs -rpo-functionattrs -S | FileCheck %s
1 ; RUN: opt < %s -aa-pipeline=basic-aa -passes='require<targetlibinfo>,cgscc(function-attrs),rpo-functionattrs' -S | FileCheck %s
1 ; RUN: opt < %s -aa-pipeline=basic-aa -passes='cgscc(function-attrs),rpo-functionattrs' -S | FileCheck %s
22
33 ; CHECK: define i32 @leaf() #0
44 define i32 @leaf() {
0 ; RUN: opt < %s -functionattrs -S | FileCheck %s
1 ; RUN: opt < %s -aa-pipeline=basic-aa -passes='require<targetlibinfo>,cgscc(function-attrs)' -S | FileCheck %s
1 ; RUN: opt < %s -aa-pipeline=basic-aa -passes='cgscc(function-attrs)' -S | FileCheck %s
22 @x = global i32 0
33
44 declare void @test1_1(i8* %x1_1, i8* readonly %y1_1, ...)