llvm.org GIT mirror: llvm / 570e52c
[C++11] More 'nullptr' conversion. In some cases just using a boolean check instead of comparing to nullptr.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@206243 91177308-0d34-0410-b5e6-96231b3b80d8
Craig Topper, 6 years ago
59 changed files with 583 additions and 572 deletions.
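Before the per-file hunks, here is a minimal, self-contained sketch of the two idioms the commit applies. It is illustrative only and not code from the patch; the Node type and its members are invented for this example. Pointer initializations, returns, and default arguments written as 0 or NULL become nullptr, and explicit comparisons against the null pointer become plain boolean checks where that reads more naturally.

// Illustrative sketch only; not code from this commit. The Node type is
// invented for the example; it compiles as C++11.
#include <cassert>

struct Node {
  Node *Next;

  // Before: Node() : Next(0) {}
  Node() : Next(nullptr) {}              // initialize pointer members with nullptr

  // Before: bool hasNext() const { return Next != 0; }
  bool hasNext() const { return Next != nullptr; }

  // Before: void setNext(Node *N = 0)
  void setNext(Node *N = nullptr) {      // nullptr as the default argument
    // Before: assert(Next == 0 && "Already linked!");
    assert(!Next && "Already linked!");  // boolean check instead of comparing to nullptr
    Next = N;
  }

  Node *nextOrNull() const {
    // Before: return hasNext() ? Next : 0;
    return hasNext() ? Next : nullptr;   // return nullptr rather than 0/NULL
  }
};

The same mechanical substitution appears in every hunk below; a few sites are also reflowed where the longer nullptr token pushes a line past 80 columns.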
4242 const MDNode *TBAAInfo;
4343 public:
4444 PointerRec(Value *V)
45 : Val(V), PrevInList(0), NextInList(0), AS(0), Size(0),
45 : Val(V), PrevInList(nullptr), NextInList(nullptr), AS(nullptr), Size(0),
4646 TBAAInfo(DenseMapInfo<const MDNode *>::getEmptyKey()) {}
4747
4848 Value *getValue() const { return Val; }
4949
5050 PointerRec *getNext() const { return NextInList; }
51 bool hasAliasSet() const { return AS != 0; }
51 bool hasAliasSet() const { return AS != nullptr; }
5252
5353 PointerRec** setPrevInList(PointerRec **PIL) {
5454 PrevInList = PIL;
7474 // If we have missing or conflicting TBAAInfo, return null.
7575 if (TBAAInfo == DenseMapInfo<const MDNode *>::getEmptyKey() ||
7676 TBAAInfo == DenseMapInfo<const MDNode *>::getTombstoneKey())
77 return 0;
77 return nullptr;
7878 return TBAAInfo;
7979 }
8080
9090 }
9191
9292 void setAliasSet(AliasSet *as) {
93 assert(AS == 0 && "Already have an alias set!");
93 assert(!AS && "Already have an alias set!");
9494 AS = as;
9595 }
9696
9999 *PrevInList = NextInList;
100100 if (AS->PtrListEnd == &NextInList) {
101101 AS->PtrListEnd = PrevInList;
102 assert(*AS->PtrListEnd == 0 && "List not terminated right!");
102 assert(*AS->PtrListEnd == nullptr && "List not terminated right!");
103103 }
104104 delete this;
105105 }
173173 class iterator;
174174 iterator begin() const { return iterator(PtrList); }
175175 iterator end() const { return iterator(); }
176 bool empty() const { return PtrList == 0; }
176 bool empty() const { return PtrList == nullptr; }
177177
178178 void print(raw_ostream &OS) const;
179179 void dump() const;
183183 PointerRec, ptrdiff_t> {
184184 PointerRec *CurNode;
185185 public:
186 explicit iterator(PointerRec *CN = 0) : CurNode(CN) {}
186 explicit iterator(PointerRec *CN = nullptr) : CurNode(CN) {}
187187
188188 bool operator==(const iterator& x) const {
189189 return CurNode == x.CurNode;
219219 // Can only be created by AliasSetTracker. Also, ilist creates one
220220 // to serve as a sentinel.
221221 friend struct ilist_sentinel_traits<AliasSet>;
222 AliasSet() : PtrList(0), PtrListEnd(&PtrList), Forward(0), RefCount(0),
223 AccessTy(NoModRef), AliasTy(MustAlias), Volatile(false) {
222 AliasSet()
223 : PtrList(nullptr), PtrListEnd(&PtrList), Forward(nullptr), RefCount(0),
224 AccessTy(NoModRef), AliasTy(MustAlias), Volatile(false) {
224225 }
225226
226227 AliasSet(const AliasSet &AS) LLVM_DELETED_FUNCTION;
284285 void deleted() override;
285286 void allUsesReplacedWith(Value *) override;
286287 public:
287 ASTCallbackVH(Value *V, AliasSetTracker *AST = 0);
288 ASTCallbackVH(Value *V, AliasSetTracker *AST = nullptr);
288289 ASTCallbackVH &operator=(Value *V);
289290 };
290291 /// ASTCallbackVHDenseMapInfo - Traits to tell DenseMap that tell us how to
353354 /// pointer didn't alias anything).
354355 AliasSet &getAliasSetForPointer(Value *P, uint64_t Size,
355356 const MDNode *TBAAInfo,
356 bool *New = 0);
357 bool *New = nullptr);
357358
358359 /// getAliasSetForPointerIfExists - Return the alias set containing the
359360 /// location specified if one exists, otherwise return null.
407408 // entry for the pointer if it doesn't already exist.
408409 AliasSet::PointerRec &getEntryFor(Value *V) {
409410 AliasSet::PointerRec *&Entry = PointerMap[ASTCallbackVH(V, this)];
410 if (Entry == 0)
411 if (!Entry)
411412 Entry = new AliasSet::PointerRec(V);
412413 return *Entry;
413414 }
4646
4747 void getAnalysisUsage(AnalysisUsage &AU) const override;
4848 bool runOnFunction(Function &F) override;
49 void print(raw_ostream &OS, const Module *M = 0) const override;
49 void print(raw_ostream &OS, const Module *M = nullptr) const override;
5050
5151 /// \brief Get an edge's probability, relative to other out-edges of the Src.
5252 ///
6464 /// on branchy code but not loops, and LI is most useful on code with loops but
6565 /// does not help on branchy code outside loops.
6666 bool isPotentiallyReachable(const Instruction *From, const Instruction *To,
67 const DominatorTree *DT = 0,
68 const LoopInfo *LI = 0);
67 const DominatorTree *DT = nullptr,
68 const LoopInfo *LI = nullptr);
6969
7070 /// \brief Determine whether block 'To' is reachable from 'From', returning
7171 /// true if uncertain.
7474 /// Returns false only if we can prove that once 'From' has been reached then
7575 /// 'To' can not be executed. Conservatively returns true.
7676 bool isPotentiallyReachable(const BasicBlock *From, const BasicBlock *To,
77 const DominatorTree *DT = 0,
78 const LoopInfo *LI = 0);
77 const DominatorTree *DT = nullptr,
78 const LoopInfo *LI = nullptr);
7979
8080 } // End llvm namespace
8181
7272 Instruction *Destination) :
7373 Src(Source),
7474 Dst(Destination),
75 NextPredecessor(NULL),
76 NextSuccessor(NULL) {}
75 NextPredecessor(nullptr),
76 NextSuccessor(nullptr) {}
7777 virtual ~Dependence() {}
7878
7979 /// Dependence::DVEntry - Each level in the distance/direction vector
9595 bool Splitable : 1; // Splitting the loop will break dependence.
9696 const SCEV *Distance; // NULL implies no distance available.
9797 DVEntry() : Direction(ALL), Scalar(true), PeelFirst(false),
98 PeelLast(false), Splitable(false), Distance(NULL) { }
98 PeelLast(false), Splitable(false), Distance(nullptr) { }
9999 };
100100
101101 /// getSrc - Returns the source instruction for this dependence.
153153
154154 /// getDistance - Returns the distance (or NULL) associated with a
155155 /// particular level.
156 virtual const SCEV *getDistance(unsigned Level) const { return NULL; }
156 virtual const SCEV *getDistance(unsigned Level) const { return nullptr; }
157157
158158 /// isPeelFirst - Returns true if peeling the first iteration from
159159 /// this loop will break this dependence.
920920 bool runOnFunction(Function &F) override;
921921 void releaseMemory() override;
922922 void getAnalysisUsage(AnalysisUsage &) const override;
923 void print(raw_ostream &, const Module * = 0) const override;
923 void print(raw_ostream &, const Module * = nullptr) const override;
924924 }; // class DependenceAnalysis
925925
926926 /// createDependenceAnalysisPass - This creates an instance of the
141141
142142 /// print - Convert to human readable form
143143 ///
144 void print(raw_ostream &OS, const Module* = 0) const override;
144 void print(raw_ostream &OS, const Module* = nullptr) const override;
145145
146146 /// dump - Dump the dominance frontier to dbgs().
147147 void dump() const;
168168 return Processed.count(Inst);
169169 }
170170
171 void print(raw_ostream &OS, const Module* = 0) const override;
171 void print(raw_ostream &OS, const Module* = nullptr) const override;
172172
173173 /// dump - This method is used for debugging.
174174 void dump() const;
4747 public:
4848 static char ID; // Pass identification, replacement for typeid
4949
50 IntervalPartition() : FunctionPass(ID), RootInterval(0) {
50 IntervalPartition() : FunctionPass(ID), RootInterval(nullptr) {
5151 initializeIntervalPartitionPass(*PassRegistry::getPassRegistry());
5252 }
5353
6161 IntervalPartition(IntervalPartition &I, bool);
6262
6363 // print - Show contents in human readable format...
64 void print(raw_ostream &O, const Module* = 0) const override;
64 void print(raw_ostream &O, const Module* = nullptr) const override;
6565
6666 // getRootInterval() - Return the root interval that contains the starting
6767 // block of the function.
7676 // getBlockInterval - Return the interval that a basic block exists in.
7777 inline Interval *getBlockInterval(BasicBlock *BB) {
7878 IntervalMapTy::iterator I = IntervalMap.find(BB);
79 return I != IntervalMap.end() ? I->second : 0;
79 return I != IntervalMap.end() ? I->second : nullptr;
8080 }
8181
8282 // getAnalysisUsage - Implement the Pass API
3232 void operator=(const LazyValueInfo&) LLVM_DELETED_FUNCTION;
3333 public:
3434 static char ID;
35 LazyValueInfo() : FunctionPass(ID), PImpl(0) {
35 LazyValueInfo() : FunctionPass(ID), PImpl(nullptr) {
3636 initializeLazyValueInfoPass(*PassRegistry::getPassRegistry());
3737 }
38 ~LazyValueInfo() { assert(PImpl == 0 && "releaseMemory not called"); }
38 ~LazyValueInfo() { assert(!PImpl && "releaseMemory not called"); }
3939
4040 /// Tristate - This is used to return true/false/dunno results.
4141 enum Tristate {
2626
2727 LibCallInfo *LCI;
2828
29 explicit LibCallAliasAnalysis(LibCallInfo *LC = 0)
29 explicit LibCallAliasAnalysis(LibCallInfo *LC = nullptr)
3030 : FunctionPass(ID), LCI(LC) {
3131 initializeLibCallAliasAnalysisPass(*PassRegistry::getPassRegistry());
3232 }
129129 mutable const LibCallLocationInfo *Locations;
130130 mutable unsigned NumLocations;
131131 public:
132 LibCallInfo() : Impl(0), Locations(0), NumLocations(0) {}
132 LibCallInfo() : Impl(nullptr), Locations(nullptr), NumLocations(0) {}
133133 virtual ~LibCallInfo();
134134
135135 //===------------------------------------------------------------------===//
2626 /// specified pointer, we do a quick local scan of the basic block containing
2727 /// ScanFrom, to determine if the address is already accessed.
2828 bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
29 unsigned Align, const DataLayout *TD = 0);
29 unsigned Align,
30 const DataLayout *TD = nullptr);
3031
3132 /// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at
3233 /// the instruction before ScanFrom) checking to see if we have the value at
4849 Value *FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
4950 BasicBlock::iterator &ScanFrom,
5051 unsigned MaxInstsToScan = 6,
51 AliasAnalysis *AA = 0,
52 MDNode **TBAATag = 0);
52 AliasAnalysis *AA = nullptr,
53 MDNode **TBAATag = nullptr);
5354
5455 }
5556
232232 bool RoundToAlign;
233233
234234 SizeOffsetEvalType unknown() {
235 return std::make_pair((Value*)0, (Value*)0);
235 return std::make_pair(nullptr, nullptr);
236236 }
237237 SizeOffsetEvalType compute_(Value *V);
238238
9696 PairTy Value;
9797 explicit MemDepResult(PairTy V) : Value(V) {}
9898 public:
99 MemDepResult() : Value(0, Invalid) {}
99 MemDepResult() : Value(nullptr, Invalid) {}
100100
101101 /// get methods: These are static ctor methods for creating various
102102 /// MemDepResult kinds.
154154 /// getInst() - If this is a normal dependency, return the instruction that
155155 /// is depended on. Otherwise, return null.
156156 Instruction *getInst() const {
157 if (Value.getInt() == Other) return NULL;
157 if (Value.getInt() == Other) return nullptr;
158158 return Value.getPointer();
159159 }
160160
284284 /// pointer. May be null if there are no tags or conflicting tags.
285285 const MDNode *TBAATag;
286286
287 NonLocalPointerInfo() : Size(AliasAnalysis::UnknownSize), TBAATag(0) {}
287 NonLocalPointerInfo()
288 : Size(AliasAnalysis::UnknownSize), TBAATag(nullptr) {}
288289 };
289290
290291 /// CachedNonLocalPointerInfo - This map stores the cached results of doing
400401 bool isLoad,
401402 BasicBlock::iterator ScanIt,
402403 BasicBlock *BB,
403 Instruction *QueryInst = 0);
404 Instruction *QueryInst = nullptr);
404405
405406
406407 /// getLoadLoadClobberFullWidthSize - This is a little bit of analysis that
4444 /// InstInputs - The inputs for our symbolic address.
4545 SmallVector InstInputs;
4646 public:
47 PHITransAddr(Value *addr, const DataLayout *DL) : Addr(addr), DL(DL), TLI(0) {
47 PHITransAddr(Value *addr, const DataLayout *DL)
48 : Addr(addr), DL(DL), TLI(nullptr) {
4849 // If the address is an instruction, the whole thing is considered an input.
4950 if (Instruction *I = dyn_cast<Instruction>(Addr))
5051 InstInputs.push_back(I);
4747 /// analysis and whether the visit completed or aborted early.
4848 class PtrInfo {
4949 public:
50 PtrInfo() : AbortedInfo(0, false), EscapedInfo(0, false) {}
50 PtrInfo() : AbortedInfo(nullptr, false), EscapedInfo(nullptr, false) {}
5151
5252 /// \brief Reset the pointer info, clearing all state.
5353 void reset() {
54 AbortedInfo.setPointer(0);
54 AbortedInfo.setPointer(nullptr);
5555 AbortedInfo.setInt(false);
56 EscapedInfo.setPointer(0);
56 EscapedInfo.setPointer(nullptr);
5757 EscapedInfo.setInt(false);
5858 }
5959
7575
7676 /// \brief Mark the visit as aborted. Intended for use in a void return.
7777 /// \param I The instruction which caused the visit to abort, if available.
78 void setAborted(Instruction *I = 0) {
78 void setAborted(Instruction *I = nullptr) {
7979 AbortedInfo.setInt(true);
8080 AbortedInfo.setPointer(I);
8181 }
8282
8383 /// \brief Mark the pointer as escaped. Intended for use in a void return.
8484 /// \param I The instruction which escapes the pointer, if available.
85 void setEscaped(Instruction *I = 0) {
85 void setEscaped(Instruction *I = nullptr) {
8686 EscapedInfo.setInt(true);
8787 EscapedInfo.setPointer(I);
8888 }
9191 /// for use in a void return.
9292 /// \param I The instruction which both escapes the pointer and aborts the
9393 /// visit, if available.
94 void setEscapedAndAborted(Instruction *I = 0) {
94 void setEscapedAndAborted(Instruction *I = nullptr) {
9595 setEscaped(I);
9696 setAborted(I);
9797 }
245245 /// @param Parent The surrounding region or NULL if this is a top level
246246 /// region.
247247 Region(BasicBlock *Entry, BasicBlock *Exit, RegionInfo* RI,
248 DominatorTree *DT, Region *Parent = 0);
248 DominatorTree *DT, Region *Parent = nullptr);
249249
250250 /// Delete the Region and all its subregions.
251251 ~Region();
310310 /// @brief Check if a Region is the TopLevel region.
311311 ///
312312 /// The toplevel region represents the whole function.
313 bool isTopLevelRegion() const { return exit == NULL; }
313 bool isTopLevelRegion() const { return exit == nullptr; }
314314
315315 /// @brief Return a new (non-canonical) region, that is obtained by joining
316316 /// this region with its predecessors.
514514 }
515515
516516 // Construct the end iterator.
517 block_iterator_wrapper() : super(df_end((BasicBlock *)0)) {}
517 block_iterator_wrapper() : super(df_end((BasicBlock *)nullptr)) {}
518518
519519 /*implicit*/ block_iterator_wrapper(super I) : super(I) {}
520520
209209 void deleted() override;
210210 void allUsesReplacedWith(Value *New) override;
211211 public:
212 SCEVCallbackVH(Value *V, ScalarEvolution *SE = 0);
212 SCEVCallbackVH(Value *V, ScalarEvolution *SE = nullptr);
213213 };
214214
215215 friend class SCEVCallbackVH;
290290 const SCEV *ExactNotTaken;
291291 PointerIntPair NextExit;
292292
293 ExitNotTakenInfo() : ExitingBlock(0), ExactNotTaken(0) {}
293 ExitNotTakenInfo() : ExitingBlock(nullptr), ExactNotTaken(nullptr) {}
294294
295295 /// isCompleteList - Return true if all loop exits are computable.
296296 bool isCompleteList() const {
320320 const SCEV *Max;
321321
322322 public:
323 BackedgeTakenInfo() : Max(0) {}
323 BackedgeTakenInfo() : Max(nullptr) {}
324324
325325 /// Initialize BackedgeTakenInfo from a list of exact exit counts.
326326 BackedgeTakenInfo(
896896 bool runOnFunction(Function &F) override;
897897 void releaseMemory() override;
898898 void getAnalysisUsage(AnalysisUsage &AU) const override;
899 void print(raw_ostream &OS, const Module* = 0) const override;
899 void print(raw_ostream &OS, const Module* = nullptr) const override;
900900 void verifyAnalysis() const override;
901901
902902 private:
9191 public:
9292 /// SCEVExpander - Construct a SCEVExpander in "canonical" mode.
9393 explicit SCEVExpander(ScalarEvolution &se, const char *name)
94 : SE(se), IVName(name), IVIncInsertLoop(0), IVIncInsertPos(0),
94 : SE(se), IVName(name), IVIncInsertLoop(nullptr), IVIncInsertPos(nullptr),
9595 CanonicalMode(true), LSRMode(false),
9696 Builder(se.getContext(), TargetFolder(se.DL)) {
9797 #ifndef NDEBUG
130130 /// representative. Return the number of phis eliminated.
131131 unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT,
132132 SmallVectorImpl &DeadInsts,
133 const TargetTransformInfo *TTI = NULL);
133 const TargetTransformInfo *TTI = nullptr);
134134
135135 /// expandCodeFor - Insert code to directly compute the specified SCEV
136136 /// expression into the program. The inserted code is inserted into the
218218 /// expression into the program. The inserted code is inserted into the
219219 /// SCEVExpander's current insertion point. If a type is specified, the
220220 /// result will be expanded to have that type, with a cast if necessary.
221 Value *expandCodeFor(const SCEV *SH, Type *Ty = 0);
221 Value *expandCodeFor(const SCEV *SH, Type *Ty = nullptr);
222222
223223 /// getRelevantLoop - Determine the most "relevant" loop for the given SCEV.
224224 const Loop *getRelevantLoop(const SCEV *);
8181 /// constant value, return it. Otherwise return null. The returned value
8282 /// must be in the same LLVM type as Val.
8383 virtual Constant *GetConstant(LatticeVal LV, Value *Val, SparseSolver &SS) {
84 return 0;
84 return nullptr;
8585 }
8686
8787 /// ComputeArgument - Given a formal argument value, compute and return a
4343 if (Entry) return Entry;
4444
4545 SmallVector PredCache(pred_begin(BB), pred_end(BB));
46 PredCache.push_back(0); // null terminator.
46 PredCache.push_back(nullptr); // null terminator.
4747
4848 BlockToPredCountMap[BB] = PredCache.size()-1;
4949
472472 ///
473473 void AliasAnalysis::InitializeAliasAnalysis(Pass *P) {
474474 DataLayoutPass *DLP = P->getAnalysisIfAvailable<DataLayoutPass>();
475 DL = DLP ? &DLP->getDataLayout() : 0;
475 DL = DLP ? &DLP->getDataLayout() : nullptr;
476476 TLI = P->getAnalysisIfAvailable<TargetLibraryInfo>();
477477 AA = &P->getAnalysis<AliasAnalysis>();
478478 }
125125 AliasAnalysisCounter::alias(const Location &LocA, const Location &LocB) {
126126 AliasResult R = getAnalysis<AliasAnalysis>().alias(LocA, LocB);
127127
128 const char *AliasString = 0;
128 const char *AliasString = nullptr;
129129 switch (R) {
130130 case NoAlias: No++; AliasString = "No alias"; break;
131131 case MayAlias: May++; AliasString = "May alias"; break;
151151 const Location &Loc) {
152152 ModRefResult R = getAnalysis<AliasAnalysis>().getModRefInfo(CS, Loc);
153153
154 const char *MRString = 0;
154 const char *MRString = nullptr;
155155 switch (R) {
156156 case NoModRef: NoMR++; MRString = "NoModRef"; break;
157157 case Ref: JustRef++; MRString = "JustRef"; break;
7171 AS.PtrList->setPrevInList(PtrListEnd);
7272 PtrListEnd = AS.PtrListEnd;
7373
74 AS.PtrList = 0;
74 AS.PtrList = nullptr;
7575 AS.PtrListEnd = &AS.PtrList;
76 assert(*AS.PtrListEnd == 0 && "End of list is not null?");
76 assert(*AS.PtrListEnd == nullptr && "End of list is not null?");
7777 }
7878 }
7979
8080 void AliasSetTracker::removeAliasSet(AliasSet *AS) {
8181 if (AliasSet *Fwd = AS->Forward) {
8282 Fwd->dropRef(*this);
83 AS->Forward = 0;
83 AS->Forward = nullptr;
8484 }
8585 AliasSets.erase(AS);
8686 }
114114 Entry.updateSizeAndTBAAInfo(Size, TBAAInfo);
115115
116116 // Add it to the end of the list...
117 assert(*PtrListEnd == 0 && "End of list is not null?");
117 assert(*PtrListEnd == nullptr && "End of list is not null?");
118118 *PtrListEnd = &Entry;
119119 PtrListEnd = Entry.setPrevInList(PtrListEnd);
120 assert(*PtrListEnd == 0 && "End of list is not null?");
120 assert(*PtrListEnd == nullptr && "End of list is not null?");
121121 addRef(); // Entry points to alias set.
122122 }
123123
216216 AliasSet *AliasSetTracker::findAliasSetForPointer(const Value *Ptr,
217217 uint64_t Size,
218218 const MDNode *TBAAInfo) {
219 AliasSet *FoundSet = 0;
219 AliasSet *FoundSet = nullptr;
220220 for (iterator I = begin(), E = end(); I != E; ++I) {
221221 if (I->Forward || !I->aliasesPointer(Ptr, Size, TBAAInfo, AA)) continue;
222222
223 if (FoundSet == 0) { // If this is the first alias set ptr can go into.
223 if (!FoundSet) { // If this is the first alias set ptr can go into.
224224 FoundSet = I; // Remember it.
225225 } else { // Otherwise, we must merge the sets.
226226 FoundSet->mergeSetIn(*I, *this); // Merge in contents.
244244
245245
246246 AliasSet *AliasSetTracker::findAliasSetForUnknownInst(Instruction *Inst) {
247 AliasSet *FoundSet = 0;
247 AliasSet *FoundSet = nullptr;
248248 for (iterator I = begin(), E = end(); I != E; ++I) {
249249 if (I->Forward || !I->aliasesUnknownInst(Inst, AA))
250250 continue;
251251
252 if (FoundSet == 0) // If this is the first alias set ptr can go into.
252 if (!FoundSet) // If this is the first alias set ptr can go into.
253253 FoundSet = I; // Remember it.
254254 else if (!I->Forward) // Otherwise, we must merge the sets.
255255 FoundSet->mergeSetIn(*I, *this); // Merge in contents.
7272
7373 LLVMBool LLVMVerifyModule(LLVMModuleRef M, LLVMVerifierFailureAction Action,
7474 char **OutMessages) {
75 raw_ostream *DebugOS = Action != LLVMReturnStatusAction ? &errs() : 0;
75 raw_ostream *DebugOS = Action != LLVMReturnStatusAction ? &errs() : nullptr;
7676 std::string Messages;
7777 raw_string_ostream MsgsOS(Messages);
7878
9393
9494 LLVMBool LLVMVerifyFunction(LLVMValueRef Fn, LLVMVerifierFailureAction Action) {
9595 LLVMBool Result = verifyFunction(
96 *unwrap(Fn), Action != LLVMReturnStatusAction ? &errs() : 0);
96 *unwrap(Fn), Action != LLVMReturnStatusAction ? &errs()
97 : nullptr);
9798
9899 if (Action == LLVMAbortProcessAction && Result)
99100 report_fatal_error("Broken function found, compilation aborted!");
297297 do {
298298 // See if this is a bitcast or GEP.
299299 const Operator *Op = dyn_cast<Operator>(V);
300 if (Op == 0) {
300 if (!Op) {
301301 // The only non-operator case we can handle are GlobalAliases.
302302 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
303303 if (!GA->mayBeOverridden()) {
314314 }
315315
316316 const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
317 if (GEPOp == 0) {
317 if (!GEPOp) {
318318 // If it's not a GEP, hand it off to SimplifyInstruction to see if it
319319 // can come up with something. This matches what GetUnderlyingObject does.
320320 if (const Instruction *I = dyn_cast<Instruction>(V))
335335 // If we are lacking DataLayout information, we can't compute the offsets of
336336 // elements computed by GEPs. However, we can handle bitcast equivalent
337337 // GEPs.
338 if (DL == 0) {
338 if (!DL) {
339339 if (!GEPOp->hasAllZeroIndices())
340340 return V;
341341 V = GEPOp->getOperand(0);
432432 if (const Argument *arg = dyn_cast<Argument>(V))
433433 return arg->getParent();
434434
435 return NULL;
435 return nullptr;
436436 }
437437
438438 static bool notDifferentParent(const Value *O1, const Value *O2) {
752752
753753 // Finally, handle specific knowledge of intrinsics.
754754 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
755 if (II != 0)
755 if (II != nullptr)
756756 switch (II->getIntrinsicID()) {
757757 default: break;
758758 case Intrinsic::memcpy:
903903 // derived pointer.
904904 if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
905905 // Do the base pointers alias?
906 AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, 0,
907 UnderlyingV2, UnknownSize, 0);
906 AliasResult BaseAlias = aliasCheck(UnderlyingV1, UnknownSize, nullptr,
907 UnderlyingV2, UnknownSize, nullptr);
908908
909909 // Check for geps of non-aliasing underlying pointers where the offsets are
910910 // identical.
928928 // DecomposeGEPExpression and GetUnderlyingObject should return the
929929 // same result except when DecomposeGEPExpression has no DataLayout.
930930 if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
931 assert(DL == 0 &&
932 "DecomposeGEPExpression and GetUnderlyingObject disagree!");
931 assert(!DL &&
932 "DecomposeGEPExpression and GetUnderlyingObject disagree!");
933933 return MayAlias;
934934 }
935935 // If the max search depth is reached the result is undefined
965965 // DecomposeGEPExpression and GetUnderlyingObject should return the
966966 // same result except when DecomposeGEPExpression has no DataLayout.
967967 if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
968 assert(DL == 0 &&
968 assert(!DL &&
969969 "DecomposeGEPExpression and GetUnderlyingObject disagree!");
970970 return MayAlias;
971971 }
987987 if (V1Size == UnknownSize && V2Size == UnknownSize)
988988 return MayAlias;
989989
990 AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, 0,
990 AliasResult R = aliasCheck(UnderlyingV1, UnknownSize, nullptr,
991991 V2, V2Size, V2TBAAInfo);
992992 if (R != MustAlias)
993993 // If V2 may alias GEP base pointer, conservatively returns MayAlias.
10041004 // DecomposeGEPExpression and GetUnderlyingObject should return the
10051005 // same result except when DecomposeGEPExpression has no DataLayout.
10061006 if (GEP1BasePtr != UnderlyingV1) {
1007 assert(DL == 0 &&
1007 assert(!DL &&
10081008 "DecomposeGEPExpression and GetUnderlyingObject disagree!");
10091009 return MayAlias;
10101010 }
13701370 // Use dominance or loop info if available.
13711371 DominatorTreeWrapperPass *DTWP =
13721372 getAnalysisIfAvailable<DominatorTreeWrapperPass>();
1373 DominatorTree *DT = DTWP ? &DTWP->getDomTree() : 0;
1373 DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
13741374 LoopInfo *LI = getAnalysisIfAvailable<LoopInfo>();
13751375
13761376 // Make sure that the visited phis cannot reach the Value. This ensures that
559559 BasicBlock *BranchProbabilityInfo::getHotSucc(BasicBlock *BB) const {
560560 uint32_t Sum = 0;
561561 uint32_t MaxWeight = 0;
562 BasicBlock *MaxSucc = 0;
562 BasicBlock *MaxSucc = nullptr;
563563
564564 for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
565565 BasicBlock *Succ = *I;
579579 if (BranchProbability(MaxWeight, Sum) > BranchProbability(4, 5))
580580 return MaxSucc;
581581
582 return 0;
582 return nullptr;
583583 }
584584
585585 /// Get the raw edge weight for the edge. If can't find it, return
122122 const BasicBlock *BB1, const BasicBlock *BB2) {
123123 const Loop *L1 = getOutermostLoop(LI, BB1);
124124 const Loop *L2 = getOutermostLoop(LI, BB2);
125 return L1 != NULL && L1 == L2;
125 return L1 != nullptr && L1 == L2;
126126 }
127127
128128 static bool isPotentiallyReachableInner(SmallVectorImpl<BasicBlock *> &Worklist,
132132 // When the stop block is unreachable, it's dominated from everywhere,
133133 // regardless of whether there's a path between the two blocks.
134134 if (DT && !DT->isReachableFromEntry(StopBB))
135 DT = 0;
135 DT = nullptr;
136136
137137 // Limit the number of blocks we visit. The goal is to avoid run-away compile
138138 // times on large CFGs without hampering sensible code. Arbitrarily chosen.
155155 return true;
156156 }
157157
158 if (const Loop *Outer = LI ? getOutermostLoop(LI, BB) : 0) {
158 if (const Loop *Outer = LI ? getOutermostLoop(LI, BB) : nullptr) {
159159 // All blocks in a single loop are reachable from all other blocks. From
160160 // any of these blocks, we can skip directly to the exits of the loop,
161161 // ignoring any other blocks inside the loop body.
199199
200200 // If the block is in a loop then we can reach any instruction in the block
201201 // from any other instruction in the block by going around a backedge.
202 if (LI && LI->getLoopFor(BB) != 0)
202 if (LI && LI->getLoopFor(BB) != nullptr)
203203 return true;
204204
205205 // Linear scan, start at 'A', see whether we hit 'B' or the end first.
3232 return false;
3333 }
3434
35 void print(raw_ostream &OS, const Module* = 0) const override {}
35 void print(raw_ostream &OS, const Module* = nullptr) const override {}
3636
3737 void getAnalysisUsage(AnalysisUsage &AU) const override {
3838 AU.setPreservesAll();
5555 return false;
5656 }
5757
58 void print(raw_ostream &OS, const Module* = 0) const override {}
58 void print(raw_ostream &OS, const Module* = nullptr) const override {}
5959
6060 void getAnalysisUsage(AnalysisUsage &AU) const override {
6161 AU.setPreservesAll();
8989 return false;
9090 }
9191
92 void print(raw_ostream &OS, const Module* = 0) const override {}
92 void print(raw_ostream &OS, const Module* = nullptr) const override {}
9393
9494 void getAnalysisUsage(AnalysisUsage &AU) const override {
9595 AU.setPreservesAll();
122122 errs() << "\n";
123123 return false;
124124 }
125 void print(raw_ostream &OS, const Module* = 0) const override {}
125 void print(raw_ostream &OS, const Module* = nullptr) const override {}
126126
127127 void getAnalysisUsage(AnalysisUsage &AU) const override {
128128 AU.setPreservesAll();
5555 // Handle a vector->integer cast.
5656 if (IntegerType *IT = dyn_cast<IntegerType>(DestTy)) {
5757 VectorType *VTy = dyn_cast<VectorType>(C->getType());
58 if (VTy == 0)
58 if (!VTy)
5959 return ConstantExpr::getBitCast(C, DestTy);
6060
6161 unsigned NumSrcElts = VTy->getNumElements();
7272 }
7373
7474 ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
75 if (CDV == 0)
75 if (!CDV)
7676 return ConstantExpr::getBitCast(C, DestTy);
7777
7878 // Now that we know that the input value is a vector of integers, just shift
9292
9393 // The code below only handles casts to vectors currently.
9494 VectorType *DestVTy = dyn_cast<VectorType>(DestTy);
95 if (DestVTy == 0)
95 if (!DestVTy)
9696 return ConstantExpr::getBitCast(C, DestTy);
9797
9898 // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
410410 TD.getTypeAllocSizeInBits(LoadTy),
411411 AS);
412412 } else
413 return 0;
413 return nullptr;
414414
415415 C = FoldBitCast(C, MapTy, TD);
416416 if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, TD))
417417 return FoldBitCast(Res, LoadTy, TD);
418 return 0;
418 return nullptr;
419419 }
420420
421421 unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
422422 if (BytesLoaded > 32 || BytesLoaded == 0)
423 return 0;
423 return nullptr;
424424
425425 GlobalValue *GVal;
426426 APInt Offset;
427427 if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
428 return 0;
428 return nullptr;
429429
430430 GlobalVariable *GV = dyn_cast<GlobalVariable>(GVal);
431431 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
432432 !GV->getInitializer()->getType()->isSized())
433 return 0;
433 return nullptr;
434434
435435 // If we're loading off the beginning of the global, some bytes may be valid,
436436 // but we don't try to handle this.
437437 if (Offset.isNegative())
438 return 0;
438 return nullptr;
439439
440440 // If we're not accessing anything in this constant, the result is undefined.
441441 if (Offset.getZExtValue() >=
445445 unsigned char RawBytes[32] = {0};
446446 if (!ReadDataFromGlobal(GV->getInitializer(), Offset.getZExtValue(), RawBytes,
447447 BytesLoaded, TD))
448 return 0;
448 return nullptr;
449449
450450 APInt ResultVal = APInt(IntType->getBitWidth(), 0);
451451 if (TD.isLittleEndian()) {
478478 // If the loaded value isn't a constant expr, we can't handle it.
479479 ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
480480 if (!CE)
481 return 0;
481 return nullptr;
482482
483483 if (CE->getOpcode() == Instruction::GetElementPtr) {
484484 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
541541 // Try hard to fold loads from bitcasted strange and non-type-safe things.
542542 if (TD)
543543 return FoldReinterpretLoadFromConstPtr(CE, *TD);
544 return 0;
544 return nullptr;
545545 }
546546
547547 static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
548 if (LI->isVolatile()) return 0;
548 if (LI->isVolatile()) return nullptr;
549549
550550 if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
551551 return ConstantFoldLoadFromConstPtr(C, TD);
552552
553 return 0;
553 return nullptr;
554554 }
555555
556556 /// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
607607 }
608608 }
609609
610 return 0;
610 return nullptr;
611611 }
612612
613613 /// CastGEPIndices - If array indices are not pointer-sized integers,
617617 Type *ResultTy, const DataLayout *TD,
618618 const TargetLibraryInfo *TLI) {
619619 if (!TD)
620 return 0;
620 return nullptr;
621621
622622 Type *IntPtrTy = TD->getIntPtrType(ResultTy);
623623
640640 }
641641
642642 if (!Any)
643 return 0;
643 return nullptr;
644644
645645 Constant *C = ConstantExpr::getGetElementPtr(Ops[0], NewIdxs);
646646 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
675675 Constant *Ptr = Ops[0];
676676 if (!TD || !Ptr->getType()->getPointerElementType()->isSized() ||
677677 !Ptr->getType()->isPointerTy())
678 return 0;
678 return nullptr;
679679
680680 Type *IntPtrTy = TD->getIntPtrType(Ptr->getType());
681681 Type *ResultElementTy = ResultTy->getPointerElementType();
689689 // "inttoptr (sub (ptrtoint Ptr), V)"
690690 if (Ops.size() == 2 && ResultElementTy->isIntegerTy(8)) {
691691 ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[1]);
692 assert((CE == 0 || CE->getType() == IntPtrTy) &&
692 assert((!CE || CE->getType() == IntPtrTy) &&
693693 "CastGEPIndices didn't canonicalize index types!");
694694 if (CE && CE->getOpcode() == Instruction::Sub &&
695695 CE->getOperand(0)->isNullValue()) {
701701 return Res;
702702 }
703703 }
704 return 0;
704 return nullptr;
705705 }
706706
707707 unsigned BitWidth = TD->getTypeSizeInBits(IntPtrTy);
764764
765765 // Only handle pointers to sized types, not pointers to functions.
766766 if (!ATy->getElementType()->isSized())
767 return 0;
767 return nullptr;
768768 }
769769
770770 // Determine which element of the array the offset points into.
809809 // type, then the offset is pointing into the middle of an indivisible
810810 // member, so we can't simplify it.
811811 if (Offset != 0)
812 return 0;
812 return nullptr;
813813
814814 // Create a GEP.
815815 Constant *C = ConstantExpr::getGetElementPtr(Ptr, NewIdxs);
840840 const TargetLibraryInfo *TLI) {
841841 // Handle PHI nodes quickly here...
842842 if (PHINode *PN = dyn_cast<PHINode>(I)) {
843 Constant *CommonValue = 0;
843 Constant *CommonValue = nullptr;
844844
845845 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
846846 Value *Incoming = PN->getIncomingValue(i);
853853 // If the incoming value is not a constant, then give up.
854854 Constant *C = dyn_cast<Constant>(Incoming);
855855 if (!C)
856 return 0;
856 return nullptr;
857857 // Fold the PHI's operands.
858858 if (ConstantExpr *NewC = dyn_cast<ConstantExpr>(C))
859859 C = ConstantFoldConstantExpression(NewC, TD, TLI);
860860 // If the incoming value is a different constant to
861861 // the one we saw previously, then give up.
862862 if (CommonValue && C != CommonValue)
863 return 0;
863 return nullptr;
864864 CommonValue = C;
865865 }
866866
875875 for (User::op_iterator i = I->op_begin(), e = I->op_end(); i != e; ++i) {
876876 Constant *Op = dyn_cast<Constant>(*i);
877877 if (!Op)
878 return 0; // All operands not constant!
878 return nullptr; // All operands not constant!
879879
880880 // Fold the Instruction's operands.
881881 if (ConstantExpr *NewCE = dyn_cast<ConstantExpr>(Op))
965965 }
966966
967967 switch (Opcode) {
968 default: return 0;
968 default: return nullptr;
969969 case Instruction::ICmp:
970970 case Instruction::FCmp: llvm_unreachable("Invalid for compares");
971971 case Instruction::Call:
972972 if (Function *F = dyn_cast<Function>(Ops.back()))
973973 if (canConstantFoldCallTo(F))
974974 return ConstantFoldCall(F, Ops.slice(0, Ops.size() - 1), TLI);
975 return 0;
975 return nullptr;
976976 case Instruction::PtrToInt:
977977 // If the input is a inttoptr, eliminate the pair. This requires knowing
978978 // the width of a pointer, so it can't be done in ConstantExpr::getCast.
11411141 Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
11421142 ConstantExpr *CE) {
11431143 if (!CE->getOperand(1)->isNullValue())
1144 return 0; // Do not allow stepping over the value!
1144 return nullptr; // Do not allow stepping over the value!
11451145
11461146 // Loop over all of the operands, tracking down which value we are
11471147 // addressing.
11481148 for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
11491149 C = C->getAggregateElement(CE->getOperand(i));
1150 if (C == 0)
1151 return 0;
1150 if (!C)
1151 return nullptr;
11521152 }
11531153 return C;
11541154 }
11631163 // addressing.
11641164 for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
11651165 C = C->getAggregateElement(Indices[i]);
1166 if (C == 0)
1167 return 0;
1166 if (!C)
1167 return nullptr;
11681168 }
11691169 return C;
11701170 }
12691269 V = NativeFP(V);
12701270 if (sys::llvm_fenv_testexcept()) {
12711271 sys::llvm_fenv_clearexcept();
1272 return 0;
1272 return nullptr;
12731273 }
12741274
12751275 return GetConstantFoldFPValue(V, Ty);
12811281 V = NativeFP(V, W);
12821282 if (sys::llvm_fenv_testexcept()) {
12831283 sys::llvm_fenv_clearexcept();
1284 return 0;
1284 return nullptr;
12851285 }
12861286
12871287 return GetConstantFoldFPValue(V, Ty);
13101310 /*isSigned=*/true, mode,
13111311 &isExact);
13121312 if (status != APFloat::opOK && status != APFloat::opInexact)
1313 return 0;
1313 return nullptr;
13141314 return ConstantInt::get(Ty, UIntVal, /*isSigned=*/true);
13151315 }
13161316
13441344 }
13451345
13461346 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
1347 return 0;
1347 return nullptr;
13481348
13491349 if (IntrinsicID == Intrinsic::round) {
13501350 APFloat V = Op->getValueAPF();
13561356 /// likely to be aborted with an exception anyway, and some host libms
13571357 /// have known errors raising exceptions.
13581358 if (Op->getValueAPF().isNaN() || Op->getValueAPF().isInfinity())
1359 return 0;
1359 return nullptr;
13601360
13611361 /// Currently APFloat versions of these functions do not exist, so we use
13621362 /// the host native double versions. Float versions are not called
13951395 }
13961396
13971397 if (!TLI)
1398 return 0;
1398 return nullptr;
13991399
14001400 switch (Name[0]) {
14011401 case 'a':
14661466 default:
14671467 break;
14681468 }
1469 return 0;
1469 return nullptr;
14701470 }
14711471
14721472 if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
14901490 return ConstantFP::get(Ty->getContext(), Val);
14911491 }
14921492 default:
1493 return 0;
1493 return nullptr;
14941494 }
14951495 }
14961496
15221522 if (isa<UndefValue>(Operands[0])) {
15231523 if (IntrinsicID == Intrinsic::bswap)
15241524 return Operands[0];
1525 return 0;
1526 }
1527
1528 return 0;
1525 return nullptr;
1526 }
1527
1528 return nullptr;
15291529 }
15301530
15311531 if (Operands.size() == 2) {
15321532 if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
15331533 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
1534 return 0;
1534 return nullptr;
15351535 double Op1V = getValueAsDouble(Op1);
15361536
15371537 if (ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
15381538 if (Op2->getType() != Op1->getType())
1539 return 0;
1539 return nullptr;
15401540
15411541 double Op2V = getValueAsDouble(Op2);
15421542 if (IntrinsicID == Intrinsic::pow) {
15491549 return ConstantFP::get(Ty->getContext(), V1);
15501550 }
15511551 if (!TLI)
1552 return 0;
1552 return nullptr;
15531553 if (Name == "pow" && TLI->has(LibFunc::pow))
15541554 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
15551555 if (Name == "fmod" && TLI->has(LibFunc::fmod))
15701570 APFloat((double)std::pow((double)Op1V,
15711571 (int)Op2C->getZExtValue())));
15721572 }
1573 return 0;
1573 return nullptr;
15741574 }
15751575
15761576 if (ConstantInt *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
16231623 }
16241624 }
16251625
1626 return 0;
1627 }
1628 return 0;
1626 return nullptr;
1627 }
1628 return nullptr;
16291629 }
16301630
16311631 if (Operands.size() != 3)
1632 return 0;
1632 return nullptr;
16331633
16341634 if (const ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
16351635 if (const ConstantFP *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
16451645 if (s != APFloat::opInvalidOp)
16461646 return ConstantFP::get(Ty->getContext(), V);
16471647
1648 return 0;
1648 return nullptr;
16491649 }
16501650 }
16511651 }
16521652 }
16531653 }
16541654
1655 return 0;
1655 return nullptr;
16561656 }
16571657
16581658 static Constant *ConstantFoldVectorCall(StringRef Name, unsigned IntrinsicID,
16891689 llvm::ConstantFoldCall(Function *F, ArrayRef Operands,
16901690 const TargetLibraryInfo *TLI) {
16911691 if (!F->hasName())
1692 return 0;
1692 return nullptr;
16931693 StringRef Name = F->getName();
16941694
16951695 Type *Ty = F->getReturnType();
4040
4141 public:
4242 static char ID; // Class identification, replacement for typeinfo
43 CostModelAnalysis() : FunctionPass(ID), F(0), TTI(0) {
43 CostModelAnalysis() : FunctionPass(ID), F(nullptr), TTI(nullptr) {
4444 initializeCostModelAnalysisPass(
4545 *PassRegistry::getPassRegistry());
4646 }
100100 // Check for a splat of a constant or for a non uniform vector of constants.
101101 if (isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) {
102102 OpInfo = TargetTransformInfo::OK_NonUniformConstantValue;
103 if (cast<Constant>(V)->getSplatValue() != NULL)
103 if (cast<Constant>(V)->getSplatValue() != nullptr)
104104 OpInfo = TargetTransformInfo::OK_UniformConstantValue;
105105 }
106106
149149 // %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
150150 // <4 x i32>
151151 // %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
152 if (BinOp == 0)
152 if (BinOp == nullptr)
153153 return false;
154154
155155 assert(BinOp->getType()->isVectorTy() && "Expecting a vector type");
170170 return false;
171171
172172 // Shuffle inputs must match.
173 Value *NextLevelOpL = LS ? LS->getOperand(0) : 0;
174 Value *NextLevelOpR = RS ? RS->getOperand(0) : 0;
175 Value *NextLevelOp = 0;
173 Value *NextLevelOpL = LS ? LS->getOperand(0) : nullptr;
174 Value *NextLevelOpR = RS ? RS->getOperand(0) : nullptr;
175 Value *NextLevelOp = nullptr;
176176 if (NextLevelOpR && NextLevelOpL) {
177177 // If we have two shuffles their operands must match.
178178 if (NextLevelOpL != NextLevelOpR)
197197
198198 // Check that the next levels binary operation exists and matches with the
199199 // current one.
200 BinaryOperator *NextLevelBinOp = 0;
200 BinaryOperator *NextLevelBinOp = nullptr;
201201 if (Level + 1 != NumLevels) {
202202 if (!(NextLevelBinOp = dyn_cast<BinaryOperator>(NextLevelOp)))
203203 return false;
276276
277277 Value *L = B->getOperand(0);
278278 Value *R = B->getOperand(1);
279 ShuffleVectorInst *S = 0;
279 ShuffleVectorInst *S = nullptr;
280280
281281 if ((S = dyn_cast<ShuffleVectorInst>(L)))
282282 return std::make_pair(R, S);
336336 std::tie(NextRdxOp, Shuffle) = getShuffleAndOtherOprd(BinOp);
337337
338338 // Check the current reduction operation and the shuffle use the same value.
339 if (Shuffle == 0)
339 if (Shuffle == nullptr)
340340 return false;
341341 if (Shuffle->getOperand(0) != NextRdxOp)
342342 return false;
477477
478478 if (NumVecElems == Mask.size() && isReverseVectorMask(Mask))
479479 return TTI->getShuffleCost(TargetTransformInfo::SK_Reverse, VecTypOp0, 0,
480 0);
480 nullptr);
481481 return -1;
482482 }
483483 case Instruction::Call:
5050 }
5151 bool runOnFunction(Function &F) override;
5252 void getAnalysisUsage(AnalysisUsage &AU) const override;
53 void print(raw_ostream &O, const Module *M = 0) const override;
53 void print(raw_ostream &O, const Module *M = nullptr) const override;
5454 };
5555
5656 } // end anonymous namespace
7575 return Store->getPointerOperand();
7676 else if (GetElementPtrInst *Gep = dyn_cast<GetElementPtrInst>(&Inst))
7777 return Gep->getPointerOperand();
78 return NULL;
78 return nullptr;
7979 }
8080
8181 void Delinearization::print(raw_ostream &O, const Module *) const {
9191 const BasicBlock *BB = Inst->getParent();
9292 // Delinearize the memory access as analyzed in all the surrounding loops.
9393 // Do not analyze memory accesses outside loops.
94 for (Loop *L = LI->getLoopFor(BB); L != NULL; L = L->getParentLoop()) {
94 for (Loop *L = LI->getLoopFor(BB); L != nullptr; L = L->getParentLoop()) {
9595 const SCEV *AccessFn = SE->getSCEVAtScope(getPointerOperand(*Inst), L);
9696 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(AccessFn);
9797
233233 Levels(CommonLevels),
234234 LoopIndependent(PossiblyLoopIndependent) {
235235 Consistent = true;
236 DV = CommonLevels ? new DVEntry[CommonLevels] : NULL;
236 DV = CommonLevels ? new DVEntry[CommonLevels] : nullptr;
237237 }
238238
239239 // The rest are simple getters that hide the implementation.
657657 if (StoreInst *SI = dyn_cast<StoreInst>(I))
658658 return SI->getPointerOperand();
659659 llvm_unreachable("Value is not load or store instruction");
660 return 0;
660 return nullptr;
661661 }
662662
663663
931931 const SCEV *UB = SE->getBackedgeTakenCount(L);
932932 return SE->getNoopOrZeroExtend(UB, T);
933933 }
934 return NULL;
934 return nullptr;
935935 }
936936
937937
942942 ) const {
943943 if (const SCEV *UB = collectUpperBound(L, T))
944944 return dyn_cast<SCEVConstant>(UB);
945 return NULL;
945 return nullptr;
946946 }
947947
948948
21932193 if (const SCEVConstant *Constant = dyn_cast<SCEVConstant>(Product->getOperand(Op)))
21942194 return Constant;
21952195 }
2196 return NULL;
2196 return nullptr;
21972197 }
21982198
21992199
26452645 CoefficientInfo *B,
26462646 BoundInfo *Bound,
26472647 unsigned K) const {
2648 Bound[K].Lower[Dependence::DVEntry::ALL] = NULL; // Default value = -infinity.
2649 Bound[K].Upper[Dependence::DVEntry::ALL] = NULL; // Default value = +infinity.
2648 Bound[K].Lower[Dependence::DVEntry::ALL] = nullptr; // Default value = -infinity.
2649 Bound[K].Upper[Dependence::DVEntry::ALL] = nullptr; // Default value = +infinity.
26502650 if (Bound[K].Iterations) {
26512651 Bound[K].Lower[Dependence::DVEntry::ALL] =
26522652 SE->getMulExpr(SE->getMinusSCEV(A[K].NegPart, B[K].PosPart),
26862686 CoefficientInfo *B,
26872687 BoundInfo *Bound,
26882688 unsigned K) const {
2689 Bound[K].Lower[Dependence::DVEntry::EQ] = NULL; // Default value = -infinity.
2690 Bound[K].Upper[Dependence::DVEntry::EQ] = NULL; // Default value = +infinity.
2689 Bound[K].Lower[Dependence::DVEntry::EQ] = nullptr; // Default value = -infinity.
2690 Bound[K].Upper[Dependence::DVEntry::EQ] = nullptr; // Default value = +infinity.
26912691 if (Bound[K].Iterations) {
26922692 const SCEV *Delta = SE->getMinusSCEV(A[K].Coeff, B[K].Coeff);
26932693 const SCEV *NegativePart = getNegativePart(Delta);
27282728 CoefficientInfo *B,
27292729 BoundInfo *Bound,
27302730 unsigned K) const {
2731 Bound[K].Lower[Dependence::DVEntry::LT] = NULL; // Default value = -infinity.
2732 Bound[K].Upper[Dependence::DVEntry::LT] = NULL; // Default value = +infinity.
2731 Bound[K].Lower[Dependence::DVEntry::LT] = nullptr; // Default value = -infinity.
2732 Bound[K].Upper[Dependence::DVEntry::LT] = nullptr; // Default value = +infinity.
27332733 if (Bound[K].Iterations) {
27342734 const SCEV *Iter_1 =
27352735 SE->getMinusSCEV(Bound[K].Iterations,
27752775 CoefficientInfo *B,
27762776 BoundInfo *Bound,
27772777 unsigned K) const {
2778 Bound[K].Lower[Dependence::DVEntry::GT] = NULL; // Default value = -infinity.
2779 Bound[K].Upper[Dependence::DVEntry::GT] = NULL; // Default value = +infinity.
2778 Bound[K].Lower[Dependence::DVEntry::GT] = nullptr; // Default value = -infinity.
2779 Bound[K].Upper[Dependence::DVEntry::GT] = nullptr; // Default value = +infinity.
27802780 if (Bound[K].Iterations) {
27812781 const SCEV *Iter_1 =
27822782 SE->getMinusSCEV(Bound[K].Iterations,
28282828 CI[K].Coeff = Zero;
28292829 CI[K].PosPart = Zero;
28302830 CI[K].NegPart = Zero;
2831 CI[K].Iterations = NULL;
2831 CI[K].Iterations = nullptr;
28322832 }
28332833 while (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Subscript)) {
28342834 const Loop *L = AddRec->getLoop();
28712871 if (Bound[K].Lower[Bound[K].Direction])
28722872 Sum = SE->getAddExpr(Sum, Bound[K].Lower[Bound[K].Direction]);
28732873 else
2874 Sum = NULL;
2874 Sum = nullptr;
28752875 }
28762876 return Sum;
28772877 }
28872887 if (Bound[K].Upper[Bound[K].Direction])
28882888 Sum = SE->getAddExpr(Sum, Bound[K].Upper[Bound[K].Direction]);
28892889 else
2890 Sum = NULL;
2890 Sum = nullptr;
28912891 }
28922892 return Sum;
28932893 }
31473147 }
31483148 else if (CurConstraint.isLine()) {
31493149 Level.Scalar = false;
3150 Level.Distance = NULL;
3150 Level.Distance = nullptr;
31513151 // direction should be accurate
31523152 }
31533153 else if (CurConstraint.isPoint()) {
31543154 Level.Scalar = false;
3155 Level.Distance = NULL;
3155 Level.Distance = nullptr;
31563156 unsigned NewDirection = Dependence::DVEntry::NONE;
31573157 if (!isKnownPredicate(CmpInst::ICMP_NE,
31583158 CurConstraint.getY(),
32893289 if ((!Src->mayReadFromMemory() && !Src->mayWriteToMemory()) ||
32903290 (!Dst->mayReadFromMemory() && !Dst->mayWriteToMemory()))
32913291 // if both instructions don't reference memory, there's no dependence
3292 return NULL;
3292 return nullptr;
32933293
32943294 if (!isLoadOrStore(Src) || !isLoadOrStore(Dst)) {
32953295 // can only analyze simple loads and stores, i.e., no calls, invokes, etc.
33093309 case AliasAnalysis::NoAlias:
33103310 // If the objects noalias, they are distinct, accesses are independent.
33113311 DEBUG(dbgs() << "no alias\n");
3312 return NULL;
3312 return nullptr;
33133313 case AliasAnalysis::MustAlias:
33143314 break; // The underlying objects alias; test accesses for dependence.
33153315 }
35043504 case Subscript::ZIV:
35053505 DEBUG(dbgs() << ", ZIV\n");
35063506 if (testZIV(Pair[SI].Src, Pair[SI].Dst, Result))
3507 return NULL;
3507 return nullptr;
35083508 break;
35093509 case Subscript::SIV: {
35103510 DEBUG(dbgs() << ", SIV\n");
35113511 unsigned Level;
3512 const SCEV *SplitIter = NULL;
3512 const SCEV *SplitIter = nullptr;
35133513 if (testSIV(Pair[SI].Src, Pair[SI].Dst, Level,
35143514 Result, NewConstraint, SplitIter))
3515 return NULL;
3515 return nullptr;
35163516 break;
35173517 }
35183518 case Subscript::RDIV:
35193519 DEBUG(dbgs() << ", RDIV\n");
35203520 if (testRDIV(Pair[SI].Src, Pair[SI].Dst, Result))
3521 return NULL;
3521 return nullptr;
35223522 break;
35233523 case Subscript::MIV:
35243524 DEBUG(dbgs() << ", MIV\n");
35253525 if (testMIV(Pair[SI].Src, Pair[SI].Dst, Pair[SI].Loops, Result))
3526 return NULL;
3526 return nullptr;
35273527 break;
35283528 default:
35293529 llvm_unreachable("subscript has unexpected classification");
35573557 DEBUG(dbgs() << "testing subscript " << SJ << ", SIV\n");
35583558 // SJ is an SIV subscript that's part of the current coupled group
35593559 unsigned Level;
3560 const SCEV *SplitIter = NULL;
3560 const SCEV *SplitIter = nullptr;
35613561 DEBUG(dbgs() << "SIV\n");
35623562 if (testSIV(Pair[SJ].Src, Pair[SJ].Dst, Level,
35633563 Result, NewConstraint, SplitIter))
3564 return NULL;
3564 return nullptr;
35653565 ConstrainedLevels.set(Level);
35663566 if (intersectConstraints(&Constraints[Level], &NewConstraint)) {
35673567 if (Constraints[Level].isEmpty()) {
35683568 ++DeltaIndependence;
3569 return NULL;
3569 return nullptr;
35703570 }
35713571 Changed = true;
35723572 }
35923592 case Subscript::ZIV:
35933593 DEBUG(dbgs() << "ZIV\n");
35943594 if (testZIV(Pair[SJ].Src, Pair[SJ].Dst, Result))
3595 return NULL;
3595 return nullptr;
35963596 Mivs.reset(SJ);
35973597 break;
35983598 case Subscript::SIV:
36153615 if (Pair[SJ].Classification == Subscript::RDIV) {
36163616 DEBUG(dbgs() << "RDIV test\n");
36173617 if (testRDIV(Pair[SJ].Src, Pair[SJ].Dst, Result))
3618 return NULL;
3618 return nullptr;
36193619 // I don't yet understand how to propagate RDIV results
36203620 Mivs.reset(SJ);
36213621 }
36283628 if (Pair[SJ].Classification == Subscript::MIV) {
36293629 DEBUG(dbgs() << "MIV test\n");
36303630 if (testMIV(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops, Result))
3631 return NULL;
3631 return nullptr;
36323632 }
36333633 else
36343634 llvm_unreachable("expected only MIV subscripts at this point");
36403640 SJ >= 0; SJ = ConstrainedLevels.find_next(SJ)) {
36413641 updateDirection(Result.DV[SJ - 1], Constraints[SJ]);
36423642 if (Result.DV[SJ - 1].Direction == Dependence::DVEntry::NONE)
3643 return NULL;
3643 return nullptr;
36443644 }
36453645 }
36463646 }
36753675 }
36763676 }
36773677 if (AllEqual)
3678 return NULL;
3678 return nullptr;
36793679 }
36803680
36813681 FullDependence *Final = new FullDependence(Result);
3682 Result.DV = NULL;
3682 Result.DV = nullptr;
36833683 return Final;
36843684 }
36853685
38523852 switch (Pair[SI].Classification) {
38533853 case Subscript::SIV: {
38543854 unsigned Level;
3855 const SCEV *SplitIter = NULL;
3855 const SCEV *SplitIter = nullptr;
38563856 (void) testSIV(Pair[SI].Src, Pair[SI].Dst, Level,
38573857 Result, NewConstraint, SplitIter);
38583858 if (Level == SplitLevel) {
3859 assert(SplitIter != NULL);
3859 assert(SplitIter != nullptr);
38603860 return SplitIter;
38613861 }
38623862 break;
38913891 for (int SJ = Sivs.find_first(); SJ >= 0; SJ = Sivs.find_next(SJ)) {
38923892 // SJ is an SIV subscript that's part of the current coupled group
38933893 unsigned Level;
3894 const SCEV *SplitIter = NULL;
3894 const SCEV *SplitIter = nullptr;
38953895 (void) testSIV(Pair[SJ].Src, Pair[SJ].Dst, Level,
38963896 Result, NewConstraint, SplitIter);
38973897 if (Level == SplitLevel && SplitIter)
39323932 }
39333933 }
39343934 llvm_unreachable("somehow reached end of routine");
3935 return NULL;
3936 }
3935 return nullptr;
3936 }
3939 DominanceFrontier::calculate(const DominatorTree &DT,
4040 const DomTreeNode *Node) {
4141 BasicBlock *BB = Node->getBlock();
42 DomSetType *Result = NULL;
42 DomSetType *Result = nullptr;
4343
4444 std::vector workList;
4545 SmallPtrSet visited;
4646
47 workList.push_back(DFCalculateWorkObject(BB, NULL, Node, NULL));
47 workList.push_back(DFCalculateWorkObject(BB, nullptr, Node, nullptr));
4848 do {
4949 DFCalculateWorkObject *currentW = &workList.back();
5050 assert (currentW && "Missing work object.");
8383 static bool isSimplifiedLoopNest(BasicBlock *BB, const DominatorTree *DT,
8484 const LoopInfo *LI,
8585 SmallPtrSet &SimpleLoopNests) {
86 Loop *NearestLoop = 0;
86 Loop *NearestLoop = nullptr;
8787 for (DomTreeNode *Rung = DT->getNode(BB);
8888 Rung; Rung = Rung->getIDom()) {
8989 BasicBlock *DomBB = Rung->getBlock();
252252 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
253253 SE = &getAnalysis<ScalarEvolution>();
254254 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
255 DL = DLP ? &DLP->getDataLayout() : 0;
255 DL = DLP ? &DLP->getDataLayout() : nullptr;
256256
257257 // Find all uses of induction variables in this loop, and categorize
258258 // them by stride. Start by finding all of the PHI nodes in the header for
328328 I != E; ++I)
329329 if (const SCEVAddRecExpr *AR = findAddRecForLoop(*I, L))
330330 return AR;
331 return 0;
332 }
333
334 return 0;
331 return nullptr;
332 }
333
334 return nullptr;
335335 }
336336
337337 const SCEV *IVUsers::getStride(const IVStrideUse &IU, const Loop *L) const {
338338 if (const SCEVAddRecExpr *AR = findAddRecForLoop(getExpr(IU), L))
339339 return AR->getStepRecurrence(*SE);
340 return 0;
340 return nullptr;
341341 }
342342
343343 void IVStrideUse::transformToPostInc(const Loop *L) {
4646
4747 void visitInstruction(Instruction &I) {
4848 errs() << "Instruction Count does not know about " << I;
49 llvm_unreachable(0);
49 llvm_unreachable(nullptr);
5050 }
5151 public:
5252 static char ID; // Pass identification, replacement for typeid
130130 Instruction::BinaryOps OpcodeToExpand = (Instruction::BinaryOps)OpcToExpand;
131131 // Recursion is always used, so bail out at once if we already hit the limit.
132132 if (!MaxRecurse--)
133 return 0;
133 return nullptr;
134134
135135 // Check whether the expression has the form "(A op' B) op C".
136136 if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
178178 }
179179 }
180180
181 return 0;
181 return nullptr;
182182 }
183183
184184 /// FactorizeBinOp - Simplify "LHS Opcode RHS" by factorizing out a common term
191191 Instruction::BinaryOps OpcodeToExtract = (Instruction::BinaryOps)OpcToExtract;
192192 // Recursion is always used, so bail out at once if we already hit the limit.
193193 if (!MaxRecurse--)
194 return 0;
194 return nullptr;
195195
196196 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
197197 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
198198
199199 if (!Op0 || Op0->getOpcode() != OpcodeToExtract ||
200200 !Op1 || Op1->getOpcode() != OpcodeToExtract)
201 return 0;
201 return nullptr;
202202
203203 // The expression has the form "(A op' B) op (C op' D)".
204204 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
250250 }
251251 }
252252
253 return 0;
253 return nullptr;
254254 }
255255
256256 /// SimplifyAssociativeBinOp - Generic simplifications for associative binary
262262
263263 // Recursion is always used, so bail out at once if we already hit the limit.
264264 if (!MaxRecurse--)
265 return 0;
265 return nullptr;
266266
267267 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
268268 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
307307
308308 // The remaining transforms require commutativity as well as associativity.
309309 if (!Instruction::isCommutative(Opcode))
310 return 0;
310 return nullptr;
311311
312312 // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
313313 if (Op0 && Op0->getOpcode() == Opcode) {
347347 }
348348 }
349349
350 return 0;
350 return nullptr;
351351 }
352352
353353 /// ThreadBinOpOverSelect - In the case of a binary operation with a select
358358 const Query &Q, unsigned MaxRecurse) {
359359 // Recursion is always used, so bail out at once if we already hit the limit.
360360 if (!MaxRecurse--)
361 return 0;
361 return nullptr;
362362
363363 SelectInst *SI;
364364 if (isa<SelectInst>(LHS)) {
419419 }
420420 }
421421
422 return 0;
422 return nullptr;
423423 }
424424
425425 /// ThreadCmpOverSelect - In the case of a comparison with a select instruction,
431431 unsigned MaxRecurse) {
432432 // Recursion is always used, so bail out at once if we already hit the limit.
433433 if (!MaxRecurse--)
434 return 0;
434 return nullptr;
435435
436436 // Make sure the select is on the LHS.
437437 if (!isa<SelectInst>(LHS)) {
455455 // It didn't simplify. However if "cmp TV, RHS" is equal to the select
456456 // condition then we can replace it with 'true'. Otherwise give up.
457457 if (!isSameCompare(Cond, Pred, TV, RHS))
458 return 0;
458 return nullptr;
459459 TCmp = getTrue(Cond->getType());
460460 }
461461
469469 // It didn't simplify. However if "cmp FV, RHS" is equal to the select
470470 // condition then we can replace it with 'false'. Otherwise give up.
471471 if (!isSameCompare(Cond, Pred, FV, RHS))
472 return 0;
472 return nullptr;
473473 FCmp = getFalse(Cond->getType());
474474 }
475475
481481 // The remaining cases only make sense if the select condition has the same
482482 // type as the result of the comparison, so bail out if this is not so.
483483 if (Cond->getType()->isVectorTy() != RHS->getType()->isVectorTy())
484 return 0;
484 return nullptr;
485485 // If the false value simplified to false, then the result of the compare
486486 // is equal to "Cond && TCmp". This also catches the case when the false
487487 // value simplified to false and the true value to true, returning "Cond".
501501 Q, MaxRecurse))
502502 return V;
503503
504 return 0;
504 return nullptr;
505505 }
506506
507507 /// ThreadBinOpOverPHI - In the case of a binary operation with an operand that
512512 const Query &Q, unsigned MaxRecurse) {
513513 // Recursion is always used, so bail out at once if we already hit the limit.
514514 if (!MaxRecurse--)
515 return 0;
515 return nullptr;
516516
517517 PHINode *PI;
518518 if (isa<PHINode>(LHS)) {
519519 PI = cast<PHINode>(LHS);
520520 // Bail out if RHS and the phi may be mutually interdependent due to a loop.
521521 if (!ValueDominatesPHI(RHS, PI, Q.DT))
522 return 0;
522 return nullptr;
523523 } else {
524524 assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
525525 PI = cast<PHINode>(RHS);
526526 // Bail out if LHS and the phi may be mutually interdependent due to a loop.
527527 if (!ValueDominatesPHI(LHS, PI, Q.DT))
528 return 0;
528 return nullptr;
529529 }
530530
531531 // Evaluate the BinOp on the incoming phi values.
532 Value *CommonValue = 0;
532 Value *CommonValue = nullptr;
533533 for (unsigned i = 0, e = PI->getNumIncomingValues(); i != e; ++i) {
534534 Value *Incoming = PI->getIncomingValue(i);
535535 // If the incoming value is the phi node itself, it can safely be skipped.
540540 // If the operation failed to simplify, or simplified to a different value
541541 // to previously, then give up.
542542 if (!V || (CommonValue && V != CommonValue))
543 return 0;
543 return nullptr;
544544 CommonValue = V;
545545 }
546546
555555 const Query &Q, unsigned MaxRecurse) {
556556 // Recursion is always used, so bail out at once if we already hit the limit.
557557 if (!MaxRecurse--)
558 return 0;
558 return nullptr;
559559
560560 // Make sure the phi is on the LHS.
561561 if (!isa<PHINode>(LHS)) {
567567
568568 // Bail out if RHS and the phi may be mutually interdependent due to a loop.
569569 if (!ValueDominatesPHI(RHS, PI, Q.DT))
570 return 0;
570 return nullptr;
571571
572572 // Evaluate the BinOp on the incoming phi values.
573 Value *CommonValue = 0;
573 Value *CommonValue = nullptr;
574574 for (unsigned i = 0, e = PI->getNumIncomingValues(); i != e; ++i) {
575575 Value *Incoming = PI->getIncomingValue(i);
576576 // If the incoming value is the phi node itself, it can safely be skipped.
579579 // If the operation failed to simplify, or simplified to a different value
580580 // to previously, then give up.
581581 if (!V || (CommonValue && V != CommonValue))
582 return 0;
582 return nullptr;
583583 CommonValue = V;
584584 }
585585
612612 // X + (Y - X) -> Y
613613 // (Y - X) + X -> Y
614614 // Eg: X + -X -> 0
615 Value *Y = 0;
615 Value *Y = nullptr;
616616 if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
617617 match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
618618 return Y;
646646 // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
647647 // for threading over phi nodes.
648648
649 return 0;
649 return nullptr;
650650 }
651651
652652 Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
719719 // If LHS and RHS are not related via constant offsets to the same base
720720 // value, there is nothing we can do here.
721721 if (LHS != RHS)
722 return 0;
722 return nullptr;
723723
724724 // Otherwise, the difference of LHS - RHS can be computed as:
725725 // LHS - RHS
754754
755755 // (X*2) - X -> X
756756 // (X<<1) - X -> X
757 Value *X = 0;
757 Value *X = nullptr;
758758 if (match(Op0, m_Mul(m_Specific(Op1), m_ConstantInt<2>())) ||
759759 match(Op0, m_Shl(m_Specific(Op1), m_One())))
760760 return Op1;
761761
762762 // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
763763 // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
764 Value *Y = 0, *Z = Op1;
764 Value *Y = nullptr, *Z = Op1;
765765 if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
766766 // See if "V === Y - Z" simplifies.
767767 if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
852852 // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
853853 // for threading over phi nodes.
854854
855 return 0;
855 return nullptr;
856856 }
857857
858858 Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
889889 // fadd [nnan ninf] X, (fsub [nnan ninf] 0, X) ==> 0
890890 // where nnan and ninf have to occur at least once somewhere in this
891891 // expression
892 Value *SubOp = 0;
892 Value *SubOp = nullptr;
893893 if (match(Op1, m_FSub(m_AnyZero(), m_Specific(Op0))))
894894 SubOp = Op1;
895895 else if (match(Op0, m_FSub(m_AnyZero(), m_Specific(Op1))))
901901 return Constant::getNullValue(Op0->getType());
902902 }
903903
904 return 0;
904 return nullptr;
905905 }
906906
907907 /// Given operands for an FSub, see if we can fold the result. If not, this
938938 if (FMF.noNaNs() && FMF.noInfs() && Op0 == Op1)
939939 return Constant::getNullValue(Op0->getType());
940940
941 return 0;
941 return nullptr;
942942 }
943943
944944 /// Given the operands for an FMul, see if we can fold the result
965965 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZero()))
966966 return Op1;
967967
968 return 0;
968 return nullptr;
969969 }
970970
971971 /// SimplifyMulInst - Given operands for a Mul, see if we can
996996 return Op0;
997997
998998 // (X / Y) * Y -> X if the division is exact.
999 Value *X = 0;
999 Value *X = nullptr;
10001000 if (match(Op0, m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
10011001 match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0))))) // Y * (X / Y)
10021002 return X;
10301030 MaxRecurse))
10311031 return V;
10321032
1033 return 0;
1033 return nullptr;
10341034 }
10351035
10361036 Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
10971097 return ConstantInt::get(Op0->getType(), 1);
10981098
10991099 // (X * Y) / Y -> X if the multiplication does not overflow.
1100 Value *X = 0, *Y = 0;
1100 Value *X = nullptr, *Y = nullptr;
11011101 if (match(Op0, m_Mul(m_Value(X), m_Value(Y))) && (X == Op1 || Y == Op1)) {
11021102 if (Y != Op1) std::swap(X, Y); // Ensure expression is (X * Y) / Y, Y = Op1
11031103 OverflowingBinaryOperator *Mul = cast<OverflowingBinaryOperator>(Op0);
11281128 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
11291129 return V;
11301130
1131 return 0;
1131 return nullptr;
11321132 }
11331133
11341134 /// SimplifySDivInst - Given operands for an SDiv, see if we can
11381138 if (Value *V = SimplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse))
11391139 return V;
11401140
1141 return 0;
1141 return nullptr;
11421142 }
11431143
11441144 Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
11541154 if (Value *V = SimplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse))
11551155 return V;
11561156
1157 return 0;
1157 return nullptr;
11581158 }
11591159
11601160 Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
11731173 if (match(Op1, m_Undef()))
11741174 return Op1;
11751175
1176 return 0;
1176 return nullptr;
11771177 }
11781178
11791179 Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout *DL,
12331233 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
12341234 return V;
12351235
1236 return 0;
1236 return nullptr;
12371237 }
12381238
12391239 /// SimplifySRemInst - Given operands for an SRem, see if we can
12431243 if (Value *V = SimplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse))
12441244 return V;
12451245
1246 return 0;
1246 return nullptr;
12471247 }
12481248
12491249 Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *DL,
12591259 if (Value *V = SimplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse))
12601260 return V;
12611261
1262 return 0;
1262 return nullptr;
12631263 }
12641264
12651265 Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *DL,
12781278 if (match(Op1, m_Undef()))
12791279 return Op1;
12801280
1281 return 0;
1281 return nullptr;
12821282 }
12831283
12841284 Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout *DL,
13491349 if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
13501350 return V;
13511351
1352 return 0;
1352 return nullptr;
13531353 }
13541354
13551355 /// SimplifyShlInst - Given operands for an Shl, see if we can
13671367 Value *X;
13681368 if (match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
13691369 return X;
1370 return 0;
1370 return nullptr;
13711371 }
13721372
13731373 Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
13981398 cast<OverflowingBinaryOperator>(Op0)->hasNoUnsignedWrap())
13991399 return X;
14001400
1401 return 0;
1401 return nullptr;
14021402 }
14031403
14041404 Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
14341434 cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap())
14351435 return X;
14361436
1437 return 0;
1437 return nullptr;
14381438 }
14391439
14401440 Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
14821482 return Constant::getNullValue(Op0->getType());
14831483
14841484 // (A | ?) & A = A
1485 Value *A = 0, *B = 0;
1485 Value *A = nullptr, *B = nullptr;
14861486 if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
14871487 (A == Op1 || B == Op1))
14881488 return Op1;
15351535 MaxRecurse))
15361536 return V;
15371537
1538 return 0;
1538 return nullptr;
15391539 }
15401540
15411541 Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *DL,
15811581 return Constant::getAllOnesValue(Op0->getType());
15821582
15831583 // (A & ?) | A = A
1584 Value *A = 0, *B = 0;
1584 Value *A = nullptr, *B = nullptr;
15851585 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
15861586 (A == Op1 || B == Op1))
15871587 return Op1;
16291629 if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
16301630 return V;
16311631
1632 return 0;
1632 return nullptr;
16331633 }
16341634
16351635 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *DL,
16891689 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
16901690 // for threading over phi nodes.
16911691
1692 return 0;
1692 return nullptr;
16931693 }
16941694
16951695 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *DL,
17091709 Value *LHS, Value *RHS) {
17101710 SelectInst *SI = dyn_cast<SelectInst>(V);
17111711 if (!SI)
1712 return 0;
1712 return nullptr;
17131713 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
17141714 if (!Cmp)
1715 return 0;
1715 return nullptr;
17161716 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
17171717 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
17181718 return Cmp;
17191719 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
17201720 LHS == CmpRHS && RHS == CmpLHS)
17211721 return Cmp;
1722 return 0;
1722 return nullptr;
17231723 }
17241724
17251725 // A significant optimization not implemented here is assuming that alloca
17671767 // We can only fold certain predicates on pointer comparisons.
17681768 switch (Pred) {
17691769 default:
1770 return 0;
1770 return nullptr;
17711771
17721772 // Equality comparisons are easy to fold.
17731773 case CmpInst::ICMP_EQ:
18731873 }
18741874
18751875 // Otherwise, fail.
1876 return 0;
1876 return nullptr;
18771877 }
18781878
18791879 /// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
22202220 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
22212221 if (MaxRecurse && (LBO || RBO)) {
22222222 // Analyze the case when either LHS or RHS is an add instruction.
2223 Value *A = 0, *B = 0, *C = 0, *D = 0;
2223 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
22242224 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
22252225 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
22262226 if (LBO && LBO->getOpcode() == Instruction::Add) {
26042604 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
26052605 return V;
26062606
2607 return 0;
2607 return nullptr;
26082608 }
26092609
26102610 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
27012701 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
27022702 return V;
27032703
2704 return 0;
2704 return nullptr;
27052705 }
27062706
27072707 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
27402740 if (isa<UndefValue>(FalseVal)) // select C, X, undef -> X
27412741 return TrueVal;
27422742
2743 return 0;
2743 return nullptr;
27442744 }
27452745
27462746 Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
27852785 // Check to see if this is constant foldable.
27862786 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
27872787 if (!isa<Constant>(Ops[i]))
2788 return 0;
2788 return nullptr;
27892789
27902790 return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]), Ops.slice(1));
27912791 }
28222822 return Agg;
28232823 }
28242824
2825 return 0;
2825 return nullptr;
28262826 }
28272827
28282828 Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
28382838 static Value *SimplifyPHINode(PHINode *PN, const Query &Q) {
28392839 // If all of the PHI's incoming values are the same then replace the PHI node
28402840 // with the common value.
2841 Value *CommonValue = 0;
2841 Value *CommonValue = nullptr;
28422842 bool HasUndefInput = false;
28432843 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
28442844 Value *Incoming = PN->getIncomingValue(i);
28502850 continue;
28512851 }
28522852 if (CommonValue && Incoming != CommonValue)
2853 return 0; // Not the same, bail out.
2853 return nullptr; // Not the same, bail out.
28542854 CommonValue = Incoming;
28552855 }
28562856
28632863 // instruction, we cannot return X as the result of the PHI node unless it
28642864 // dominates the PHI block.
28652865 if (HasUndefInput)
2866 return ValueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : 0;
2866 return ValueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
28672867
28682868 return CommonValue;
28692869 }
28722872 if (Constant *C = dyn_cast<Constant>(Op))
28732873 return ConstantFoldInstOperands(Instruction::Trunc, Ty, C, Q.DL, Q.TLI);
28742874
2875 return 0;
2875 return nullptr;
28762876 }
28772877
28782878 Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *DL,
29442944 if (Value *V = ThreadBinOpOverPHI(Opcode, LHS, RHS, Q, MaxRecurse))
29452945 return V;
29462946
2947 return 0;
2947 return nullptr;
29482948 }
29492949 }
29502950
29912991 const Query &Q, unsigned MaxRecurse) {
29922992 // Perform idempotent optimizations
29932993 if (!IsIdempotent(IID))
2994 return 0;
2994 return nullptr;
29952995
29962996 // Unary Ops
29972997 if (std::distance(ArgBegin, ArgEnd) == 1)
29992999 if (II->getIntrinsicID() == IID)
30003000 return II;
30013001
3002 return 0;
3002 return nullptr;
30033003 }
30043004
30053005 template <typename IterTy>
30163016
30173017 Function *F = dyn_cast<Function>(V);
30183018 if (!F)
3019 return 0;
3019 return nullptr;
30203020
30213021 if (unsigned IID = F->getIntrinsicID())
30223022 if (Value *Ret =
30243024 return Ret;
30253025
30263026 if (!canConstantFoldCallTo(F))
3027 return 0;
3027 return nullptr;
30283028
30293029 SmallVector<Constant *, 4> ConstantArgs;
30303030 ConstantArgs.reserve(ArgEnd - ArgBegin);
30313031 for (IterTy I = ArgBegin, E = ArgEnd; I != E; ++I) {
30323032 Constant *C = dyn_cast<Constant>(*I);
30333033 if (!C)
3034 return 0;
3034 return nullptr;
30353035 ConstantArgs.push_back(C);
30363036 }
30373037
32463246 const DataLayout *DL,
32473247 const TargetLibraryInfo *TLI,
32483248 const DominatorTree *DT) {
3249 return replaceAndRecursivelySimplifyImpl(I, 0, DL, TLI, DT);
3249 return replaceAndRecursivelySimplifyImpl(I, nullptr, DL, TLI, DT);
32503250 }
32513251
32523252 bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
2828 delete Intervals[i];
2929 IntervalMap.clear();
3030 Intervals.clear();
31 RootInterval = 0;
31 RootInterval = nullptr;
3232 }
3333
3434 void IntervalPartition::print(raw_ostream &O, const Module*) const {
8181 ConstantRange Range;
8282
8383 public:
84 LVILatticeVal() : Tag(undefined), Val(0), Range(1, true) {}
84 LVILatticeVal() : Tag(undefined), Val(nullptr), Range(1, true) {}
8585
8686 static LVILatticeVal get(Constant *C) {
8787 LVILatticeVal Res;
515515 BBLV.markOverdefined();
516516
517517 Instruction *BBI = dyn_cast<Instruction>(Val);
518 if (BBI == 0 || BBI->getParent() != BB) {
518 if (!BBI || BBI->getParent() != BB) {
519519 return ODCacheUpdater.markResult(solveBlockValueNonLocal(BBLV, Val, BB));
520520 }
521521
594594 Value *UnderlyingVal = GetUnderlyingObject(Val);
595595 // If 'GetUnderlyingObject' didn't converge, skip it. It won't converge
596596 // inside InstructionDereferencesPointer either.
597 if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, NULL, 1)) {
597 if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, nullptr, 1)) {
598598 for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
599599 BI != BE; ++BI) {
600600 if (InstructionDereferencesPointer(BI, UnderlyingVal)) {
812812
813813 // Recognize the range checking idiom that InstCombine produces.
814814 // (X-C1) u< C2 --> [C1, C1+C2)
815 ConstantInt *NegOffset = 0;
815 ConstantInt *NegOffset = nullptr;
816816 if (ICI->getPredicate() == ICmpInst::ICMP_ULT)
817817 match(ICI->getOperand(0), m_Add(m_Specific(Val),
818818 m_ConstantInt(NegOffset)));
10131013 getCache(PImpl).clear();
10141014
10151015 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
1016 DL = DLP ? &DLP->getDataLayout() : 0;
1016 DL = DLP ? &DLP->getDataLayout() : nullptr;
10171017 TLI = &getAnalysis<TargetLibraryInfo>();
10181018
10191019 // Fully lazy.
10291029 // If the cache was allocated, free it.
10301030 if (PImpl) {
10311031 delete &getCache(PImpl);
1032 PImpl = 0;
1032 PImpl = nullptr;
10331033 }
10341034 }
10351035
10431043 if (const APInt *SingleVal = CR.getSingleElement())
10441044 return ConstantInt::get(V->getContext(), *SingleVal);
10451045 }
1046 return 0;
1046 return nullptr;
10471047 }
10481048
10491049 /// getConstantOnEdge - Determine whether the specified value is known to be a
10591059 if (const APInt *SingleVal = CR.getSingleElement())
10601060 return ConstantInt::get(V->getContext(), *SingleVal);
10611061 }
1062 return 0;
1062 return nullptr;
10631063 }
10641064
10651065 /// getPredicateOnEdge - Determine whether the specified value comparison
10711071 LVILatticeVal Result = getCache(PImpl).getValueOnEdge(V, FromBB, ToBB);
10721072
10731073 // If we know the value is a constant, evaluate the conditional.
1074 Constant *Res = 0;
1074 Constant *Res = nullptr;
10751075 if (Result.isConstant()) {
10761076 Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, DL,
10771077 TLI);
5353 // if we have detailed info and if 'P' is any of the locations we know
5454 // about.
5555 const LibCallFunctionInfo::LocationMRInfo *Details = FI->LocationDetails;
56 if (Details == 0)
56 if (Details == nullptr)
5757 return MRInfo;
5858
5959 // If the details array is of the 'DoesNot' kind, we only know something if
4545
4646 /// If this is the first time we are querying for this info, lazily construct
4747 /// the StringMap to index it.
48 if (Map == 0) {
48 if (!Map) {
4949 Impl = Map = new StringMap<const LibCallFunctionInfo*>();
5050
5151 const LibCallFunctionInfo *Array = getFunctionInfoArray();
52 if (Array == 0) return 0;
52 if (!Array) return nullptr;
5353
5454 // We now have the array of entries. Populate the StringMap.
5555 for (unsigned i = 0; Array[i].Name; ++i)
136136 // that failed. This provides a nice place to put a breakpoint if you want
137137 // to see why something is not correct.
138138 void CheckFailed(const Twine &Message,
139 const Value *V1 = 0, const Value *V2 = 0,
140 const Value *V3 = 0, const Value *V4 = 0) {
139 const Value *V1 = nullptr, const Value *V2 = nullptr,
140 const Value *V3 = nullptr, const Value *V4 = nullptr) {
141141 MessagesStr << Message.str() << "\n";
142142 WriteValue(V1);
143143 WriteValue(V2);
176176 AA = &getAnalysis<AliasAnalysis>();
177177 DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
178178 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
179 DL = DLP ? &DLP->getDataLayout() : 0;
179 DL = DLP ? &DLP->getDataLayout() : nullptr;
180180 TLI = &getAnalysis<TargetLibraryInfo>();
181181 visit(F);
182182 dbgs() << MessagesStr.str();
198198 Value *Callee = CS.getCalledValue();
199199
200200 visitMemoryReference(I, Callee, AliasAnalysis::UnknownSize,
201 0, 0, MemRef::Callee);
201 0, nullptr, MemRef::Callee);
202202
203203 if (Function *F = dyn_cast<Function>(findValue(Callee, /*OffsetOk=*/false))) {
204204 Assert1(CS.getCallingConv() == F->getCallingConv(),
274274 MemCpyInst *MCI = cast<MemCpyInst>(&I);
275275 // TODO: If the size is known, use it.
276276 visitMemoryReference(I, MCI->getDest(), AliasAnalysis::UnknownSize,
277 MCI->getAlignment(), 0,
277 MCI->getAlignment(), nullptr,
278278 MemRef::Write);
279279 visitMemoryReference(I, MCI->getSource(), AliasAnalysis::UnknownSize,
280 MCI->getAlignment(), 0,
280 MCI->getAlignment(), nullptr,
281281 MemRef::Read);
282282
283283 // Check that the memcpy arguments don't overlap. The AliasAnalysis API
298298 MemMoveInst *MMI = cast<MemMoveInst>(&I);
299299 // TODO: If the size is known, use it.
300300 visitMemoryReference(I, MMI->getDest(), AliasAnalysis::UnknownSize,
301 MMI->getAlignment(), 0,
301 MMI->getAlignment(), nullptr,
302302 MemRef::Write);
303303 visitMemoryReference(I, MMI->getSource(), AliasAnalysis::UnknownSize,
304 MMI->getAlignment(), 0,
304 MMI->getAlignment(), nullptr,
305305 MemRef::Read);
306306 break;
307307 }
309309 MemSetInst *MSI = cast<MemSetInst>(&I);
310310 // TODO: If the size is known, use it.
311311 visitMemoryReference(I, MSI->getDest(), AliasAnalysis::UnknownSize,
312 MSI->getAlignment(), 0,
312 MSI->getAlignment(), nullptr,
313313 MemRef::Write);
314314 break;
315315 }
320320 &I);
321321
322322 visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
323 0, 0, MemRef::Read | MemRef::Write);
323 0, nullptr, MemRef::Read | MemRef::Write);
324324 break;
325325 case Intrinsic::vacopy:
326326 visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
327 0, 0, MemRef::Write);
327 0, nullptr, MemRef::Write);
328328 visitMemoryReference(I, CS.getArgument(1), AliasAnalysis::UnknownSize,
329 0, 0, MemRef::Read);
329 0, nullptr, MemRef::Read);
330330 break;
331331 case Intrinsic::vaend:
332332 visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
333 0, 0, MemRef::Read | MemRef::Write);
333 0, nullptr, MemRef::Read | MemRef::Write);
334334 break;
335335
336336 case Intrinsic::stackrestore:
338338 // stack pointer, which the compiler may read from or write to
339339 // at any time, so check it for both readability and writeability.
340340 visitMemoryReference(I, CS.getArgument(0), AliasAnalysis::UnknownSize,
341 0, 0, MemRef::Read | MemRef::Write);
341 0, nullptr, MemRef::Read | MemRef::Write);
342342 break;
343343 }
344344 }
571571 }
572572
573573 void Lint::visitVAArgInst(VAArgInst &I) {
574 visitMemoryReference(I, I.getOperand(0), AliasAnalysis::UnknownSize, 0, 0,
575 MemRef::Read | MemRef::Write);
574 visitMemoryReference(I, I.getOperand(0), AliasAnalysis::UnknownSize, 0,
575 nullptr, MemRef::Read | MemRef::Write);
576576 }
577577
578578 void Lint::visitIndirectBrInst(IndirectBrInst &I) {
579 visitMemoryReference(I, I.getAddress(), AliasAnalysis::UnknownSize, 0, 0,
580 MemRef::Branchee);
579 visitMemoryReference(I, I.getAddress(), AliasAnalysis::UnknownSize, 0,
580 nullptr, MemRef::Branchee);
581581
582582 Assert1(I.getNumDestinations() != 0,
583583 "Undefined behavior: indirectbr with no destinations", &I);
6161 if (ByteOffset < 0) // out of bounds
6262 return false;
6363
64 Type *BaseType = 0;
64 Type *BaseType = nullptr;
6565 unsigned BaseAlign = 0;
6666 if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
6767 // An alloca is safe to load from as long as it is suitably aligned.
160160 ScanFrom++;
161161
162162 // Don't scan huge blocks.
163 if (MaxInstsToScan-- == 0) return 0;
163 if (MaxInstsToScan-- == 0) return nullptr;
164164
165165 --ScanFrom;
166166 // If this is a load of Ptr, the loaded value is available.
197197
198198 // Otherwise the store that may or may not alias the pointer, bail out.
199199 ++ScanFrom;
200 return 0;
200 return nullptr;
201201 }
202202
203203 // If this is some other instruction that may clobber Ptr, bail out.
210210
211211 // May modify the pointer, bail out.
212212 ++ScanFrom;
213 return 0;
213 return nullptr;
214214 }
215215 }
216216
217217 // Got to the start of the block, we didn't find it, but are done for this
218218 // block.
219 return 0;
219 return nullptr;
220220 }
140140 PHINode *Loop::getCanonicalInductionVariable() const {
141141 BasicBlock *H = getHeader();
142142
143 BasicBlock *Incoming = 0, *Backedge = 0;
143 BasicBlock *Incoming = nullptr, *Backedge = nullptr;
144144 pred_iterator PI = pred_begin(H);
145145 assert(PI != pred_end(H) &&
146146 "Loop must have at least one backedge!");
147147 Backedge = *PI++;
148 if (PI == pred_end(H)) return 0; // dead loop
148 if (PI == pred_end(H)) return nullptr; // dead loop
149149 Incoming = *PI++;
150 if (PI != pred_end(H)) return 0; // multiple backedges?
150 if (PI != pred_end(H)) return nullptr; // multiple backedges?
151151
152152 if (contains(Incoming)) {
153153 if (contains(Backedge))
154 return 0;
154 return nullptr;
155155 std::swap(Incoming, Backedge);
156156 } else if (!contains(Backedge))
157 return 0;
157 return nullptr;
158158
159159 // Loop over all of the PHI nodes, looking for a canonical indvar.
160160 for (BasicBlock::iterator I = H->begin(); isa<PHINode>(I); ++I) {
170170 if (CI->equalsInt(1))
171171 return PN;
172172 }
173 return 0;
173 return nullptr;
174174 }
175175
176176 /// isLCSSAForm - Return true if the Loop is in LCSSA form
231231 }
232232
233233 MDNode *Loop::getLoopID() const {
234 MDNode *LoopID = 0;
234 MDNode *LoopID = nullptr;
235235 if (isLoopSimplifyForm()) {
236236 LoopID = getLoopLatch()->getTerminator()->getMetadata(LoopMDName);
237237 } else {
240240 BasicBlock *H = getHeader();
241241 for (block_iterator I = block_begin(), IE = block_end(); I != IE; ++I) {
242242 TerminatorInst *TI = (*I)->getTerminator();
243 MDNode *MD = 0;
243 MDNode *MD = nullptr;
244244
245245 // Check if this terminator branches to the loop header.
246246 for (unsigned i = 0, ie = TI->getNumSuccessors(); i != ie; ++i) {
250250 }
251251 }
252252 if (!MD)
253 return 0;
253 return nullptr;
254254
255255 if (!LoopID)
256256 LoopID = MD;
257257 else if (MD != LoopID)
258 return 0;
258 return nullptr;
259259 }
260260 }
261261 if (!LoopID || LoopID->getNumOperands() == 0 ||
262262 LoopID->getOperand(0) != LoopID)
263 return 0;
263 return nullptr;
264264 return LoopID;
265265 }
266266
401401 getUniqueExitBlocks(UniqueExitBlocks);
402402 if (UniqueExitBlocks.size() == 1)
403403 return UniqueExitBlocks[0];
404 return 0;
404 return nullptr;
405405 }
406406
407407 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
547547 // is considered uninitialized.
548548 Loop *NearLoop = BBLoop;
549549
550 Loop *Subloop = 0;
550 Loop *Subloop = nullptr;
551551 if (NearLoop != Unloop && Unloop->contains(NearLoop)) {
552552 Subloop = NearLoop;
553553 // Find the subloop ancestor that is directly contained within Unloop.
563563 succ_iterator I = succ_begin(BB), E = succ_end(BB);
564564 if (I == E) {
565565 assert(!Subloop && "subloop blocks must have a successor");
566 NearLoop = 0; // unloop blocks may now exit the function.
566 NearLoop = nullptr; // unloop blocks may now exit the function.
567567 }
568568 for (; I != E; ++I) {
569569 if (*I == BB)
636636
637637 // Blocks no longer have a parent but are still referenced by Unloop until
638638 // the Unloop object is deleted.
639 LI.changeLoopFor(*I, 0);
639 LI.changeLoopFor(*I, nullptr);
640640 }
641641
642642 // Remove the loop from the top-level LoopInfo object.
6060 : FunctionPass(ID), PMDataManager() {
6161 skipThisLoop = false;
6262 redoThisLoop = false;
63 LI = NULL;
64 CurrentLoop = NULL;
63 LI = nullptr;
64 CurrentLoop = nullptr;
6565 }
6666
6767 /// Delete loop from the loop queue and loop hierarchy (LoopInfo).
4545
4646 bool runOnFunction(Function &F) override;
4747
48 void print(raw_ostream &OS, const Module * = 0) const override;
48 void print(raw_ostream &OS, const Module * = nullptr) const override;
4949
5050 void getAnalysisUsage(AnalysisUsage &AU) const override {
5151 AU.addRequiredTransitive<AliasAnalysis>();
5555
5656 void releaseMemory() override {
5757 Deps.clear();
58 F = 0;
58 F = nullptr;
5959 }
6060
6161 private:
105105 MemDepResult Res = MDA.getDependency(Inst);
106106 if (!Res.isNonLocal()) {
107107 Deps[Inst].insert(std::make_pair(getInstTypePair(Res),
108 static_cast<BasicBlock *>(0)));
108 static_cast<BasicBlock *>(nullptr)));
109109 } else if (CallSite CS = cast(Inst)) {
110110 const MemoryDependenceAnalysis::NonLocalDepInfo &NLDI =
111111 MDA.getNonLocalCallDependency(CS);
121121 if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
122122 if (!LI->isUnordered()) {
123123 // FIXME: Handle atomic/volatile loads.
124 Deps[Inst].insert(std::make_pair(getInstTypePair(0, Unknown),
125 static_cast<BasicBlock *>(0)));
124 Deps[Inst].insert(std::make_pair(getInstTypePair(nullptr, Unknown),
125 static_cast<BasicBlock *>(nullptr)));
126126 continue;
127127 }
128128 AliasAnalysis::Location Loc = AA.getLocation(LI);
130130 } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
131131 if (!SI->isUnordered()) {
132132 // FIXME: Handle atomic/volatile stores.
133 Deps[Inst].insert(std::make_pair(getInstTypePair(0, Unknown),
134 static_cast<BasicBlock *>(0)));
133 Deps[Inst].insert(std::make_pair(getInstTypePair(nullptr, Unknown),
134 static_cast<BasicBlock *>(nullptr)));
135135 continue;
136136 }
137137 AliasAnalysis::Location Loc = AA.getLocation(SI);
7575
7676 CallSite CS(const_cast<Value*>(V));
7777 if (!CS.getInstruction())
78 return 0;
78 return nullptr;
7979
8080 if (CS.isNoBuiltin())
81 return 0;
81 return nullptr;
8282
8383 Function *Callee = CS.getCalledFunction();
8484 if (!Callee || !Callee->isDeclaration())
85 return 0;
85 return nullptr;
8686 return Callee;
8787 }
8888
9393 bool LookThroughBitCast = false) {
9494 // Skip intrinsics
9595 if (isa<IntrinsicInst>(V))
96 return 0;
96 return nullptr;
9797
9898 Function *Callee = getCalledFunction(V, LookThroughBitCast);
9999 if (!Callee)
100 return 0;
100 return nullptr;
101101
102102 // Make sure that the function is available.
103103 StringRef FnName = Callee->getName();
104104 LibFunc::Func TLIFn;
105105 if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
106 return 0;
106 return nullptr;
107107
108108 unsigned i = 0;
109109 bool found = false;
114114 }
115115 }
116116 if (!found)
117 return 0;
117 return nullptr;
118118
119119 const AllocFnsTy *FnData = &AllocationFnData[i];
120120 if ((FnData->AllocTy & AllocTy) != FnData->AllocTy)
121 return 0;
121 return nullptr;
122122
123123 // Check function prototype.
124124 int FstParam = FnData->FstParam;
134134 FTy->getParamType(SndParam)->isIntegerTy(32) ||
135135 FTy->getParamType(SndParam)->isIntegerTy(64)))
136136 return FnData;
137 return 0;
137 return nullptr;
138138 }
139139
140140 static bool hasNoAliasAttr(const Value *V, bool LookThroughBitCast) {
201201 /// ignore InvokeInst here.
202202 const CallInst *llvm::extractMallocCall(const Value *I,
203203 const TargetLibraryInfo *TLI) {
204 return isMallocLikeFn(I, TLI) ? dyn_cast<CallInst>(I) : 0;
204 return isMallocLikeFn(I, TLI) ? dyn_cast<CallInst>(I) : nullptr;
205205 }
206206
207207 static Value *computeArraySize(const CallInst *CI, const DataLayout *DL,
208208 const TargetLibraryInfo *TLI,
209209 bool LookThroughSExt = false) {
210210 if (!CI)
211 return 0;
211 return nullptr;
212212
213213 // The size of the malloc's result type must be known to determine array size.
214214 Type *T = getMallocAllocatedType(CI, TLI);
215215 if (!T || !T->isSized() || !DL)
216 return 0;
216 return nullptr;
217217
218218 unsigned ElementSize = DL->getTypeAllocSize(T);
219219 if (StructType *ST = dyn_cast<StructType>(T))
222222 // If malloc call's arg can be determined to be a multiple of ElementSize,
223223 // return the multiple. Otherwise, return NULL.
224224 Value *MallocArg = CI->getArgOperand(0);
225 Value *Multiple = 0;
225 Value *Multiple = nullptr;
226226 if (ComputeMultiple(MallocArg, ElementSize, Multiple,
227227 LookThroughSExt))
228228 return Multiple;
229229
230 return 0;
230 return nullptr;
231231 }
232232
233233 /// isArrayMalloc - Returns the corresponding CallInst if the instruction
244244 return CI;
245245
246246 // CI is a non-array malloc or we can't figure out that it is an array malloc.
247 return 0;
247 return nullptr;
248248 }
249249
250250 /// getMallocType - Returns the PointerType resulting from the malloc call.
256256 const TargetLibraryInfo *TLI) {
257257 assert(isMallocLikeFn(CI, TLI) && "getMallocType and not malloc call");
258258
259 PointerType *MallocType = 0;
259 PointerType *MallocType = nullptr;
260260 unsigned NumOfBitCastUses = 0;
261261
262262 // Determine if CallInst has a bitcast use.
276276 return cast<PointerType>(CI->getType());
277277
278278 // Type could not be determined.
279 return 0;
279 return nullptr;
280280 }
281281
282282 /// getMallocAllocatedType - Returns the Type allocated by malloc call.
287287 Type *llvm::getMallocAllocatedType(const CallInst *CI,
288288 const TargetLibraryInfo *TLI) {
289289 PointerType *PT = getMallocType(CI, TLI);
290 return PT ? PT->getElementType() : 0;
290 return PT ? PT->getElementType() : nullptr;
291291 }
292292
293293 /// getMallocArraySize - Returns the array size of a malloc call. If the
307307 /// is a calloc call.
308308 const CallInst *llvm::extractCallocCall(const Value *I,
309309 const TargetLibraryInfo *TLI) {
310 return isCallocLikeFn(I, TLI) ? cast<CallInst>(I) : 0;
310 return isCallocLikeFn(I, TLI) ? cast<CallInst>(I) : nullptr;
311311 }
312312
313313
315315 const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
316316 const CallInst *CI = dyn_cast<CallInst>(I);
317317 if (!CI || isa<IntrinsicInst>(CI))
318 return 0;
318 return nullptr;
319319 Function *Callee = CI->getCalledFunction();
320 if (Callee == 0 || !Callee->isDeclaration())
321 return 0;
320 if (Callee == nullptr || !Callee->isDeclaration())
321 return nullptr;
322322
323323 StringRef FnName = Callee->getName();
324324 LibFunc::Func TLIFn;
325325 if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
326 return 0;
326 return nullptr;
327327
328328 unsigned ExpectedNumParams;
329329 if (TLIFn == LibFunc::free ||
334334 TLIFn == LibFunc::ZdaPvRKSt9nothrow_t) // delete[](void*, nothrow)
335335 ExpectedNumParams = 2;
336336 else
337 return 0;
337 return nullptr;
338338
339339 // Check free prototype.
340340 // FIXME: workaround for PR5130, this will be obsolete when a nobuiltin
341341 // attribute will exist.
342342 FunctionType *FTy = Callee->getFunctionType();
343343 if (!FTy->getReturnType()->isVoidTy())
344 return 0;
344 return nullptr;
345345 if (FTy->getNumParams() != ExpectedNumParams)
346 return 0;
346 return nullptr;
347347 if (FTy->getParamType(0) != Type::getInt8PtrTy(Callee->getContext()))
348 return 0;
348 return nullptr;
349349
350350 return CI;
351351 }
8787 bool MemoryDependenceAnalysis::runOnFunction(Function &) {
8888 AA = &getAnalysis<AliasAnalysis>();
8989 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
90 DL = DLP ? &DLP->getDataLayout() : 0;
90 DL = DLP ? &DLP->getDataLayout() : nullptr;
9191 DominatorTreeWrapperPass *DTWP =
9292 getAnalysisIfAvailable<DominatorTreeWrapperPass>();
93 DT = DTWP ? &DTWP->getDomTree() : 0;
93 DT = DTWP ? &DTWP->getDomTree() : nullptr;
9494 if (!PredCache)
9595 PredCache.reset(new PredIteratorCache());
9696 return false;
260260 const LoadInst *LI,
261261 const DataLayout *DL) {
262262 // If we have no target data, we can't do this.
263 if (DL == 0) return false;
263 if (!DL) return false;
264264
265265 // If we haven't already computed the base/offset of MemLoc, do so now.
266 if (MemLocBase == 0)
266 if (!MemLocBase)
267267 MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);
268268
269269 unsigned Size = MemoryDependenceAnalysis::
361361 BasicBlock::iterator ScanIt, BasicBlock *BB,
362362 Instruction *QueryInst) {
363363
364 const Value *MemLocBase = 0;
364 const Value *MemLocBase = nullptr;
365365 int64_t MemLocOffset = 0;
366366 unsigned Limit = BlockScanLimit;
367367 bool isInvariantLoad = false;
368368 if (isLoad && QueryInst) {
369369 LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
370 if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != 0)
370 if (LI && LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr)
371371 isInvariantLoad = true;
372372 }
373373
695695 if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
696696 --Entry;
697697
698 NonLocalDepEntry *ExistingResult = 0;
698 NonLocalDepEntry *ExistingResult = nullptr;
699699 if (Entry != Cache.begin()+NumSortedEntries &&
700700 Entry->getBB() == DirtyBB) {
701701 // If we already have an entry, and if it isn't already dirty, the block
806806 if (Entry != Cache->begin() && (Entry-1)->getBB() == BB)
807807 --Entry;
808808
809 NonLocalDepEntry *ExistingResult = 0;
809 NonLocalDepEntry *ExistingResult = nullptr;
810810 if (Entry != Cache->begin()+NumSortedEntries && Entry->getBB() == BB)
811811 ExistingResult = &*Entry;
812812
959959 if (CacheInfo->TBAATag != Loc.TBAATag) {
960960 if (CacheInfo->TBAATag) {
961961 CacheInfo->Pair = BBSkipFirstBlockPair();
962 CacheInfo->TBAATag = 0;
962 CacheInfo->TBAATag = nullptr;
963963 for (NonLocalDepInfo::iterator DI = CacheInfo->NonLocalDeps.begin(),
964964 DE = CacheInfo->NonLocalDeps.end(); DI != DE; ++DI)
965965 if (Instruction *Inst = DI->getResult().getInst())
11151115 SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
11161116 NumSortedEntries = Cache->size();
11171117 }
1118 Cache = 0;
1118 Cache = nullptr;
11191119
11201120 PredList.clear();
11211121 for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
11251125 // Get the PHI translated pointer in this predecessor. This can fail if
11261126 // not translatable, in which case the getAddr() returns null.
11271127 PHITransAddr &PredPointer = PredList.back().second;
1128 PredPointer.PHITranslateValue(BB, Pred, 0);
1128 PredPointer.PHITranslateValue(BB, Pred, nullptr);
11291129
11301130 Value *PredPtrVal = PredPointer.getAddr();
11311131
11741174 // predecessor, then we have to assume that the pointer is clobbered in
11751175 // that predecessor. We can still do PRE of the load, which would insert
11761176 // a computation of the pointer in this predecessor.
1177 if (PredPtrVal == 0)
1177 if (!PredPtrVal)
11781178 CanTranslate = false;
11791179
11801180 // FIXME: it is entirely possible that PHI translating will end up with
12231223 // for the given block. It assumes that we haven't modified any of
12241224 // our datastructures while processing the current block.
12251225
1226 if (Cache == 0) {
1226 if (!Cache) {
12271227 // Refresh the CacheInfo/Cache pointer if it got invalidated.
12281228 CacheInfo = &NonLocalPointerDeps[CacheKey];
12291229 Cache = &CacheInfo->NonLocalDeps;
12781278
12791279 for (unsigned i = 0, e = PInfo.size(); i != e; ++i) {
12801280 Instruction *Target = PInfo[i].getResult().getInst();
1281 if (Target == 0) continue; // Ignore non-local dep results.
1281 if (!Target) continue; // Ignore non-local dep results.
12821282 assert(Target->getParent() == PInfo[i].getBB());
12831283
12841284 // Eliminating the dirty entry from 'Cache', so update the reverse info.
3535 // Note: NoAA does not call InitializeAliasAnalysis because it's
3636 // special and does not support chaining.
3737 DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
38 DL = DLP ? &DLP->getDataLayout() : 0;
38 DL = DLP ? &DLP->getDataLayout() : nullptr;
3939 }
4040
4141 AliasResult alias(const Location &LocA, const Location &LocB) override {
4242
4343 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
4444 void PHITransAddr::dump() const {
45 if (Addr == 0) {
45 if (!Addr) {
4646 dbgs() << "PHITransAddr: null\n";
4747 return;
4848 }
5757 SmallVectorImpl<Instruction*> &InstInputs) {
5858 // If this is a non-instruction value, there is nothing to do.
5959 Instruction *I = dyn_cast<Instruction>(Expr);
60 if (I == 0) return true;
60 if (!I) return true;
6161
6262 // If it's an instruction, it is either in Tmp or its operands recursively
6363 // are.
8989 /// structure is valid, it returns true. If invalid, it prints errors and
9090 /// returns false.
9191 bool PHITransAddr::Verify() const {
92 if (Addr == 0) return true;
92 if (!Addr) return true;
9393
9494 SmallVector<Instruction*, 8> Tmp(InstInputs.begin(), InstInputs.end());
9595
115115 // If the input value is not an instruction, or if it is not defined in CurBB,
116116 // then we don't need to phi translate it.
117117 Instruction *Inst = dyn_cast<Instruction>(Addr);
118 return Inst == 0 || CanPHITrans(Inst);
118 return !Inst || CanPHITrans(Inst);
119119 }
120120
121121
122122 static void RemoveInstInputs(Value *V,
123123 SmallVectorImpl<Instruction*> &InstInputs) {
124124 Instruction *I = dyn_cast<Instruction>(V);
125 if (I == 0) return;
125 if (!I) return;
126126
127127 // If the instruction is in the InstInputs list, remove it.
128128 SmallVectorImpl<Instruction*>::iterator Entry =
146146 const DominatorTree *DT) {
147147 // If this is a non-instruction value, it can't require PHI translation.
148148 Instruction *Inst = dyn_cast<Instruction>(V);
149 if (Inst == 0) return V;
149 if (!Inst) return V;
150150
151151 // Determine whether 'Inst' is an input to our PHI translatable expression.
152152 bool isInput = std::count(InstInputs.begin(), InstInputs.end(), Inst);
172172 // If this is a non-phi value, and it is analyzable, we can incorporate it
173173 // into the expression by making all instruction operands be inputs.
174174 if (!CanPHITrans(Inst))
175 return 0;
175 return nullptr;
176176
177177 // All instruction operands are now inputs (and of course, they may also be
178178 // defined in this block, so they may need to be phi translated themselves.
186186 // operands need to be phi translated, and if so, reconstruct it.
187187
188188 if (CastInst *Cast = dyn_cast<CastInst>(Inst)) {
189 if (!isSafeToSpeculativelyExecute(Cast)) return 0;
189 if (!isSafeToSpeculativelyExecute(Cast)) return nullptr;
190190 Value *PHIIn = PHITranslateSubExpr(Cast->getOperand(0), CurBB, PredBB, DT);
191 if (PHIIn == 0) return 0;
191 if (!PHIIn) return nullptr;
192192 if (PHIIn == Cast->getOperand(0))
193193 return Cast;
194194
208208 (!DT || DT->dominates(CastI->getParent(), PredBB)))
209209 return CastI;
210210 }
211 return 0;
211 return nullptr;
212212 }
213213
214214 // Handle getelementptr with at least one PHI translatable operand.
217217 bool AnyChanged = false;
218218 for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
219219 Value *GEPOp = PHITranslateSubExpr(GEP->getOperand(i), CurBB, PredBB, DT);
220 if (GEPOp == 0) return 0;
220 if (!GEPOp) return nullptr;
221221
222222 AnyChanged |= GEPOp != GEP->getOperand(i);
223223 GEPOps.push_back(GEPOp);
252252 return GEPI;
253253 }
254254 }
255 return 0;
255 return nullptr;
256256 }
257257
258258 // Handle add with a constant RHS.
264264 bool isNUW = cast<BinaryOperator>(Inst)->hasNoUnsignedWrap();
265265
266266 Value *LHS = PHITranslateSubExpr(Inst->getOperand(0), CurBB, PredBB, DT);
267 if (LHS == 0) return 0;
267 if (!LHS) return nullptr;
268268
269269 // If the PHI translated LHS is an add of a constant, fold the immediates.
270270 if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(LHS))
303303 return BO;
304304 }
305305
306 return 0;
306 return nullptr;
307307 }
308308
309309 // Otherwise, we failed.
310 return 0;
310 return nullptr;
311311 }
312312
313313
325325 // Make sure the value is live in the predecessor.
326326 if (Instruction *Inst = dyn_cast_or_null<Instruction>(Addr))
327327 if (!DT->dominates(Inst->getParent(), PredBB))
328 Addr = 0;
329 }
330
331 return Addr == 0;
328 Addr = nullptr;
329 }
330
331 return Addr == nullptr;
332332 }
333333
334334 /// PHITranslateWithInsertion - PHI translate this value into the specified
353353 // If not, destroy any intermediate instructions inserted.
354354 while (NewInsts.size() != NISize)
355355 NewInsts.pop_back_val()->eraseFromParent();
356 return 0;
356 return nullptr;
357357 }
358358
359359
378378
379379 // Handle cast of PHI translatable value.
380380 if (CastInst *Cast = dyn_cast<CastInst>(Inst)) {
381 if (!isSafeToSpeculativelyExecute(Cast)) return 0;
381 if (!isSafeToSpeculativelyExecute(Cast)) return nullptr;
382382 Value *OpVal = InsertPHITranslatedSubExpr(Cast->getOperand(0),
383383 CurBB, PredBB, DT, NewInsts);
384 if (OpVal == 0) return 0;
384 if (!OpVal) return nullptr;
385385
386386 // Otherwise insert a cast at the end of PredBB.
387387 CastInst *New = CastInst::Create(Cast->getOpcode(),
399399 for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i) {
400400 Value *OpVal = InsertPHITranslatedSubExpr(GEP->getOperand(i),
401401 CurBB, PredBB, DT, NewInsts);
402 if (OpVal == 0) return 0;
402 if (!OpVal) return nullptr;
403403 GEPOps.push_back(OpVal);
404404 }
405405
435435 }
436436 #endif
437437
438 return 0;
439 }
438 return nullptr;
439 }