llvm.org GIT mirror llvm / e0f5ddb
MemorySSA: Move to Analysis, from Transforms/Utils. It's used as Analysis, it has Analysis passes, and once NewGVN is made an Analysis, this removes the cross dependency from Analysis to Transform/Utils. NFC. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@299980 91177308-0d34-0410-b5e6-96231b3b80d8 Daniel Berlin 2 years ago
65 changed file(s) with 6248 addition(s) and 6248 deletion(s).
0 //===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// \brief This file exposes an interface to building/using memory SSA to
11 /// walk memory instructions using a use/def graph.
12 ///
13 /// The MemorySSA class builds an SSA form that links together memory access
14 /// instructions such as loads, stores, atomics, and calls. Additionally, it
15 /// does a trivial form of "heap versioning": every time the memory state
16 /// changes in the program, we generate a new heap version. It generates
17 /// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
18 ///
19 /// As a trivial example,
20 /// define i32 @main() #0 {
21 /// entry:
22 /// %call = call noalias i8* @_Znwm(i64 4) #2
23 /// %0 = bitcast i8* %call to i32*
24 /// %call1 = call noalias i8* @_Znwm(i64 4) #2
25 /// %1 = bitcast i8* %call1 to i32*
26 /// store i32 5, i32* %0, align 4
27 /// store i32 7, i32* %1, align 4
28 /// %2 = load i32* %0, align 4
29 /// %3 = load i32* %1, align 4
30 /// %add = add nsw i32 %2, %3
31 /// ret i32 %add
32 /// }
33 ///
34 /// Will become
35 /// define i32 @main() #0 {
36 /// entry:
37 /// ; 1 = MemoryDef(0)
38 /// %call = call noalias i8* @_Znwm(i64 4) #3
39 /// %2 = bitcast i8* %call to i32*
40 /// ; 2 = MemoryDef(1)
41 /// %call1 = call noalias i8* @_Znwm(i64 4) #3
42 /// %4 = bitcast i8* %call1 to i32*
43 /// ; 3 = MemoryDef(2)
44 /// store i32 5, i32* %2, align 4
45 /// ; 4 = MemoryDef(3)
46 /// store i32 7, i32* %4, align 4
47 /// ; MemoryUse(3)
48 /// %7 = load i32* %2, align 4
49 /// ; MemoryUse(4)
50 /// %8 = load i32* %4, align 4
51 /// %add = add nsw i32 %7, %8
52 /// ret i32 %add
53 /// }
54 ///
55 /// Given this form, all the stores that could ever affect the load at %8 can be
56 /// gotten by using the MemoryUse associated with it, and walking from use to
57 /// def until you hit the top of the function.
58 ///
59 /// Each def also has a list of users associated with it, so you can walk from
60 /// both def to users, and users to defs. Note that we disambiguate MemoryUses,
61 /// but not the RHS of MemoryDefs. You can see this above at %7, which would
62 /// otherwise be a MemoryUse(4). Being disambiguated means that for a given
63 /// store, all the MemoryUses on its use lists are may-aliases of that store
64 /// (but the MemoryDefs on its use list may not be).
65 ///
66 /// MemoryDefs are not disambiguated because it would require multiple reaching
67 /// definitions, which would require multiple phis, and multiple MemoryAccesses
68 /// per instruction.
69 //===----------------------------------------------------------------------===//
70
71 #ifndef LLVM_ANALYSIS_MEMORYSSA_H
72 #define LLVM_ANALYSIS_MEMORYSSA_H
73
74 #include "llvm/ADT/DenseMap.h"
75 #include "llvm/ADT/GraphTraits.h"
76 #include "llvm/ADT/SmallPtrSet.h"
77 #include "llvm/ADT/SmallVector.h"
78 #include "llvm/ADT/ilist.h"
79 #include "llvm/ADT/ilist_node.h"
80 #include "llvm/ADT/iterator.h"
81 #include "llvm/ADT/iterator_range.h"
82 #include "llvm/Analysis/AliasAnalysis.h"
83 #include "llvm/Analysis/MemoryLocation.h"
84 #include "llvm/Analysis/PHITransAddr.h"
85 #include "llvm/IR/BasicBlock.h"
86 #include "llvm/IR/Dominators.h"
87 #include "llvm/IR/Module.h"
88 #include "llvm/IR/OperandTraits.h"
89 #include "llvm/IR/Type.h"
90 #include "llvm/IR/Use.h"
91 #include "llvm/IR/User.h"
92 #include "llvm/IR/Value.h"
93 #include "llvm/Pass.h"
94 #include "llvm/Support/Casting.h"
95 #include "llvm/Support/ErrorHandling.h"
96 #include <algorithm>
97 #include <cassert>
98 #include <cstddef>
99 #include <iterator>
100 #include <memory>
101 #include <utility>
102
103 namespace llvm {
104
105 class Function;
106 class Instruction;
107 class MemoryAccess;
108 class LLVMContext;
109 class raw_ostream;
110 namespace MSSAHelpers {
111 struct AllAccessTag {};
112 struct DefsOnlyTag {};
113 }
114
115 enum {
116 // Used to signify what the default invalid ID is for MemoryAccess's
117 // getID()
118 INVALID_MEMORYACCESS_ID = 0
119 };
120
121 template <class T> class memoryaccess_def_iterator_base;
122 using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
123 using const_memoryaccess_def_iterator =
124 memoryaccess_def_iterator_base<const MemoryAccess>;
125
126 // \brief The base for all memory accesses. All memory accesses in a block are
127 // linked together using an intrusive list.
128 class MemoryAccess
129 : public User,
130 public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>,
131 public ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>> {
132 public:
133 using AllAccessType =
134 ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
135 using DefsOnlyType =
136 ilist_node<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
137
138 // Methods for support type inquiry through isa, cast, and
139 // dyn_cast
140 static inline bool classof(const Value *V) {
141 unsigned ID = V->getValueID();
142 return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
143 }
144
145 MemoryAccess(const MemoryAccess &) = delete;
146 MemoryAccess &operator=(const MemoryAccess &) = delete;
147 ~MemoryAccess() override;
148
149 void *operator new(size_t, unsigned) = delete;
150 void *operator new(size_t) = delete;
151
152 BasicBlock *getBlock() const { return Block; }
153
154 virtual void print(raw_ostream &OS) const = 0;
155 virtual void dump() const;
156
157 /// \brief The user iterators for a memory access
158 typedef user_iterator iterator;
159 typedef const_user_iterator const_iterator;
160
161 /// \brief This iterator walks over all of the defs in a given
162 /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
163 /// MemoryUse/MemoryDef, this walks the defining access.
164 memoryaccess_def_iterator defs_begin();
165 const_memoryaccess_def_iterator defs_begin() const;
166 memoryaccess_def_iterator defs_end();
167 const_memoryaccess_def_iterator defs_end() const;
168
169 /// \brief Get the iterators for the all access list and the defs only list
170 /// We default to the all access list.
171 AllAccessType::self_iterator getIterator() {
172 return this->AllAccessType::getIterator();
173 }
174 AllAccessType::const_self_iterator getIterator() const {
175 return this->AllAccessType::getIterator();
176 }
177 AllAccessType::reverse_self_iterator getReverseIterator() {
178 return this->AllAccessType::getReverseIterator();
179 }
180 AllAccessType::const_reverse_self_iterator getReverseIterator() const {
181 return this->AllAccessType::getReverseIterator();
182 }
183 DefsOnlyType::self_iterator getDefsIterator() {
184 return this->DefsOnlyType::getIterator();
185 }
186 DefsOnlyType::const_self_iterator getDefsIterator() const {
187 return this->DefsOnlyType::getIterator();
188 }
189 DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
190 return this->DefsOnlyType::getReverseIterator();
191 }
192 DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
193 return this->DefsOnlyType::getReverseIterator();
194 }
195
196 protected:
197 friend class MemorySSA;
198 friend class MemoryUseOrDef;
199 friend class MemoryUse;
200 friend class MemoryDef;
201 friend class MemoryPhi;
202
203 /// \brief Used by MemorySSA to change the block of a MemoryAccess when it is
204 /// moved.
205 void setBlock(BasicBlock *BB) { Block = BB; }
206
207 /// \brief Used for debugging and tracking things about MemoryAccesses.
208 /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
209 virtual unsigned getID() const = 0;
210
211 MemoryAccess(LLVMContext &C, unsigned Vty, BasicBlock *BB,
212 unsigned NumOperands)
213 : User(Type::getVoidTy(C), Vty, nullptr, NumOperands), Block(BB) {}
214
215 private:
216 BasicBlock *Block;
217 };
218
219 inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
220 MA.print(OS);
221 return OS;
222 }
223
224 /// \brief Class that has the common methods + fields of memory uses/defs. It's
225 /// a little awkward to have, but there are many cases where we want either a
226 /// use or def, and there are many cases where uses are needed (defs aren't
227 /// acceptable), and vice-versa.
228 ///
229 /// This class should never be instantiated directly; make a MemoryUse or
230 /// MemoryDef instead.
231 class MemoryUseOrDef : public MemoryAccess {
232 public:
233 void *operator new(size_t, unsigned) = delete;
234 void *operator new(size_t) = delete;
235
236 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
237
238 /// \brief Get the instruction that this MemoryUse represents.
239 Instruction *getMemoryInst() const { return MemoryInst; }
240
241 /// \brief Get the access that produces the memory state used by this Use.
242 MemoryAccess *getDefiningAccess() const { return getOperand(0); }
243
244 static inline bool classof(const Value *MA) {
245 return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
246 }
247
248 // Sadly, these have to be public because they are needed in some of the
249 // iterators.
250 virtual bool isOptimized() const = 0;
251 virtual MemoryAccess *getOptimized() const = 0;
252 virtual void setOptimized(MemoryAccess *) = 0;
253
254 /// \brief Reset the ID of what this MemoryUse was optimized to, causing it to
255 /// be rewalked by the walker if necessary.
256 /// This really should only be called by tests.
257 virtual void resetOptimized() = 0;
258
259 protected:
260 friend class MemorySSA;
261 friend class MemorySSAUpdater;
262 MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
263 Instruction *MI, BasicBlock *BB)
264 : MemoryAccess(C, Vty, BB, 1), MemoryInst(MI) {
265 setDefiningAccess(DMA);
266 }
267 void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
268 if (!Optimized) {
269 setOperand(0, DMA);
270 return;
271 }
272 setOptimized(DMA);
273 }
274
275 private:
276 Instruction *MemoryInst;
277 };
278
279 template <>
280 struct OperandTraits<MemoryUseOrDef>
281 : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
282 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
283
284 /// \brief Represents read-only accesses to memory
285 ///
286 /// In particular, the set of Instructions that will be represented by
287 /// MemoryUse's is exactly the set of Instructions for which
288 /// AliasAnalysis::getModRefInfo returns "Ref".
289 class MemoryUse final : public MemoryUseOrDef {
290 public:
291 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
292
293 MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
294 : MemoryUseOrDef(C, DMA, MemoryUseVal, MI, BB), OptimizedID(0) {}
295
296 // allocate space for exactly one operand
297 void *operator new(size_t s) { return User::operator new(s, 1); }
298 void *operator new(size_t, unsigned) = delete;
299
300 static inline bool classof(const Value *MA) {
301 return MA->getValueID() == MemoryUseVal;
302 }
303
304 void print(raw_ostream &OS) const override;
305
306 virtual void setOptimized(MemoryAccess *DMA) override {
307 OptimizedID = DMA->getID();
308 setOperand(0, DMA);
309 }
310
311 virtual bool isOptimized() const override {
312 return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
313 }
314
315 virtual MemoryAccess *getOptimized() const override {
316 return getDefiningAccess();
317 }
318 virtual void resetOptimized() override {
319 OptimizedID = INVALID_MEMORYACCESS_ID;
320 }
321
322 protected:
323 friend class MemorySSA;
324
325 unsigned getID() const override {
326 llvm_unreachable("MemoryUses do not have IDs");
327 }
328
329 private:
330 unsigned int OptimizedID;
331 };
332
333 template <>
334 struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
335 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
336
337 /// \brief Represents a read-write access to memory, whether it is a must-alias,
338 /// or a may-alias.
339 ///
340 /// In particular, the set of Instructions that will be represented by
341 /// MemoryDef's is exactly the set of Instructions for which
342 /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
343 /// Note that, in order to provide def-def chains, all defs also have a use
344 /// associated with them. This use points to the nearest reaching
345 /// MemoryDef/MemoryPhi.
346 class MemoryDef final : public MemoryUseOrDef {
347 public:
348 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
349
350 MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
351 unsigned Ver)
352 : MemoryUseOrDef(C, DMA, MemoryDefVal, MI, BB), ID(Ver),
353 Optimized(nullptr), OptimizedID(INVALID_MEMORYACCESS_ID) {}
354
355 // allocate space for exactly one operand
356 void *operator new(size_t s) { return User::operator new(s, 1); }
357 void *operator new(size_t, unsigned) = delete;
358
359 static inline bool classof(const Value *MA) {
360 return MA->getValueID() == MemoryDefVal;
361 }
362
363 virtual void setOptimized(MemoryAccess *MA) override {
364 Optimized = MA;
365 OptimizedID = getDefiningAccess()->getID();
366 }
367 virtual MemoryAccess *getOptimized() const override { return Optimized; }
368 virtual bool isOptimized() const override {
369 return getOptimized() && getDefiningAccess() &&
370 OptimizedID == getDefiningAccess()->getID();
371 }
372 virtual void resetOptimized() override {
373 OptimizedID = INVALID_MEMORYACCESS_ID;
374 }
375
376 void print(raw_ostream &OS) const override;
377
378 protected:
379 friend class MemorySSA;
380
381 unsigned getID() const override { return ID; }
382
383 private:
384 const unsigned ID;
385 MemoryAccess *Optimized;
386 unsigned int OptimizedID;
387 };
388
389 template <>
390 struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
391 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
392
393 /// \brief Represents phi nodes for memory accesses.
394 ///
395 /// These have the same semantics as regular phi nodes, with the exception that
396 /// only one phi will ever exist in a given basic block.
397 /// Guaranteeing one phi per block means guaranteeing there is only ever one
398 /// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
399 /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
400 /// a MemoryPhi's operands.
401 /// That is, given
402 /// if (a) {
403 /// store %a
404 /// store %b
405 /// }
406 /// it *must* be transformed into
407 /// if (a) {
408 /// 1 = MemoryDef(liveOnEntry)
409 /// store %a
410 /// 2 = MemoryDef(1)
411 /// store %b
412 /// }
413 /// and *not*
414 /// if (a) {
415 /// 1 = MemoryDef(liveOnEntry)
416 /// store %a
417 /// 2 = MemoryDef(liveOnEntry)
418 /// store %b
419 /// }
420 /// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
421 /// end of the branch, and if there are not two phi nodes, one will be
422 /// disconnected completely from the SSA graph below that point.
423 /// Because MemoryUse's do not generate new definitions, they do not have this
424 /// issue.
425 class MemoryPhi final : public MemoryAccess {
426 // allocate space for exactly zero operands
427 void *operator new(size_t s) { return User::operator new(s); }
428
429 public:
430 /// Provide fast operand accessors
431 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
432
433 MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
434 : MemoryAccess(C, MemoryPhiVal, BB, 0), ID(Ver), ReservedSpace(NumPreds) {
435 allocHungoffUses(ReservedSpace);
436 }
437
438 void *operator new(size_t, unsigned) = delete;
439
440 // Block iterator interface. This provides access to the list of incoming
441 // basic blocks, which parallels the list of incoming values.
442 typedef BasicBlock **block_iterator;
443 typedef BasicBlock *const *const_block_iterator;
444
445 block_iterator block_begin() {
446 auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
447 return reinterpret_cast<block_iterator>(Ref + 1);
448 }
449
450 const_block_iterator block_begin() const {
451 const auto *Ref =
452 reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
453 return reinterpret_cast<const_block_iterator>(Ref + 1);
454 }
455
456 block_iterator block_end() { return block_begin() + getNumOperands(); }
457
458 const_block_iterator block_end() const {
459 return block_begin() + getNumOperands();
460 }
461
462 iterator_range<block_iterator> blocks() {
463 return make_range(block_begin(), block_end());
464 }
465
466 iterator_range<const_block_iterator> blocks() const {
467 return make_range(block_begin(), block_end());
468 }
469
470 op_range incoming_values() { return operands(); }
471
472 const_op_range incoming_values() const { return operands(); }
473
474 /// \brief Return the number of incoming edges
475 unsigned getNumIncomingValues() const { return getNumOperands(); }
476
477 /// \brief Return incoming value number x
478 MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
479 void setIncomingValue(unsigned I, MemoryAccess *V) {
480 assert(V && "PHI node got a null value!");
481 setOperand(I, V);
482 }
483 static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
484 static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
485
486 /// \brief Return incoming basic block number @p I.
487 BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
488
489 /// \brief Return incoming basic block corresponding
490 /// to an operand of the PHI.
491 BasicBlock *getIncomingBlock(const Use &U) const {
492 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
493 return getIncomingBlock(unsigned(&U - op_begin()));
494 }
495
496 /// \brief Return incoming basic block corresponding
497 /// to value use iterator.
498 BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
499 return getIncomingBlock(I.getUse());
500 }
501
502 void setIncomingBlock(unsigned I, BasicBlock *BB) {
503 assert(BB && "PHI node got a null basic block!");
504 block_begin()[I] = BB;
505 }
506
507 /// \brief Add an incoming value to the end of the PHI list
508 void addIncoming(MemoryAccess *V, BasicBlock *BB) {
509 if (getNumOperands() == ReservedSpace)
510 growOperands(); // Get more space!
511 // Initialize some new operands.
512 setNumHungOffUseOperands(getNumOperands() + 1);
513 setIncomingValue(getNumOperands() - 1, V);
514 setIncomingBlock(getNumOperands() - 1, BB);
515 }
516
517 /// \brief Return the first index of the specified basic
518 /// block in the value list for this PHI. Returns -1 if no instance.
519 int getBasicBlockIndex(const BasicBlock *BB) const {
520 for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
521 if (block_begin()[I] == BB)
522 return I;
523 return -1;
524 }
525
526 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
527 int Idx = getBasicBlockIndex(BB);
528 assert(Idx >= 0 && "Invalid basic block argument!");
529 return getIncomingValue(Idx);
530 }
531
532 static inline bool classof(const Value *V) {
533 return V->getValueID() == MemoryPhiVal;
534 }
535
536 void print(raw_ostream &OS) const override;
537
538 protected:
539 friend class MemorySSA;
540
541 /// \brief this is more complicated than the generic
542 /// User::allocHungoffUses, because we have to allocate Uses for the incoming
543 /// values and pointers to the incoming blocks, all in one allocation.
544 void allocHungoffUses(unsigned N) {
545 User::allocHungoffUses(N, /* IsPhi */ true);
546 }
547
548 unsigned getID() const final { return ID; }
549
550 private:
551 // For debugging only
552 const unsigned ID;
553 unsigned ReservedSpace;
554
555 /// \brief This grows the operand list in response to a push_back style of
556 /// operation. This grows the number of ops by 1.5 times.
557 void growOperands() {
558 unsigned E = getNumOperands();
559 // 2 op PHI nodes are VERY common, so reserve at least enough for that.
560 ReservedSpace = std::max(E + E / 2, 2u);
561 growHungoffUses(ReservedSpace, /* IsPhi */ true);
562 }
563 };
564
565 template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
566 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
567
568 class MemorySSAWalker;
569
570 /// \brief Encapsulates MemorySSA, including all data associated with memory
571 /// accesses.
572 class MemorySSA {
573 public:
574 MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
575 ~MemorySSA();
576
577 MemorySSAWalker *getWalker();
578
579 /// \brief Given a memory Mod/Ref'ing instruction, get the MemorySSA
580 /// access associated with it. If passed a basic block, gets the memory phi
581 /// node that exists for that block, if there is one. Otherwise, this will get
582 /// a MemoryUseOrDef.
583 MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
584 MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;
585
586 void dump() const;
587 void print(raw_ostream &) const;
588
589 /// \brief Return true if \p MA represents the live on entry value
590 ///
591 /// Loads and stores from pointer arguments and other global values may be
592 /// defined by memory operations that do not occur in the current function, so
593 /// they may be live on entry to the function. MemorySSA represents such
594 /// memory state by the live on entry definition, which is guaranteed to occur
595 /// before any other memory access in the function.
596 inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
597 return MA == LiveOnEntryDef.get();
598 }
599
600 inline MemoryAccess *getLiveOnEntryDef() const {
601 return LiveOnEntryDef.get();
602 }
603
604 // Sadly, iplists, by default, own and delete pointers added to the
605 // list. It's not currently possible to have two iplists for the same type,
606 // where one owns the pointers, and one does not. This is because the traits
607 // are per-type, not per-tag. If this ever changes, we should make the
608 // DefList an iplist.
609 using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
610 using DefsList =
611 simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
612
613 /// \brief Return the list of MemoryAccess's for a given basic block.
614 ///
615 /// This list is not modifiable by the user.
616 const AccessList *getBlockAccesses(const BasicBlock *BB) const {
617 return getWritableBlockAccesses(BB);
618 }
619
620 /// \brief Return the list of MemoryDef's and MemoryPhi's for a given basic
621 /// block.
622 ///
623 /// This list is not modifiable by the user.
624 const DefsList *getBlockDefs(const BasicBlock *BB) const {
625 return getWritableBlockDefs(BB);
626 }
627
628 /// \brief Given two memory accesses in the same basic block, determine
629 /// whether MemoryAccess \p A dominates MemoryAccess \p B.
630 bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
631
632 /// \brief Given two memory accesses in potentially different blocks,
633 /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
634 bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
635
636 /// \brief Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
637 /// dominates Use \p B.
638 bool dominates(const MemoryAccess *A, const Use &B) const;
639
640 /// \brief Verify that MemorySSA is self-consistent (i.e., definitions dominate
641 /// all uses, uses appear in the right places). This is used by unit tests.
642 void verifyMemorySSA() const;
643
644 /// Used in various insertion functions to specify whether we are talking
645 /// about the beginning or end of a block.
646 enum InsertionPlace { Beginning, End };
647
648 protected:
649 // Used by Memory SSA annotator, dumpers, and wrapper pass
650 friend class MemorySSAAnnotatedWriter;
651 friend class MemorySSAPrinterLegacyPass;
652 friend class MemorySSAUpdater;
653
654 void verifyDefUses(Function &F) const;
655 void verifyDomination(Function &F) const;
656 void verifyOrdering(Function &F) const;
657
658 // This is used by the use optimizer and updater.
659 AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
660 auto It = PerBlockAccesses.find(BB);
661 return It == PerBlockAccesses.end() ? nullptr : It->second.get();
662 }
663
664 // This is used by the use optimizer and updater.
665 DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
666 auto It = PerBlockDefs.find(BB);
667 return It == PerBlockDefs.end() ? nullptr : It->second.get();
668 }
669
670 // These are used by the updater to perform various internal MemorySSA
671 // machinations. They do not always leave the IR in a correct state, and
672 // rely on the updater to fix up what they break, so they are not public.
673
674 void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
675 void moveTo(MemoryUseOrDef *What, BasicBlock *BB, InsertionPlace Point);
676 // Rename the dominator tree branch rooted at BB.
677 void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
678 SmallPtrSetImpl<BasicBlock *> &Visited) {
679 renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
680 }
681 void removeFromLookups(MemoryAccess *);
682 void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
683 void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
684 InsertionPlace);
685 void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
686 AccessList::iterator);
687 MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *);
688
689 private:
690 class CachingWalker;
691 class OptimizeUses;
692
693 CachingWalker *getWalkerImpl();
694 void buildMemorySSA();
695 void optimizeUses();
696
697 void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
698 using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
699 using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;
700
701 void
702 determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
703 void markUnreachableAsLiveOnEntry(BasicBlock *BB);
704 bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
705 MemoryPhi *createMemoryPhi(BasicBlock *BB);
706 MemoryUseOrDef *createNewAccess(Instruction *);
707 MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
708 void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &,
709 const DenseMap<const BasicBlock *, unsigned int> &);
710 MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
711 void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
712 void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
713 SmallPtrSetImpl<BasicBlock *> &Visited,
714 bool SkipVisited = false, bool RenameAllUses = false);
715 AccessList *getOrCreateAccessList(const BasicBlock *);
716 DefsList *getOrCreateDefsList(const BasicBlock *);
717 void renumberBlock(const BasicBlock *) const;
718 AliasAnalysis *AA;
719 DominatorTree *DT;
720 Function &F;
721
722 // Memory SSA mappings
723 DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
724 // These two mappings contain the main block to access/def mappings for
725 // MemorySSA. The list contained in PerBlockAccesses really owns all the
726 // MemoryAccesses.
727 // Both maps maintain the invariant that if a block is found in them, the
728 // corresponding list is not empty, and if a block is not found in them, the
729 // corresponding list is empty.
730 AccessMap PerBlockAccesses;
731 DefsMap PerBlockDefs;
732 std::unique_ptr<MemoryAccess> LiveOnEntryDef;
733
734 // Domination mappings
735 // Note that the numbering is local to a block, even though the map is
736 // global.
737 mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
738 mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
739
740 // Memory SSA building info
741 std::unique_ptr<CachingWalker> Walker;
742 unsigned NextID;
743 };
744
745 // Internal MemorySSA utils, for use by MemorySSA classes and walkers
746 class MemorySSAUtil {
747 protected:
748 friend class MemorySSAWalker;
749 friend class GVNHoist;
750 // This function should not be used by new passes.
751 static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
752 AliasAnalysis &AA);
753 };
754
755 // This pass does eager building and then printing of MemorySSA. It is used by
756 // the tests to be able to build, dump, and verify Memory SSA.
757 class MemorySSAPrinterLegacyPass : public FunctionPass {
758 public:
759 MemorySSAPrinterLegacyPass();
760
761 bool runOnFunction(Function &) override;
762 void getAnalysisUsage(AnalysisUsage &AU) const override;
763
764 static char ID;
765 };
766
767 /// An analysis that produces \c MemorySSA for a function.
768 ///
769 class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
770 friend AnalysisInfoMixin<MemorySSAAnalysis>;
771
772 static AnalysisKey Key;
773
774 public:
775 // Wrap MemorySSA result to ensure address stability of internal MemorySSA
776 // pointers after construction. Use a wrapper class instead of plain
777 // unique_ptr to avoid build breakage on MSVC.
778 struct Result {
779 Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
780 MemorySSA &getMSSA() { return *MSSA.get(); }
781
782 std::unique_ptr<MemorySSA> MSSA;
783 };
784
785 Result run(Function &F, FunctionAnalysisManager &AM);
786 };
787
788 /// \brief Printer pass for \c MemorySSA.
789 class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
790 raw_ostream &OS;
791
792 public:
793 explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
794
795 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
796 };
797
798 /// \brief Verifier pass for \c MemorySSA.
799 struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
800 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
801 };
802
803 /// \brief Legacy analysis pass which computes \c MemorySSA.
804 class MemorySSAWrapperPass : public FunctionPass {
805 public:
806 MemorySSAWrapperPass();
807
808 static char ID;
809
810 bool runOnFunction(Function &) override;
811 void releaseMemory() override;
812 MemorySSA &getMSSA() { return *MSSA; }
813 const MemorySSA &getMSSA() const { return *MSSA; }
814
815 void getAnalysisUsage(AnalysisUsage &AU) const override;
816
817 void verifyAnalysis() const override;
818 void print(raw_ostream &OS, const Module *M = nullptr) const override;
819
820 private:
821 std::unique_ptr<MemorySSA> MSSA;
822 };
823
824 /// \brief This is the generic walker interface for walkers of MemorySSA.
825 /// Walkers are used to be able to further disambiguate the def-use chains
826 /// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
827 /// you.
828 /// In particular, while the def-use chains provide basic information, and are
829 /// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
830 /// MemoryUse as AliasAnalysis considers it, a user may want better or other
831 /// information. In particular, they may want to use SCEV info to further
832 /// disambiguate memory accesses, or they may want the nearest dominating
833 /// may-aliasing MemoryDef for a call or a store. This API enables a
834 /// standardized interface to getting and using that info.
835 class MemorySSAWalker {
836 public:
837 MemorySSAWalker(MemorySSA *);
838 virtual ~MemorySSAWalker() = default;
839
840 using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
841
842 /// \brief Given a memory Mod/Ref/ModRef'ing instruction, calling this
843 /// will give you the nearest dominating MemoryAccess that Mod's the location
844 /// the instruction accesses (by skipping any def which AA can prove does not
845 /// alias the location(s) accessed by the instruction given).
846 ///
847 /// Note that this will return a single access, and it must dominate the
848 /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
849 /// this will return the MemoryPhi, not the operand. This means that
850 /// given:
851 /// if (a) {
852 /// 1 = MemoryDef(liveOnEntry)
853 /// store %a
854 /// } else {
855 /// 2 = MemoryDef(liveOnEntry)
856 /// store %b
857 /// }
858 /// 3 = MemoryPhi(2, 1)
859 /// MemoryUse(3)
860 /// load %a
861 ///
862 /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
863 /// in the if (a) branch.
864 MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
865 MemoryAccess *MA = MSSA->getMemoryAccess(I);
866 assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
867 return getClobberingMemoryAccess(MA);
868 }
869
870 /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
871 /// but takes a MemoryAccess instead of an Instruction.
872 virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
873
874 /// \brief Given a potentially clobbering memory access and a new location,
875 /// calling this will give you the nearest dominating clobbering MemoryAccess
876 /// (by skipping non-aliasing def links).
877 ///
878 /// This version of the function is mainly used to disambiguate phi translated
879 /// pointers, where the value of a pointer may have changed from the initial
880 /// memory access. Note that this expects to be handed either a MemoryUse,
881 /// or an already potentially clobbering access. Unlike the above API, if
882 /// given a MemoryDef that clobbers the pointer as the starting access, it
883 /// will return that MemoryDef, whereas the above would return the clobber
884 /// starting from the use side of the memory def.
885 virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
886 const MemoryLocation &) = 0;
887
888 /// \brief Given a memory access, invalidate anything this walker knows about
889 /// that access.
890 /// This API is used by walkers that store information to perform basic cache
891 /// invalidation. This will be called by MemorySSA at appropriate times for
892 /// the walker it uses or returns.
893 virtual void invalidateInfo(MemoryAccess *) {}
894
895 virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }
896
897 protected:
898 friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
899 // constructor.
900 MemorySSA *MSSA;
901 };
902
903 /// \brief A MemorySSAWalker that does no alias queries, or anything else. It
904 /// simply returns the links as they were constructed by the builder.
905 class DoNothingMemorySSAWalker final : public MemorySSAWalker {
906 public:
907 // Keep the overrides below from hiding the Instruction overload of
908 // getClobberingMemoryAccess.
909 using MemorySSAWalker::getClobberingMemoryAccess;
910
911 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
912 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
913 const MemoryLocation &) override;
914 };
915
916 using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
917 using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
918
919 /// \brief Iterator base class used to implement const and non-const iterators
920 /// over the defining accesses of a MemoryAccess.
921 template <class T>
922 class memoryaccess_def_iterator_base
923 : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
924 std::forward_iterator_tag, T, ptrdiff_t, T *,
925 T *> {
926 using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
927
928 public:
929 memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
930 memoryaccess_def_iterator_base() = default;
931
932 bool operator==(const memoryaccess_def_iterator_base &Other) const {
933 return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
934 }
935
936 // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
937 // block from the operand in constant time (In a PHINode, the uselist has
938 // both, so it's just subtraction). We provide it as part of the
939 // iterator to avoid callers having to linear walk to get the block.
940 // If the operation becomes constant time on MemoryPHI's, this bit of
941 // abstraction breaking should be removed.
942 BasicBlock *getPhiArgBlock() const {
943 MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
944 assert(MP && "Tried to get phi arg block when not iterating over a PHI");
945 return MP->getIncomingBlock(ArgNo);
946 }
947 typename BaseT::iterator::pointer operator*() const {
948 assert(Access && "Tried to access past the end of our iterator");
949 // Go to the first argument for phis, and the defining access for everything
950 // else.
951 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
952 return MP->getIncomingValue(ArgNo);
953 return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
954 }
955 using BaseT::operator++;
956 memoryaccess_def_iterator &operator++() {
957 assert(Access && "Hit end of iterator");
958 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
959 if (++ArgNo >= MP->getNumIncomingValues()) {
960 ArgNo = 0;
961 Access = nullptr;
962 }
963 } else {
964 Access = nullptr;
965 }
966 return *this;
967 }
968
969 private:
970 T *Access = nullptr;
971 unsigned ArgNo = 0;
972 };
973
974 inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
975 return memoryaccess_def_iterator(this);
976 }
977
978 inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
979 return const_memoryaccess_def_iterator(this);
980 }
981
982 inline memoryaccess_def_iterator MemoryAccess::defs_end() {
983 return memoryaccess_def_iterator();
984 }
985
986 inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
987 return const_memoryaccess_def_iterator();
988 }
989
990 /// \brief GraphTraits for a MemoryAccess, which walks defs in the normal case,
991 /// and uses in the inverse case.
992 template <> struct GraphTraits<MemoryAccess *> {
993 using NodeRef = MemoryAccess *;
994 using ChildIteratorType = memoryaccess_def_iterator;
995
996 static NodeRef getEntryNode(NodeRef N) { return N; }
997 static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
998 static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
999 };
1000
1001 template <> struct GraphTraits<Inverse<MemoryAccess *>> {
1002 using NodeRef = MemoryAccess *;
1003 using ChildIteratorType = MemoryAccess::iterator;
1004
1005 static NodeRef getEntryNode(NodeRef N) { return N; }
1006 static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
1007 static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
1008 };
1009
1010 /// \brief Provide an iterator that walks defs, giving both the memory access,
1011 /// and the current pointer location, updating the pointer location as it
1012 /// changes due to phi node translation.
1013 ///
1014 /// This iterator, while somewhat specialized, is what most clients actually
1015 /// want when walking upwards through MemorySSA def chains. It takes a pair of
1016 /// , and walks defs, properly translating the
1017 /// memory location through phi nodes for the user.
1018 class upward_defs_iterator
1019 : public iterator_facade_base<upward_defs_iterator,
1020 std::forward_iterator_tag,
1021 const MemoryAccessPair> {
1022 using BaseT = upward_defs_iterator::iterator_facade_base;
1023
1024 public:
1025 upward_defs_iterator(const MemoryAccessPair &Info)
1026 : DefIterator(Info.first), Location(Info.second),
1027 OriginalAccess(Info.first) {
1028 CurrentPair.first = nullptr;
1029
1030 WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
1031 fillInCurrentPair();
1032 }
1033
1034 upward_defs_iterator() { CurrentPair.first = nullptr; }
1035
1036 bool operator==(const upward_defs_iterator &Other) const {
1037 return DefIterator == Other.DefIterator;
1038 }
1039
1040 BaseT::iterator::reference operator*() const {
1041 assert(DefIterator != OriginalAccess->defs_end() &&
1042 "Tried to access past the end of our iterator");
1043 return CurrentPair;
1044 }
1045
1046 using BaseT::operator++;
1047 upward_defs_iterator &operator++() {
1048 assert(DefIterator != OriginalAccess->defs_end() &&
1049 "Tried to access past the end of the iterator");
1050 ++DefIterator;
1051 if (DefIterator != OriginalAccess->defs_end())
1052 fillInCurrentPair();
1053 return *this;
1054 }
1055
1056 BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
1057
1058 private:
1059 void fillInCurrentPair() {
1060 CurrentPair.first = *DefIterator;
1061 if (WalkingPhi && Location.Ptr) {
1062 PHITransAddr Translator(
1063 const_cast<Value *>(Location.Ptr),
1064 OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
1065 if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
1066 DefIterator.getPhiArgBlock(), nullptr,
1067 false))
1068 if (Translator.getAddr() != Location.Ptr) {
1069 CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
1070 return;
1071 }
1072 }
1073 CurrentPair.second = Location;
1074 }
1075
1076 MemoryAccessPair CurrentPair;
1077 memoryaccess_def_iterator DefIterator;
1078 MemoryLocation Location;
1079 MemoryAccess *OriginalAccess = nullptr;
1080 bool WalkingPhi = false;
1081 };
1082
1083 inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
1084 return upward_defs_iterator(Pair);
1085 }
1086
1087 inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
1088
1089 inline iterator_range<upward_defs_iterator>
1090 upward_defs(const MemoryAccessPair &Pair) {
1091 return make_range(upward_defs_begin(Pair), upward_defs_end());
1092 }
1093
1094 /// Walks the defining accesses of MemoryDefs. Stops after we hit something that
1095 /// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
1096 /// comparing against a null def_chain_iterator, this will compare equal only
1097 /// after walking said Phi/liveOnEntry.
1098 ///
1099 /// The UseOptimizedChain flag specifies whether to walk the clobbering
1100 /// access chain, or all the accesses.
1101 ///
1102 /// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
1103 /// a MemoryDef will walk all MemoryDefs above it in the program until it hits
1104 /// a phi node. The optimized chain walks the clobbering access of a store.
1105 /// So if you are just trying to find, given a store, what the next
1106 /// thing that would clobber the same memory is, you want the optimized chain.
1107 template <class T, bool UseOptimizedChain = false>
1108 struct def_chain_iterator
1109 : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
1110 std::forward_iterator_tag, MemoryAccess *> {
1111 def_chain_iterator() : MA(nullptr) {}
1112 def_chain_iterator(T MA) : MA(MA) {}
1113
1114 T operator*() const { return MA; }
1115
1116 def_chain_iterator &operator++() {
1117 // N.B. liveOnEntry has a null defining access.
1118 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1119 if (UseOptimizedChain && MUD->isOptimized())
1120 MA = MUD->getOptimized();
1121 else
1122 MA = MUD->getDefiningAccess();
1123 } else {
1124 MA = nullptr;
1125 }
1126
1127 return *this;
1128 }
1129
1130 bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
1131
1132 private:
1133 T MA;
1134 };
1135
1136 template <class T>
1137 inline iterator_range<def_chain_iterator<T>>
1138 def_chain(T MA, MemoryAccess *UpTo = nullptr) {
1139 #ifdef EXPENSIVE_CHECKS
1140 assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
1141 "UpTo isn't in the def chain!");
1142 #endif
1143 return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
1144 }
1145
1146 template <class T>
1147 inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
1148 return make_range(def_chain_iterator<T, true>(MA),
1149 def_chain_iterator<T, true>(nullptr));
1150 }
1151
1152 } // end namespace llvm
1153
1154 #endif // LLVM_ANALYSIS_MEMORYSSA_H
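For orientation, here is a minimal sketch (not part of this commit) of how client code might use the API above: fetch the access for an instruction, then ask the walker for its nearest dominating clobber. The helper name reportClobber and the surrounding setup are illustrative assumptions, not LLVM API.

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/raw_ostream.h"

// Hypothetical helper: report what clobbers a given load, assuming MSSA was
// obtained from MemorySSAWrapperPass or MemorySSAAnalysis for this function.
static void reportClobber(llvm::MemorySSA &MSSA, llvm::LoadInst &Load) {
  using namespace llvm;
  // Every memory Mod/Ref'ing instruction has a MemoryUseOrDef.
  MemoryUseOrDef *Acc = MSSA.getMemoryAccess(&Load);
  if (!Acc)
    return; // Not something MemorySSA tracks.
  // The walker skips defs that AA can prove do not alias the load's location.
  MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(Acc);
  if (MSSA.isLiveOnEntryDef(Clobber))
    errs() << "clobbered only by memory live on entry\n";
  else
    errs() << "clobbered by: " << *Clobber << "\n";
}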
0 //===- MemorySSAUpdater.h - Memory SSA Updater-------------------*- C++ -*-===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // \file
10 // \brief An automatic updater for MemorySSA that handles arbitrary insertion,
11 // deletion, and moves. It performs phi insertion where necessary, and
12 // automatically updates the MemorySSA IR to be correct.
13 // While updating loads or removing instructions is often easy enough to not
14 // need this, updating stores should generally not be attempted outside this
15 // API.
16 //
17 // Basic API usage:
18 // Create the memory access you want for the instruction (this is mainly so
19 // we know where it is, without having to duplicate the entire set of create
20 // functions MemorySSA supports).
21 // Call insertDef or insertUse depending on whether it's a MemoryUse or a
22 // MemoryDef.
23 // That's it.
24 //
25 // For moving, first, move the instruction itself using the normal SSA
26 // instruction moving API, then just call moveBefore, moveAfter, or moveTo with
27 // the right arguments.
28 //
29 //===----------------------------------------------------------------------===//
30
31 #ifndef LLVM_ANALYSIS_MEMORYSSAUPDATER_H
32 #define LLVM_ANALYSIS_MEMORYSSAUPDATER_H
33
34 #include "llvm/ADT/SmallPtrSet.h"
35 #include "llvm/ADT/SmallVector.h"
36 #include "llvm/IR/BasicBlock.h"
37 #include "llvm/IR/Dominators.h"
38 #include "llvm/IR/Module.h"
39 #include "llvm/IR/OperandTraits.h"
40 #include "llvm/IR/Type.h"
41 #include "llvm/IR/Use.h"
42 #include "llvm/IR/User.h"
43 #include "llvm/IR/Value.h"
44 #include "llvm/Pass.h"
45 #include "llvm/Support/Casting.h"
46 #include "llvm/Support/ErrorHandling.h"
47 #include "llvm/Analysis/MemorySSA.h"
48
49 namespace llvm {
50
51 class Function;
52 class Instruction;
53 class MemoryAccess;
54 class LLVMContext;
55 class raw_ostream;
56
57 class MemorySSAUpdater {
58 private:
59 MemorySSA *MSSA;
60 SmallVector<MemoryAccess *, 16> InsertedPHIs;
61 SmallPtrSet<BasicBlock *, 8> VisitedBlocks;
62
63 public:
64 MemorySSAUpdater(MemorySSA *MSSA) : MSSA(MSSA) {}
65 /// Insert a definition into the MemorySSA IR. RenameUses will rename any use
66 /// below the new def block (and any inserted phis). RenameUses should be set
67 /// to true if the definition may cause new aliases for loads below it. This
68 /// is not the case for hoisting or sinking or other forms of code *movement*.
69 /// It *is* the case for straight code insertion.
70 /// For example:
71 /// store a
72 /// if (foo) { }
73 /// load a
74 ///
75 /// Moving the store into the if block, and calling insertDef, does not
76 /// require RenameUses.
77 /// However, changing it to:
78 /// store a
79 /// if (foo) { store b }
80 /// load a
81 /// Where a may alias b, *does* require RenameUses to be set to true.
82 void insertDef(MemoryDef *Def, bool RenameUses = false);
83 void insertUse(MemoryUse *Use);
84 void moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where);
85 void moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where);
86 void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
87 MemorySSA::InsertionPlace Where);
88
89 // The below are utility functions. Other than creation of accesses to pass
90 // to insertDef, and removeMemoryAccess to remove accesses, you should generally
91 // not attempt to update MemorySSA yourself. It is very non-trivial to get
92 // the edge cases right, and the above calls already operate in near-optimal
93 // time bounds.
94
95 /// \brief Create a MemoryAccess in MemorySSA at a specified point in a block,
96 /// with a specified clobbering definition.
97 ///
98 /// Returns the new MemoryAccess.
99 /// This should be called when a memory instruction is created that is being
100 /// used to replace an existing memory instruction. It will *not* create PHI
101 /// nodes, or verify the clobbering definition. The insertion place is used
102 /// solely to determine where in the MemorySSA access lists the instruction
103 /// will be placed. The caller is expected to keep ordering the same as
104 /// instructions.
105 /// It will return the new MemoryAccess.
106 /// Note: If a MemoryAccess already exists for I, this function will make it
107 /// inaccessible and it *must* have removeMemoryAccess called on it.
108 MemoryAccess *createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition,
109 const BasicBlock *BB,
110 MemorySSA::InsertionPlace Point);
111
112 /// \brief Create a MemoryAccess in MemorySSA before or after an existing
113 /// MemoryAccess.
114 ///
115 /// Returns the new MemoryAccess.
116 /// This should be called when a memory instruction is created that is being
117 /// used to replace an existing memory instruction. It will *not* create PHI
118 /// nodes, or verify the clobbering definition.
119 ///
120 /// Note: If a MemoryAccess already exists for I, this function will make it
121 /// inaccessible and it *must* have removeMemoryAccess called on it.
122 MemoryUseOrDef *createMemoryAccessBefore(Instruction *I,
123 MemoryAccess *Definition,
124 MemoryUseOrDef *InsertPt);
125 MemoryUseOrDef *createMemoryAccessAfter(Instruction *I,
126 MemoryAccess *Definition,
127 MemoryAccess *InsertPt);
128
129 /// \brief Remove a MemoryAccess from MemorySSA, including updating all
130 /// definitions and uses.
131 /// This should be called when a memory instruction that has a MemoryAccess
132 /// associated with it is erased from the program. For example, if a store or
133 /// load is simply erased (not replaced), removeMemoryAccess should be called
134 /// on the MemoryAccess for that store/load.
135 void removeMemoryAccess(MemoryAccess *);
136
137 private:
138 // Move What before Where in the MemorySSA IR.
139 template <class WhereType>
140 void moveTo(MemoryUseOrDef *What, BasicBlock *BB, WhereType Where);
141 MemoryAccess *getPreviousDef(MemoryAccess *);
142 MemoryAccess *getPreviousDefInBlock(MemoryAccess *);
143 MemoryAccess *getPreviousDefFromEnd(BasicBlock *);
144 MemoryAccess *getPreviousDefRecursive(BasicBlock *);
145 MemoryAccess *recursePhi(MemoryAccess *Phi);
146 template <class RangeType>
147 MemoryAccess *tryRemoveTrivialPhi(MemoryPhi *Phi, RangeType &Operands);
148 void fixupDefs(const SmallVectorImpl<MemoryAccess *> &);
149 };
150 } // end namespace llvm
151
152 #endif // LLVM_ANALYSIS_MEMORYSSAUPDATER_H
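A hedged sketch (also not part of this commit) of the basic updater flow described above: create the access for a newly inserted store, hand it to insertDef, and rename uses because straight insertion can create new aliases for loads below it. The helper name and the use of getLiveOnEntryDef as a conservative placeholder definition are assumptions about usage, not prescribed API.

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/IR/Instructions.h"

// Hypothetical helper: NewStore was just inserted at the end of BB and
// MemorySSA must be told about it.
static void addStoreToMemorySSA(llvm::MemorySSA &MSSA,
                                llvm::MemorySSAUpdater &Updater,
                                llvm::StoreInst *NewStore,
                                llvm::BasicBlock *BB) {
  using namespace llvm;
  // Seed the new access with a conservative defining access (assumption:
  // insertDef recomputes the real one and adds any needed MemoryPhis).
  MemoryAccess *NewAcc = Updater.createMemoryAccessInBB(
      NewStore, MSSA.getLiveOnEntryDef(), BB, MemorySSA::End);
  // Straight insertion (not code movement) may create new aliases for loads
  // below the store, so ask for uses to be renamed.
  Updater.insertDef(cast<MemoryDef>(NewAcc), /*RenameUses=*/true);
}

// When erasing an instruction that has an access, drop the access first:
//   Updater.removeMemoryAccess(MSSA.getMemoryAccess(I));
//   I->eraseFromParent();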
(Representative include change from one of the 65 updated files: the header is now pulled from Analysis instead of Transforms/Utils.)
 #include "llvm/ADT/Hashing.h"
 #include "llvm/ADT/iterator_range.h"
+#include "llvm/Analysis/MemorySSA.h"
 #include "llvm/IR/Constant.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/Value.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/Transforms/Utils/MemorySSA.h"
 #include
 #include
 #include
include/llvm/Transforms/Utils/MemorySSA.h: 0 additions, 1155 deletions (file deleted; its contents matched the new llvm/Analysis/MemorySSA.h above, apart from the LLVM_TRANSFORMS_UTILS_MEMORYSSA_H include guard).
None //===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// \brief This file exposes an interface to building/using memory SSA to
11 /// walk memory instructions using a use/def graph.
12 ///
13 /// Memory SSA class builds an SSA form that links together memory access
14 /// instructions such as loads, stores, atomics, and calls. Additionally, it
15 /// does a trivial form of "heap versioning" Every time the memory state changes
16 /// in the program, we generate a new heap version. It generates
17 /// MemoryDef/Uses/Phis that are overlayed on top of the existing instructions.
18 ///
19 /// As a trivial example,
20 /// define i32 @main() #0 {
21 /// entry:
22 /// %call = call noalias i8* @_Znwm(i64 4) #2
23 /// %0 = bitcast i8* %call to i32*
24 /// %call1 = call noalias i8* @_Znwm(i64 4) #2
25 /// %1 = bitcast i8* %call1 to i32*
26 /// store i32 5, i32* %0, align 4
27 /// store i32 7, i32* %1, align 4
28 /// %2 = load i32* %0, align 4
29 /// %3 = load i32* %1, align 4
30 /// %add = add nsw i32 %2, %3
31 /// ret i32 %add
32 /// }
33 ///
34 /// Will become
35 /// define i32 @main() #0 {
36 /// entry:
37 /// ; 1 = MemoryDef(0)
38 /// %call = call noalias i8* @_Znwm(i64 4) #3
39 /// %2 = bitcast i8* %call to i32*
40 /// ; 2 = MemoryDef(1)
41 /// %call1 = call noalias i8* @_Znwm(i64 4) #3
42 /// %4 = bitcast i8* %call1 to i32*
43 /// ; 3 = MemoryDef(2)
44 /// store i32 5, i32* %2, align 4
45 /// ; 4 = MemoryDef(3)
46 /// store i32 7, i32* %4, align 4
47 /// ; MemoryUse(3)
48 /// %7 = load i32* %2, align 4
49 /// ; MemoryUse(4)
50 /// %8 = load i32* %4, align 4
51 /// %add = add nsw i32 %7, %8
52 /// ret i32 %add
53 /// }
54 ///
55 /// Given this form, all the stores that could ever effect the load at %8 can be
56 /// gotten by using the MemoryUse associated with it, and walking from use to
57 /// def until you hit the top of the function.
58 ///
59 /// Each def also has a list of users associated with it, so you can walk from
60 /// both def to users, and users to defs. Note that we disambiguate MemoryUses,
61 /// but not the RHS of MemoryDefs. You can see this above at %7, which would
62 /// otherwise be a MemoryUse(4). Being disambiguated means that for a given
63 /// store, all the MemoryUses on its use lists are may-aliases of that store
64 /// (but the MemoryDefs on its use list may not be).
65 ///
66 /// MemoryDefs are not disambiguated because it would require multiple reaching
67 /// definitions, which would require multiple phis, and multiple memoryaccesses
68 /// per instruction.
69 //===----------------------------------------------------------------------===//
70
71 #ifndef LLVM_TRANSFORMS_UTILS_MEMORYSSA_H
72 #define LLVM_TRANSFORMS_UTILS_MEMORYSSA_H
73
74 #include "llvm/ADT/DenseMap.h"
75 #include "llvm/ADT/GraphTraits.h"
76 #include "llvm/ADT/SmallPtrSet.h"
77 #include "llvm/ADT/SmallVector.h"
78 #include "llvm/ADT/ilist.h"
79 #include "llvm/ADT/ilist_node.h"
80 #include "llvm/ADT/iterator.h"
81 #include "llvm/ADT/iterator_range.h"
82 #include "llvm/Analysis/AliasAnalysis.h"
83 #include "llvm/Analysis/MemoryLocation.h"
84 #include "llvm/Analysis/PHITransAddr.h"
85 #include "llvm/IR/BasicBlock.h"
86 #include "llvm/IR/Dominators.h"
87 #include "llvm/IR/Module.h"
88 #include "llvm/IR/OperandTraits.h"
89 #include "llvm/IR/Type.h"
90 #include "llvm/IR/Use.h"
91 #include "llvm/IR/User.h"
92 #include "llvm/IR/Value.h"
93 #include "llvm/Pass.h"
94 #include "llvm/Support/Casting.h"
95 #include "llvm/Support/ErrorHandling.h"
96 #include
97 #include
98 #include
99 #include
100 #include
101 #include
102
103 namespace llvm {
104
105 class Function;
106 class Instruction;
107 class MemoryAccess;
108 class LLVMContext;
109 class raw_ostream;
110 namespace MSSAHelpers {
111 struct AllAccessTag {};
112 struct DefsOnlyTag {};
113 }
114
115 enum {
116 // Used to signify what the default invalid ID is for MemoryAccess's
117 // getID()
118 INVALID_MEMORYACCESS_ID = 0
119 };
120
121 template class memoryaccess_def_iterator_base;
122 using memoryaccess_def_iterator = memoryaccess_def_iterator_base;
123 using const_memoryaccess_def_iterator =
124 memoryaccess_def_iterator_base;
125
126 // \brief The base for all memory accesses. All memory accesses in a block are
127 // linked together using an intrusive list.
128 class MemoryAccess
129 : public User,
130 public ilist_node>,
131 public ilist_node> {
132 public:
133 using AllAccessType =
134 ilist_node>;
135 using DefsOnlyType =
136 ilist_node>;
137
138 // Methods for support type inquiry through isa, cast, and
139 // dyn_cast
140 static inline bool classof(const Value *V) {
141 unsigned ID = V->getValueID();
142 return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
143 }
144
145 MemoryAccess(const MemoryAccess &) = delete;
146 MemoryAccess &operator=(const MemoryAccess &) = delete;
147 ~MemoryAccess() override;
148
149 void *operator new(size_t, unsigned) = delete;
150 void *operator new(size_t) = delete;
151
152 BasicBlock *getBlock() const { return Block; }
153
154 virtual void print(raw_ostream &OS) const = 0;
155 virtual void dump() const;
156
157 /// \brief The user iterators for a memory access
158 typedef user_iterator iterator;
159 typedef const_user_iterator const_iterator;
160
161 /// \brief This iterator walks over all of the defs in a given
162 /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
163 /// MemoryUse/MemoryDef, this walks the defining access.
164 memoryaccess_def_iterator defs_begin();
165 const_memoryaccess_def_iterator defs_begin() const;
166 memoryaccess_def_iterator defs_end();
167 const_memoryaccess_def_iterator defs_end() const;
168
169 /// \brief Get the iterators for the all access list and the defs only list
170 /// We default to the all access list.
171 AllAccessType::self_iterator getIterator() {
172 return this->AllAccessType::getIterator();
173 }
174 AllAccessType::const_self_iterator getIterator() const {
175 return this->AllAccessType::getIterator();
176 }
177 AllAccessType::reverse_self_iterator getReverseIterator() {
178 return this->AllAccessType::getReverseIterator();
179 }
180 AllAccessType::const_reverse_self_iterator getReverseIterator() const {
181 return this->AllAccessType::getReverseIterator();
182 }
183 DefsOnlyType::self_iterator getDefsIterator() {
184 return this->DefsOnlyType::getIterator();
185 }
186 DefsOnlyType::const_self_iterator getDefsIterator() const {
187 return this->DefsOnlyType::getIterator();
188 }
189 DefsOnlyType::reverse_self_iterator getReverseDefsIterator() {
190 return this->DefsOnlyType::getReverseIterator();
191 }
192 DefsOnlyType::const_reverse_self_iterator getReverseDefsIterator() const {
193 return this->DefsOnlyType::getReverseIterator();
194 }
195
196 protected:
197 friend class MemorySSA;
198 friend class MemoryUseOrDef;
199 friend class MemoryUse;
200 friend class MemoryDef;
201 friend class MemoryPhi;
202
203 /// \brief Used by MemorySSA to change the block of a MemoryAccess when it is
204 /// moved.
205 void setBlock(BasicBlock *BB) { Block = BB; }
206
207 /// \brief Used for debugging and tracking things about MemoryAccesses.
208 /// Guaranteed unique among MemoryAccesses, no guarantees otherwise.
209 virtual unsigned getID() const = 0;
210
211 MemoryAccess(LLVMContext &C, unsigned Vty, BasicBlock *BB,
212 unsigned NumOperands)
213 : User(Type::getVoidTy(C), Vty, nullptr, NumOperands), Block(BB) {}
214
215 private:
216 BasicBlock *Block;
217 };
218
219 inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
220 MA.print(OS);
221 return OS;
222 }
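// Usage sketch (editor's illustration, not part of the original header): every
// MemoryAccess is a User/Value, so its memory users can be visited with the
// ordinary value use-lists plus the operator<< defined above. The helper name
// printMemoryUsers is hypothetical.
static inline void printMemoryUsers(const MemoryAccess &MA, raw_ostream &OS) {
  for (const User *U : MA.users())
    if (const auto *UserMA = dyn_cast<MemoryAccess>(U))
      OS << *UserMA << "\n";
}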
223
224 /// \brief Class that has the common methods + fields of memory uses/defs. It's
225 /// a little awkward to have, but there are many cases where we want either a
226 /// use or def, and there are many cases where uses are needed (defs aren't
227 /// acceptable), and vice-versa.
228 ///
229 /// This class should never be instantiated directly; make a MemoryUse or
230 /// MemoryDef instead.
231 class MemoryUseOrDef : public MemoryAccess {
232 public:
233 void *operator new(size_t, unsigned) = delete;
234 void *operator new(size_t) = delete;
235
236 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
237
238 /// \brief Get the instruction that this MemoryUse represents.
239 Instruction *getMemoryInst() const { return MemoryInst; }
240
241 /// \brief Get the access that produces the memory state used by this Use.
242 MemoryAccess *getDefiningAccess() const { return getOperand(0); }
243
244 static inline bool classof(const Value *MA) {
245 return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
246 }
247
248 // Sadly, these have to be public because they are needed in some of the
249 // iterators.
250 virtual bool isOptimized() const = 0;
251 virtual MemoryAccess *getOptimized() const = 0;
252 virtual void setOptimized(MemoryAccess *) = 0;
253
254 /// \brief Reset the ID of what this MemoryUse was optimized to, causing it to
255 /// be rewalked by the walker if necessary.
256 /// This really should only be called by tests.
257 virtual void resetOptimized() = 0;
258
259 protected:
260 friend class MemorySSA;
261 friend class MemorySSAUpdater;
262 MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
263 Instruction *MI, BasicBlock *BB)
264 : MemoryAccess(C, Vty, BB, 1), MemoryInst(MI) {
265 setDefiningAccess(DMA);
266 }
267 void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
268 if (!Optimized) {
269 setOperand(0, DMA);
270 return;
271 }
272 setOptimized(DMA);
273 }
274
275 private:
276 Instruction *MemoryInst;
277 };
278
279 template <>
280 struct OperandTraits<MemoryUseOrDef>
281 : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
282 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)
283
284 /// \brief Represents read-only accesses to memory
285 ///
286 /// In particular, the set of Instructions that will be represented by
287 /// MemoryUse's is exactly the set of Instructions for which
288 /// AliasAnalysis::getModRefInfo returns "Ref".
289 class MemoryUse final : public MemoryUseOrDef {
290 public:
291 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
292
293 MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
294 : MemoryUseOrDef(C, DMA, MemoryUseVal, MI, BB), OptimizedID(0) {}
295
296 // allocate space for exactly one operand
297 void *operator new(size_t s) { return User::operator new(s, 1); }
298 void *operator new(size_t, unsigned) = delete;
299
300 static inline bool classof(const Value *MA) {
301 return MA->getValueID() == MemoryUseVal;
302 }
303
304 void print(raw_ostream &OS) const override;
305
306 virtual void setOptimized(MemoryAccess *DMA) override {
307 OptimizedID = DMA->getID();
308 setOperand(0, DMA);
309 }
310
311 virtual bool isOptimized() const override {
312 return getDefiningAccess() && OptimizedID == getDefiningAccess()->getID();
313 }
314
315 virtual MemoryAccess *getOptimized() const override {
316 return getDefiningAccess();
317 }
318 virtual void resetOptimized() override {
319 OptimizedID = INVALID_MEMORYACCESS_ID;
320 }
321
322 protected:
323 friend class MemorySSA;
324
325 unsigned getID() const override {
326 llvm_unreachable("MemoryUses do not have IDs");
327 }
328
329 private:
330 unsigned int OptimizedID;
331 };
332
333 template <>
334 struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
335 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)
336
337 /// \brief Represents a read-write access to memory, whether it is a must-alias,
338 /// or a may-alias.
339 ///
340 /// In particular, the set of Instructions that will be represented by
341 /// MemoryDef's is exactly the set of Instructions for which
342 /// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
343 /// Note that, in order to provide def-def chains, all defs also have a use
344 /// associated with them. This use points to the nearest reaching
345 /// MemoryDef/MemoryPhi.
346 class MemoryDef final : public MemoryUseOrDef {
347 public:
348 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
349
350 MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
351 unsigned Ver)
352 : MemoryUseOrDef(C, DMA, MemoryDefVal, MI, BB), ID(Ver),
353 Optimized(nullptr), OptimizedID(INVALID_MEMORYACCESS_ID) {}
354
355 // allocate space for exactly one operand
356 void *operator new(size_t s) { return User::operator new(s, 1); }
357 void *operator new(size_t, unsigned) = delete;
358
359 static inline bool classof(const Value *MA) {
360 return MA->getValueID() == MemoryDefVal;
361 }
362
363 virtual void setOptimized(MemoryAccess *MA) override {
364 Optimized = MA;
365 OptimizedID = getDefiningAccess()->getID();
366 }
367 virtual MemoryAccess *getOptimized() const override { return Optimized; }
368 virtual bool isOptimized() const override {
369 return getOptimized() && getDefiningAccess() &&
370 OptimizedID == getDefiningAccess()->getID();
371 }
372 virtual void resetOptimized() override {
373 OptimizedID = INVALID_MEMORYACCESS_ID;
374 }
375
376 void print(raw_ostream &OS) const override;
377
378 protected:
379 friend class MemorySSA;
380
381 unsigned getID() const override { return ID; }
382
383 private:
384 const unsigned ID;
385 MemoryAccess *Optimized;
386 unsigned int OptimizedID;
387 };
388
389 template <>
390 struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
391 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)
392
393 /// \brief Represents phi nodes for memory accesses.
394 ///
395 /// These have the same semantic as regular phi nodes, with the exception that
396 /// only one phi will ever exist in a given basic block.
397 /// Guaranteeing one phi per block means guaranteeing there is only ever one
398 /// valid reaching MemoryDef/MemoryPHI along each path to the phi node.
399 /// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
400 /// a MemoryPhi's operands.
401 /// That is, given
402 /// if (a) {
403 /// store %a
404 /// store %b
405 /// }
406 /// it *must* be transformed into
407 /// if (a) {
408 /// 1 = MemoryDef(liveOnEntry)
409 /// store %a
410 /// 2 = MemoryDef(1)
411 /// store %b
412 /// }
413 /// and *not*
414 /// if (a) {
415 /// 1 = MemoryDef(liveOnEntry)
416 /// store %a
417 /// 2 = MemoryDef(liveOnEntry)
418 /// store %b
419 /// }
420 /// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
421 /// end of the branch, and if there are not two phi nodes, one will be
422 /// disconnected completely from the SSA graph below that point.
423 /// Because MemoryUse's do not generate new definitions, they do not have this
424 /// issue.
425 class MemoryPhi final : public MemoryAccess {
426 // allocate space for exactly zero operands
427 void *operator new(size_t s) { return User::operator new(s); }
428
429 public:
430 /// Provide fast operand accessors
431 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
432
433 MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
434 : MemoryAccess(C, MemoryPhiVal, BB, 0), ID(Ver), ReservedSpace(NumPreds) {
435 allocHungoffUses(ReservedSpace);
436 }
437
438 void *operator new(size_t, unsigned) = delete;
439
440 // Block iterator interface. This provides access to the list of incoming
441 // basic blocks, which parallels the list of incoming values.
442 typedef BasicBlock **block_iterator;
443 typedef BasicBlock *const *const_block_iterator;
444
445 block_iterator block_begin() {
446 auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
447 return reinterpret_cast<block_iterator>(Ref + 1);
448 }
449
450 const_block_iterator block_begin() const {
451 const auto *Ref =
452 reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
453 return reinterpret_cast<const_block_iterator>(Ref + 1);
454 }
455
456 block_iterator block_end() { return block_begin() + getNumOperands(); }
457
458 const_block_iterator block_end() const {
459 return block_begin() + getNumOperands();
460 }
461
462 iterator_range<block_iterator> blocks() {
463 return make_range(block_begin(), block_end());
464 }
465
466 iterator_range<const_block_iterator> blocks() const {
467 return make_range(block_begin(), block_end());
468 }
469
470 op_range incoming_values() { return operands(); }
471
472 const_op_range incoming_values() const { return operands(); }
473
474 /// \brief Return the number of incoming edges
475 unsigned getNumIncomingValues() const { return getNumOperands(); }
476
477 /// \brief Return incoming value number x
478 MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
479 void setIncomingValue(unsigned I, MemoryAccess *V) {
480 assert(V && "PHI node got a null value!");
481 setOperand(I, V);
482 }
483 static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
484 static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }
485
486 /// \brief Return incoming basic block number @p i.
487 BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }
488
489 /// \brief Return incoming basic block corresponding
490 /// to an operand of the PHI.
491 BasicBlock *getIncomingBlock(const Use &U) const {
492 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
493 return getIncomingBlock(unsigned(&U - op_begin()));
494 }
495
496 /// \brief Return incoming basic block corresponding
497 /// to value use iterator.
498 BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
499 return getIncomingBlock(I.getUse());
500 }
501
502 void setIncomingBlock(unsigned I, BasicBlock *BB) {
503 assert(BB && "PHI node got a null basic block!");
504 block_begin()[I] = BB;
505 }
506
507 /// \brief Add an incoming value to the end of the PHI list
508 void addIncoming(MemoryAccess *V, BasicBlock *BB) {
509 if (getNumOperands() == ReservedSpace)
510 growOperands(); // Get more space!
511 // Initialize some new operands.
512 setNumHungOffUseOperands(getNumOperands() + 1);
513 setIncomingValue(getNumOperands() - 1, V);
514 setIncomingBlock(getNumOperands() - 1, BB);
515 }
516
517 /// \brief Return the first index of the specified basic
518 /// block in the value list for this PHI. Returns -1 if no instance.
519 int getBasicBlockIndex(const BasicBlock *BB) const {
520 for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
521 if (block_begin()[I] == BB)
522 return I;
523 return -1;
524 }
525
526 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
527 int Idx = getBasicBlockIndex(BB);
528 assert(Idx >= 0 && "Invalid basic block argument!");
529 return getIncomingValue(Idx);
530 }
531
532 static inline bool classof(const Value *V) {
533 return V->getValueID() == MemoryPhiVal;
534 }
535
536 void print(raw_ostream &OS) const override;
537
538 protected:
539 friend class MemorySSA;
540
541 /// \brief this is more complicated than the generic
542 /// User::allocHungoffUses, because we have to allocate Uses for the incoming
543 /// values and pointers to the incoming blocks, all in one allocation.
544 void allocHungoffUses(unsigned N) {
545 User::allocHungoffUses(N, /* IsPhi */ true);
546 }
547
548 unsigned getID() const final { return ID; }
549
550 private:
551 // For debugging only
552 const unsigned ID;
553 unsigned ReservedSpace;
554
555 /// \brief This grows the operand list in response to a push_back style of
556 /// operation. This grows the number of ops by 1.5 times.
557 void growOperands() {
558 unsigned E = getNumOperands();
559 // 2 op PHI nodes are VERY common, so reserve at least enough for that.
560 ReservedSpace = std::max(E + E / 2, 2u);
561 growHungoffUses(ReservedSpace, /* IsPhi */ true);
562 }
563 };
564
565 template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
566 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)
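// Usage sketch (editor's illustration, not part of the original header):
// populating a MemoryPhi by hand, the way the builder and updater do, using
// only the accessors above. Pred0/Pred1 and Def0/Def1 are hypothetical
// stand-ins for the block's predecessors and their reaching definitions.
static inline void fillMemoryPhi(MemoryPhi &Phi, BasicBlock *Pred0,
                                 MemoryAccess *Def0, BasicBlock *Pred1,
                                 MemoryAccess *Def1) {
  Phi.addIncoming(Def0, Pred0);
  Phi.addIncoming(Def1, Pred1);
  assert(Phi.getIncomingValueForBlock(Pred0) == Def0);
  assert(Phi.getBasicBlockIndex(Pred1) == 1);
}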
567
568 class MemorySSAWalker;
569
570 /// \brief Encapsulates MemorySSA, including all data associated with memory
571 /// accesses.
572 class MemorySSA {
573 public:
574 MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
575 ~MemorySSA();
576
577 MemorySSAWalker *getWalker();
578
579 /// \brief Given a memory Mod/Ref'ing instruction, get the MemorySSA
580 /// access associated with it. If passed a basic block gets the memory phi
581 /// node that exists for that block, if there is one. Otherwise, this will get
582 /// a MemoryUseOrDef.
583 MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
584 MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;
585
586 void dump() const;
587 void print(raw_ostream &) const;
588
589 /// \brief Return true if \p MA represents the live on entry value
590 ///
591 /// Loads and stores from pointer arguments and other global values may be
592 /// defined by memory operations that do not occur in the current function, so
593 /// they may be live on entry to the function. MemorySSA represents such
594 /// memory state by the live on entry definition, which is guaranteed to occur
595 /// before any other memory access in the function.
596 inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
597 return MA == LiveOnEntryDef.get();
598 }
599
600 inline MemoryAccess *getLiveOnEntryDef() const {
601 return LiveOnEntryDef.get();
602 }
603
604 // Sadly, iplists, by default, owns and deletes pointers added to the
605 // list. It's not currently possible to have two iplists for the same type,
606 // where one owns the pointers, and one does not. This is because the traits
607 // are per-type, not per-tag. If this ever changes, we should make the
608 // DefList an iplist.
609 using AccessList = iplist<MemoryAccess, ilist_tag<MSSAHelpers::AllAccessTag>>;
610 using DefsList =
611 simple_ilist<MemoryAccess, ilist_tag<MSSAHelpers::DefsOnlyTag>>;
612
613 /// \brief Return the list of MemoryAccess's for a given basic block.
614 ///
615 /// This list is not modifiable by the user.
616 const AccessList *getBlockAccesses(const BasicBlock *BB) const {
617 return getWritableBlockAccesses(BB);
618 }
619
620 /// \brief Return the list of MemoryDef's and MemoryPhi's for a given basic
621 /// block.
622 ///
623 /// This list is not modifiable by the user.
624 const DefsList *getBlockDefs(const BasicBlock *BB) const {
625 return getWritableBlockDefs(BB);
626 }
627
628 /// \brief Given two memory accesses in the same basic block, determine
629 /// whether MemoryAccess \p A dominates MemoryAccess \p B.
630 bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;
631
632 /// \brief Given two memory accesses in potentially different blocks,
633 /// determine whether MemoryAccess \p A dominates MemoryAccess \p B.
634 bool dominates(const MemoryAccess *A, const MemoryAccess *B) const;
635
636 /// \brief Given a MemoryAccess and a Use, determine whether MemoryAccess \p A
637 /// dominates Use \p B.
638 bool dominates(const MemoryAccess *A, const Use &B) const;
639
640 /// \brief Verify that MemorySSA is self consistent (IE definitions dominate
641 /// all uses, uses appear in the right places). This is used by unit tests.
642 void verifyMemorySSA() const;
643
644 /// Used in various insertion functions to specify whether we are talking
645 /// about the beginning or end of a block.
646 enum InsertionPlace { Beginning, End };
647
648 protected:
649 // Used by Memory SSA annotater, dumpers, and wrapper pass
650 friend class MemorySSAAnnotatedWriter;
651 friend class MemorySSAPrinterLegacyPass;
652 friend class MemorySSAUpdater;
653
654 void verifyDefUses(Function &F) const;
655 void verifyDomination(Function &F) const;
656 void verifyOrdering(Function &F) const;
657
658 // This is used by the use optimizer and updater.
659 AccessList *getWritableBlockAccesses(const BasicBlock *BB) const {
660 auto It = PerBlockAccesses.find(BB);
661 return It == PerBlockAccesses.end() ? nullptr : It->second.get();
662 }
663
664 // This is used by the use optimizer and updater.
665 DefsList *getWritableBlockDefs(const BasicBlock *BB) const {
666 auto It = PerBlockDefs.find(BB);
667 return It == PerBlockDefs.end() ? nullptr : It->second.get();
668 }
669
670 // These are used by the updater to perform various internal MemorySSA
671 // machinations. They do not always leave the IR in a correct state, and they
672 // rely on the updater to fix up what they break, so they are not public.
673
674 void moveTo(MemoryUseOrDef *What, BasicBlock *BB, AccessList::iterator Where);
675 void moveTo(MemoryUseOrDef *What, BasicBlock *BB, InsertionPlace Point);
676 // Rename the dominator tree branch rooted at BB.
677 void renamePass(BasicBlock *BB, MemoryAccess *IncomingVal,
678 SmallPtrSetImpl<BasicBlock *> &Visited) {
679 renamePass(DT->getNode(BB), IncomingVal, Visited, true, true);
680 }
681 void removeFromLookups(MemoryAccess *);
682 void removeFromLists(MemoryAccess *, bool ShouldDelete = true);
683 void insertIntoListsForBlock(MemoryAccess *, const BasicBlock *,
684 InsertionPlace);
685 void insertIntoListsBefore(MemoryAccess *, const BasicBlock *,
686 AccessList::iterator);
687 MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *);
688
689 private:
690 class CachingWalker;
691 class OptimizeUses;
692
693 CachingWalker *getWalkerImpl();
694 void buildMemorySSA();
695 void optimizeUses();
696
697 void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
698 using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;
699 using DefsMap = DenseMap<const BasicBlock *, std::unique_ptr<DefsList>>;
700
701 void
702 determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
703 void markUnreachableAsLiveOnEntry(BasicBlock *BB);
704 bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
705 MemoryPhi *createMemoryPhi(BasicBlock *BB);
706 MemoryUseOrDef *createNewAccess(Instruction *);
707 MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
708 void placePHINodes(const SmallPtrSetImpl<BasicBlock *> &,
709 const DenseMap<const BasicBlock *, unsigned int> &);
710 MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *, bool);
711 void renameSuccessorPhis(BasicBlock *, MemoryAccess *, bool);
712 void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
713 SmallPtrSetImpl<BasicBlock *> &Visited,
714 bool SkipVisited = false, bool RenameAllUses = false);
715 AccessList *getOrCreateAccessList(const BasicBlock *);
716 DefsList *getOrCreateDefsList(const BasicBlock *);
717 void renumberBlock(const BasicBlock *) const;
718 AliasAnalysis *AA;
719 DominatorTree *DT;
720 Function &F;
721
722 // Memory SSA mappings
723 DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
724 // These two mappings contain the main block to access/def mappings for
725 // MemorySSA. The list contained in PerBlockAccesses really owns all the
726 // MemoryAccesses.
727 // Both maps maintain the invariant that if a block is found in them, the
728 // corresponding list is not empty, and if a block is not found in them, the
729 // corresponding list is empty.
730 AccessMap PerBlockAccesses;
731 DefsMap PerBlockDefs;
732 std::unique_ptr<MemoryAccess> LiveOnEntryDef;
733
734 // Domination mappings
735 // Note that the numbering is local to a block, even though the map is
736 // global.
737 mutable SmallPtrSet<const BasicBlock *, 16> BlockNumberingValid;
738 mutable DenseMap<const MemoryAccess *, unsigned long> BlockNumbering;
739
740 // Memory SSA building info
741 std::unique_ptr<CachingWalker> Walker;
742 unsigned NextID;
743 };
744
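// Usage sketch (editor's illustration, not part of the original header):
// building MemorySSA directly and inspecting a block's def list through the
// public accessors above. F, AA, DT, and BB are assumed to come from an
// enclosing pass.
static inline bool blockHasMemoryDefs(Function &F, AliasAnalysis &AA,
                                      DominatorTree &DT, const BasicBlock *BB) {
  MemorySSA MSSA(F, &AA, &DT);
  const MemorySSA::DefsList *Defs = MSSA.getBlockDefs(BB);
  return Defs && !Defs->empty();
}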
745 // Internal MemorySSA utils, for use by MemorySSA classes and walkers
746 class MemorySSAUtil {
747 protected:
748 friend class MemorySSAWalker;
749 friend class GVNHoist;
750 // This function should not be used by new passes.
751 static bool defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
752 AliasAnalysis &AA);
753 };
754
755 // This pass does eager building and then printing of MemorySSA. It is used by
756 // the tests to be able to build, dump, and verify Memory SSA.
757 class MemorySSAPrinterLegacyPass : public FunctionPass {
758 public:
759 MemorySSAPrinterLegacyPass();
760
761 bool runOnFunction(Function &) override;
762 void getAnalysisUsage(AnalysisUsage &AU) const override;
763
764 static char ID;
765 };
766
767 /// An analysis that produces \c MemorySSA for a function.
768 ///
769 class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
770 friend AnalysisInfoMixin<MemorySSAAnalysis>;
771
772 static AnalysisKey Key;
773
774 public:
775 // Wrap MemorySSA result to ensure address stability of internal MemorySSA
776 // pointers after construction. Use a wrapper class instead of plain
777 // unique_ptr to avoid build breakage on MSVC.
778 struct Result {
779 Result(std::unique_ptr<MemorySSA> &&MSSA) : MSSA(std::move(MSSA)) {}
780 MemorySSA &getMSSA() { return *MSSA.get(); }
781
782 std::unique_ptr<MemorySSA> MSSA;
783 };
784
785 Result run(Function &F, FunctionAnalysisManager &AM);
786 };
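// Usage sketch (editor's illustration, not part of the original header): a
// new-pass-manager pass pulling the analysis result declared above out of the
// FunctionAnalysisManager. HypotheticalPass is an illustrative name, not an
// LLVM pass.
struct HypotheticalPass : PassInfoMixin<HypotheticalPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
    MSSA.verifyMemorySSA(); // any consumer of the MemorySSA graph goes here
    return PreservedAnalyses::all();
  }
};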
787
788 /// \brief Printer pass for \c MemorySSA.
789 class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
790 raw_ostream &OS;
791
792 public:
793 explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
794
795 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
796 };
797
798 /// \brief Verifier pass for \c MemorySSA.
799 struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
800 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
801 };
802
803 /// \brief Legacy analysis pass which computes \c MemorySSA.
804 class MemorySSAWrapperPass : public FunctionPass {
805 public:
806 MemorySSAWrapperPass();
807
808 static char ID;
809
810 bool runOnFunction(Function &) override;
811 void releaseMemory() override;
812 MemorySSA &getMSSA() { return *MSSA; }
813 const MemorySSA &getMSSA() const { return *MSSA; }
814
815 void getAnalysisUsage(AnalysisUsage &AU) const override;
816
817 void verifyAnalysis() const override;
818 void print(raw_ostream &OS, const Module *M = nullptr) const override;
819
820 private:
821 std::unique_ptr<MemorySSA> MSSA;
822 };
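// Usage sketch (editor's illustration, not part of the original header): a
// legacy pass consuming the wrapper pass declared above. HypotheticalLegacyPass
// is an illustrative name, not an LLVM pass.
struct HypotheticalLegacyPass : FunctionPass {
  static char ID;
  HypotheticalLegacyPass() : FunctionPass(ID) {}
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MemorySSAWrapperPass>();
    AU.setPreservesAll();
  }
  bool runOnFunction(Function &F) override {
    MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
    MSSA.dump();
    return false;
  }
};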
823
824 /// \brief This is the generic walker interface for walkers of MemorySSA.
825 /// Walkers are used to be able to further disambiguate the def-use chains
826 /// MemorySSA gives you, or otherwise produce better info than MemorySSA gives
827 /// you.
828 /// In particular, while the def-use chains provide basic information, and are
829 /// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
830 /// MemoryUse as AliasAnalysis considers it, a user may want better or other
831 /// information. In particular, they may want to use SCEV info to further
832 /// disambiguate memory accesses, or they may want the nearest dominating
833 /// may-aliasing MemoryDef for a call or a store. This API enables a
834 /// standardized interface to getting and using that info.
835 class MemorySSAWalker {
836 public:
837 MemorySSAWalker(MemorySSA *);
838 virtual ~MemorySSAWalker() = default;
839
840 using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;
841
842 /// \brief Given a memory Mod/Ref/ModRef'ing instruction, calling this
843 /// will give you the nearest dominating MemoryAccess that Mod's the location
844 /// the instruction accesses (by skipping any def which AA can prove does not
845 /// alias the location(s) accessed by the instruction given).
846 ///
847 /// Note that this will return a single access, and it must dominate the
848 /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
849 /// this will return the MemoryPhi, not the operand. This means that
850 /// given:
851 /// if (a) {
852 /// 1 = MemoryDef(liveOnEntry)
853 /// store %a
854 /// } else {
855 /// 2 = MemoryDef(liveOnEntry)
856 /// store %b
857 /// }
858 /// 3 = MemoryPhi(2, 1)
859 /// MemoryUse(3)
860 /// load %a
861 ///
862 /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
863 /// in the if (a) branch.
864 MemoryAccess *getClobberingMemoryAccess(const Instruction *I) {
865 MemoryAccess *MA = MSSA->getMemoryAccess(I);
866 assert(MA && "Handed an instruction that MemorySSA doesn't recognize?");
867 return getClobberingMemoryAccess(MA);
868 }
869
870 /// Does the same thing as getClobberingMemoryAccess(const Instruction *I),
871 /// but takes a MemoryAccess instead of an Instruction.
872 virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) = 0;
873
874 /// \brief Given a potentially clobbering memory access and a new location,
875 /// calling this will give you the nearest dominating clobbering MemoryAccess
876 /// (by skipping non-aliasing def links).
877 ///
878 /// This version of the function is mainly used to disambiguate phi translated
879 /// pointers, where the value of a pointer may have changed from the initial
880 /// memory access. Note that this expects to be handed either a MemoryUse,
881 /// or an already potentially clobbering access. Unlike the above API, if
882 /// given a MemoryDef that clobbers the pointer as the starting access, it
883 /// will return that MemoryDef, whereas the above would return the clobber
884 /// starting from the use side of the memory def.
885 virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
886 const MemoryLocation &) = 0;
887
888 /// \brief Given a memory access, invalidate anything this walker knows about
889 /// that access.
890 /// This API is used by walkers that store information to perform basic cache
891 /// invalidation. This will be called by MemorySSA at appropriate times for
892 /// the walker it uses or returns.
893 virtual void invalidateInfo(MemoryAccess *) {}
894
895 virtual void verify(const MemorySSA *MSSA) { assert(MSSA == this->MSSA); }
896
897 protected:
898 friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
899 // constructor.
900 MemorySSA *MSSA;
901 };
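// Usage sketch (editor's illustration, not part of the original header): the
// query described above, applied to a load. If the returned clobber is the
// liveOnEntry def, nothing inside the function writes the loaded memory.
// MSSA and Load are assumed inputs from an already-built MemorySSA.
static inline bool loadIsClobberedInFunction(MemorySSA &MSSA,
                                             const Instruction *Load) {
  MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(Load);
  return !MSSA.isLiveOnEntryDef(Clobber);
}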
902
903 /// \brief A MemorySSAWalker that does no alias queries, or anything else. It
904 /// simply returns the links as they were constructed by the builder.
905 class DoNothingMemorySSAWalker final : public MemorySSAWalker {
906 public:
907 // Keep the overrides below from hiding the Instruction overload of
908 // getClobberingMemoryAccess.
909 using MemorySSAWalker::getClobberingMemoryAccess;
910
911 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
912 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
913 const MemoryLocation &) override;
914 };
915
916 using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
917 using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;
918
919 /// \brief Iterator base class used to implement const and non-const iterators
920 /// over the defining accesses of a MemoryAccess.
921 template <class T>
922 class memoryaccess_def_iterator_base
923 : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
924 std::forward_iterator_tag, T, ptrdiff_t, T *,
925 T *> {
926 using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;
927
928 public:
929 memoryaccess_def_iterator_base(T *Start) : Access(Start) {}
930 memoryaccess_def_iterator_base() = default;
931
932 bool operator==(const memoryaccess_def_iterator_base &Other) const {
933 return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
934 }
935
936 // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
937 // block from the operand in constant time (In a PHINode, the uselist has
938 // both, so it's just subtraction). We provide it as part of the
939 // iterator to avoid callers having to linear walk to get the block.
940 // If the operation becomes constant time on MemoryPHI's, this bit of
941 // abstraction breaking should be removed.
942 BasicBlock *getPhiArgBlock() const {
943 MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
944 assert(MP && "Tried to get phi arg block when not iterating over a PHI");
945 return MP->getIncomingBlock(ArgNo);
946 }
947 typename BaseT::iterator::pointer operator*() const {
948 assert(Access && "Tried to access past the end of our iterator");
949 // Go to the first argument for phis, and the defining access for everything
950 // else.
951 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
952 return MP->getIncomingValue(ArgNo);
953 return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
954 }
955 using BaseT::operator++;
956 memoryaccess_def_iterator &operator++() {
957 assert(Access && "Hit end of iterator");
958 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
959 if (++ArgNo >= MP->getNumIncomingValues()) {
960 ArgNo = 0;
961 Access = nullptr;
962 }
963 } else {
964 Access = nullptr;
965 }
966 return *this;
967 }
968
969 private:
970 T *Access = nullptr;
971 unsigned ArgNo = 0;
972 };
973
974 inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
975 return memoryaccess_def_iterator(this);
976 }
977
978 inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
979 return const_memoryaccess_def_iterator(this);
980 }
981
982 inline memoryaccess_def_iterator MemoryAccess::defs_end() {
983 return memoryaccess_def_iterator();
984 }
985
986 inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
987 return const_memoryaccess_def_iterator();
988 }
989
990 /// \brief GraphTraits for a MemoryAccess, which walks defs in the normal case,
991 /// and uses in the inverse case.
992 template <> struct GraphTraits<MemoryAccess *> {
993 using NodeRef = MemoryAccess *;
994 using ChildIteratorType = memoryaccess_def_iterator;
995
996 static NodeRef getEntryNode(NodeRef N) { return N; }
997 static ChildIteratorType child_begin(NodeRef N) { return N->defs_begin(); }
998 static ChildIteratorType child_end(NodeRef N) { return N->defs_end(); }
999 };
1000
1001 template <> struct GraphTraits<Inverse<MemoryAccess *>> {
1002 using NodeRef = MemoryAccess *;
1003 using ChildIteratorType = MemoryAccess::iterator;
1004
1005 static NodeRef getEntryNode(NodeRef N) { return N; }
1006 static ChildIteratorType child_begin(NodeRef N) { return N->user_begin(); }
1007 static ChildIteratorType child_end(NodeRef N) { return N->user_end(); }
1008 };
1009
1010 /// \brief Provide an iterator that walks defs, giving both the memory access,
1011 /// and the current pointer location, updating the pointer location as it
1012 /// changes due to phi node translation.
1013 ///
1014 /// This iterator, while somewhat specialized, is what most clients actually
1015 /// want when walking upwards through MemorySSA def chains. It takes a pair of
1016 /// <MemoryAccess, MemoryLocation>, and walks defs, properly translating the
1017 /// memory location through phi nodes for the user.
1018 class upward_defs_iterator
1019 : public iterator_facade_base<upward_defs_iterator,
1020 std::forward_iterator_tag,
1021 const MemoryAccessPair> {
1022 using BaseT = upward_defs_iterator::iterator_facade_base;
1023
1024 public:
1025 upward_defs_iterator(const MemoryAccessPair &Info)
1026 : DefIterator(Info.first), Location(Info.second),
1027 OriginalAccess(Info.first) {
1028 CurrentPair.first = nullptr;
1029
1030 WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
1031 fillInCurrentPair();
1032 }
1033
1034 upward_defs_iterator() { CurrentPair.first = nullptr; }
1035
1036 bool operator==(const upward_defs_iterator &Other) const {
1037 return DefIterator == Other.DefIterator;
1038 }
1039
1040 BaseT::iterator::reference operator*() const {
1041 assert(DefIterator != OriginalAccess->defs_end() &&
1042 "Tried to access past the end of our iterator");
1043 return CurrentPair;
1044 }
1045
1046 using BaseT::operator++;
1047 upward_defs_iterator &operator++() {
1048 assert(DefIterator != OriginalAccess->defs_end() &&
1049 "Tried to access past the end of the iterator");
1050 ++DefIterator;
1051 if (DefIterator != OriginalAccess->defs_end())
1052 fillInCurrentPair();
1053 return *this;
1054 }
1055
1056 BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }
1057
1058 private:
1059 void fillInCurrentPair() {
1060 CurrentPair.first = *DefIterator;
1061 if (WalkingPhi && Location.Ptr) {
1062 PHITransAddr Translator(
1063 const_cast<Value *>(Location.Ptr),
1064 OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
1065 if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
1066 DefIterator.getPhiArgBlock(), nullptr,
1067 false))
1068 if (Translator.getAddr() != Location.Ptr) {
1069 CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
1070 return;
1071 }
1072 }
1073 CurrentPair.second = Location;
1074 }
1075
1076 MemoryAccessPair CurrentPair;
1077 memoryaccess_def_iterator DefIterator;
1078 MemoryLocation Location;
1079 MemoryAccess *OriginalAccess = nullptr;
1080 bool WalkingPhi = false;
1081 };
1082
1083 inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
1084 return upward_defs_iterator(Pair);
1085 }
1086
1087 inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
1088
1089 inline iterator_range<upward_defs_iterator>
1090 upward_defs(const MemoryAccessPair &Pair) {
1091 return make_range(upward_defs_begin(Pair), upward_defs_end());
1092 }
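// Usage sketch (editor's illustration, not part of the original header):
// walking upward from a MemoryUse's defining access while the iterator
// phi-translates the location. `Use` is assumed to come from an already-built
// MemorySSA, and raw_ostream support is assumed to be available.
static inline void printUpwardDefs(MemoryUse *Use, raw_ostream &OS) {
  MemoryAccessPair Start(Use->getDefiningAccess(),
                         MemoryLocation::get(Use->getMemoryInst()));
  for (const MemoryAccessPair &P : upward_defs(Start))
    OS << *P.first << "\n";
}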
1093
1094 /// Walks the defining accesses of MemoryDefs. Stops after we hit something that
1095 /// has no defining use (e.g. a MemoryPhi or liveOnEntry). Note that, when
1096 /// comparing against a null def_chain_iterator, this will compare equal only
1097 /// after walking said Phi/liveOnEntry.
1098 ///
1099 /// The UseOptimizedChain flag specifies whether to walk the clobbering
1100 /// access chain, or all the accesses.
1101 ///
1102 /// Normally, MemoryDefs are all just def/use linked together, so a def_chain on
1103 /// a MemoryDef will walk all MemoryDefs above it in the program until it hits
1104 /// a phi node. The optimized chain walks the clobbering access of a store.
1105 /// So if you are just trying to find, given a store, what the next
1106 /// thing that would clobber the same memory is, you want the optimized chain.
1107 template <class T, bool UseOptimizedChain = false>
1108 struct def_chain_iterator
1109 : public iterator_facade_base<def_chain_iterator<T, UseOptimizedChain>,
1110 std::forward_iterator_tag, MemoryAccess *> {
1111 def_chain_iterator() : MA(nullptr) {}
1112 def_chain_iterator(T MA) : MA(MA) {}
1113
1114 T operator*() const { return MA; }
1115
1116 def_chain_iterator &operator++() {
1117 // N.B. liveOnEntry has a null defining access.
1118 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
1119 if (UseOptimizedChain && MUD->isOptimized())
1120 MA = MUD->getOptimized();
1121 else
1122 MA = MUD->getDefiningAccess();
1123 } else {
1124 MA = nullptr;
1125 }
1126
1127 return *this;
1128 }
1129
1130 bool operator==(const def_chain_iterator &O) const { return MA == O.MA; }
1131
1132 private:
1133 T MA;
1134 };
1135
1136 template <class T>
1137 inline iterator_range<def_chain_iterator<T>>
1138 def_chain(T MA, MemoryAccess *UpTo = nullptr) {
1139 #ifdef EXPENSIVE_CHECKS
1140 assert((!UpTo || find(def_chain(MA), UpTo) != def_chain_iterator<T>()) &&
1141 "UpTo isn't in the def chain!");
1142 #endif
1143 return make_range(def_chain_iterator<T>(MA), def_chain_iterator<T>(UpTo));
1144 }
1145
1146 template <class T>
1147 inline iterator_range<def_chain_iterator<T, true>> optimized_def_chain(T MA) {
1148 return make_range(def_chain_iterator<T, true>(MA),
1149 def_chain_iterator<T, true>(nullptr));
1150 }
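// Usage sketch (editor's illustration, not part of the original header):
// def_chain ends at the first access with no defining link, so the last
// element visited is a MemoryPhi or the liveOnEntry def. `Start` is an
// assumed input access.
static inline MemoryAccess *lastDefInChain(MemoryAccess *Start) {
  MemoryAccess *Last = Start;
  for (MemoryAccess *MA : def_chain(Start))
    Last = MA;
  return Last;
}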
1151
1152 } // end namespace llvm
1153
1154 #endif // LLVM_ANALYSIS_MEMORYSSA_H
include/llvm/Transforms/Utils/MemorySSAUpdater.h +0 -153
0 //===- MemorySSAUpdater.h - Memory SSA Updater-------------------*- C++ -*-===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // \file
10 // \brief An automatic updater for MemorySSA that handles arbitrary insertion,
11 // deletion, and moves. It performs phi insertion where necessary, and
12 // automatically updates the MemorySSA IR to be correct.
13 // While updating loads or removing instructions is often easy enough to not
14 // need this, updating stores should generally not be attempted outside this
15 // API.
16 //
17 // Basic API usage:
18 // Create the memory access you want for the instruction (this is mainly so
19 // we know where it is, without having to duplicate the entire set of create
20 // functions MemorySSA supports).
21 // Call insertDef or insertUse depending on whether it's a MemoryUse or a
22 // MemoryDef.
23 // That's it.
24 //
25 // For moving, first, move the instruction itself using the normal SSA
26 // instruction moving API, then just call moveBefore, moveAfter, or moveTo with
27 // the right arguments.
28 //
29 //===----------------------------------------------------------------------===//
30
31 #ifndef LLVM_TRANSFORMS_UTILS_MEMORYSSAUPDATER_H
32 #define LLVM_TRANSFORMS_UTILS_MEMORYSSAUPDATER_H
33
34 #include "llvm/ADT/SmallPtrSet.h"
35 #include "llvm/ADT/SmallVector.h"
36 #include "llvm/IR/BasicBlock.h"
37 #include "llvm/IR/Dominators.h"
38 #include "llvm/IR/Module.h"
39 #include "llvm/IR/OperandTraits.h"
40 #include "llvm/IR/Type.h"
41 #include "llvm/IR/Use.h"
42 #include "llvm/IR/User.h"
43 #include "llvm/IR/Value.h"
44 #include "llvm/Pass.h"
45 #include "llvm/Support/Casting.h"
46 #include "llvm/Support/ErrorHandling.h"
47 #include "llvm/Transforms/Utils/MemorySSA.h"
48
49 namespace llvm {
50
51 class Function;
52 class Instruction;
53 class MemoryAccess;
54 class LLVMContext;
55 class raw_ostream;
56
57 class MemorySSAUpdater {
58 private:
59 MemorySSA *MSSA;
60 SmallVector<MemoryPhi *, 8> InsertedPHIs;
61 SmallPtrSet<BasicBlock *, 8> VisitedBlocks;
62
63 public:
64 MemorySSAUpdater(MemorySSA *MSSA) : MSSA(MSSA) {}
65 /// Insert a definition into the MemorySSA IR. RenameUses will rename any use
66 /// below the new def block (and any inserted phis). RenameUses should be set
67 /// to true if the definition may cause new aliases for loads below it. This
68 /// is not the case for hoisting or sinking or other forms of code *movement*.
69 /// It *is* the case for straight code insertion.
70 /// For example:
71 /// store a
72 /// if (foo) { }
73 /// load a
74 ///
75 /// Moving the store into the if block, and calling insertDef, does not
76 /// require RenameUses.
77 /// However, changing it to:
78 /// store a
79 /// if (foo) { store b }
80 /// load a
81 /// Where a mayalias b, *does* require RenameUses be set to true.
82 void insertDef(MemoryDef *Def, bool RenameUses = false);
83 void insertUse(MemoryUse *Use);
84 void moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where);
85 void moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where);
86 void moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
87 MemorySSA::InsertionPlace Where);
88
89 // The below are utility functions. Other than creation of accesses to pass
90 // to insertDef, and removeAccess to remove accesses, you should generally
91 // not attempt to update memoryssa yourself. It is very non-trivial to get
92 // the edge cases right, and the above calls already operate in near-optimal
93 // time bounds.
94
95 /// \brief Create a MemoryAccess in MemorySSA at a specified point in a block,
96 /// with a specified clobbering definition.
97 ///
98 /// Returns the new MemoryAccess.
99 /// This should be called when a memory instruction is created that is being
100 /// used to replace an existing memory instruction. It will *not* create PHI
101 /// nodes, or verify the clobbering definition. The insertion place is used
102 /// solely to determine where in the memoryssa access lists the instruction
103 /// will be placed. The caller is expected to keep ordering the same as
104 /// instructions.
105 /// It will return the new MemoryAccess.
106 /// Note: If a MemoryAccess already exists for I, this function will make it
107 /// inaccessible and it *must* have removeMemoryAccess called on it.
108 MemoryAccess *createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition,
109 const BasicBlock *BB,
110 MemorySSA::InsertionPlace Point);
111
112 /// \brief Create a MemoryAccess in MemorySSA before or after an existing
113 /// MemoryAccess.
114 ///
115 /// Returns the new MemoryAccess.
116 /// This should be called when a memory instruction is created that is being
117 /// used to replace an existing memory instruction. It will *not* create PHI
118 /// nodes, or verify the clobbering definition.
119 ///
120 /// Note: If a MemoryAccess already exists for I, this function will make it
121 /// inaccessible and it *must* have removeMemoryAccess called on it.
122 MemoryUseOrDef *createMemoryAccessBefore(Instruction *I,
123 MemoryAccess *Definition,
124 MemoryUseOrDef *InsertPt);
125 MemoryUseOrDef *createMemoryAccessAfter(Instruction *I,
126 MemoryAccess *Definition,
127 MemoryAccess *InsertPt);
128
129 /// \brief Remove a MemoryAccess from MemorySSA, including updating all
130 /// definitions and uses.
131 /// This should be called when a memory instruction that has a MemoryAccess
132 /// associated with it is erased from the program. For example, if a store or
133 /// load is simply erased (not replaced), removeMemoryAccess should be called
134 /// on the MemoryAccess for that store/load.
135 void removeMemoryAccess(MemoryAccess *);
136
137 private:
138 // Move What before Where in the MemorySSA IR.
139 template <class WhereType>
140 void moveTo(MemoryUseOrDef *What, BasicBlock *BB, WhereType Where);
141 MemoryAccess *getPreviousDef(MemoryAccess *);
142 MemoryAccess *getPreviousDefInBlock(MemoryAccess *);
143 MemoryAccess *getPreviousDefFromEnd(BasicBlock *);
144 MemoryAccess *getPreviousDefRecursive(BasicBlock *);
145 MemoryAccess *recursePhi(MemoryAccess *Phi);
146 template <class RangeType>
147 MemoryAccess *tryRemoveTrivialPhi(MemoryPhi *Phi, RangeType &Operands);
148 void fixupDefs(const SmallVectorImpl<MemoryAccess *> &);
149 };
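// Usage sketch (editor's illustration, not part of the original header): the
// "basic API usage" from the file comment, for a store the caller has just
// inserted at the end of BB. Passing a null Definition and letting insertDef
// compute the reaching def is an assumption of this sketch, not a documented
// contract.
static inline void registerNewStore(MemorySSAUpdater &Updater,
                                    Instruction *NewStore, BasicBlock *BB) {
  MemoryAccess *NewDef = Updater.createMemoryAccessInBB(
      NewStore, /*Definition=*/nullptr, BB, MemorySSA::End);
  Updater.insertDef(cast<MemoryDef>(NewDef), /*RenameUses=*/true);
}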
150 } // end namespace llvm
151
152 #endif // LLVM_TRANSFORMS_UTILS_MEMORYSSAUPDATER_H
7878 initializeTypeBasedAAWrapperPassPass(Registry);
7979 initializeScopedNoAliasAAWrapperPassPass(Registry);
8080 initializeLCSSAVerificationPassPass(Registry);
81 initializeMemorySSAWrapperPassPass(Registry);
82 initializeMemorySSAPrinterLegacyPassPass(Registry);
8183 }
8284
8385 void LLVMInitializeAnalysis(LLVMPassRegistryRef R) {
5252 MemoryBuiltins.cpp
5353 MemoryDependenceAnalysis.cpp
5454 MemoryLocation.cpp
55 MemorySSA.cpp
56 MemorySSAUpdater.cpp
5557 ModuleDebugInfoPrinter.cpp
5658 ModuleSummaryAnalysis.cpp
5759 ObjCARCAliasAnalysis.cpp
0 //===-- MemorySSA.cpp - Memory SSA Builder---------------------------===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------===//
8 //
9 // This file implements the MemorySSA class.
10 //
11 //===----------------------------------------------------------------===//
12 #include "llvm/Analysis/MemorySSA.h"
13 #include "llvm/ADT/DenseMap.h"
14 #include "llvm/ADT/DenseSet.h"
15 #include "llvm/ADT/DepthFirstIterator.h"
16 #include "llvm/ADT/GraphTraits.h"
17 #include "llvm/ADT/PostOrderIterator.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallBitVector.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/CFG.h"
25 #include "llvm/Analysis/GlobalsModRef.h"
26 #include "llvm/Analysis/IteratedDominanceFrontier.h"
27 #include "llvm/Analysis/MemoryLocation.h"
28 #include "llvm/Analysis/PHITransAddr.h"
29 #include "llvm/IR/AssemblyAnnotationWriter.h"
30 #include "llvm/IR/DataLayout.h"
31 #include "llvm/IR/Dominators.h"
32 #include "llvm/IR/GlobalVariable.h"
33 #include "llvm/IR/IRBuilder.h"
34 #include "llvm/IR/IntrinsicInst.h"
35 #include "llvm/IR/LLVMContext.h"
36 #include "llvm/IR/Metadata.h"
37 #include "llvm/IR/Module.h"
38 #include "llvm/IR/PatternMatch.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Support/FormattedStream.h"
41 #include "llvm/Transforms/Scalar.h"
42 #include
43
44 #define DEBUG_TYPE "memoryssa"
45 using namespace llvm;
46 INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
47 true)
48 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
49 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
50 INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
51 true)
52
53 INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
54 "Memory SSA Printer", false, false)
55 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
56 INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
57 "Memory SSA Printer", false, false)
58
59 static cl::opt<unsigned> MaxCheckLimit(
60 "memssa-check-limit", cl::Hidden, cl::init(100),
61 cl::desc("The maximum number of stores/phis MemorySSA"
62 "will consider trying to walk past (default = 100)"));
63
64 static cl::opt<bool>
65 VerifyMemorySSA("verify-memoryssa", cl::init(false), cl::Hidden,
66 cl::desc("Verify MemorySSA in legacy printer pass."));
67
68 namespace llvm {
69 /// \brief An assembly annotator class to print Memory SSA information in
70 /// comments.
71 class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
72 friend class MemorySSA;
73 const MemorySSA *MSSA;
74
75 public:
76 MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
77
78 virtual void emitBasicBlockStartAnnot(const BasicBlock *BB,
79 formatted_raw_ostream &OS) {
80 if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
81 OS << "; " << *MA << "\n";
82 }
83
84 virtual void emitInstructionAnnot(const Instruction *I,
85 formatted_raw_ostream &OS) {
86 if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
87 OS << "; " << *MA << "\n";
88 }
89 };
90 }
91
92 namespace {
93 /// Our current alias analysis API differentiates heavily between calls and
94 /// non-calls, and functions called on one usually assert on the other.
95 /// This class encapsulates the distinction to simplify other code that wants
96 /// "Memory affecting instructions and related data" to use as a key.
97 /// For example, this class is used as a densemap key in the use optimizer.
98 class MemoryLocOrCall {
99 public:
100 MemoryLocOrCall() : IsCall(false) {}
101 MemoryLocOrCall(MemoryUseOrDef *MUD)
102 : MemoryLocOrCall(MUD->getMemoryInst()) {}
103 MemoryLocOrCall(const MemoryUseOrDef *MUD)
104 : MemoryLocOrCall(MUD->getMemoryInst()) {}
105
106 MemoryLocOrCall(Instruction *Inst) {
107 if (ImmutableCallSite(Inst)) {
108 IsCall = true;
109 CS = ImmutableCallSite(Inst);
110 } else {
111 IsCall = false;
112 // There is no such thing as a memorylocation for a fence inst, and it is
113 // unique in that regard.
114 if (!isa<FenceInst>(Inst))
115 Loc = MemoryLocation::get(Inst);
116 }
117 }
118
119 explicit MemoryLocOrCall(const MemoryLocation &Loc)
120 : IsCall(false), Loc(Loc) {}
121
122 bool IsCall;
123 ImmutableCallSite getCS() const {
124 assert(IsCall);
125 return CS;
126 }
127 MemoryLocation getLoc() const {
128 assert(!IsCall);
129 return Loc;
130 }
131
132 bool operator==(const MemoryLocOrCall &Other) const {
133 if (IsCall != Other.IsCall)
134 return false;
135
136 if (IsCall)
137 return CS.getCalledValue() == Other.CS.getCalledValue();
138 return Loc == Other.Loc;
139 }
140
141 private:
142 union {
143 ImmutableCallSite CS;
144 MemoryLocation Loc;
145 };
146 };
147 }
148
149 namespace llvm {
150 template <> struct DenseMapInfo<MemoryLocOrCall> {
151 static inline MemoryLocOrCall getEmptyKey() {
152 return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
153 }
154 static inline MemoryLocOrCall getTombstoneKey() {
155 return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
156 }
157 static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
158 if (MLOC.IsCall)
159 return hash_combine(MLOC.IsCall,
160 DenseMapInfo<const Value *>::getHashValue(
161 MLOC.getCS().getCalledValue()));
162 return hash_combine(
163 MLOC.IsCall, DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
164 }
165 static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
166 return LHS == RHS;
167 }
168 };
169
170 enum class Reorderability { Always, IfNoAlias, Never };
171
172 /// This does one-way checks to see if Use could theoretically be hoisted above
173 /// MayClobber. This will not check the other way around.
174 ///
175 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
176 /// MayClobber, with no potentially clobbering operations in between them.
177 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
178 static Reorderability getLoadReorderability(const LoadInst *Use,
179 const LoadInst *MayClobber) {
180 bool VolatileUse = Use->isVolatile();
181 bool VolatileClobber = MayClobber->isVolatile();
182 // Volatile operations may never be reordered with other volatile operations.
183 if (VolatileUse && VolatileClobber)
184 return Reorderability::Never;
185
186 // The lang ref allows reordering of volatile and non-volatile operations.
187 // Whether an aliasing nonvolatile load and volatile load can be reordered,
188 // though, is ambiguous. Because it may not be best to exploit this ambiguity,
189 // we only allow volatile/non-volatile reordering if the volatile and
190 // non-volatile operations don't alias.
191 Reorderability Result = VolatileUse || VolatileClobber
192 ? Reorderability::IfNoAlias
193 : Reorderability::Always;
194
195 // If a load is seq_cst, it cannot be moved above other loads. If its ordering
196 // is weaker, it can be moved above other loads. We just need to be sure that
197 // MayClobber isn't an acquire load, because loads can't be moved above
198 // acquire loads.
199 //
200 // Note that this explicitly *does* allow the free reordering of monotonic (or
201 // weaker) loads of the same address.
202 bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
203 bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
204 AtomicOrdering::Acquire);
205 if (SeqCstUse || MayClobberIsAcquire)
206 return Reorderability::Never;
207 return Result;
208 }
209
210 static bool instructionClobbersQuery(MemoryDef *MD,
211 const MemoryLocation &UseLoc,
212 const Instruction *UseInst,
213 AliasAnalysis &AA) {
214 Instruction *DefInst = MD->getMemoryInst();
215 assert(DefInst && "Defining instruction not actually an instruction");
216 ImmutableCallSite UseCS(UseInst);
217
218 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
219 // These intrinsics will show up as affecting memory, but they are just
220 // markers.
221 switch (II->getIntrinsicID()) {
222 case Intrinsic::lifetime_start:
223 if (UseCS)
224 return false;
225 return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), UseLoc);
226 case Intrinsic::lifetime_end:
227 case Intrinsic::invariant_start:
228 case Intrinsic::invariant_end:
229 case Intrinsic::assume:
230 return false;
231 default:
232 break;
233 }
234 }
235
236 if (UseCS) {
237 ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
238 return I != MRI_NoModRef;
239 }
240
241 if (auto *DefLoad = dyn_cast<LoadInst>(DefInst)) {
242 if (auto *UseLoad = dyn_cast<LoadInst>(UseInst)) {
243 switch (getLoadReorderability(UseLoad, DefLoad)) {
244 case Reorderability::Always:
245 return false;
246 case Reorderability::Never:
247 return true;
248 case Reorderability::IfNoAlias:
249 return !AA.isNoAlias(UseLoc, MemoryLocation::get(DefLoad));
250 }
251 }
252 }
253
254 return AA.getModRefInfo(DefInst, UseLoc) & MRI_Mod;
255 }
256
257 static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
258 const MemoryLocOrCall &UseMLOC,
259 AliasAnalysis &AA) {
260 // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
261 // to exist while MemoryLocOrCall is pushed through places.
262 if (UseMLOC.IsCall)
263 return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
264 AA);
265 return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
266 AA);
267 }
268
269 // Return true when MD may alias MU, return false otherwise.
270 bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
271 AliasAnalysis &AA) {
272 return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);
273 }
274 }
275
276 namespace {
277 struct UpwardsMemoryQuery {
278 // True if our original query started off as a call
279 bool IsCall;
280 // The pointer location we started the query with. This will be empty if
281 // IsCall is true.
282 MemoryLocation StartingLoc;
283 // This is the instruction we were querying about.
284 const Instruction *Inst;
285 // The MemoryAccess we actually got called with, used to test local domination
286 const MemoryAccess *OriginalAccess;
287
288 UpwardsMemoryQuery()
289 : IsCall(false), Inst(nullptr), OriginalAccess(nullptr) {}
290
291 UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
292 : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
293 if (!IsCall)
294 StartingLoc = MemoryLocation::get(Inst);
295 }
296 };
297
298 static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
299 AliasAnalysis &AA) {
300 Instruction *Inst = MD->getMemoryInst();
301 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
302 switch (II->getIntrinsicID()) {
303 case Intrinsic::lifetime_end:
304 return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
305 default:
306 return false;
307 }
308 }
309 return false;
310 }
311
312 static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
313 const Instruction *I) {
314 // If the memory can't be changed, then loads of the memory can't be
315 // clobbered.
316 //
317 // FIXME: We should handle invariant groups, as well. It's a bit harder,
318 // because we need to pay close attention to invariant group barriers.
319 return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
320 AA.pointsToConstantMemory(cast<LoadInst>(I)->
321 getPointerOperand()));
322 }
323
324 /// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
325 /// in between `Start` and `ClobberAt` can clobber `Start`.
326 ///
327 /// This is meant to be as simple and self-contained as possible. Because it
328 /// uses no cache, etc., it can be relatively expensive.
329 ///
330 /// \param Start The MemoryAccess that we want to walk from.
331 /// \param ClobberAt A clobber for Start.
332 /// \param StartLoc The MemoryLocation for Start.
333 /// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
334 /// \param Query The UpwardsMemoryQuery we used for our search.
335 /// \param AA The AliasAnalysis we used for our search.
336 static void LLVM_ATTRIBUTE_UNUSED
337 checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt,
338 const MemoryLocation &StartLoc, const MemorySSA &MSSA,
339 const UpwardsMemoryQuery &Query, AliasAnalysis &AA) {
340 assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
341
342 if (MSSA.isLiveOnEntryDef(Start)) {
343 assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
344 "liveOnEntry must clobber itself");
345 return;
346 }
347
348 bool FoundClobber = false;
349 DenseSet<MemoryAccessPair> VisitedPhis;
350 SmallVector<MemoryAccessPair, 8> Worklist;
351 Worklist.emplace_back(Start, StartLoc);
352 // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
353 // is found, complain.
354 while (!Worklist.empty()) {
355 MemoryAccessPair MAP = Worklist.pop_back_val();
356 // All we care about is that nothing from Start to ClobberAt clobbers Start.
357 // We learn nothing from revisiting nodes.
358 if (!VisitedPhis.insert(MAP).second)
359 continue;
360
361 for (MemoryAccess *MA : def_chain(MAP.first)) {
362 if (MA == ClobberAt) {
363 if (auto *MD = dyn_cast<MemoryDef>(MA)) {
364 // instructionClobbersQuery isn't essentially free, so don't use `|=`,
365 // since it won't let us short-circuit.
366 //
367 // Also, note that this can't be hoisted out of the `Worklist` loop,
368 // since MD may only act as a clobber for 1 of N MemoryLocations.
369 FoundClobber =
370 FoundClobber || MSSA.isLiveOnEntryDef(MD) ||
371 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
372 }
373 break;
374 }
375
376 // We should never hit liveOnEntry, unless it's the clobber.
377 assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
378
379 if (auto *MD = dyn_cast<MemoryDef>(MA)) {
380 (void)MD;
381 assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) &&
382 "Found clobber before reaching ClobberAt!");
383 continue;
384 }
385
386 assert(isa<MemoryPhi>(MA));
387 Worklist.append(upward_defs_begin({MA, MAP.second}), upward_defs_end());
388 }
389 }
390
391 // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
392 // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
393 assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
394 "ClobberAt never acted as a clobber");
395 }
396
397 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
398 /// in one class.
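/// At a high level, the walker answers "what is the nearest access that
/// clobbers this location?" by following def chains upward, and it tries to
/// step past MemoryPhis whenever every incoming path provably reaches a
/// common dominating access without being clobbered first.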
399 class ClobberWalker {
400 /// Save a few bytes by using unsigned instead of size_t.
401 using ListIndex = unsigned;
402
403 /// Represents a span of contiguous MemoryDefs, potentially ending in a
404 /// MemoryPhi.
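/// Each DefPath records one segment of the upward walk: `First` is where the
/// segment started, `Last` is how far along it we have walked so far, and
/// `Previous` is the index (into Paths) of the segment we branched from when
/// we crossed the phi that began this one (None for the initial segment).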
405 struct DefPath {
406 MemoryLocation Loc;
407 // Note that, because we always walk in reverse, Last will always dominate
408 // First. Also note that First and Last are inclusive.
409 MemoryAccess *First;
410 MemoryAccess *Last;
411 Optional<ListIndex> Previous;
412
413 DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
414 Optional<ListIndex> Previous)
415 : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
416
417 DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
418 Optional<ListIndex> Previous)
419 : DefPath(Loc, Init, Init, Previous) {}
420 };
421
422 const MemorySSA &MSSA;
423 AliasAnalysis &AA;
424 DominatorTree &DT;
425 UpwardsMemoryQuery *Query;
426
427 // Phi optimization bookkeeping
428 SmallVector<DefPath, 32> Paths;
429 DenseSet<ConstMemoryAccessPair> VisitedPhis;
430
431 /// Find the nearest def or phi that `From` can legally be optimized to.
432 const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
433 assert(From->getNumOperands() && "Phi with no operands?");
434
435 BasicBlock *BB = From->getBlock();
436 MemoryAccess *Result = MSSA.getLiveOnEntryDef();
437 DomTreeNode *Node = DT.getNode(BB);
438 while ((Node = Node->getIDom())) {
439 auto *Defs = MSSA.getBlockDefs(Node->getBlock());
440 if (Defs)
441 return &*Defs->rbegin();
442 }
443 return Result;
444 }
445
446 /// Result of calling walkToPhiOrClobber.
447 struct UpwardsWalkResult {
448 /// The "Result" of the walk. Either a clobber, the last thing we walked, or
449 /// both.
450 MemoryAccess *Result;
451 bool IsKnownClobber;
452 };
453
454 /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
455 /// This will update Desc.Last as it walks. It will (optionally) also stop at
456 /// StopAt.
457 ///
458 /// This does not test for whether StopAt is a clobber.
459 UpwardsWalkResult
460 walkToPhiOrClobber(DefPath &Desc,
461 const MemoryAccess *StopAt = nullptr) const {
462 assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
463
464 for (MemoryAccess *Current : def_chain(Desc.Last)) {
465 Desc.Last = Current;
466 if (Current == StopAt)
467 return {Current, false};
468
469 if (auto *MD = dyn_cast<MemoryDef>(Current))
470 if (MSSA.isLiveOnEntryDef(MD) ||
471 instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA))
472 return {MD, true};
473 }
474
475 assert(isa<MemoryPhi>(Desc.Last) &&
476 "Ended at a non-clobber that's not a phi?");
477 return {Desc.Last, false};
478 }
479
480 void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
481 ListIndex PriorNode) {
482 auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
483 upward_defs_end());
484 for (const MemoryAccessPair &P : UpwardDefs) {
485 PausedSearches.push_back(Paths.size());
486 Paths.emplace_back(P.second, P.first, PriorNode);
487 }
488 }
489
490 /// Represents a search that terminated after finding a clobber. This clobber
491 /// may or may not be present in the path of defs from LastNode..SearchStart,
492 /// since it may have been retrieved from cache.
493 struct TerminatedPath {
494 MemoryAccess *Clobber;
495 ListIndex LastNode;
496 };
497
498 /// Get an access that keeps us from optimizing to the given phi.
499 ///
500 /// PausedSearches is an array of indices into the Paths array. On entry, it
501 /// holds the indices of searches that stopped at the last phi optimization
502 /// target. On exit, it is left in an unspecified state.
503 ///
504 /// If this returns None, NewPaused is a vector of searches that terminated
505 /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
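/// (A "blocking access" is a clobber hit on some path before reaching
/// StopWhere that does not dominate StopWhere; its existence means at least
/// one path from the phi cannot legally be walked up to StopWhere.)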
506 Optional<TerminatedPath>
507 getBlockingAccess(const MemoryAccess *StopWhere,
508 SmallVectorImpl<ListIndex> &PausedSearches,
509 SmallVectorImpl<ListIndex> &NewPaused,
510 SmallVectorImpl<TerminatedPath> &Terminated) {
511 assert(!PausedSearches.empty() && "No searches to continue?");
512
513 // BFS vs DFS really doesn't make a difference here, so just do a DFS with
514 // PausedSearches as our stack.
515 while (!PausedSearches.empty()) {
516 ListIndex PathIndex = PausedSearches.pop_back_val();
517 DefPath &Node = Paths[PathIndex];
518
519 // If we've already visited this path with this MemoryLocation, we don't
520 // need to do so again.
521 //
522 // NOTE: That we just drop these paths on the ground makes caching
523 // behavior sporadic. e.g. given a diamond:
524 // A
525 // B C
526 // D
527 //
528 // ...If we walk D, B, A, C, we'll only cache the result of phi
529 // optimization for A, B, and D; C will be skipped because it dies here.
530 // This arguably isn't the worst thing ever, since:
531 // - We generally query things in a top-down order, so if we got below D
532 // without needing cache entries for {C, MemLoc}, then chances are
533 // that those cache entries would end up ultimately unused.
534 // - We still cache things for A, so C only needs to walk up a bit.
535 // If this behavior becomes problematic, we can fix without a ton of extra
536 // work.
537 if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
538 continue;
539
540 UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere);
541 if (Res.IsKnownClobber) {
542 assert(Res.Result != StopWhere);
543 // If this wasn't a cache hit, we hit a clobber when walking. That's a
544 // failure.
545 TerminatedPath Term{Res.Result, PathIndex};
546 if (!MSSA.dominates(Res.Result, StopWhere))
547 return Term;
548
549 // Otherwise, it's a valid thing to potentially optimize to.
550 Terminated.push_back(Term);
551 continue;
552 }
553
554 if (Res.Result == StopWhere) {
555 // We've hit our target. Save this path off for if we want to continue
556 // walking.
557 NewPaused.push_back(PathIndex);
558 continue;
559 }
560
561 assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
562 addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
563 }
564
565 return None;
566 }
567
568 template <class T, class Walker>
569 struct generic_def_path_iterator
570 : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
571 std::forward_iterator_tag, T *> {
572 generic_def_path_iterator() : W(nullptr), N(None) {}
573 generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
574
575 T &operator*() const { return curNode(); }
576
577 generic_def_path_iterator &operator++() {
578 N = curNode().Previous;
579 return *this;
580 }
581
582 bool operator==(const generic_def_path_iterator &O) const {
583 if (N.hasValue() != O.N.hasValue())
584 return false;
585 return !N.hasValue() || *N == *O.N;
586 }
587
588 private:
589 T &curNode() const { return W->Paths[*N]; }
590
591 Walker *W;
592 Optional<ListIndex> N;
593 };
594
595 using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
596 using const_def_path_iterator =
597 generic_def_path_iterator<const DefPath, const ClobberWalker>;
598
599 iterator_range<def_path_iterator> def_path(ListIndex From) {
600 return make_range(def_path_iterator(this, From), def_path_iterator());
601 }
602
603 iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
604 return make_range(const_def_path_iterator(this, From),
605 const_def_path_iterator());
606 }
607
608 struct OptznResult {
609 /// The path that contains our result.
610 TerminatedPath PrimaryClobber;
611 /// The paths that we can legally cache back from, but that aren't
612 /// necessarily the result of the Phi optimization.
613 SmallVector<TerminatedPath, 4> OtherClobbers;
614 };
615
616 ListIndex defPathIndex(const DefPath &N) const {
617 // The assert looks nicer if we don't need to do &N
618 const DefPath *NP = &N;
619 assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
620 "Out of bounds DefPath!");
621 return NP - &Paths.front();
622 }
623
624 /// Try to optimize a phi as best we can. Returns a SmallVector of Paths
625 /// that act as legal clobbers. Note that this won't return *all* clobbers.
626 ///
627 /// Phi optimization algorithm tl;dr:
628 /// - Find the earliest def/phi, A, we can optimize to
629 /// - Find if all paths from the starting memory access ultimately reach A
630 /// - If not, optimization isn't possible.
631 /// - Otherwise, walk from A to another clobber or phi, A'.
632 /// - If A' is a def, we're done.
633 /// - If A' is a phi, try to optimize it.
634 ///
635 /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
636 /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
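/// For example, to optimize a MemoryPhi at the bottom of a diamond, we pick
/// the nearest def or phi dominating the diamond as the walk target, then walk
/// both incoming paths upward. If neither path hits a clobber before reaching
/// the target, the target is a legal clobber for the phi and we repeat the
/// process from there; if some path is clobbered first, that clobber is the
/// result instead.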
637 OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
638 const MemoryLocation &Loc) {
639 assert(Paths.empty() && VisitedPhis.empty() &&
640 "Reset the optimization state.");
641
642 Paths.emplace_back(Loc, Start, Phi, None);
643 // Stores how many "valid" optimization nodes we had prior to calling
644 // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
645 auto PriorPathsSize = Paths.size();
646
647 SmallVector<ListIndex, 8> PausedSearches;
648 SmallVector<ListIndex, 8> NewPaused;
649 SmallVector<TerminatedPath, 4> TerminatedPaths;
650
651 addSearches(Phi, PausedSearches, 0);
652
653 // Moves the TerminatedPath with the "most dominated" Clobber to the end of
654 // Paths.
655 auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
656 assert(!Paths.empty() && "Need a path to move");
657 auto Dom = Paths.begin();
658 for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
659 if (!MSSA.dominates(I->Clobber, Dom->Clobber))
660 Dom = I;
661 auto Last = Paths.end() - 1;
662 if (Last != Dom)
663 std::iter_swap(Last, Dom);
664 };
665
666 MemoryPhi *Current = Phi;
667 while (1) {
668 assert(!MSSA.isLiveOnEntryDef(Current) &&
669 "liveOnEntry wasn't treated as a clobber?");
670
671 const auto *Target = getWalkTarget(Current);
672 // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
673 // optimization for the prior phi.
674 assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
675 return MSSA.dominates(P.Clobber, Target);
676 }));
677
678 // FIXME: This is broken, because the Blocker may be reported to be
679 // liveOnEntry, and we'll happily wait for that to disappear (read: never)
680 // For the moment, this is fine, since we do nothing with blocker info.
681 if (Optional<TerminatedPath> Blocker = getBlockingAccess(
682 Target, PausedSearches, NewPaused, TerminatedPaths)) {
683
684 // Find the node we started at. We can't search based on N->Last, since
685 // we may have gone around a loop with a different MemoryLocation.
686 auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
687 return defPathIndex(N) < PriorPathsSize;
688 });
689 assert(Iter != def_path_iterator());
690
691 DefPath &CurNode = *Iter;
692 assert(CurNode.Last == Current);
693
694 // Two things:
695 // A. We can't reliably cache all of NewPaused back. Consider a case
696 // where we have two paths in NewPaused; one of which can't optimize
697 // above this phi, whereas the other can. If we cache the second path
698 // back, we'll end up with suboptimal cache entries. We can handle
699 // cases like this a bit better when we either try to find all
700 // clobbers that block phi optimization, or when our cache starts
701 // supporting unfinished searches.
702 // B. We can't reliably cache TerminatedPaths back here without doing
703 // extra checks; consider a case like:
704 // T
705 // / \
706 // D C
707 // \ /
708 // S
709 // Where T is our target, C is a node with a clobber on it, D is a
710 // diamond (with a clobber *only* on the left or right node, N), and
711 // S is our start. Say we walk to D, through the node opposite N
712 // (read: ignoring the clobber), and see a cache entry in the top
713 // node of D. That cache entry gets put into TerminatedPaths. We then
714 // walk up to C (N is later in our worklist), find the clobber, and
715 // quit. If we append TerminatedPaths to OtherClobbers, we'll cache
716 // the bottom part of D to the cached clobber, ignoring the clobber
717 // in N. Again, this problem goes away if we start tracking all
718 // blockers for a given phi optimization.
719 TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
720 return {Result, {}};
721 }
722
723 // If there's nothing left to search, then all paths led to valid clobbers
724 // that we got from our cache; pick the nearest to the start, and allow
725 // the rest to be cached back.
726 if (NewPaused.empty()) {
727 MoveDominatedPathToEnd(TerminatedPaths);
728 TerminatedPath Result = TerminatedPaths.pop_back_val();
729 return {Result, std::move(TerminatedPaths)};
730 }
731
732 MemoryAccess *DefChainEnd = nullptr;
733 SmallVector<TerminatedPath, 4> Clobbers;
734 for (ListIndex Paused : NewPaused) {
735 UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
736 if (WR.IsKnownClobber)
737 Clobbers.push_back({WR.Result, Paused});
738 else
739 // Micro-opt: If we hit the end of the chain, save it.
740 DefChainEnd = WR.Result;
741 }
742
743 if (!TerminatedPaths.empty()) {
744 // If we couldn't find the dominating phi/liveOnEntry in the above loop,
745 // do it now.
746 if (!DefChainEnd)
747 for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
748 DefChainEnd = MA;
749
750 // If any of the terminated paths don't dominate the phi we'll try to
751 // optimize, we need to figure out what they are and quit.
752 const BasicBlock *ChainBB = DefChainEnd->getBlock();
753 for (const TerminatedPath &TP : TerminatedPaths) {
754 // Because we know that DefChainEnd is as "high" as we can go, we
755 // don't need local dominance checks; BB dominance is sufficient.
756 if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
757 Clobbers.push_back(TP);
758 }
759 }
760
761 // If we have clobbers in the def chain, find the one closest to Current
762 // and quit.
763 if (!Clobbers.empty()) {
764 MoveDominatedPathToEnd(Clobbers);
765 TerminatedPath Result = Clobbers.pop_back_val();
766 return {Result, std::move(Clobbers)};
767 }
768
769 assert(all_of(NewPaused,
770 [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
771
772 // Because liveOnEntry is a clobber, this must be a phi.
773 auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
774
775 PriorPathsSize = Paths.size();
776 PausedSearches.clear();
777 for (ListIndex I : NewPaused)
778 addSearches(DefChainPhi, PausedSearches, I);
779 NewPaused.clear();
780
781 Current = DefChainPhi;
782 }
783 }
784
785 void verifyOptResult(const OptznResult &R) const {
786 assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
787 return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
788 }));
789 }
790
791 void resetPhiOptznState() {
792 Paths.clear();
793 VisitedPhis.clear();
794 }
795
796 public:
797 ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
798 : MSSA(MSSA), AA(AA), DT(DT) {}
799
800 void reset() {}
801
802 /// Finds the nearest clobber for the given query, optimizing phis if
803 /// possible.
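/// The walk follows Start's def chain; if it reaches a MemoryPhi before
/// finding a known clobber, tryOptimizePhi is used to try to continue past it.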
804 MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
805 Query = &Q;
806
807 MemoryAccess *Current = Start;
808 // This walker pretends uses don't exist. If we're handed one, silently grab
809 // its def. (This has the nice side-effect of ensuring we never cache uses)
810 if (auto *MU = dyn_cast<MemoryUse>(Start))
811 Current = MU->getDefiningAccess();
812
813 DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
814 // Fast path for the overly-common case (no crazy phi optimization
815 // necessary)
816 UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
817 MemoryAccess *Result;
818 if (WalkResult.IsKnownClobber) {
819 Result = WalkResult.Result;
820 } else {
821 OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
822 Current, Q.StartingLoc);
823 verifyOptResult(OptRes);
824 resetPhiOptznState();
825 Result = OptRes.PrimaryClobber.Clobber;
826 }
827
828 #ifdef EXPENSIVE_CHECKS
829 checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
830 #endif
831 return Result;
832 }
833
834 void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); }
835 };
836
837 struct RenamePassData {
838 DomTreeNode *DTN;
839 DomTreeNode::const_iterator ChildIt;
840 MemoryAccess *IncomingVal;
841
842 RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
843 MemoryAccess *M)
844 : DTN(D), ChildIt(It), IncomingVal(M) {}
845 void swap(RenamePassData &RHS) {
846 std::swap(DTN, RHS.DTN);
847 std::swap(ChildIt, RHS.ChildIt);
848 std::swap(IncomingVal, RHS.IncomingVal);
849 }
850 };
851 } // anonymous namespace
852
853 namespace llvm {
854 /// \brief A MemorySSAWalker that does AA walks to disambiguate accesses. It no
855 /// longer does caching on its own, but the name has been retained for the
856 /// moment.
857 class MemorySSA::CachingWalker final : public MemorySSAWalker {
858 ClobberWalker Walker;
859 bool AutoResetWalker;
860
861 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
862 void verifyRemoved(MemoryAccess *);
863
864 public:
865 CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
866 ~CachingWalker() override;
867
868 using MemorySSAWalker::getClobberingMemoryAccess;
869 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
870 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
871 const MemoryLocation &) override;
872 void invalidateInfo(MemoryAccess *) override;
873
874 /// Whether we call resetClobberWalker() after each time we *actually* walk to
875 /// answer a clobber query.
876 void setAutoResetWalker(bool AutoReset) { AutoResetWalker = AutoReset; }
877
878 /// Drop the walker's persistent data structures.
879 void resetClobberWalker() { Walker.reset(); }
880
881 void verify(const MemorySSA *MSSA) override {
882 MemorySSAWalker::verify(MSSA);
883 Walker.verify(MSSA);
884 }
885 };
886
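/// Push IncomingVal into the MemoryPhis of BB's successor blocks. When
/// RenameAllUses is set, the existing incoming value for BB is replaced;
/// otherwise a new incoming entry for BB is appended.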
887 void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
888 bool RenameAllUses) {
889 // Pass through values to our successors
890 for (const BasicBlock *S : successors(BB)) {
891 auto It = PerBlockAccesses.find(S);
892 // Rename the phi nodes in our successor block
893 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
894 continue;
895 AccessList *Accesses = It->second.get();
896 auto *Phi = cast<MemoryPhi>(&Accesses->front());
897 if (RenameAllUses) {
898 int PhiIndex = Phi->getBasicBlockIndex(BB);
899 assert(PhiIndex != -1 && "Incomplete phi during partial rename");
900 Phi->setIncomingValue(PhiIndex, IncomingVal);
901 } else
902 Phi->addIncoming(IncomingVal, BB);
903 }
904 }