llvm.org GIT mirror llvm / 2a752bf
[MI] Change the array of `MachineMemOperand` pointers to be a generically extensible collection of extra info attached to a `MachineInstr`.

The primary change here is cleaning up the APIs used for setting and manipulating the `MachineMemOperand` pointer arrays so that we can change how they are allocated. Then we introduce an extra info object that uses the trailing object pattern to attach some number of MMOs but also other extra info. The design of this is specifically so that this extra info has a fixed necessary cost (the header tracking what extra info is included) and everything else can be tail allocated. This pattern works especially well with a `BumpPtrAllocator` which we use here.

I've also added the basic scaffolding for putting interesting pointers into this, namely pre- and post-instruction symbols. These aren't used anywhere yet, they're just there to ensure I've actually gotten the data structure types correct. I'll flesh out support for these in a subsequent patch (MIR dumping, parsing, the works).

Finally, I've included an optimization where we store any single pointer inline in the `MachineInstr` to avoid the allocation overhead. This is expected to be overwhelmingly the most common case and so should avoid any memory usage growth due to slightly less clever / dense allocation when dealing with >1 MMO. This did require several ergonomic improvements to the `PointerSumType` to reasonably support the various usage models. It also has a side effect of freeing up 8 bits within the `MachineInstr` which could be repurposed for something else.

The suggested direction here came largely from Hal Finkel. I hope it was worth it. ;] It does hopefully clear a path for subsequent extensions w/o nearly as much leg work. Lots of thanks to Reid and Justin for careful reviews and ideas about how to do all of this.

Differential Revision: https://reviews.llvm.org/D50701

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@339940 91177308-0d34-0410-b5e6-96231b3b80d8

Chandler Carruth 2 years ago
43 changed file(s) with 734 addition(s) and 512 deletion(s).
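The core idea in the message above is easier to see in isolation. Below is a minimal, self-contained sketch (hypothetical code, not part of this patch; `MemOp`, `ExtraBlock`, and `InlineOrOutOfLine` are made-up names): a single pointer-sized field keeps one tagged pointer inline for the common case, and only when more extra info is needed would it fall back to a separately allocated block.

// Hypothetical sketch of "one pointer inline, otherwise out-of-line".
#include <cassert>
#include <cstdint>

struct alignas(4) MemOp {};         // stand-in for MachineMemOperand
struct ExtraBlock { /* MMOs, pre-/post-instruction symbols, ... */ };

// One pointer-sized member; the two low bits discriminate what it points at.
class InlineOrOutOfLine {
  static constexpr uintptr_t TagMask = 0x3;
  uintptr_t Value = 0;

public:
  enum Kind : uintptr_t { SingleMemOp = 0, OutOfLine = 1 };

  void setSingleMemOp(MemOp *M) {
    Value = reinterpret_cast<uintptr_t>(M) | SingleMemOp;
  }
  void setOutOfLine(ExtraBlock *B) {
    Value = reinterpret_cast<uintptr_t>(B) | OutOfLine;
  }
  Kind kind() const { return static_cast<Kind>(Value & TagMask); }
  MemOp *singleMemOp() const {
    assert(kind() == SingleMemOp && "different active member");
    return reinterpret_cast<MemOp *>(Value & ~TagMask);
  }
};

int main() {
  MemOp M;
  InlineOrOutOfLine Info;
  Info.setSingleMemOp(&M);          // common case: no extra allocation
  assert(Info.kind() == InlineOrOutOfLine::SingleMemOp);
  assert(Info.singleMemOp() == &M);
}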
5757 /// and may be desirable to set to a state that is particularly desirable to
5858 /// default construct.
5959 ///
60 /// Having a supported zero-valued tag also enables getting the address of a
61 /// pointer stored with that tag provided it is stored in its natural bit
62 /// representation. This works because in the case of a zero-valued tag, the
63 /// pointer's value is directly stored into this object and we can expose the
64 /// address of that internal storage. This is especially useful when building an
65 /// `ArrayRef` of a single pointer stored in a sum type.
66 ///
6067 /// There is no support for constructing or accessing with a dynamic tag as
6168 /// that would fundamentally violate the type safety provided by the sum type.
6269 template <typename TagT, typename... MemberTs> class PointerSumType {
63 uintptr_t Value = 0;
64
6570 using HelperT = detail::PointerSumTypeHelper<TagT, MemberTs...>;
71
72 // We keep both the raw value and the min tag value's pointer in a union. When
73 // the minimum tag value is zero, this allows code below to cleanly expose the
74 // address of the zero-tag pointer instead of just the zero-tag pointer
75 // itself. This is especially useful when building `ArrayRef`s out of a single
76 // pointer. However, we have to carefully access the union due to the active
77 // member potentially changing. When we *store* a new value, we directly
78 // access the union to allow us to store using the obvious types. However,
79 // when we *read* a value, we copy the underlying storage out to avoid relying
80 // on one member or the other being active.
81 union StorageT {
82 // Ensure we get a null default constructed value.
83 uintptr_t Value = 0;
84
85 typename HelperT::template Lookup<HelperT::MinTag>::PointerT MinTagPointer;
86 };
87
88 StorageT Storage;
6689
6790 public:
6891 constexpr PointerSumType() = default;
92
93 /// A typed setter to a given tagged member of the sum type.
94 template <TagT N>
95 void set(typename HelperT::template Lookup<N>::PointerT Pointer) {
96 void *V = HelperT::template Lookup<N>::TraitsT::getAsVoidPointer(Pointer);
97 assert((reinterpret_cast<uintptr_t>(V) & HelperT::TagMask) == 0 &&
98 "Pointer is insufficiently aligned to store the discriminant!");
99 Storage.Value = reinterpret_cast<uintptr_t>(V) | N;
100 }
69101
70102 /// A typed constructor for a specific tagged member of the sum type.
71103 template <TagT N>
72104 static PointerSumType
73105 create(typename HelperT::template Lookup<N>::PointerT Pointer) {
74106 PointerSumType Result;
75 void *V = HelperT::template Lookup<N>::TraitsT::getAsVoidPointer(Pointer);
76 assert((reinterpret_cast<uintptr_t>(V) & HelperT::TagMask) == 0 &&
77 "Pointer is insufficiently aligned to store the discriminant!");
78 Result.Value = reinterpret_cast<uintptr_t>(V) | N;
107 Result.set(Pointer);
79108 return Result;
80109 }
81110
82 TagT getTag() const { return static_cast<TagT>(Value & HelperT::TagMask); }
111 /// Clear the value to null with the min tag type.
112 void clear() { set<HelperT::MinTag>(nullptr); }
113
114 TagT getTag() const {
115 return static_cast<TagT>(getOpaqueValue() & HelperT::TagMask);
116 }
83117
84118 template <TagT N> bool is() const { return N == getTag(); }
85119
86120 template <TagT N> typename HelperT::template Lookup<N>::PointerT get() const {
87 void *P = is<N>() ? getImpl() : nullptr;
121 void *P = is<N>() ? getVoidPtr() : nullptr;
88122 return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(P);
89123 }
90124
91125 template <TagT N>
92126 typename HelperT::template Lookup<N>::PointerT cast() const {
93127 assert(is<N>() && "This instance has a different active member.");
94 return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(getImpl());
95 }
96
97 explicit operator bool() const { return Value & HelperT::PointerMask; }
98 bool operator==(const PointerSumType &R) const { return Value == R.Value; }
99 bool operator!=(const PointerSumType &R) const { return Value != R.Value; }
100 bool operator<(const PointerSumType &R) const { return Value < R.Value; }
101 bool operator>(const PointerSumType &R) const { return Value > R.Value; }
102 bool operator<=(const PointerSumType &R) const { return Value <= R.Value; }
103 bool operator>=(const PointerSumType &R) const { return Value >= R.Value; }
104
105 uintptr_t getOpaqueValue() const { return Value; }
128 return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(
129 getVoidPtr());
130 }
131
132 /// If the tag is zero and the pointer's value isn't changed when being
133 /// stored, get the address of the stored value type-punned to the zero-tag's
134 /// pointer type.
135 typename HelperT::template Lookup<HelperT::MinTag>::PointerT const *
136 getAddrOfZeroTagPointer() const {
137 return const_cast<PointerSumType *>(this)->getAddrOfZeroTagPointer();
138 }
139
140 /// If the tag is zero and the pointer's value isn't changed when being
141 /// stored, get the address of the stored value type-punned to the zero-tag's
142 /// pointer type.
143 typename HelperT::template Lookup<HelperT::MinTag>::PointerT *
144 getAddrOfZeroTagPointer() {
145 static_assert(HelperT::MinTag == 0, "Non-zero minimum tag value!");
146 assert(is<HelperT::MinTag>() && "The active tag is not zero!");
147 // Store the initial value of the pointer when read out of our storage.
148 auto InitialPtr = get<HelperT::MinTag>();
149 // Now update the active member of the union to be the actual pointer-typed
150 // member so that accessing it indirectly through the returned address is
151 // valid.
152 Storage.MinTagPointer = InitialPtr;
153 // Finally, validate that this was a no-op as expected by reading it back
154 // out using the same underlying-storage read as above.
155 assert(InitialPtr == get<HelperT::MinTag>() &&
156 "Switching to typed storage changed the pointer returned!");
157 // Now we can correctly return an address to typed storage.
158 return &Storage.MinTagPointer;
159 }
160
161 explicit operator bool() const {
162 return getOpaqueValue() & HelperT::PointerMask;
163 }
164 bool operator==(const PointerSumType &R) const {
165 return getOpaqueValue() == R.getOpaqueValue();
166 }
167 bool operator!=(const PointerSumType &R) const {
168 return getOpaqueValue() != R.getOpaqueValue();
169 }
170 bool operator<(const PointerSumType &R) const {
171 return getOpaqueValue() < R.getOpaqueValue();
172 }
173 bool operator>(const PointerSumType &R) const {
174 return getOpaqueValue() > R.getOpaqueValue();
175 }
176 bool operator<=(const PointerSumType &R) const {
177 return getOpaqueValue() <= R.getOpaqueValue();
178 }
179 bool operator>=(const PointerSumType &R) const {
180 return getOpaqueValue() >= R.getOpaqueValue();
181 }
182
183 uintptr_t getOpaqueValue() const {
184 uintptr_t Value;
185 // Read the underlying storage of the union, regardless of the active
186 // member.
187 memcpy(&Value, &Storage, sizeof(Value));
188 return Value;
189 }
106190
107191 protected:
108 void *getImpl() const {
109 return reinterpret_cast<void *>(Value & HelperT::PointerMask);
192 void *getVoidPtr() const {
193 return reinterpret_cast<void *>(getOpaqueValue() & HelperT::PointerMask);
110194 }
111195 };
112196
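A minimal usage sketch of the class above (hypothetical caller code, written against the patched `PointerSumType`; the names `Kinds`, `MemOp`, `Extra`, `viewAsArray` are illustrative, and the pointee types are over-aligned so enough low bits are free for the tag): the zero-valued minimum tag is what makes `getAddrOfZeroTagPointer()` legal, which in turn lets a single stored pointer be viewed as a one-element `ArrayRef`.

// Hypothetical caller code, not part of this patch.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerSumType.h"
using namespace llvm;

struct alignas(8) MemOp {};   // stand-in pointee types with spare low bits
struct alignas(8) Extra {};

enum Kinds { K_MemOp = 0, K_Extra = 1 };

using InfoTy = PointerSumType<Kinds,
                              PointerSumTypeMember<K_MemOp, MemOp *>,
                              PointerSumTypeMember<K_Extra, Extra *>>;

MemOp GlobalMemOp;

ArrayRef<MemOp *> viewAsArray(InfoTy &Info) {
  // Only valid because K_MemOp is the zero-valued minimum tag.
  if (Info.is<K_MemOp>())
    return makeArrayRef(Info.getAddrOfZeroTagPointer(), 1);
  return {};
}

void example() {
  InfoTy Info = InfoTy::create<K_MemOp>(&GlobalMemOp);
  ArrayRef<MemOp *> MMOs = viewAsArray(Info);
  (void)MMOs;
}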
150234 enum { NumTagBits = Min<MemberTs::TraitsT::NumLowBitsAvailable...>::value };
151235
152236 // Also compute the smallest discriminant and various masks for convenience.
237 constexpr static TagT MinTag =
238 static_cast<TagT>(Min<MemberTs::Tag...>::value);
153239 enum : uint64_t {
154 MinTag = Min<MemberTs::Tag...>::value,
155240 PointerMask = static_cast<uint64_t>(-1) << NumTagBits,
156241 TagMask = ~PointerMask
157242 };
710710 /// Allocate and initialize a register mask with @p NumRegister bits.
711711 uint32_t *allocateRegMask();
712712
713 /// allocateMemRefsArray - Allocate an array to hold MachineMemOperand
714 /// pointers. This array is owned by the MachineFunction.
715 MachineInstr::mmo_iterator allocateMemRefsArray(unsigned long Num);
716
717 /// extractLoadMemRefs - Allocate an array and populate it with just the
718 /// load information from the given MachineMemOperand sequence.
719 std::pair<MachineInstr::mmo_iterator,
720 MachineInstr::mmo_iterator>
721 extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
722 MachineInstr::mmo_iterator End);
723
724 /// extractStoreMemRefs - Allocate an array and populate it with just the
725 /// store information from the given MachineMemOperand sequence.
726 std::pair<MachineInstr::mmo_iterator,
727 MachineInstr::mmo_iterator>
728 extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
729 MachineInstr::mmo_iterator End);
713 /// Allocate and construct an extra info structure for a `MachineInstr`.
714 ///
715 /// This is allocated on the function's allocator and so lives the life of
716 /// the function.
717 MachineInstr::ExtraInfo *
718 createMIExtraInfo(ArrayRef<MachineMemOperand *> MMOs,
719 MCSymbol *PreInstrSymbol = nullptr,
720 MCSymbol *PostInstrSymbol = nullptr);
730721
731722 /// Allocate a string and populate it with the given external symbol name.
732723 const char *createExternalSymbolName(StringRef Name);
1616 #define LLVM_CODEGEN_MACHINEINSTR_H
1717
1818 #include "llvm/ADT/DenseMapInfo.h"
19 #include "llvm/ADT/PointerSumType.h"
1920 #include "llvm/ADT/ilist.h"
2021 #include "llvm/ADT/ilist_node.h"
2122 #include "llvm/ADT/iterator_range.h"
2223 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/CodeGen/MachineMemOperand.h"
2325 #include "llvm/CodeGen/MachineOperand.h"
2426 #include "llvm/CodeGen/TargetOpcodes.h"
2527 #include "llvm/IR/DebugLoc.h"
2628 #include "llvm/IR/InlineAsm.h"
2729 #include "llvm/MC/MCInstrDesc.h"
30 #include "llvm/MC/MCSymbol.h"
2831 #include "llvm/Support/ArrayRecycler.h"
32 #include "llvm/Support/TrailingObjects.h"
2933 #include
3034 #include
3135 #include
6064 : public ilist_node_with_parent<MachineInstr, MachineBasicBlock,
6165 ilist_sentinel_tracking<true>> {
6266 public:
63 using mmo_iterator = MachineMemOperand **;
67 using mmo_iterator = ArrayRef<MachineMemOperand *>::iterator;
6468
6569 /// Flags to specify different kinds of comments to output in
6670 /// assembly code. These flags carry semantic information not
117121 // anything other than to convey comment
118122 // information to AsmPrinter.
119123
120 uint8_t NumMemRefs = 0; // Information on memory references.
121 // Note that MemRefs == nullptr, means 'don't know', not 'no memory access'.
122 // Calling code must treat missing information conservatively. If the number
123 // of memory operands required to be precise exceeds the maximum value of
124 // NumMemRefs - currently 256 - we remove the operands entirely. Note also
125 // that this is a non-owning reference to a shared copy on write buffer owned
126 // by the MachineFunction and created via MF.allocateMemRefsArray.
127 mmo_iterator MemRefs = nullptr;
124 /// Internal implementation detail class that provides out-of-line storage for
125 /// extra info used by the machine instruction when this info cannot be stored
126 /// in-line within the instruction itself.
127 ///
128 /// This has to be defined eagerly due to the implementation constraints of
129 /// `PointerSumType` where it is used.
130 class ExtraInfo final
131 : TrailingObjects<ExtraInfo, MachineMemOperand *, MCSymbol *> {
132 public:
133 static ExtraInfo *create(BumpPtrAllocator &Allocator,
134 ArrayRef<MachineMemOperand *> MMOs,
135 MCSymbol *PreInstrSymbol = nullptr,
136 MCSymbol *PostInstrSymbol = nullptr) {
137 bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
138 bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
139 auto *Result = new (Allocator.Allocate(
140 totalSizeToAlloc<MachineMemOperand *, MCSymbol *>(
141 MMOs.size(), HasPreInstrSymbol + HasPostInstrSymbol),
142 alignof(ExtraInfo)))
143 ExtraInfo(MMOs.size(), HasPreInstrSymbol, HasPostInstrSymbol);
144
145 // Copy the actual data into the trailing objects.
146 std::copy(MMOs.begin(), MMOs.end(),
147 Result->getTrailingObjects<MachineMemOperand *>());
148
149 if (HasPreInstrSymbol)
150 Result->getTrailingObjects<MCSymbol *>()[0] = PreInstrSymbol;
151 if (HasPostInstrSymbol)
152 Result->getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol] =
153 PostInstrSymbol;
154
155 return Result;
156 }
157
158 ArrayRef<MachineMemOperand *> getMMOs() const {
159 return makeArrayRef(getTrailingObjects<MachineMemOperand *>(), NumMMOs);
160 }
161
162 MCSymbol *getPreInstrSymbol() const {
163 return HasPreInstrSymbol ? getTrailingObjects<MCSymbol *>()[0] : nullptr;
164 }
165
166 MCSymbol *getPostInstrSymbol() const {
167 return HasPostInstrSymbol
168 ? getTrailingObjects<MCSymbol *>()[HasPreInstrSymbol]
169 : nullptr;
170 }
171
172 private:
173 friend TrailingObjects;
174
175 // Description of the extra info, used to interpret the actual optional
176 // data appended.
177 //
178 // Note that this is not terribly space optimized. This leaves a great deal
179 // of flexibility to fit more in here later.
180 const int NumMMOs;
181 const bool HasPreInstrSymbol;
182 const bool HasPostInstrSymbol;
183
184 // Implement the `TrailingObjects` internal API.
185 size_t numTrailingObjects(OverloadToken<MachineMemOperand *>) const {
186 return NumMMOs;
187 }
188 size_t numTrailingObjects(OverloadToken<MCSymbol *>) const {
189 return HasPreInstrSymbol + HasPostInstrSymbol;
190 }
191
192 // Just a boring constructor to allow us to initialize the sizes. Always use
193 // the `create` routine above.
194 ExtraInfo(int NumMMOs, bool HasPreInstrSymbol, bool HasPostInstrSymbol)
195 : NumMMOs(NumMMOs), HasPreInstrSymbol(HasPreInstrSymbol),
196 HasPostInstrSymbol(HasPostInstrSymbol) {}
197 };
198
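The `TrailingObjects` layout used by `ExtraInfo` can be illustrated with a small standalone sketch (hypothetical types, not the LLVM class; `Packed`, `MemOp`, `Sym` are made-up names): a fixed header records how many of each optional object follows, while `totalSizeToAlloc` sizes a single allocation and `getTrailingObjects` computes the offsets into it.

// Hypothetical illustration of the TrailingObjects pattern.
#include "llvm/Support/TrailingObjects.h"
#include <cstdlib>
#include <new>

struct MemOp {};
struct Sym {};

class Packed final : llvm::TrailingObjects<Packed, MemOp *, Sym *> {
  friend TrailingObjects;

  const unsigned NumMemOps; // fixed-cost header describing the tail
  const bool HasSym;

  size_t numTrailingObjects(OverloadToken<MemOp *>) const { return NumMemOps; }
  size_t numTrailingObjects(OverloadToken<Sym *>) const { return HasSym; }

  Packed(unsigned NumMemOps, bool HasSym)
      : NumMemOps(NumMemOps), HasSym(HasSym) {}

public:
  static Packed *create(unsigned NumMemOps, bool HasSym) {
    // One allocation sized for the header plus all trailing pointers.
    void *Mem =
        ::operator new(totalSizeToAlloc<MemOp *, Sym *>(NumMemOps, HasSym));
    return new (Mem) Packed(NumMemOps, HasSym);
  }

  MemOp **memOps() { return getTrailingObjects<MemOp *>(); }
  Sym **sym() { return HasSym ? getTrailingObjects<Sym *>() : nullptr; }
};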
199 /// Enumeration of the kinds of inline extra info available. It is important
200 /// that the `MachineMemOperand` inline kind has a tag value of zero to make
201 /// it accessible as an `ArrayRef`.
202 enum ExtraInfoInlineKinds {
203 EIIK_MMO = 0,
204 EIIK_PreInstrSymbol,
205 EIIK_PostInstrSymbol,
206 EIIK_OutOfLine
207 };
208
209 // We store extra information about the instruction here. The common case is
210 // expected to be nothing or a single pointer (typically a MMO or a symbol).
211 // We work to optimize this common case by storing it inline here rather than
212 // requiring a separate allocation, but we fall back to an allocation when
213 // multiple pointers are needed.
214 PointerSumType<ExtraInfoInlineKinds,
215 PointerSumTypeMember<EIIK_MMO, MachineMemOperand *>,
216 PointerSumTypeMember<EIIK_PreInstrSymbol, MCSymbol *>,
217 PointerSumTypeMember<EIIK_PostInstrSymbol, MCSymbol *>,
218 PointerSumTypeMember<EIIK_OutOfLine, ExtraInfo *>>
219 Info;
128220
129221 DebugLoc debugLoc; // Source line information.
130222
411503 return I - operands_begin();
412504 }
413505
414 /// Access to memory operands of the instruction
415 mmo_iterator memoperands_begin() const { return MemRefs; }
416 mmo_iterator memoperands_end() const { return MemRefs + NumMemRefs; }
506 /// Access to memory operands of the instruction. If there are none, that does
507 /// not imply anything about whether the function accesses memory. Instead,
508 /// the caller must behave conservatively.
509 ArrayRef<MachineMemOperand *> memoperands() const {
510 if (!Info)
511 return {};
512
513 if (Info.is<EIIK_MMO>())
514 return makeArrayRef(Info.getAddrOfZeroTagPointer(), 1);
515
516 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
517 return EI->getMMOs();
518
519 return {};
520 }
521
522 /// Access to memory operands of the instruction.
523 ///
524 /// If `memoperands_begin() == memoperands_end()`, that does not imply
525 /// anything about whether the function accesses memory. Instead, the caller
526 /// must behave conservatively.
527 mmo_iterator memoperands_begin() const { return memoperands().begin(); }
528
529 /// Access to memory operands of the instruction.
530 ///
531 /// If `memoperands_begin() == memoperands_end()`, that does not imply
532 /// anything about whether the function accesses memory. Instead, the caller
533 /// must behave conservatively.
534 mmo_iterator memoperands_end() const { return memoperands().end(); }
535
417536 /// Return true if we don't have any memory operands which described the
418537 /// memory access done by this instruction. If this is true, calling code
419538 /// must be conservative.
420 bool memoperands_empty() const { return NumMemRefs == 0; }
421
422 iterator_range<mmo_iterator> memoperands() {
423 return make_range(memoperands_begin(), memoperands_end());
424 }
425 iterator_range<mmo_iterator> memoperands() const {
426 return make_range(memoperands_begin(), memoperands_end());
427 }
539 bool memoperands_empty() const { return memoperands().empty(); }
428540
429541 /// Return true if this instruction has exactly one MachineMemOperand.
430 bool hasOneMemOperand() const {
431 return NumMemRefs == 1;
432 }
542 bool hasOneMemOperand() const { return memoperands().size() == 1; }
433543
434544 /// Return the number of memory operands.
435 unsigned getNumMemOperands() const { return NumMemRefs; }
545 unsigned getNumMemOperands() const { return memoperands().size(); }
546
547 /// Helper to extract a pre-instruction symbol if one has been added.
548 MCSymbol *getPreInstrSymbol() const {
549 if (!Info)
550 return nullptr;
551 if (MCSymbol *S = Info.get<EIIK_PreInstrSymbol>())
552 return S;
553 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
554 return EI->getPreInstrSymbol();
555
556 return nullptr;
557 }
558
559 /// Helper to extract a post-instruction symbol if one has been added.
560 MCSymbol *getPostInstrSymbol() const {
561 if (!Info)
562 return nullptr;
563 if (MCSymbol *S = Info.get<EIIK_PostInstrSymbol>())
564 return S;
565 if (ExtraInfo *EI = Info.get<EIIK_OutOfLine>())
566 return EI->getPostInstrSymbol();
567
568 return nullptr;
569 }
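Taken together, a pass would query this state roughly as follows (hypothetical caller code written against the accessors above; `inspect` is a made-up function name):

// Hypothetical caller code exercising the new query API.
#include "llvm/CodeGen/MachineInstr.h"

void inspect(const llvm::MachineInstr &MI) {
  // An empty list means "unknown", so give up and stay conservative.
  if (MI.memoperands_empty())
    return;

  for (llvm::MachineMemOperand *MMO : MI.memoperands())
    (void)MMO; // inspect size, alignment, AA info, ...

  if (llvm::MCSymbol *S = MI.getPreInstrSymbol())
    (void)S; // a label to be emitted immediately before the instruction
}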
436570
437571 /// API for querying MachineInstr properties. They are the same as MCInstrDesc
438572 /// queries but they are bundle aware.
13221456 /// fewer operand than it started with.
13231457 void RemoveOperand(unsigned OpNo);
13241458
1459 /// Clear this MachineInstr's memory reference descriptor list. This resets
1460 /// the memrefs to their most conservative state. This should be used only
1461 /// as a last resort since it greatly pessimizes our knowledge of the memory
1462 /// access performed by the instruction.
1463 void dropMemRefs(MachineFunction &MF);
1464
1465 /// Assign this MachineInstr's memory reference descriptor list.
1466 ///
1467 /// Unlike other methods, this *will* allocate them into a new array
1468 /// associated with the provided `MachineFunction`.
1469 void setMemRefs(MachineFunction &MF, ArrayRef<MachineMemOperand *> MemRefs);
1470
13251471 /// Add a MachineMemOperand to the machine instruction.
13261472 /// This function should be used only occasionally. The setMemRefs function
13271473 /// is the primary method for setting up a MachineInstr's MemRefs list.
13281474 void addMemOperand(MachineFunction &MF, MachineMemOperand *MO);
13291475
1330 /// Assign this MachineInstr's memory reference descriptor list.
1331 /// This does not transfer ownership.
1332 void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd) {
1333 setMemRefs(std::make_pair(NewMemRefs, NewMemRefsEnd-NewMemRefs));
1334 }
1335
1336 /// Assign this MachineInstr's memory reference descriptor list. First
1337 /// element in the pair is the begin iterator/pointer to the array; the
1338 /// second is the number of MemoryOperands. This does not transfer ownership
1339 /// of the underlying memory.
1340 void setMemRefs(std::pair<mmo_iterator, unsigned> NewMemRefs) {
1341 MemRefs = NewMemRefs.first;
1342 NumMemRefs = uint8_t(NewMemRefs.second);
1343 assert(NumMemRefs == NewMemRefs.second &&
1344 "Too many memrefs - must drop memory operands");
1345 }
1346
1347 /// Return a set of memrefs (begin iterator, size) which conservatively
1348 /// describe the memory behavior of both MachineInstrs. This is appropriate
1349 /// for use when merging two MachineInstrs into one. This routine does not
1350 /// modify the memrefs of the this MachineInstr.
1351 std::pair<mmo_iterator, unsigned> mergeMemRefsWith(const MachineInstr& Other);
1476 /// Clone another MachineInstr's memory reference descriptor list and replace
1477 /// ours with it.
1478 ///
1479 /// Note that `*this` may be the incoming MI!
1480 ///
1481 /// Prefer this API whenever possible as it can avoid allocations in common
1482 /// cases.
1483 void cloneMemRefs(MachineFunction &MF, const MachineInstr &MI);
1484
1485 /// Clone the merge of multiple MachineInstrs' memory reference descriptors
1486 /// list and replace ours with it.
1487 ///
1488 /// Note that `*this` may be one of the incoming MIs!
1489 ///
1490 /// Prefer this API whenever possible as it can avoid allocations in common
1491 /// cases.
1492 void cloneMergedMemRefs(MachineFunction &MF,
1493 ArrayRef<const MachineInstr *> MIs);
1494
1495 /// Get or create a temporary symbol that will be emitted just prior to the
1496 /// instruction itself.
1497 ///
1498 /// FIXME: This is not fully implemented yet.
1499 MCSymbol *getOrCreatePreInstrTempSymbol(MCContext &MCCtx);
1500
1501 /// Get or create a temporary symbol that will be emitted just after the
1502 /// instruction itself.
1503 ///
1504 /// FIXME: This is not fully implemented yet.
1505 MCSymbol *getOrCreatePostInstrTempSymbol(MCContext &MCCtx);
13521506
13531507 /// Return the MIFlags which represent both MachineInstrs. This
13541508 /// should be used when merging two MachineInstrs into one. This routine does
13551509 /// not modify the MIFlags of this MachineInstr.
13561510 uint16_t mergeFlagsWith(const MachineInstr& Other) const;
1357
1358 /// Clear this MachineInstr's memory reference descriptor list. This resets
1359 /// the memrefs to their most conservative state. This should be used only
1360 /// as a last resort since it greatly pessimizes our knowledge of the memory
1361 /// access performed by the instruction.
1362 void dropMemRefs() {
1363 MemRefs = nullptr;
1364 NumMemRefs = 0;
1365 }
13661511
13671512 /// Break any tie involving OpIdx.
13681513 void untieRegOperand(unsigned OpIdx) {
190190 return *this;
191191 }
192192
193 const MachineInstrBuilder &setMemRefs(MachineInstr::mmo_iterator b,
194 MachineInstr::mmo_iterator e) const {
195 MI->setMemRefs(b, e);
196 return *this;
197 }
198
199 const MachineInstrBuilder &setMemRefs(std::pair<MachineInstr::mmo_iterator,
200 unsigned> MemOperandsRef) const {
201 MI->setMemRefs(MemOperandsRef);
193 const MachineInstrBuilder &
194 setMemRefs(ArrayRef<MachineMemOperand *> MMOs) const {
195 MI->setMemRefs(*MF, MMOs);
196 return *this;
197 }
198
199 const MachineInstrBuilder &cloneMemRefs(const MachineInstr &OtherMI) const {
200 MI->cloneMemRefs(*MF, OtherMI);
201 return *this;
202 }
203
204 const MachineInstrBuilder &
205 cloneMergedMemRefs(ArrayRef<const MachineInstr *> OtherMIs) const {
206 MI->cloneMergedMemRefs(*MF, OtherMIs);
202207 return *this;
203208 }
204209
864864
865865 // Merge MMOs from memory operations in the common block.
866866 if (MBBICommon->mayLoad() || MBBICommon->mayStore())
867 MBBICommon->setMemRefs(MBBICommon->mergeMemRefsWith(*MBBI));
867 MBBICommon->cloneMergedMemRefs(*MBB->getParent(), {&*MBBICommon, &*MBBI});
868868 // Drop undef flags if they aren't present in all merged instructions.
869869 for (unsigned I = 0, E = MBBICommon->getNumOperands(); I != E; ++I) {
870870 MachineOperand &MO = MBBICommon->getOperand(I);
705705 return;
706706
707707 MachinePointerInfo MPInfo(Global);
708 MachineInstr::mmo_iterator MemRefs = MF->allocateMemRefsArray(1);
709708 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
710709 MachineMemOperand::MODereferenceable;
711 *MemRefs =
710 MachineMemOperand *MemRef =
712711 MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
713712 DL->getPointerABIAlignment(0));
714 MIB.setMemRefs(MemRefs, MemRefs + 1);
713 MIB.setMemRefs({MemRef});
715714 }
716715
717716 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
650650 }
651651 }
652652
653 MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
653 MIB.setMemRefs(MI->memoperands());
654654
655655 return MIB;
656656 }
796796 return true;
797797 if (MemOperands.empty())
798798 return false;
799 MachineInstr::mmo_iterator MemRefs =
800 MF.allocateMemRefsArray(MemOperands.size());
801 std::copy(MemOperands.begin(), MemOperands.end(), MemRefs);
802 MI->setMemRefs(MemRefs, MemRefs + MemOperands.size());
799 MI->setMemRefs(MF, MemOperands);
803800 return false;
804801 }
805802
405405 MMO->getOrdering(), MMO->getFailureOrdering());
406406 }
407407
408 MachineInstr::mmo_iterator
409 MachineFunction::allocateMemRefsArray(unsigned long Num) {
410 return Allocator.Allocate<MachineMemOperand *>(Num);
411 }
412
413 std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
414 MachineFunction::extractLoadMemRefs(MachineInstr::mmo_iterator Begin,
415 MachineInstr::mmo_iterator End) {
416 // Count the number of load mem refs.
417 unsigned Num = 0;
418 for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
419 if ((*I)->isLoad())
420 ++Num;
421
422 // Allocate a new array and populate it with the load information.
423 MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
424 unsigned Index = 0;
425 for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
426 if ((*I)->isLoad()) {
427 if (!(*I)->isStore())
428 // Reuse the MMO.
429 Result[Index] = *I;
430 else {
431 // Clone the MMO and unset the store flag.
432 MachineMemOperand *JustLoad =
433 getMachineMemOperand((*I)->getPointerInfo(),
434 (*I)->getFlags() & ~MachineMemOperand::MOStore,
435 (*I)->getSize(), (*I)->getBaseAlignment(),
436 (*I)->getAAInfo(), nullptr,
437 (*I)->getSyncScopeID(), (*I)->getOrdering(),
438 (*I)->getFailureOrdering());
439 Result[Index] = JustLoad;
440 }
441 ++Index;
442 }
443 }
444 return std::make_pair(Result, Result + Num);
445 }
446
447 std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator>
448 MachineFunction::extractStoreMemRefs(MachineInstr::mmo_iterator Begin,
449 MachineInstr::mmo_iterator End) {
450 // Count the number of store mem refs.
451 unsigned Num = 0;
452 for (MachineInstr::mmo_iterator I = Begin; I != End; ++I)
453 if ((*I)->isStore())
454 ++Num;
455
456 // Allocate a new array and populate it with the store information.
457 MachineInstr::mmo_iterator Result = allocateMemRefsArray(Num);
458 unsigned Index = 0;
459 for (MachineInstr::mmo_iterator I = Begin; I != End; ++I) {
460 if ((*I)->isStore()) {
461 if (!(*I)->isLoad())
462 // Reuse the MMO.
463 Result[Index] = *I;
464 else {
465 // Clone the MMO and unset the load flag.
466 MachineMemOperand *JustStore =
467 getMachineMemOperand((*I)->getPointerInfo(),
468 (*I)->getFlags() & ~MachineMemOperand::MOLoad,
469 (*I)->getSize(), (*I)->getBaseAlignment(),
470 (*I)->getAAInfo(), nullptr,
471 (*I)->getSyncScopeID(), (*I)->getOrdering(),
472 (*I)->getFailureOrdering());
473 Result[Index] = JustStore;
474 }
475 ++Index;
476 }
477 }
478 return std::make_pair(Result, Result + Num);
408 MachineInstr::ExtraInfo *
409 MachineFunction::createMIExtraInfo(ArrayRef<MachineMemOperand *> MMOs,
410 MCSymbol *PreInstrSymbol,
411 MCSymbol *PostInstrSymbol) {
412 return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
413 PostInstrSymbol);
479414 }
480415
481416 const char *MachineFunction::createExternalSymbolName(StringRef Name) {
130130 /// MachineInstr ctor - Copies MachineInstr arg exactly
131131 ///
132132 MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
133 : MCID(&MI.getDesc()), NumMemRefs(MI.NumMemRefs), MemRefs(MI.MemRefs),
134 debugLoc(MI.getDebugLoc()) {
133 : MCID(&MI.getDesc()), Info(MI.Info), debugLoc(MI.getDebugLoc()) {
135134 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
136135
137136 CapOperands = OperandCapacity::get(MI.getNumOperands());
314313 --NumOperands;
315314 }
316315
317 /// addMemOperand - Add a MachineMemOperand to the machine instruction.
318 /// This function should be used only occasionally. The setMemRefs function
319 /// is the primary method for setting up a MachineInstr's MemRefs list.
316 void MachineInstr::dropMemRefs(MachineFunction &MF) {
317 if (memoperands_empty())
318 return;
319
320 // See if we can just drop all of our extra info.
321 if (!getPreInstrSymbol() && !getPostInstrSymbol()) {
322 Info.clear();
323 return;
324 }
325 if (!getPostInstrSymbol()) {
326 Info.set<EIIK_PreInstrSymbol>(getPreInstrSymbol());
327 return;
328 }
329 if (!getPreInstrSymbol()) {
330 Info.set<EIIK_PostInstrSymbol>(getPostInstrSymbol());
331 return;
332 }
333
334 // Otherwise allocate a fresh extra info with just these symbols.
335 Info.set<EIIK_OutOfLine>(
336 MF.createMIExtraInfo({}, getPreInstrSymbol(), getPostInstrSymbol()));
337 }
338
339 void MachineInstr::setMemRefs(MachineFunction &MF,
340 ArrayRef<MachineMemOperand *> MMOs) {
341 if (MMOs.empty()) {
342 dropMemRefs(MF);
343 return;
344 }
345
346 // Try to store a single MMO inline.
347 if (MMOs.size() == 1 && !getPreInstrSymbol() && !getPostInstrSymbol()) {
348 Info.set<EIIK_MMO>(MMOs[0]);
349 return;
350 }
351
352 // Otherwise create an extra info struct with all of our info.
353 Info.set<EIIK_OutOfLine>(
354 MF.createMIExtraInfo(MMOs, getPreInstrSymbol(), getPostInstrSymbol()));
355 }
356
320357 void MachineInstr::addMemOperand(MachineFunction &MF,
321358 MachineMemOperand *MO) {
322 mmo_iterator OldMemRefs = MemRefs;
323 unsigned OldNumMemRefs = NumMemRefs;
324
325 unsigned NewNum = NumMemRefs + 1;
326 mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NewNum);
327
328 std::copy(OldMemRefs, OldMemRefs + OldNumMemRefs, NewMemRefs);
329 NewMemRefs[NewNum - 1] = MO;
330 setMemRefs(NewMemRefs, NewMemRefs + NewNum);
359 SmallVector<MachineMemOperand *, 2> MMOs;
360 MMOs.append(memoperands_begin(), memoperands_end());
361 MMOs.push_back(MO);
362 setMemRefs(MF, MMOs);
363 }
364
365 void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
366 if (this == &MI)
367 // Nothing to do for a self-clone!
368 return;
369
370 assert(&MF == MI.getMF() &&
371 "Invalid machine functions when cloning memory refrences!");
372 // See if we can just steal the extra info already allocated for the
373 // instruction. We can do this whenever the pre- and post-instruction symbols
374 // are the same (including null).
375 if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
376 getPostInstrSymbol() == MI.getPostInstrSymbol()) {
377 Info = MI.Info;
378 return;
379 }
380
381 // Otherwise, fall back on a copy-based clone.
382 setMemRefs(MF, MI.memoperands());
331383 }
332384
333385 /// Check to see if the MMOs pointed to by the two MemRefs arrays are
334386 /// identical.
335 static bool hasIdenticalMMOs(const MachineInstr &MI1, const MachineInstr &MI2) {
336 auto I1 = MI1.memoperands_begin(), E1 = MI1.memoperands_end();
337 auto I2 = MI2.memoperands_begin(), E2 = MI2.memoperands_end();
338 if ((E1 - I1) != (E2 - I2))
387 static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
388 ArrayRef<MachineMemOperand *> RHS) {
389 if (LHS.size() != RHS.size())
339390 return false;
340 for (; I1 != E1; ++I1, ++I2) {
341 if (**I1 != **I2)
342 return false;
343 }
344 return true;
345 }
346
347 std::pair<MachineInstr::mmo_iterator, unsigned>
348 MachineInstr::mergeMemRefsWith(const MachineInstr& Other) {
349
350 // If either of the incoming memrefs are empty, we must be conservative and
351 // treat this as if we've exhausted our space for memrefs and dropped them.
352 if (memoperands_empty() || Other.memoperands_empty())
353 return std::make_pair(nullptr, 0);
354
355 // If both instructions have identical memrefs, we don't need to merge them.
356 // Since many instructions have a single memref, and we tend to merge things
357 // like pairs of loads from the same location, this catches a large number of
358 // cases in practice.
359 if (hasIdenticalMMOs(*this, Other))
360 return std::make_pair(MemRefs, NumMemRefs);
361
362 // TODO: consider uniquing elements within the operand lists to reduce
363 // space usage and fall back to conservative information less often.
364 size_t CombinedNumMemRefs = NumMemRefs + Other.NumMemRefs;
365
366 // If we don't have enough room to store this many memrefs, be conservative
367 // and drop them. Otherwise, we'd fail asserts when trying to add them to
368 // the new instruction.
369 if (CombinedNumMemRefs != uint8_t(CombinedNumMemRefs))
370 return std::make_pair(nullptr, 0);
371
372 MachineFunction *MF = getMF();
373 mmo_iterator MemBegin = MF->allocateMemRefsArray(CombinedNumMemRefs);
374 mmo_iterator MemEnd = std::copy(memoperands_begin(), memoperands_end(),
375 MemBegin);
376 MemEnd = std::copy(Other.memoperands_begin(), Other.memoperands_end(),
377 MemEnd);
378 assert(MemEnd - MemBegin == (ptrdiff_t)CombinedNumMemRefs &&
379 "missing memrefs");
380
381 return std::make_pair(MemBegin, CombinedNumMemRefs);
391
392 auto LHSPointees = make_pointee_range(LHS);
393 auto RHSPointees = make_pointee_range(RHS);
394 return std::equal(LHSPointees.begin(), LHSPointees.end(),
395 RHSPointees.begin());
396 }
397
398 void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
399 ArrayRef<const MachineInstr *> MIs) {
400 // Try handling easy numbers of MIs with simpler mechanisms.
401 if (MIs.empty()) {
402 dropMemRefs(MF);
403 return;
404 }
405 if (MIs.size() == 1) {
406 cloneMemRefs(MF, *MIs[0]);
407 return;
408 }
409 // Because an empty memoperands list provides *no* information and must be
410 // handled conservatively (assuming the instruction can do anything), the only
411 // way to merge with it is to drop all other memoperands.
412 if (MIs[0]->memoperands_empty()) {
413 dropMemRefs(MF);
414 return;
415 }
416
417 // Handle the general case.
418 SmallVector<MachineMemOperand *, 2> MergedMMOs;
419 // Start with the first instruction.
420 assert(&MF == MIs[0]->getMF() &&
421 "Invalid machine functions when cloning memory references!");
422 MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
423 // Now walk all the other instructions and accumulate any different MMOs.
424 for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
425 assert(&MF == MI.getMF() &&
426 "Invalid machine functions when cloning memory references!");
427
428 // Skip MIs with identical operands to the first. This is a somewhat
429 // arbitrary hack but will catch common cases without being quadratic.
430 // TODO: We could fully implement merge semantics here if needed.
431 if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
432 continue;
433
434 // Because an empty memoperands list provides *no* information and must be
435 // handled conservatively (assuming the instruction can do anything), the
436 // only way to merge with it is to drop all other memoperands.
437 if (MI.memoperands_empty()) {
438 dropMemRefs(MF);
439 return;
440 }
441
442 // Otherwise accumulate these into our temporary buffer of the merged state.
443 MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
444 }
445
446 setMemRefs(MF, MergedMMOs);
447 }
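The merge rule implemented above can be captured by a tiny standalone model (purely illustrative, using `int` in place of `MachineMemOperand *` and a made-up `mergeMemOps` helper): an empty list means "unknown" and is absorbing, identical lists are kept as-is, and anything else is concatenated.

// Hypothetical standalone model of the merge semantics.
#include <cassert>
#include <vector>

using MemOpList = std::vector<int>; // stand-in for a list of memory operands

MemOpList mergeMemOps(const MemOpList &A, const MemOpList &B) {
  if (A.empty() || B.empty())
    return {}; // an empty list means "unknown", and unknown wins
  MemOpList Merged = A;
  if (A != B) // identical lists collapse, mirroring the shortcut above
    Merged.insert(Merged.end(), B.begin(), B.end());
  return Merged;
}

int main() {
  assert(mergeMemOps({1}, {}).empty());      // merging with unknown -> unknown
  assert(mergeMemOps({1}, {1}).size() == 1); // identical lists are not doubled
  assert(mergeMemOps({1}, {2}).size() == 2); // otherwise concatenate
}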
448
449 MCSymbol *MachineInstr::getOrCreatePreInstrTempSymbol(MCContext &MCCtx) {
450 MCSymbol *S = getPreInstrSymbol();
451 if (S)
452 return S;
453
454 // Create a new temp symbol.
455 S = MCCtx.createTempSymbol();
456
457 if (!Info) {
458 // If we don't have any other extra info, we can store this inline.
459 Info.set<EIIK_PreInstrSymbol>(S);
460 return S;
461 }
462
463 // Otherwise, allocate a full set of extra info.
464 Info.set<EIIK_OutOfLine>(
465 getMF()->createMIExtraInfo(memoperands(), S, getPostInstrSymbol()));
466
467 return S;
468 }
469
470 MCSymbol *MachineInstr::getOrCreatePostInstrTempSymbol(MCContext &MCCtx) {
471 MCSymbol *S = getPostInstrSymbol();
472 if (S)
473 return S;
474
475 // Create a new temp symbol.
476 S = MCCtx.createTempSymbol();
477
478 if (!Info) {
479 // If we don't have any other extra info, we can store this inline.
480 Info.set<EIIK_PostInstrSymbol>(S);
481 return S;
482 }
483
484 // Otherwise, allocate a full set of extra info.
485 Info.set<EIIK_OutOfLine>(
486 getMF()->createMIExtraInfo(memoperands(), getPreInstrSymbol(), S));
487 return S;
382488 }
383489
384490 uint16_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
11961196 for (unsigned Str : OF.Sequence) {
11971197 MachineInstr *NewMI =
11981198 MF.CloneMachineInstr(Mapper.IntegerInstructionMap.find(Str)->second);
1199 NewMI->dropMemRefs();
1199 NewMI->dropMemRefs(MF);
12001200
12011201 // Don't keep debug information for outlined instructions.
12021202 NewMI->setDebugLoc(DebugLoc());
31743174 return;
31753175 // If the instruction has memory operands, then adjust the offset
31763176 // when the instruction appears in different stages.
3177 unsigned NumRefs = NewMI.memoperands_end() - NewMI.memoperands_begin();
3178 if (NumRefs == 0)
3177 if (NewMI.memoperands_empty())
31793178 return;
3180 MachineInstr::mmo_iterator NewMemRefs = MF.allocateMemRefsArray(NumRefs);
3181 unsigned Refs = 0;
3179 SmallVector<MachineMemOperand *, 2> NewMMOs;
31823180 for (MachineMemOperand *MMO : NewMI.memoperands()) {
31833181 if (MMO->isVolatile() || (MMO->isInvariant() && MMO->isDereferenceable()) ||
31843182 (!MMO->getValue())) {
3185 NewMemRefs[Refs++] = MMO;
3183 NewMMOs.push_back(MMO);
31863184 continue;
31873185 }
31883186 unsigned Delta;
31893187 if (Num != UINT_MAX && computeDelta(OldMI, Delta)) {
31903188 int64_t AdjOffset = Delta * Num;
3191 NewMemRefs[Refs++] =
3192 MF.getMachineMemOperand(MMO, AdjOffset, MMO->getSize());
3189 NewMMOs.push_back(
3190 MF.getMachineMemOperand(MMO, AdjOffset, MMO->getSize()));
31933191 } else {
3194 NewMI.dropMemRefs();
3192 NewMI.dropMemRefs(MF);
31953193 return;
31963194 }
31973195 }
3198 NewMI.setMemRefs(NewMemRefs, NewMemRefs + NumRefs);
3196 NewMI.setMemRefs(MF, NewMMOs);
31993197 }
32003198
32013199 /// Clone the instruction for the new pipelined loop and update the
885885 MIB.addReg(ScratchRegs[i], RegState::ImplicitDefine |
886886 RegState::EarlyClobber);
887887
888 // Transfer all of the memory reference descriptions of this instruction.
889 ArrayRef<MachineMemOperand *> SDNodeMemRefs =
890 cast<MachineSDNode>(Node)->memoperands();
891 MachineMemOperand **MemRefs = MF->allocateMemRefsArray(SDNodeMemRefs.size());
892 std::copy(SDNodeMemRefs.begin(), SDNodeMemRefs.end(), MemRefs);
893 MIB.setMemRefs({MemRefs, SDNodeMemRefs.size()});
888 // Set the memory reference descriptions of this instruction now that it is
889 // part of the function.
890 MIB.setMemRefs(cast<MachineSDNode>(Node)->memoperands());
894891
895892 // Insert the instruction into position in the block. This needs to
896893 // happen before any custom inserter hook is called so that the
10211021 }
10221022
10231023 // We adjust AliasAnalysis information for merged stack slots.
1024 MachineInstr::mmo_iterator NewMemOps =
1025 MF->allocateMemRefsArray(I.getNumMemOperands());
1026 unsigned MemOpIdx = 0;
1024 SmallVector<MachineMemOperand *, 2> NewMMOs;
10271025 bool ReplaceMemOps = false;
10281026 for (MachineMemOperand *MMO : I.memoperands()) {
10291027 // If this memory location can be a slot remapped here,
10501048 }
10511049 }
10521050 if (MayHaveConflictingAAMD) {
1053 NewMemOps[MemOpIdx++] = MF->getMachineMemOperand(MMO, AAMDNodes());
1051 NewMMOs.push_back(MF->getMachineMemOperand(MMO, AAMDNodes()));
10541052 ReplaceMemOps = true;
1053 } else {
1054 NewMMOs.push_back(MMO);
10551055 }
1056 else
1057 NewMemOps[MemOpIdx++] = MMO;
10581056 }
10591057
10601058 // If any memory operand is updated, set memory references of
10611059 // this instruction.
10621060 if (ReplaceMemOps)
1063 I.setMemRefs(std::make_pair(NewMemOps, I.getNumMemOperands()));
1061 I.setMemRefs(*MF, NewMMOs);
10641062 }
10651063
10661064 // Update the location of C++ catch objects for the MSVC personality routine.
582582 }
583583
584584 if (NewMI) {
585 NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
585 NewMI->setMemRefs(MF, MI.memoperands());
586586 // Add a memory operand, foldMemoryOperandImpl doesn't do that.
587587 assert((!(Flags & MachineMemOperand::MOStore) ||
588588 NewMI->mayStore()) &&
652652
653653 // Copy the memoperands from the load to the folded instruction.
654654 if (MI.memoperands_empty()) {
655 NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end());
655 NewMI->setMemRefs(MF, LoadMI.memoperands());
656656 } else {
657657 // Handle the rare case of folding multiple loads.
658 NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
658 NewMI->setMemRefs(MF, MI.memoperands());
659659 for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
660660 E = LoadMI.memoperands_end();
661661 I != E; ++I) {
967967 MIB.add(MI->getOperand(i));
968968
969969 // Inherit previous memory operands.
970 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
970 MIB.cloneMemRefs(*MI);
971971 assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");
972972
973973 // Add a new memory operand for this FI.
478478 MIB.addImm(CSStackSizeIncImm);
479479
480480 MIB.setMIFlags(MBBI->getFlags());
481 MIB.setMemRefs(MBBI->memoperands_begin(), MBBI->memoperands_end());
481 MIB.setMemRefs(MBBI->memoperands());
482482
483483 return std::prev(MBB.erase(MBBI));
484484 }
701701 .addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR)
702702 .add(BaseRegOp)
703703 .addImm(OffsetImm)
704 .setMemRefs(I->mergeMemRefsWith(*MergeMI))
704 .cloneMergedMemRefs({&*I, &*MergeMI})
705705 .setMIFlags(I->mergeFlagsWith(*MergeMI));
706706 (void)MIB;
707707
818818 .add(RegOp1)
819819 .add(BaseRegOp)
820820 .addImm(OffsetImm)
821 .setMemRefs(I->mergeMemRefsWith(*Paired))
821 .cloneMergedMemRefs({&*I, &*Paired})
822822 .setMIFlags(I->mergeFlagsWith(*Paired));
823823
824824 (void)MIB;
13371337 .add(getLdStRegOp(*I))
13381338 .add(getLdStBaseOp(*I))
13391339 .addImm(Value)
1340 .setMemRefs(I->memoperands_begin(), I->memoperands_end())
1340 .setMemRefs(I->memoperands())
13411341 .setMIFlags(I->mergeFlagsWith(*Update));
13421342 } else {
13431343 // Paired instruction.
13481348 .add(getLdStRegOp(*I, 1))
13491349 .add(getLdStBaseOp(*I))
13501350 .addImm(Value / Scale)
1351 .setMemRefs(I->memoperands_begin(), I->memoperands_end())
1351 .setMemRefs(I->memoperands())
13521352 .setMIFlags(I->mergeFlagsWith(*Update));
13531353 }
13541354 (void)MIB;
34183418 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I)
34193419 MIB.add(MI.getOperand(I));
34203420
3421 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
3421 MIB.cloneMemRefs(MI);
34223422 MI.eraseFromParent();
34233423 return BB;
34243424 }
37343734 MIB.addImm(TFE->getImm());
37353735 }
37363736
3737 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
3737 MIB.cloneMemRefs(MI);
37383738 Addr64 = MIB;
37393739 } else {
37403740 // Atomics with return.
37483748 .add(*SOffset)
37493749 .add(*Offset)
37503750 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
3751 .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
3751 .cloneMemRefs(MI);
37523752 }
37533753
37543754 MI.removeFromParent();
39543954 }
39553955
39563956 MachineInstr *NewInstr =
3957 BuildMI(*MBB, Inst, Inst.getDebugLoc(),
3958 get(AMDGPU::BUFFER_LOAD_DWORD_OFFEN), VDst)
3959 .add(*VAddr) // vaddr
3960 .add(*getNamedOperand(Inst, AMDGPU::OpName::sbase)) // srsrc
3961 .addImm(0) // soffset
3962 .addImm(Offset) // offset
3963 .addImm(getNamedOperand(Inst, AMDGPU::OpName::glc)->getImm())
3964 .addImm(0) // slc
3965 .addImm(0) // tfe
3966 .setMemRefs(Inst.memoperands_begin(), Inst.memoperands_end())
3967 .getInstr();
3957 BuildMI(*MBB, Inst, Inst.getDebugLoc(),
3958 get(AMDGPU::BUFFER_LOAD_DWORD_OFFEN), VDst)
3959 .add(*VAddr) // vaddr
3960 .add(*getNamedOperand(Inst, AMDGPU::OpName::sbase)) // srsrc
3961 .addImm(0) // soffset
3962 .addImm(Offset) // offset
3963 .addImm(getNamedOperand(Inst, AMDGPU::OpName::glc)->getImm())
3964 .addImm(0) // slc
3965 .addImm(0) // tfe
3966 .cloneMemRefs(Inst)
3967 .getInstr();
39683968
39693969 MRI.replaceRegWith(getNamedOperand(Inst, AMDGPU::OpName::sdst)->getReg(),
39703970 VDst);
527527 .addReg(AddrReg->getReg());
528528 }
529529
530 MachineInstrBuilder Read2 =
531 BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
532 .addReg(BaseReg, BaseRegFlags) // addr
533 .addImm(NewOffset0) // offset0
534 .addImm(NewOffset1) // offset1
535 .addImm(0) // gds
536 .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));
530 MachineInstrBuilder Read2 = BuildMI(*MBB, CI.Paired, DL, Read2Desc, DestReg)
531 .addReg(BaseReg, BaseRegFlags) // addr
532 .addImm(NewOffset0) // offset0
533 .addImm(NewOffset1) // offset1
534 .addImm(0) // gds
535 .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
537536
538537 (void)Read2;
539538
615614 .addReg(AddrReg->getReg());
616615 }
617616
618 MachineInstrBuilder Write2 =
619 BuildMI(*MBB, CI.Paired, DL, Write2Desc)
620 .addReg(BaseReg, BaseRegFlags) // addr
621 .add(*Data0) // data0
622 .add(*Data1) // data1
623 .addImm(NewOffset0) // offset0
624 .addImm(NewOffset1) // offset1
625 .addImm(0) // gds
626 .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));
617 MachineInstrBuilder Write2 = BuildMI(*MBB, CI.Paired, DL, Write2Desc)
618 .addReg(BaseReg, BaseRegFlags) // addr
619 .add(*Data0) // data0
620 .add(*Data1) // data1
621 .addImm(NewOffset0) // offset0
622 .addImm(NewOffset1) // offset1
623 .addImm(0) // gds
624 .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
627625
628626 moveInstsAfter(Write2, CI.InstsToMove);
629627
651649 .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
652650 .addImm(MergedOffset) // offset
653651 .addImm(CI.GLC0) // glc
654 .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));
652 .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
655653
656654 unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
657655 unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;
710708 .addImm(CI.GLC0) // glc
711709 .addImm(CI.SLC0) // slc
712710 .addImm(0) // tfe
713 .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));
711 .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
714712
715713 unsigned SubRegIdx0 = CI.IsX2 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
716714 unsigned SubRegIdx1 = CI.IsX2 ? AMDGPU::sub2_sub3 : AMDGPU::sub1;
810808 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
811809 .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
812810 .addImm(std::min(CI.Offset0, CI.Offset1)) // offset
813 .addImm(CI.GLC0) // glc
814 .addImm(CI.SLC0) // slc
815 .addImm(0) // tfe
816 .setMemRefs(CI.I->mergeMemRefsWith(*CI.Paired));
811 .addImm(CI.GLC0) // glc
812 .addImm(CI.SLC0) // slc
813 .addImm(0) // tfe
814 .cloneMergedMemRefs({&*CI.I, &*CI.Paired});
817815
818816 moveInstsAfter(MIB, CI.InstsToMove);
819817
494494 return false;
495495
496496 const MachineOperand *Reg = TII->getNamedOperand(*MI, AMDGPU::OpName::vdata);
497 MachineInstrBuilder NewMI = BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
498 .add(*Reg)
499 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
500 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
501 .addImm(Offset)
502 .addImm(0) // glc
503 .addImm(0) // slc
504 .addImm(0) // tfe
505 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
497 MachineInstrBuilder NewMI =
498 BuildMI(*MBB, MI, DL, TII->get(LoadStoreOp))
499 .add(*Reg)
500 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::srsrc))
501 .add(*TII->getNamedOperand(*MI, AMDGPU::OpName::soffset))
502 .addImm(Offset)
503 .addImm(0) // glc
504 .addImm(0) // slc
505 .addImm(0) // tfe
506 .cloneMemRefs(*MI);
506507
507508 const MachineOperand *VDataIn = TII->getNamedOperand(*MI,
508509 AMDGPU::OpName::vdata_in);
15891589 MachineFunction &MF = *MBB.getParent();
15901590 unsigned CPI = Orig.getOperand(1).getIndex();
15911591 unsigned PCLabelId = duplicateCPV(MF, CPI);
1592 MachineInstrBuilder MIB =
1593 BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg)
1594 .addConstantPoolIndex(CPI)
1595 .addImm(PCLabelId);
1596 MIB->setMemRefs(Orig.memoperands_begin(), Orig.memoperands_end());
1592 BuildMI(MBB, I, Orig.getDebugLoc(), get(Opcode), DestReg)
1593 .addConstantPoolIndex(CPI)
1594 .addImm(PCLabelId)
1595 .cloneMemRefs(Orig);
15971596 break;
15981597 }
15991598 }
45334532
45344533 MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg);
45354534 MIB.addReg(Reg, RegState::Kill)
4536 .addImm(0)
4537 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end())
4538 .add(predOps(ARMCC::AL));
4535 .addImm(0)
4536 .cloneMemRefs(*MI)
4537 .add(predOps(ARMCC::AL));
45394538 }
45404539
45414540 bool
569569 TransferImpOps(MI, MIB, MIB);
570570
571571 // Transfer memoperands.
572 MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
572 MIB.cloneMemRefs(MI);
573573
574574 MI.eraseFromParent();
575575 }
644644 TransferImpOps(MI, MIB, MIB);
645645
646646 // Transfer memoperands.
647 MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
647 MIB.cloneMemRefs(MI);
648648
649649 MI.eraseFromParent();
650650 }
734734 MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
735735 TransferImpOps(MI, MIB, MIB);
736736 // Transfer memoperands.
737 MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
737 MIB.cloneMemRefs(MI);
738738 MI.eraseFromParent();
739739 }
740740
847847 unsigned SOImmValV2 = ARM_AM::getSOImmTwoPartSecond(ImmVal);
848848 LO16 = LO16.addImm(SOImmValV1);
849849 HI16 = HI16.addImm(SOImmValV2);
850 LO16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
851 HI16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
850 LO16.cloneMemRefs(MI);
851 HI16.cloneMemRefs(MI);
852852 LO16.addImm(Pred).addReg(PredReg).add(condCodeOp());
853853 HI16.addImm(Pred).addReg(PredReg).add(condCodeOp());
854854 if (isCC)
898898 }
899899 }
900900
901 LO16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
902 HI16->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
901 LO16.cloneMemRefs(MI);
902 HI16.cloneMemRefs(MI);
903903 LO16.addImm(Pred).addReg(PredReg);
904904 HI16.addImm(Pred).addReg(PredReg);
905905
14241424 MIB.addExternalSymbol("__aeabi_read_tp", 0);
14251425 }
14261426
1427 MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1427 MIB.cloneMemRefs(MI);
14281428 TransferImpOps(MI, MIB, MIB);
14291429 MI.eraseFromParent();
14301430 return true;
14391439 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(NewLdOpc), DstReg)
14401440 .add(MI.getOperand(1))
14411441 .add(predOps(ARMCC::AL));
1442 MIB1->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1442 MIB1.cloneMemRefs(MI);
14431443 MachineInstrBuilder MIB2 =
14441444 BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(ARM::tPICADD))
14451445 .addReg(DstReg, RegState::Define | getDeadRegState(DstIsDead))
15431543 if (isARM) {
15441544 MIB3.add(predOps(ARMCC::AL));
15451545 if (Opcode == ARM::MOV_ga_pcrel_ldr)
1546 MIB3->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1546 MIB3.cloneMemRefs(MI);
15471547 }
15481548 TransferImpOps(MI, MIB1, MIB3);
15491549 MI.eraseFromParent();
15951595 // Add an implicit def for the super-register.
15961596 MIB.addReg(DstReg, RegState::ImplicitDefine | getDeadRegState(DstIsDead));
15971597 TransferImpOps(MI, MIB, MIB);
1598 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1598 MIB.cloneMemRefs(MI);
15991599 MI.eraseFromParent();
16001600 return true;
16011601 }
16281628 MIB->addRegisterKilled(SrcReg, TRI, true);
16291629
16301630 TransferImpOps(MI, MIB, MIB);
1631 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1631 MIB.cloneMemRefs(MI);
16321632 MI.eraseFromParent();
16331633 return true;
16341634 }
131131 BuildMI(MBB, MI, DL, get(ARM::LDRi12), Reg)
132132 .addReg(Reg, RegState::Kill)
133133 .addImm(0)
134 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end())
134 .cloneMemRefs(*MI)
135135 .add(predOps(ARMCC::AL));
136136 }
137137
13021302 MIB.add(MI->getOperand(OpNum));
13031303
13041304 // Transfer memoperands.
1305 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
1305 MIB.setMemRefs(MI->memoperands());
13061306
13071307 MBB.erase(MBBI);
13081308 return true;
15261526 // Transfer implicit operands.
15271527 for (const MachineOperand &MO : MI.implicit_operands())
15281528 MIB.add(MO);
1529 MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1529 MIB.setMemRefs(MI.memoperands());
15301530
15311531 MBB.erase(MBBI);
15321532 return true;
22892289 if (!isT2)
22902290 MIB.addReg(0);
22912291 MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
2292 MIB.setMemRefs(Op0->mergeMemRefsWith(*Op1));
2292 MIB.cloneMergedMemRefs({Op0, Op1});
22932293 LLVM_DEBUG(dbgs() << "Formed " << *MIB << "\n");
22942294 ++NumLDRDFormed;
22952295 } else {
23032303 if (!isT2)
23042304 MIB.addReg(0);
23052305 MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
2306 MIB.setMemRefs(Op0->mergeMemRefsWith(*Op1));
2306 MIB.cloneMergedMemRefs({Op0, Op1});
23072307 LLVM_DEBUG(dbgs() << "Formed " << *MIB << "\n");
23082308 ++NumSTRDFormed;
23092309 }
484484 .addReg(Rt, IsStore ? 0 : RegState::Define);
485485
486486 // Transfer memoperands.
487 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
487 MIB.setMemRefs(MI->memoperands());
488488
489489 // Transfer MI flags.
490490 MIB.setMIFlags(MI->getFlags());
604604 MIB.add(MI->getOperand(OpNum));
605605
606606 // Transfer memoperands.
607 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
607 MIB.setMemRefs(MI->memoperands());
608608
609609 // Transfer MI flags.
610610 MIB.setMIFlags(MI->getFlags());
567567 llvm_unreachable("Unknown operand type!");
568568 }
569569
570 MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
571 MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
570 MIBLO.setMemRefs(MI.memoperands());
571 MIBHI.setMemRefs(MI.memoperands());
572572
573573 MI.eraseFromParent();
574574 return true;
616616 buildMI(MBB, MBBI, AVR::POPRd).addReg(DstLoReg);
617617 }
618618
619 MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
620 MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
619 MIBLO.setMemRefs(MI.memoperands());
620 MIBHI.setMemRefs(MI.memoperands());
621621
622622 MI.eraseFromParent();
623623 return true;
647647 .addReg(SrcReg, RegState::Define | getDeadRegState(SrcIsDead))
648648 .addReg(SrcReg, RegState::Kill);
649649
650 MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
651 MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
650 MIBLO.setMemRefs(MI.memoperands());
651 MIBHI.setMemRefs(MI.memoperands());
652652
653653 MI.eraseFromParent();
654654 return true;
678678 .addReg(SrcReg, RegState::Define | getDeadRegState(SrcIsDead))
679679 .addReg(SrcReg, RegState::Kill);
680680
681 MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
682 MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
681 MIBLO.setMemRefs(MI.memoperands());
682 MIBHI.setMemRefs(MI.memoperands());
683683
684684 MI.eraseFromParent();
685685 return true;
733733 buildMI(MBB, MBBI, AVR::POPRd).addReg(DstLoReg);
734734 }
735735
736 MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
737 MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
736 MIBLO.setMemRefs(MI.memoperands());
737 MIBHI.setMemRefs(MI.memoperands());
738738
739739 MI.eraseFromParent();
740740 return true;
781781 buildMI(MBB, MBBI, AVR::POPRd).addReg(DstLoReg);
782782 }
783783
784 MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
785 MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
784 MIBLO.setMemRefs(MI.memoperands());
785 MIBHI.setMemRefs(MI.memoperands());
786786
787787 MI.eraseFromParent();
788788 return true;
10021002 MIBLO.addReg(SrcLoReg, getKillRegState(SrcIsKill));
10031003 MIBHI.addReg(SrcHiReg, getKillRegState(SrcIsKill));
10041004
1005 MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1006 MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1005 MIBLO.setMemRefs(MI.memoperands());
1006 MIBHI.setMemRefs(MI.memoperands());
10071007
10081008 MI.eraseFromParent();
10091009 return true;
10301030 .addImm(1)
10311031 .addReg(SrcHiReg, getKillRegState(SrcIsKill));
10321032
1033 MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1034 MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1033 MIBLO.setMemRefs(MI.memoperands());
1034 MIBHI.setMemRefs(MI.memoperands());
10351035
10361036 MI.eraseFromParent();
10371037 return true;
10641064 .addReg(SrcHiReg, getKillRegState(SrcIsKill))
10651065 .addImm(Imm);
10661066
1067 MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1068 MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1067 MIBLO.setMemRefs(MI.memoperands());
1068 MIBHI.setMemRefs(MI.memoperands());
10691069
10701070 MI.eraseFromParent();
10711071 return true;
10981098 .addReg(SrcLoReg, getKillRegState(SrcIsKill))
10991099 .addImm(Imm);
11001100
1101 MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1102 MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1101 MIBLO.setMemRefs(MI.memoperands());
1102 MIBHI.setMemRefs(MI.memoperands());
11031103
11041104 MI.eraseFromParent();
11051105 return true;
11321132 .addImm(Imm + 1)
11331133 .addReg(SrcHiReg, getKillRegState(SrcIsKill));
11341134
1135 MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1136 MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1135 MIBLO.setMemRefs(MI.memoperands());
1136 MIBHI.setMemRefs(MI.memoperands());
11371137
11381138 MI.eraseFromParent();
11391139 return true;
11621162 .addReg(DstHiReg, RegState::Define | getDeadRegState(DstIsDead))
11631163 .addImm(Imm + 1);
11641164
1165 MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1166 MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1165 MIBLO.setMemRefs(MI.memoperands());
1166 MIBHI.setMemRefs(MI.memoperands());
11671167
11681168 MI.eraseFromParent();
11691169 return true;
11931193 .addImm(Imm)
11941194 .addReg(SrcLoReg, getKillRegState(SrcIsKill));
11951195
1196 MIBLO->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1197 MIBHI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1196 MIBLO.setMemRefs(MI.memoperands());
1197 MIBHI.setMemRefs(MI.memoperands());
11981198
11991199 MI.eraseFromParent();
12001200 return true;
16281628 else
16291629 MIB.add(MachineOperand(ExtR));
16301630 }
1631 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1631 MIB.cloneMemRefs(MI);
16321632 MBB.erase(MI);
16331633 return true;
16341634 }
16791679 // Add the stored value for stores.
16801680 if (MI.mayStore())
16811681 MIB.add(getStoredValueOp(MI));
1682 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1682 MIB.cloneMemRefs(MI);
16831683 MBB.erase(MI);
16841684 return true;
16851685 }
17961796 // Add the stored value for stores.
17971797 if (MI.mayStore())
17981798 MIB.add(getStoredValueOp(MI));
1799 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1799 MIB.cloneMemRefs(MI);
18001800 MBB.erase(MI);
18011801 return true;
18021802 }
730730 MIB.add(MO);
731731
732732 // Set memory references.
733 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
734 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
735 MIB.setMemRefs(MMOBegin, MMOEnd);
733 MIB.cloneMemRefs(*MI);
736734
737735 MI->eraseFromParent();
738736 return;
890890 MB.add(MO);
891891 Ox++;
892892 }
893
894 MachineFunction &MF = *B.getParent();
895 MachineInstr::mmo_iterator I = MI.memoperands_begin();
896 unsigned NR = std::distance(I, MI.memoperands_end());
897 MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(NR);
898 for (unsigned i = 0; i < NR; ++i)
899 MemRefs[i] = *I++;
900 MB.setMemRefs(MemRefs, MemRefs+NR);
893 MB.cloneMemRefs(MI);
901894
902895 MachineInstr *NewI = MB;
903896 NewI->clearKillInfo();
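This Hexagon hunk shows the ergonomic win most directly: the removed lines allocate a MachineMemOperand pointer array through MachineFunction::allocateMemRefsArray and copy into it by hand, while the replacement is a single cloneMemRefs call. A rough sketch in the chained-builder style that the Hexagon frame-lowering hunks below also use; the opcode, frame index, and surrounding pass are placeholders, not taken from the diff:

  #include "llvm/CodeGen/MachineInstrBuilder.h"
  #include "llvm/CodeGen/TargetInstrInfo.h"
  using namespace llvm;

  // Rebuild a load-like pseudo as NewOpc, inheriting MI's memory operands.
  static void rebuildWithMMOs(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator It,
                              const DebugLoc &DL, const TargetInstrInfo &TII,
                              unsigned NewOpc, int FI, const MachineInstr &MI) {
    BuildMI(MBB, It, DL, TII.get(NewOpc))
        .addFrameIndex(FI)
        .addImm(0)
        .cloneMemRefs(MI); // was: .setMemRefs(MI.memoperands_begin(), ...)
  }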
15781578
15791579 // S2_storeri_io FI, 0, TmpR
15801580 BuildMI(B, It, DL, HII.get(Hexagon::S2_storeri_io))
1581 .addFrameIndex(FI)
1582 .addImm(0)
1583 .addReg(TmpR, RegState::Kill)
1584 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
1581 .addFrameIndex(FI)
1582 .addImm(0)
1583 .addReg(TmpR, RegState::Kill)
1584 .cloneMemRefs(*MI);
15851585
15861586 NewRegs.push_back(TmpR);
15871587 B.erase(It);
16031603 // TmpR = L2_loadri_io FI, 0
16041604 unsigned TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
16051605 BuildMI(B, It, DL, HII.get(Hexagon::L2_loadri_io), TmpR)
1606 .addFrameIndex(FI)
1607 .addImm(0)
1608 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
1606 .addFrameIndex(FI)
1607 .addImm(0)
1608 .cloneMemRefs(*MI);
16091609
16101610 // DstR = C2_tfrrp TmpR if DstR is a predicate register
16111611 // DstR = A2_tfrrcr TmpR if DstR is a modifier register
17301730 StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
17311731 : Hexagon::V6_vS32Ub_ai;
17321732 BuildMI(B, It, DL, HII.get(StoreOpc))
1733 .addFrameIndex(FI)
1734 .addImm(0)
1735 .addReg(SrcLo, getKillRegState(IsKill))
1736 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
1733 .addFrameIndex(FI)
1734 .addImm(0)
1735 .addReg(SrcLo, getKillRegState(IsKill))
1736 .cloneMemRefs(*MI);
17371737 }
17381738
17391739 // Store high part.
17411741 StoreOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vS32b_ai
17421742 : Hexagon::V6_vS32Ub_ai;
17431743 BuildMI(B, It, DL, HII.get(StoreOpc))
1744 .addFrameIndex(FI)
1745 .addImm(Size)
1746 .addReg(SrcHi, getKillRegState(IsKill))
1747 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
1744 .addFrameIndex(FI)
1745 .addImm(Size)
1746 .addReg(SrcHi, getKillRegState(IsKill))
1747 .cloneMemRefs(*MI);
17481748 }
17491749
17501750 B.erase(It);
17761776 LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
17771777 : Hexagon::V6_vL32Ub_ai;
17781778 BuildMI(B, It, DL, HII.get(LoadOpc), DstLo)
1779 .addFrameIndex(FI)
1780 .addImm(0)
1781 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
1779 .addFrameIndex(FI)
1780 .addImm(0)
1781 .cloneMemRefs(*MI);
17821782
17831783 // Load high part.
17841784 LoadOpc = NeedAlign <= MinAlign(HasAlign, Size) ? Hexagon::V6_vL32b_ai
17851785 : Hexagon::V6_vL32Ub_ai;
17861786 BuildMI(B, It, DL, HII.get(LoadOpc), DstHi)
1787 .addFrameIndex(FI)
1788 .addImm(Size)
1789 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
1787 .addFrameIndex(FI)
1788 .addImm(Size)
1789 .cloneMemRefs(*MI);
17901790
17911791 B.erase(It);
17921792 return true;
18121812 unsigned StoreOpc = NeedAlign <= HasAlign ? Hexagon::V6_vS32b_ai
18131813 : Hexagon::V6_vS32Ub_ai;
18141814 BuildMI(B, It, DL, HII.get(StoreOpc))
1815 .addFrameIndex(FI)
1816 .addImm(0)
1817 .addReg(SrcR, getKillRegState(IsKill))
1818 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
1815 .addFrameIndex(FI)
1816 .addImm(0)
1817 .addReg(SrcR, getKillRegState(IsKill))
1818 .cloneMemRefs(*MI);
18191819
18201820 B.erase(It);
18211821 return true;
18401840 unsigned LoadOpc = NeedAlign <= HasAlign ? Hexagon::V6_vL32b_ai
18411841 : Hexagon::V6_vL32Ub_ai;
18421842 BuildMI(B, It, DL, HII.get(LoadOpc), DstR)
1843 .addFrameIndex(FI)
1844 .addImm(0)
1845 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
1843 .addFrameIndex(FI)
1844 .addImm(0)
1845 .cloneMemRefs(*MI);
18461846
18471847 B.erase(It);
18481848 return true;
10851085 unsigned NewOpc = Aligned ? Hexagon::V6_vS32b_ai : Hexagon::V6_vS32Ub_ai;
10861086 unsigned Offset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
10871087
1088 MachineInstr *MI1New =
1089 BuildMI(MBB, MI, DL, get(NewOpc))
1090 .add(MI.getOperand(0))
1091 .addImm(MI.getOperand(1).getImm())
1092 .addReg(SrcSubLo)
1093 .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1088 MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc))
1089 .add(MI.getOperand(0))
1090 .addImm(MI.getOperand(1).getImm())
1091 .addReg(SrcSubLo)
1092 .cloneMemRefs(MI);
10941093 MI1New->getOperand(0).setIsKill(false);
10951094 BuildMI(MBB, MI, DL, get(NewOpc))
10961095 .add(MI.getOperand(0))
10971096 // The Vectors are indexed in multiples of vector size.
10981097 .addImm(MI.getOperand(1).getImm() + Offset)
10991098 .addReg(SrcSubHi)
1100 .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1099 .cloneMemRefs(MI);
11011100 MBB.erase(MI);
11021101 return true;
11031102 }
11101109
11111110 MachineInstr *MI1New = BuildMI(MBB, MI, DL, get(NewOpc),
11121111 HRI.getSubReg(DstReg, Hexagon::vsub_lo))
1113 .add(MI.getOperand(1))
1114 .addImm(MI.getOperand(2).getImm())
1115 .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1112 .add(MI.getOperand(1))
1113 .addImm(MI.getOperand(2).getImm())
1114 .cloneMemRefs(MI);
11161115 MI1New->getOperand(1).setIsKill(false);
11171116 BuildMI(MBB, MI, DL, get(NewOpc), HRI.getSubReg(DstReg, Hexagon::vsub_hi))
11181117 .add(MI.getOperand(1))
11191118 // The Vectors are indexed in multiples of vector size.
11201119 .addImm(MI.getOperand(2).getImm() + Offset)
1121 .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1120 .cloneMemRefs(MI);
11221121 MBB.erase(MI);
11231122 return true;
11241123 }
276276 InstrBuilder.addImm(LPAC::makePostOp(AluOpcode));
277277
278278 // Transfer memory operands.
279 InstrBuilder->setMemRefs(MemInstr->memoperands_begin(),
280 MemInstr->memoperands_end());
279 InstrBuilder.setMemRefs(MemInstr->memoperands());
281280 }
282281
283282 // Function determines if ALU operation (in alu_iter) can be combined with
662662 }
663663
664664 MIB.copyImplicitOps(*I);
665
666 MIB.setMemRefs(I->memoperands_begin(), I->memoperands_end());
665 MIB.cloneMemRefs(*I);
667666 return MIB;
668667 }
669668
99639963 const BasicBlock *BB = MBB->getBasicBlock();
99649964 MachineFunction::iterator I = ++MBB->getIterator();
99659965
9966 // Memory Reference
9967 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
9968 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
9969
99709966 unsigned DstReg = MI.getOperand(0).getReg();
99719967 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
99729968 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
1002910025 if (Subtarget.isPPC64() && Subtarget.isSVR4ABI()) {
1003010026 setUsesTOCBasePtr(*MBB->getParent());
1003110027 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
10032 .addReg(PPC::X2)
10033 .addImm(TOCOffset)
10034 .addReg(BufReg);
10035 MIB.setMemRefs(MMOBegin, MMOEnd);
10028 .addReg(PPC::X2)
10029 .addImm(TOCOffset)
10030 .addReg(BufReg)
10031 .cloneMemRefs(MI);
1003610032 }
1003710033
1003810034 // Naked functions never have a base pointer, and so we use r1. For all
1004710043 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
1004810044 .addReg(BaseReg)
1004910045 .addImm(BPOffset)
10050 .addReg(BufReg);
10051 MIB.setMemRefs(MMOBegin, MMOEnd);
10046 .addReg(BufReg)
10047 .cloneMemRefs(MI);
1005210048
1005310049 // Setup
1005410050 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
1008110077 .addImm(LabelOffset)
1008210078 .addReg(BufReg);
1008310079 }
10084
10085 MIB.setMemRefs(MMOBegin, MMOEnd);
10080 MIB.cloneMemRefs(MI);
1008610081
1008710082 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
1008810083 mainMBB->addSuccessor(sinkMBB);
1010510100
1010610101 MachineFunction *MF = MBB->getParent();
1010710102 MachineRegisterInfo &MRI = MF->getRegInfo();
10108
10109 // Memory Reference
10110 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
10111 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
1011210103
1011310104 MVT PVT = getPointerTy(MF->getDataLayout());
1011410105 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
1014710138 .addImm(0)
1014810139 .addReg(BufReg);
1014910140 }
10150 MIB.setMemRefs(MMOBegin, MMOEnd);
10141 MIB.cloneMemRefs(MI);
1015110142
1015210143 // Reload IP
1015310144 if (PVT == MVT::i64) {
1015910150 .addImm(LabelOffset)
1016010151 .addReg(BufReg);
1016110152 }
10162 MIB.setMemRefs(MMOBegin, MMOEnd);
10153 MIB.cloneMemRefs(MI);
1016310154
1016410155 // Reload SP
1016510156 if (PVT == MVT::i64) {
1017110162 .addImm(SPOffset)
1017210163 .addReg(BufReg);
1017310164 }
10174 MIB.setMemRefs(MMOBegin, MMOEnd);
10165 MIB.cloneMemRefs(MI);
1017510166
1017610167 // Reload BP
1017710168 if (PVT == MVT::i64) {
1018310174 .addImm(BPOffset)
1018410175 .addReg(BufReg);
1018510176 }
10186 MIB.setMemRefs(MMOBegin, MMOEnd);
10177 MIB.cloneMemRefs(MI);
1018710178
1018810179 // Reload TOC
1018910180 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
1019010181 setUsesTOCBasePtr(*MBB->getParent());
1019110182 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
10192 .addImm(TOCOffset)
10193 .addReg(BufReg);
10194
10195 MIB.setMemRefs(MMOBegin, MMOEnd);
10183 .addImm(TOCOffset)
10184 .addReg(BufReg)
10185 .cloneMemRefs(MI);
1019610186 }
1019710187
1019810188 // Jump
293293 return false;
294294
295295 // Rebuild to get the CC operand in the right place.
296 MachineInstr *BuiltMI =
297 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(Opcode));
296 auto MIB = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(Opcode));
298297 for (const auto &MO : MI.operands())
299 BuiltMI->addOperand(MO);
300 BuiltMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
298 MIB.add(MO);
299 MIB.setMemRefs(MI.memoperands());
301300 MI.eraseFromParent();
302301
303302 return true;
68516851 .addImm(ThisLength)
68526852 .add(SrcBase)
68536853 .addImm(SrcDisp)
6854 ->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
6854 .setMemRefs(MI.memoperands());
68556855 DestDisp += ThisLength;
68566856 SrcDisp += ThisLength;
68576857 Length -= ThisLength;
287287 for (unsigned i = 1; i < NumArgs; ++i)
288288 MIB.add(MI->getOperand(i));
289289
290 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
290 MIB.setMemRefs(MI->memoperands());
291291
292292 return MIB;
293293 }
10471047
10481048 MIB.addReg(CondReg);
10491049
1050 MIB->setMemRefs(SetCCI.memoperands_begin(), SetCCI.memoperands_end());
1050 MIB.setMemRefs(SetCCI.memoperands());
10511051
10521052 SetCCI.eraseFromParent();
10531053 return;
2679426794
2679526795 // Memory Reference
2679626796 assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
26797 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
26798 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
26797 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
26798 MI.memoperands_end());
2679926799
2680026800 // Machine Information
2680126801 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
2689326893 .add(Index)
2689426894 .addDisp(Disp, UseFPOffset ? 4 : 0)
2689526895 .add(Segment)
26896 .setMemRefs(MMOBegin, MMOEnd);
26896 .setMemRefs(MMOs);
2689726897
2689826898 // Check if there is enough room left to pull this argument.
2689926899 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
2691826918 .add(Index)
2691926919 .addDisp(Disp, 16)
2692026920 .add(Segment)
26921 .setMemRefs(MMOBegin, MMOEnd);
26921 .setMemRefs(MMOs);
2692226922
2692326923 // Zero-extend the offset
2692426924 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
2694626946 .addDisp(Disp, UseFPOffset ? 4 : 0)
2694726947 .add(Segment)
2694826948 .addReg(NextOffsetReg)
26949 .setMemRefs(MMOBegin, MMOEnd);
26949 .setMemRefs(MMOs);
2695026950
2695126951 // Jump to endMBB
2695226952 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
2696526965 .add(Index)
2696626966 .addDisp(Disp, 8)
2696726967 .add(Segment)
26968 .setMemRefs(MMOBegin, MMOEnd);
26968 .setMemRefs(MMOs);
2696926969
2697026970 // If we need to align it, do so. Otherwise, just copy the address
2697126971 // to OverflowDestReg.
2700227002 .addDisp(Disp, 8)
2700327003 .add(Segment)
2700427004 .addReg(NextAddrReg)
27005 .setMemRefs(MMOBegin, MMOEnd);
27005 .setMemRefs(MMOs);
2700627006
2700727007 // If we branched, emit the PHI to the front of endMBB.
2700827008 if (offsetMBB) {
2797627976 MachineInstrBuilder MIB;
2797727977
2797827978 // Memory Reference.
27979 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
27980 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
27979 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
27980 MI.memoperands_end());
2798127981
2798227982 // Initialize a register with zero.
2798327983 MVT PVT = getPointerTy(MF->getDataLayout());
2800628006 MIB.add(MI.getOperand(MemOpndSlot + i));
2800728007 }
2800828008 MIB.addReg(SSPCopyReg);
28009 MIB.setMemRefs(MMOBegin, MMOEnd);
28009 MIB.setMemRefs(MMOs);
2801028010 }
2801128011
2801228012 MachineBasicBlock *
2802228022 MachineFunction::iterator I = ++MBB->getIterator();
2802328023
2802428024 // Memory Reference
28025 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
28026 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
28025 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
28026 MI.memoperands_end());
2802728027
2802828028 unsigned DstReg;
2802928029 unsigned MemOpndSlot = 0;
2811728117 MIB.addReg(LabelReg);
2811828118 else
2811928119 MIB.addMBB(restoreMBB);
28120 MIB.setMemRefs(MMOBegin, MMOEnd);
28120 MIB.setMemRefs(MMOs);
2812128121
2812228122 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
2812328123 emitSetJmpShadowStackFix(MI, thisMBB);
2817828178 MachineRegisterInfo &MRI = MF->getRegInfo();
2817928179
2818028180 // Memory Reference
28181 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
28182 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
28181 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
28182 MI.memoperands_end());
2818328183
2818428184 MVT PVT = getPointerTy(MF->getDataLayout());
2818528185 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
2826628266 else
2826728267 MIB.add(MI.getOperand(i));
2826828268 }
28269 MIB.setMemRefs(MMOBegin, MMOEnd);
28269 MIB.setMemRefs(MMOs);
2827028270
2827128271 // Subtract the current SSP from the previous SSP.
2827228272 unsigned SspSubReg = MRI.createVirtualRegister(PtrRC);
2835028350 MachineRegisterInfo &MRI = MF->getRegInfo();
2835128351
2835228352 // Memory Reference
28353 MachineInstr::mmo_iterator MMOBegin = MI.memoperands_begin();
28354 MachineInstr::mmo_iterator MMOEnd = MI.memoperands_end();
28353 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
28354 MI.memoperands_end());
2835528355
2835628356 MVT PVT = getPointerTy(MF->getDataLayout());
2835728357 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
2838428384 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
2838528385 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
2838628386 MIB.add(MI.getOperand(i));
28387 MIB.setMemRefs(MMOBegin, MMOEnd);
28387 MIB.setMemRefs(MMOs);
2838828388
2838928389 // Reload IP
2839028390 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
2839428394 else
2839528395 MIB.add(MI.getOperand(i));
2839628396 }
28397 MIB.setMemRefs(MMOBegin, MMOEnd);
28397 MIB.setMemRefs(MMOs);
2839828398
2839928399 // Reload SP
2840028400 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
2840428404 else
2840528405 MIB.add(MI.getOperand(i));
2840628406 }
28407 MIB.setMemRefs(MMOBegin, MMOEnd);
28407 MIB.setMemRefs(MMOs);
2840828408
2840928409 // Jump
2841028410 BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
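In the X86 setjmp/longjmp lowering above, the memoperand list is snapshotted into a local SmallVector once and then attached to each of the rebuilt loads and stores. A hedged sketch of that shape, with every name other than the LLVM types invented:

  #include "llvm/ADT/SmallVector.h"
  #include "llvm/CodeGen/MachineInstrBuilder.h"
  using namespace llvm;

  // Attach the memory operands of MI to two replacement instructions.
  static void fanOutMMOs(MachineInstr &MI, MachineInstrBuilder MIB1,
                         MachineInstrBuilder MIB2) {
    // Take one local copy of the MMO pointers up front...
    SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
                                             MI.memoperands_end());
    // ...and hand it to each new instruction. setMemRefs copies the pointers
    // into the new instruction's own storage, so the local vector does not
    // need to outlive this function.
    MIB1.setMemRefs(MMOs);
    MIB2.setMemRefs(MMOs);
  }

The local copy keeps the MMO list available independently of the original instruction while the replacements are assembled.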
33073307 .addReg(SrcReg, getKillRegState(isKill));
33083308 }
33093309
3310 void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
3311 bool isKill,
3312 SmallVectorImpl<MachineOperand> &Addr,
3313 const TargetRegisterClass *RC,
3314 MachineInstr::mmo_iterator MMOBegin,
3315 MachineInstr::mmo_iterator MMOEnd,
3316 SmallVectorImpl<MachineInstr *> &NewMIs) const {
3310 void X86InstrInfo::storeRegToAddr(
3311 MachineFunction &MF, unsigned SrcReg, bool isKill,
3312 SmallVectorImpl<MachineOperand> &Addr, const TargetRegisterClass *RC,
3313 ArrayRef<MachineMemOperand *> MMOs,
3314 SmallVectorImpl<MachineInstr *> &NewMIs) const {
33173315 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
33183316 unsigned Alignment = std::max(TRI.getSpillSize(*RC), 16);
3319 bool isAligned = MMOBegin != MMOEnd &&
3320 (*MMOBegin)->getAlignment() >= Alignment;
3317 bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
33213318 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
33223319 DebugLoc DL;
33233320 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
33243321 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
33253322 MIB.add(Addr[i]);
33263323 MIB.addReg(SrcReg, getKillRegState(isKill));
3327 (*MIB).setMemRefs(MMOBegin, MMOEnd);
3324 MIB.setMemRefs(MMOs);
33283325 NewMIs.push_back(MIB);
33293326 }
33303327
33443341 addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
33453342 }
33463343
3347 void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
3348 SmallVectorImpl<MachineOperand> &Addr,
3349 const TargetRegisterClass *RC,
3350 MachineInstr::mmo_iterator MMOBegin,
3351 MachineInstr::mmo_iterator MMOEnd,
3352 SmallVectorImpl<MachineInstr *> &NewMIs) const {
3344 void X86InstrInfo::loadRegFromAddr(
3345 MachineFunction &MF, unsigned DestReg,
3346 SmallVectorImpl<MachineOperand> &Addr, const TargetRegisterClass *RC,
3347 ArrayRef<MachineMemOperand *> MMOs,
3348 SmallVectorImpl<MachineInstr *> &NewMIs) const {
33533349 const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
33543350 unsigned Alignment = std::max(TRI.getSpillSize(*RC), 16);
3355 bool isAligned = MMOBegin != MMOEnd &&
3356 (*MMOBegin)->getAlignment() >= Alignment;
3351 bool isAligned = !MMOs.empty() && MMOs.front()->getAlignment() >= Alignment;
33573352 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
33583353 DebugLoc DL;
33593354 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
33603355 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
33613356 MIB.add(Addr[i]);
3362 (*MIB).setMemRefs(MMOBegin, MMOEnd);
3357 MIB.setMemRefs(MMOs);
33633358 NewMIs.push_back(MIB);
33643359 }
33653360
54495444
54505445 // Emit the load instruction.
54515446 if (UnfoldLoad) {
5452 std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> MMOs =
5453 MF.extractLoadMemRefs(MI.memoperands_begin(), MI.memoperands_end());
5454 loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs);
5447 auto MMOs = extractLoadMMOs(MI.memoperands(), MF);
5448 loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs, NewMIs);
54555449 if (UnfoldStore) {
54565450 // Address operands cannot be marked isKill.
54575451 for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
55165510 // Emit the store instruction.
55175511 if (UnfoldStore) {
55185512 const TargetRegisterClass *DstRC = getRegClass(MCID, 0, &RI, MF);
5519 std::pair<MachineInstr::mmo_iterator, MachineInstr::mmo_iterator> MMOs =
5520 MF.extractStoreMemRefs(MI.memoperands_begin(), MI.memoperands_end());
5521 storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs.first, MMOs.second, NewMIs);
5513 auto MMOs = extractStoreMMOs(MI.memoperands(), MF);
5514 storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs, NewMIs);
55225515 }
55235516
55245517 return true;
358358 void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
359359 SmallVectorImpl<MachineOperand> &Addr,
360360 const TargetRegisterClass *RC,
361 MachineInstr::mmo_iterator MMOBegin,
362 MachineInstr::mmo_iterator MMOEnd,
361 ArrayRef<MachineMemOperand *> MMOs,
363362 SmallVectorImpl<MachineInstr *> &NewMIs) const;
364363
365364 void loadRegFromStackSlot(MachineBasicBlock &MBB,
370369 void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
371370 SmallVectorImpl<MachineOperand> &Addr,
372371 const TargetRegisterClass *RC,
373 MachineInstr::mmo_iterator MMOBegin,
374 MachineInstr::mmo_iterator MMOEnd,
372 ArrayRef<MachineMemOperand *> MMOs,
375373 SmallVectorImpl<MachineInstr *> &NewMIs) const;
376374
377375 bool expandPostRAPseudo(MachineInstr &MI) const override;
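Because storeRegToAddr and loadRegFromAddr now take ArrayRef<MachineMemOperand *>, a caller can pass anything that converts to an ArrayRef: the result of the extractLoadMMOs / extractStoreMMOs helpers used in the unfolding code above, a SmallVector, or a single pointer. A hypothetical call site; every name other than the X86InstrInfo method and the LLVM types is invented:

  #include "X86InstrInfo.h" // X86 target-internal header
  #include "llvm/ADT/SmallVector.h"
  using namespace llvm;

  // Store SrcReg through the address in AddrOps, reusing one known MMO.
  static void spillThroughAddr(const X86InstrInfo &XII, MachineFunction &MF,
                               unsigned SrcReg,
                               SmallVectorImpl<MachineOperand> &AddrOps,
                               const TargetRegisterClass *RC,
                               MachineMemOperand *MMO,
                               SmallVectorImpl<MachineInstr *> &NewMIs) {
    // ArrayRef has a one-element constructor, so a single MMO pointer binds
    // to the ArrayRef<MachineMemOperand *> parameter directly.
    XII.storeRegToAddr(MF, SrcReg, /*isKill=*/true, AddrOps, RC, MMO, NewMIs);
  }

On the callee side the same simplification is visible in the diff: the emptiness and alignment checks become MMOs.empty() and MMOs.front() instead of iterator comparisons.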