llvm.org GIT mirror llvm / a2b05bc
CodeGen: Introduce a class for registers Avoids using a plain unsigned for registers throughout codegen. Doesn't attempt to change every register use, just something a little more than the set needed to build after changing the return type of MachineOperand::getReg(). git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@364191 91177308-0d34-0410-b5e6-96231b3b80d8 Matt Arsenault 1 year, 1 month ago
95 changed file(s) with 559 addition(s) and 493 deletion(s). Raw diff Collapse all Expand all
162162 ///
163163 /// \return True if the lowering succeeds, false otherwise.
164164 virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
165 ArrayRef VRegs,
166 unsigned SwiftErrorVReg) const {
165 ArrayRef VRegs,
166 Register SwiftErrorVReg) const {
167167 if (!supportSwiftError()) {
168168 assert(SwiftErrorVReg == 0 && "attempt to use unsupported swifterror");
169169 return lowerReturn(MIRBuilder, Val, VRegs);
174174 /// This hook behaves as the extended lowerReturn function, but for targets
175175 /// that do not support swifterror value promotion.
176176 virtual bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
177 ArrayRef<unsigned> VRegs) const {
177 ArrayRef<Register> VRegs) const {
178178 return false;
179179 }
180180
190190 /// \return True if the lowering succeeded, false otherwise.
191191 virtual bool lowerFormalArguments(MachineIRBuilder &MIRBuilder,
192192 const Function &F,
193 ArrayRef<unsigned> VRegs) const {
193 ArrayRef<Register> VRegs) const {
194194 return false;
195195 }
196196
215215 virtual bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
216216 const MachineOperand &Callee, const ArgInfo &OrigRet,
217217 ArrayRef OrigArgs,
218 unsigned SwiftErrorVReg) const {
218 Register SwiftErrorVReg) const {
219219 if (!supportSwiftError()) {
220220 assert(SwiftErrorVReg == 0 && "trying to use unsupported swifterror");
221221 return lowerCall(MIRBuilder, CallConv, Callee, OrigRet, OrigArgs);
253253 ///
254254 /// \return true if the lowering succeeded, false otherwise.
255255 bool lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
256 unsigned ResReg, ArrayRef ArgRegs,
257 unsigned SwiftErrorVReg,
256 Register ResReg, ArrayRef ArgRegs,
257 Register SwiftErrorVReg,
258258 std::function GetCalleeReg) const;
259259
260260 };
7070 public:
7171 ValueToVRegInfo() = default;
7272
73 using VRegListT = SmallVector<unsigned, 1>;
73 using VRegListT = SmallVector<Register, 1>;
7474 using OffsetListT = SmallVector;
7575
7676 using const_vreg_iterator =
558558 /// Non-aggregate types have just one corresponding VReg and the list can be
559559 /// used as a single "unsigned". Aggregates get flattened. If such VRegs do
560560 /// not exist, they are created.
561 ArrayRef getOrCreateVRegs(const Value &Val);
562
563 unsigned getOrCreateVReg(const Value &Val) {
561 ArrayRef getOrCreateVRegs(const Value &Val);
562
563 Register getOrCreateVReg(const Value &Val) {
564564 auto Regs = getOrCreateVRegs(Val);
565565 if (Regs.empty())
566566 return 0;
3838 return false;
3939
4040 Builder.setInstr(MI);
41 unsigned DstReg = MI.getOperand(0).getReg();
42 unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
41 Register DstReg = MI.getOperand(0).getReg();
42 Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
4343
4444 // aext(trunc x) - > aext/copy/trunc x
45 unsigned TruncSrc;
45 Register TruncSrc;
4646 if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
4747 LLVM_DEBUG(dbgs() << ".. Combine MI: " << MI;);
4848 Builder.buildAnyExtOrTrunc(DstReg, TruncSrc);
5151 }
5252
5353 // aext([asz]ext x) -> [asz]ext x
54 unsigned ExtSrc;
54 Register ExtSrc;
5555 MachineInstr *ExtMI;
5656 if (mi_match(SrcReg, MRI,
5757 m_all_of(m_MInstr(ExtMI), m_any_of(m_GAnyExt(m_Reg(ExtSrc)),
8888 return false;
8989
9090 Builder.setInstr(MI);
91 unsigned DstReg = MI.getOperand(0).getReg();
92 unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
91 Register DstReg = MI.getOperand(0).getReg();
92 Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
9393
9494 // zext(trunc x) - > and (aext/copy/trunc x), mask
95 unsigned TruncSrc;
95 Register TruncSrc;
9696 if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
9797 LLT DstTy = MRI.getType(DstReg);
9898 if (isInstUnsupported({TargetOpcode::G_AND, {DstTy}}) ||
117117 return false;
118118
119119 Builder.setInstr(MI);
120 unsigned DstReg = MI.getOperand(0).getReg();
121 unsigned SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
120 Register DstReg = MI.getOperand(0).getReg();
121 Register SrcReg = lookThroughCopyInstrs(MI.getOperand(1).getReg());
122122
123123 // sext(trunc x) - > ashr (shl (aext/copy/trunc x), c), c
124 unsigned TruncSrc;
124 Register TruncSrc;
125125 if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {
126126 LLT DstTy = MRI.getType(DstReg);
127127 // Guess on the RHS shift amount type, which should be re-legalized if
155155 if (MachineInstr *DefMI = getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF,
156156 MI.getOperand(1).getReg(), MRI)) {
157157 Builder.setInstr(MI);
158 unsigned DstReg = MI.getOperand(0).getReg();
158 Register DstReg = MI.getOperand(0).getReg();
159159 LLT DstTy = MRI.getType(DstReg);
160160
161161 if (Opcode == TargetOpcode::G_ANYEXT) {
223223
224224 const unsigned NewNumDefs = NumDefs / NumMergeRegs;
225225 for (unsigned Idx = 0; Idx < NumMergeRegs; ++Idx) {
226 SmallVector<unsigned, 2> DstRegs;
226 SmallVector<Register, 2> DstRegs;
227227 for (unsigned j = 0, DefIdx = Idx * NewNumDefs; j < NewNumDefs;
228228 ++j, ++DefIdx)
229229 DstRegs.push_back(MI.getOperand(DefIdx).getReg());
245245
246246 const unsigned NumRegs = NumMergeRegs / NumDefs;
247247 for (unsigned DefIdx = 0; DefIdx < NumDefs; ++DefIdx) {
248 SmallVector<unsigned, 2> Regs;
248 SmallVector<Register, 2> Regs;
249249 for (unsigned j = 0, Idx = NumRegs * DefIdx + 1; j < NumRegs;
250250 ++j, ++Idx)
251251 Regs.push_back(MergeI->getOperand(Idx).getReg());
456456
457457 /// Looks through copy instructions and returns the actual
458458 /// source register.
459 unsigned lookThroughCopyInstrs(unsigned Reg) {
460 unsigned TmpReg;
459 unsigned lookThroughCopyInstrs(Register Reg) {
460 Register TmpReg;
461461 while (mi_match(Reg, MRI, m_Copy(m_Reg(TmpReg)))) {
462462 if (MRI.getType(TmpReg).isValid())
463463 Reg = TmpReg;
140140 /// Helper function to split a wide generic register into bitwise blocks with
141141 /// the given Type (which implies the number of blocks needed). The generic
142142 /// registers created are appended to Ops, starting at bit 0 of Reg.
143 void extractParts(unsigned Reg, LLT Ty, int NumParts,
144 SmallVectorImpl &VRegs);
143 void extractParts(Register Reg, LLT Ty, int NumParts,
144 SmallVectorImpl &VRegs);
145145
146146 /// Version which handles irregular splits.
147 bool extractParts(unsigned Reg, LLT RegTy, LLT MainTy,
147 bool extractParts(Register Reg, LLT RegTy, LLT MainTy,
148148 LLT &LeftoverTy,
149 SmallVectorImpl &VRegs,
150 SmallVectorImpl<unsigned> &LeftoverVRegs);
149 SmallVectorImpl<Register> &VRegs,
150 SmallVectorImpl &LeftoverVRegs);
151151
152152 /// Helper function to build a wide generic register \p DstReg of type \p
153153 /// RegTy from smaller parts. This will produce a G_MERGE_VALUES,
158158 ///
159159 /// If \p ResultTy does not evenly break into \p PartTy sized pieces, the
160160 /// remainder must be specified with \p LeftoverRegs of type \p LeftoverTy.
161 void insertParts(unsigned DstReg, LLT ResultTy,
162 LLT PartTy, ArrayRef PartRegs,
163 LLT LeftoverTy = LLT(), ArrayRef LeftoverRegs = {});
161 void insertParts(Register DstReg, LLT ResultTy,
162 LLT PartTy, ArrayRef PartRegs,
163 LLT LeftoverTy = LLT(), ArrayRef LeftoverRegs = {});
164164
165165 /// Perform generic multiplication of values held in multiple registers.
166166 /// Generated instructions use only types NarrowTy and i1.
167167 /// Destination can be same or two times size of the source.
168 void multiplyRegisters(SmallVectorImpl &DstRegs,
169 ArrayRef Src1Regs,
170 ArrayRef Src2Regs, LLT NarrowTy);
168 void multiplyRegisters(SmallVectorImpl &DstRegs,
169 ArrayRef Src1Regs,
170 ArrayRef Src2Regs, LLT NarrowTy);
171171
172172 LegalizeResult fewerElementsVectorImplicitDef(MachineInstr &MI,
173173 unsigned TypeIdx, LLT NarrowTy);
159159 }
160160 };
161161
162 inline bind_ty<unsigned> m_Reg(unsigned &R) { return R; }
162 inline bind_ty<Register> m_Reg(Register &R) { return R; }
163163 inline bind_ty m_MInstr(MachineInstr *&MI) { return MI; }
164164 inline bind_ty m_Type(LLT &Ty) { return Ty; }
165165
6565 public:
6666 enum class DstType { Ty_LLT, Ty_Reg, Ty_RC };
6767 DstOp(unsigned R) : Reg(R), Ty(DstType::Ty_Reg) {}
68 DstOp(Register R) : Reg(R), Ty(DstType::Ty_Reg) {}
6869 DstOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(DstType::Ty_Reg) {}
6970 DstOp(const LLT &T) : LLTTy(T), Ty(DstType::Ty_LLT) {}
7071 DstOp(const TargetRegisterClass *TRC) : RC(TRC), Ty(DstType::Ty_RC) {}
125126 public:
126127 enum class SrcType { Ty_Reg, Ty_MIB, Ty_Predicate };
127128 SrcOp(unsigned R) : Reg(R), Ty(SrcType::Ty_Reg) {}
129 SrcOp(Register R) : Reg(R), Ty(SrcType::Ty_Reg) {}
128130 SrcOp(const MachineOperand &Op) : Reg(Op.getReg()), Ty(SrcType::Ty_Reg) {}
129131 SrcOp(const MachineInstrBuilder &MIB) : SrcMIB(MIB), Ty(SrcType::Ty_MIB) {}
130132 SrcOp(const CmpInst::Predicate P) : Pred(P), Ty(SrcType::Ty_Predicate) {}
400402 /// type as \p Op0 or \p Op0 itself.
401403 ///
402404 /// \return a MachineInstrBuilder for the newly created instruction.
403 Optional materializeGEP(unsigned &Res, unsigned Op0,
405 Optional materializeGEP(Register &Res, Register Op0,
404406 const LLT &ValueTy,
405407 uint64_t Value);
406408
716718 /// \pre The bits defined by each Op (derived from index and scalar size) must
717719 /// not overlap.
718720 /// \pre \p Indices must be in ascending order of bit position.
719 void buildSequence(unsigned Res, ArrayRef> Ops,
721 void buildSequence(Register Res, ArrayRef> Ops,
720722 ArrayRef Indices);
721723
722724 /// Build and insert \p Res = G_MERGE_VALUES \p Op0, ...
730732 /// \pre The type of all \p Ops registers must be identical.
731733 ///
732734 /// \return a MachineInstrBuilder for the newly created instruction.
733 MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef<unsigned> Ops);
735 MachineInstrBuilder buildMerge(const DstOp &Res, ArrayRef<Register> Ops);
734736
735737 /// Build and insert \p Res0, ... = G_UNMERGE_VALUES \p Op
736738 ///
743745 ///
744746 /// \return a MachineInstrBuilder for the newly created instruction.
745747 MachineInstrBuilder buildUnmerge(ArrayRef Res, const SrcOp &Op);
746 MachineInstrBuilder buildUnmerge(ArrayRef<unsigned> Res, const SrcOp &Op);
748 MachineInstrBuilder buildUnmerge(ArrayRef<Register> Res, const SrcOp &Op);
747749
748750 /// Build and insert an unmerge of \p Res sized pieces to cover \p Op
749751 MachineInstrBuilder buildUnmerge(LLT Res, const SrcOp &Op);
758760 ///
759761 /// \return a MachineInstrBuilder for the newly created instruction.
760762 MachineInstrBuilder buildBuildVector(const DstOp &Res,
761 ArrayRef<unsigned> Ops);
763 ArrayRef<Register> Ops);
762764
763765 /// Build and insert \p Res = G_BUILD_VECTOR with \p Src replicated to fill
764766 /// the number of elements
779781 ///
780782 /// \return a MachineInstrBuilder for the newly created instruction.
781783 MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res,
782 ArrayRef<unsigned> Ops);
784 ArrayRef<Register> Ops);
783785
784786 /// Build and insert \p Res = G_CONCAT_VECTORS \p Op0, ...
785787 ///
793795 ///
794796 /// \return a MachineInstrBuilder for the newly created instruction.
795797 MachineInstrBuilder buildConcatVectors(const DstOp &Res,
796 ArrayRef Ops);
797
798 MachineInstrBuilder buildInsert(unsigned Res, unsigned Src,
799 unsigned Op, unsigned Index);
798 ArrayRef Ops);
799
800 MachineInstrBuilder buildInsert(Register Res, Register Src,
801 Register Op, unsigned Index);
800802
801803 /// Build and insert either a G_INTRINSIC (if \p HasSideEffects is false) or
802804 /// G_INTRINSIC_W_SIDE_EFFECTS instruction. Its first operand will be the
808810 /// \pre setBasicBlock or setMI must have been called.
809811 ///
810812 /// \return a MachineInstrBuilder for the newly created instruction.
811 MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef<unsigned> Res,
813 MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef<Register> Res,
812814 bool HasSideEffects);
813815 MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef Res,
814816 bool HasSideEffects);
1313 #define LLVM_CODEGEN_MACHINEOPERAND_H
1414
1515 #include "llvm/ADT/DenseMap.h"
16 #include "llvm/CodeGen/Register.h"
1617 #include "llvm/IR/Intrinsics.h"
1718 #include "llvm/Support/DataTypes.h"
1819 #include "llvm/Support/LowLevelTypeImpl.h"
344345 //===--------------------------------------------------------------------===//
345346
346347 /// getReg - Returns the register number.
347 unsigned getReg() const {
348 Register getReg() const {
348349 assert(isReg() && "This is not a register operand!");
349 return SmallContents.RegNo;
350 return Register(SmallContents.RegNo);
350351 }
351352
352353 unsigned getSubReg() const {
711711
712712 /// createVirtualRegister - Create and return a new virtual register in the
713713 /// function with the specified register class.
714 unsigned createVirtualRegister(const TargetRegisterClass *RegClass,
714 Register createVirtualRegister(const TargetRegisterClass *RegClass,
715715 StringRef Name = "");
716716
717717 /// Create and return a new virtual register in the function with the same
718718 /// attributes as the given register.
719 unsigned cloneVirtualRegister(unsigned VReg, StringRef Name = "");
719 Register cloneVirtualRegister(Register VReg, StringRef Name = "");
720720
721721 /// Get the low-level type of \p Reg or LLT{} if Reg is not a generic
722722 /// (target independent) virtual register.
731731
732732 /// Create and return a new generic virtual register with low-level
733733 /// type \p Ty.
734 unsigned createGenericVirtualRegister(LLT Ty, StringRef Name = "");
734 Register createGenericVirtualRegister(LLT Ty, StringRef Name = "");
735735
736736 /// Remove all types associated to virtual registers (after instruction
737737 /// selection and constraining of all generic virtual registers).
0 //===-- llvm/CodeGen/Register.h ---------------------------------*- C++ -*-===//
1 //
2 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
3 // See https://llvm.org/LICENSE.txt for license information.
4 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5 //
6 //===----------------------------------------------------------------------===//
7
8 #ifndef LLVM_CODEGEN_REGISTER_H
9 #define LLVM_CODEGEN_REGISTER_H
10
11 #include
12
13 namespace llvm {
14
15 /// Wrapper class representing virtual and physical registers. Should be passed
16 /// by value.
/// Wrapper class representing virtual and physical registers. Should be passed
/// by value.
class Register {
  // Raw register number. 0 means "no register"; values with the top bit set
  // are in the virtual-register namespace, everything else is physical.
  unsigned Reg;

  // Bit that distinguishes virtual registers from physical ones.
  static constexpr unsigned VirtualBit = 1u << 31;

public:
  Register(unsigned Val = 0): Reg(Val) {}

  /// Return true if the specified register number is in the virtual register
  /// namespace.
  bool isVirtual() const {
    return (Reg & VirtualBit) != 0;
  }

  /// Return true if the specified register number is in the physical register
  /// namespace.
  bool isPhysical() const {
    return Reg != 0 && (Reg & VirtualBit) == 0;
  }

  /// Convert a virtual register number to a 0-based index. The first virtual
  /// register in a function will get the index 0.
  unsigned virtRegIndex() const {
    assert(isVirtual() && "Not a virtual register");
    return Reg & ~VirtualBit;
  }

  /// Convert a 0-based index to a virtual register number.
  /// This is the inverse operation of VirtReg2IndexFunctor below.
  static Register index2VirtReg(unsigned Index) {
    return Register(Index | VirtualBit);
  }

  // Implicit conversion keeps existing unsigned-based register code working
  // while the codebase migrates to Register.
  operator unsigned() const {
    return Reg;
  }

  /// Return true if this refers to an actual register (i.e. not the sentinel
  /// "no register" value 0).
  bool isValid() const {
    return Reg != 0;
  }
};
56
57 }
58
59 #endif
1616
1717 #include "llvm/ADT/DenseMap.h"
1818 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/CodeGen/Register.h"
1920 #include "llvm/IR/BasicBlock.h"
2021 #include "llvm/IR/DebugLoc.h"
2122 #include
4041
4142 /// A map from swifterror value in a basic block to the virtual register it is
4243 /// currently represented by.
43 DenseMap, unsigned>
44 DenseMap, Register>
4445 VRegDefMap;
4546
4647 /// A list of upward exposed vreg uses that need to be satisfied by either a
4748 /// copy def or a phi node at the beginning of the basic block representing
4849 /// the predecessor(s) swifterror value.
49 DenseMap, unsigned>
50 DenseMap, Register>
5051 VRegUpwardsUse;
5152
5253 /// A map from instructions that define/use a swifterror value to the virtual
5354 /// register that represents that def/use.
54 llvm::DenseMap, unsigned>
55 llvm::DenseMap, Register>
5556 VRegDefUses;
5657
5758 /// The swifterror argument of the current function.
7980
8081 /// Set the swifterror virtual register in the VRegDefMap for this
8182 /// basic block.
82 void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, unsigned);
83 void setCurrentVReg(const MachineBasicBlock *MBB, const Value *, Register);
8384
8485 /// Get or create the swifterror value virtual register for a def of a
8586 /// swifterror by an instruction.
989989
990990 /// getFrameRegister - This method should return the register used as a base
991991 /// for values allocated in the current stack frame.
992 virtual unsigned getFrameRegister(const MachineFunction &MF) const = 0;
992 virtual Register getFrameRegister(const MachineFunction &MF) const = 0;
993993
994994 /// Mark a register and all its aliases as reserved in the given set.
995995 void markSuperRegs(BitVector &RegisterSet, unsigned Reg) const;
9696
9797 /// returns the physical register mapped to the specified
9898 /// virtual register
99 unsigned getPhys(unsigned virtReg) const {
100 assert(TargetRegisterInfo::isVirtualRegister(virtReg));
99 Register getPhys(Register virtReg) const {
100 assert(virtReg.isVirtual());
101101 return Virt2PhysMap[virtReg];
102102 }
103103
3737 // If @MI is a DBG_VALUE with debug value described by a
3838 // defined register, returns the number of this register.
3939 // In the other case, returns 0.
40 static unsigned isDescribedByReg(const MachineInstr &MI) {
40 static Register isDescribedByReg(const MachineInstr &MI) {
4141 assert(MI.isDebugValue());
4242 assert(MI.getNumOperands() == 4);
4343 // If location of variable is described using a register (directly or
4444 // indirectly), this register is always a first operand.
45 return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : 0;
45 return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : Register();
4646 }
4747
4848 bool DbgValueHistoryMap::startDbgValue(InlinedEntity Var,
2626 void CallLowering::anchor() {}
2727
2828 bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, ImmutableCallSite CS,
29 unsigned ResReg, ArrayRef ArgRegs,
30 unsigned SwiftErrorVReg,
29 Register ResReg, ArrayRef ArgRegs,
30 Register SwiftErrorVReg,
3131 std::function GetCalleeReg) const {
3232 auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();
3333
130130 if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo)) {
131131 // Try to use the register type if we couldn't assign the VT.
132132 if (!Handler.isArgumentHandler() || !CurVT.isValid())
133 return false;
133 return false;
134134 CurVT = TLI->getRegisterTypeForCallingConv(
135135 F.getContext(), F.getCallingConv(), EVT(CurVT));
136136 if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo))
168168 return *Regs;
169169 }
170170
171 ArrayRef<unsigned> IRTranslator::getOrCreateVRegs(const Value &Val) {
171 ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
172172 auto VRegsIt = VMap.findVRegs(Val);
173173 if (VRegsIt != VMap.vregs_end())
174174 return *VRegsIt->second;
362362 if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
363363 Ret = nullptr;
364364
365 ArrayRef<unsigned> VRegs;
365 ArrayRef<Register> VRegs;
366366 if (Ret)
367367 VRegs = getOrCreateVRegs(*Ret);
368368
369 unsigned SwiftErrorVReg = 0;
369 Register SwiftErrorVReg = 0;
370370 if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
371371 SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
372372 &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
857857 if (DL->getTypeStoreSize(LI.getType()) == 0)
858858 return true;
859859
860 ArrayRef<unsigned> Regs = getOrCreateVRegs(LI);
860 ArrayRef<Register> Regs = getOrCreateVRegs(LI);
861861 ArrayRef Offsets = *VMap.getOffsets(LI);
862862 unsigned Base = getOrCreateVReg(*LI.getPointerOperand());
863863
874874
875875
876876 for (unsigned i = 0; i < Regs.size(); ++i) {
877 unsigned Addr = 0;
877 Register Addr;
878878 MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
879879
880880 MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
898898 if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
899899 return true;
900900
901 ArrayRef<unsigned> Vals = getOrCreateVRegs(*SI.getValueOperand());
901 ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
902902 ArrayRef Offsets = *VMap.getOffsets(*SI.getValueOperand());
903903 unsigned Base = getOrCreateVReg(*SI.getPointerOperand());
904904
915915 }
916916
917917 for (unsigned i = 0; i < Vals.size(); ++i) {
918 unsigned Addr = 0;
918 Register Addr;
919919 MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);
920920
921921 MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
957957 MachineIRBuilder &MIRBuilder) {
958958 const Value *Src = U.getOperand(0);
959959 uint64_t Offset = getOffsetFromIndices(U, *DL);
960 ArrayRef<unsigned> SrcRegs = getOrCreateVRegs(*Src);
960 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
961961 ArrayRef Offsets = *VMap.getOffsets(*Src);
962962 unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
963963 auto &DstRegs = allocateVRegs(U);
974974 uint64_t Offset = getOffsetFromIndices(U, *DL);
975975 auto &DstRegs = allocateVRegs(U);
976976 ArrayRef DstOffsets = *VMap.getOffsets(U);
977 ArrayRef SrcRegs = getOrCreateVRegs(*Src);
978 ArrayRef<unsigned> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
977 ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
978 ArrayRef InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
979979 auto InsertedIt = InsertedRegs.begin();
980980
981981 for (unsigned i = 0; i < DstRegs.size(); ++i) {
991991 bool IRTranslator::translateSelect(const User &U,
992992 MachineIRBuilder &MIRBuilder) {
993993 unsigned Tst = getOrCreateVReg(*U.getOperand(0));
994 ArrayRef ResRegs = getOrCreateVRegs(U);
995 ArrayRef Op0Regs = getOrCreateVRegs(*U.getOperand(1));
996 ArrayRef<unsigned> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
994 ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
995 ArrayRef Op0Regs = getOrCreateVRegs(*U.getOperand(1));
996 ArrayRef Op1Regs = getOrCreateVRegs(*U.getOperand(2));
997997
998998 const SelectInst &SI = cast(U);
999999 uint16_t Flags = 0;
11851185
11861186 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
11871187 MachineIRBuilder &MIRBuilder) {
1188 ArrayRef<unsigned> ResRegs = getOrCreateVRegs(CI);
1188 ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
11891189 MIRBuilder.buildInstr(Op)
11901190 .addDef(ResRegs[0])
11911191 .addDef(ResRegs[1])
15381538
15391539 unsigned IRTranslator::packRegs(const Value &V,
15401540 MachineIRBuilder &MIRBuilder) {
1541 ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
1541 ArrayRef<Register> Regs = getOrCreateVRegs(V);
15421542 ArrayRef Offsets = *VMap.getOffsets(V);
15431543 LLT BigTy = getLLTForType(*V.getType(), *DL);
15441544
15571557
15581558 void IRTranslator::unpackRegs(const Value &V, unsigned Src,
15591559 MachineIRBuilder &MIRBuilder) {
1560 ArrayRef<unsigned> Regs = getOrCreateVRegs(V);
1560 ArrayRef<Register> Regs = getOrCreateVRegs(V);
15611561 ArrayRef Offsets = *VMap.getOffsets(V);
15621562
15631563 for (unsigned i = 0; i < Regs.size(); ++i)
15851585
15861586 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
15871587 bool IsSplitType = valueIsSplit(CI);
1588 unsigned Res = IsSplitType ? MRI->createGenericVirtualRegister(
1588 Register Res = IsSplitType ? MRI->createGenericVirtualRegister(
15891589 getLLTForType(*CI.getType(), *DL))
15901590 : getOrCreateVReg(CI);
15911591
1592 SmallVector Args;
1593 unsigned SwiftErrorVReg = 0;
1592 SmallVector Args;
1593 Register SwiftErrorVReg;
15941594 for (auto &Arg: CI.arg_operands()) {
15951595 if (CLI->supportSwiftError() && isSwiftError(Arg)) {
15961596 LLT Ty = getLLTForType(*Arg->getType(), *DL);
16211621 if (translateKnownIntrinsic(CI, ID, MIRBuilder))
16221622 return true;
16231623
1624 ArrayRef<unsigned> ResultRegs;
1624 ArrayRef<Register> ResultRegs;
16251625 if (!CI.getType()->isVoidTy())
16261626 ResultRegs = getOrCreateVRegs(CI);
16271627
16891689 unsigned Res = 0;
16901690 if (!I.getType()->isVoidTy())
16911691 Res = MRI->createGenericVirtualRegister(getLLTForType(*I.getType(), *DL));
1692 SmallVector Args;
1693 unsigned SwiftErrorVReg = 0;
1692 SmallVector Args;
1693 Register SwiftErrorVReg;
16941694 for (auto &Arg : I.arg_operands()) {
16951695 if (CLI->supportSwiftError() && isSwiftError(Arg)) {
16961696 LLT Ty = getLLTForType(*Arg->getType(), *DL);
17751775 return false;
17761776
17771777 MBB.addLiveIn(ExceptionReg);
1778 ArrayRef<unsigned> ResRegs = getOrCreateVRegs(LP);
1778 ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
17791779 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
17801780
17811781 unsigned SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
20682068 SmallSet SeenPreds;
20692069 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
20702070 auto IRPred = PI->getIncomingBlock(i);
2071 ArrayRef<unsigned> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
2071 ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
20722072 for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
20732073 if (SeenPreds.count(Pred))
20742074 continue;
21352135 // Return the scalar if it is a <1 x Ty> vector.
21362136 if (CAZ->getNumElements() == 1)
21372137 return translate(*CAZ->getElementValue(0u), Reg);
2138 SmallVector<unsigned, 4> Ops;
2138 SmallVector<Register, 4> Ops;
21392139 for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
21402140 Constant &Elt = *CAZ->getElementValue(i);
21412141 Ops.push_back(getOrCreateVReg(Elt));
21452145 // Return the scalar if it is a <1 x Ty> vector.
21462146 if (CV->getNumElements() == 1)
21472147 return translate(*CV->getElementAsConstant(0), Reg);
2148 SmallVector<unsigned, 4> Ops;
2148 SmallVector<Register, 4> Ops;
21492149 for (unsigned i = 0; i < CV->getNumElements(); ++i) {
21502150 Constant &Elt = *CV->getElementAsConstant(i);
21512151 Ops.push_back(getOrCreateVReg(Elt));
21632163 } else if (auto CV = dyn_cast(&C)) {
21642164 if (CV->getNumOperands() == 1)
21652165 return translate(*CV->getOperand(0), Reg);
2166 SmallVector<unsigned, 4> Ops;
2166 SmallVector<Register, 4> Ops;
21672167 for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
21682168 Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
21692169 }
22732273 EntryBB->addSuccessor(&getMBB(F.front()));
22742274
22752275 // Lower the actual args into this basic block.
2276 SmallVector<unsigned, 8> VRegArgs;
2276 SmallVector<Register, 8> VRegArgs;
22772277 for (const Argument &Arg: F.args()) {
22782278 if (DL->getTypeStoreSize(Arg.getType()) == 0)
22792279 continue; // Don't handle zero sized types.
114114 }
115115 }
116116
117 void LegalizerHelper::extractParts(unsigned Reg, LLT Ty, int NumParts,
118 SmallVectorImpl &VRegs) {
117 void LegalizerHelper::extractParts(Register Reg, LLT Ty, int NumParts,
118 SmallVectorImpl &VRegs) {
119119 for (int i = 0; i < NumParts; ++i)
120120 VRegs.push_back(MRI.createGenericVirtualRegister(Ty));
121121 MIRBuilder.buildUnmerge(VRegs, Reg);
122122 }
123123
124 bool LegalizerHelper::extractParts(unsigned Reg, LLT RegTy,
124 bool LegalizerHelper::extractParts(Register Reg, LLT RegTy,
125125 LLT MainTy, LLT &LeftoverTy,
126 SmallVectorImpl &VRegs,
127 SmallVectorImpl<unsigned> &LeftoverRegs) {
126 SmallVectorImpl<Register> &VRegs,
127 SmallVectorImpl &LeftoverRegs) {
128128 assert(!LeftoverTy.isValid() && "this is an out argument");
129129
130130 unsigned RegSize = RegTy.getSizeInBits();
151151
152152 // For irregular sizes, extract the individual parts.
153153 for (unsigned I = 0; I != NumParts; ++I) {
154 unsigned NewReg = MRI.createGenericVirtualRegister(MainTy);
154 Register NewReg = MRI.createGenericVirtualRegister(MainTy);
155155 VRegs.push_back(NewReg);
156156 MIRBuilder.buildExtract(NewReg, Reg, MainSize * I);
157157 }
158158
159159 for (unsigned Offset = MainSize * NumParts; Offset < RegSize;
160160 Offset += LeftoverSize) {
161 unsigned NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
161 Register NewReg = MRI.createGenericVirtualRegister(LeftoverTy);
162162 LeftoverRegs.push_back(NewReg);
163163 MIRBuilder.buildExtract(NewReg, Reg, Offset);
164164 }
166166 return true;
167167 }
168168
169 void LegalizerHelper::insertParts(unsigned DstReg,
169 void LegalizerHelper::insertParts(Register DstReg,
170170 LLT ResultTy, LLT PartTy,
171 ArrayRef<unsigned> PartRegs,
171 ArrayRef<Register> PartRegs,
172172 LLT LeftoverTy,
173 ArrayRef<unsigned> LeftoverRegs) {
173 ArrayRef<Register> LeftoverRegs) {
174174 if (!LeftoverTy.isValid()) {
175175 assert(LeftoverRegs.empty());
176176
468468 return UnableToLegalize;
469469 int NumParts = SizeOp0 / NarrowSize;
470470
471 SmallVector<unsigned, 2> DstRegs;
471 SmallVector<Register, 2> DstRegs;
472472 for (int i = 0; i < NumParts; ++i)
473473 DstRegs.push_back(
474474 MIRBuilder.buildUndef(NarrowTy)->getOperand(0).getReg());
488488 unsigned NarrowSize = NarrowTy.getSizeInBits();
489489 int NumParts = TotalSize / NarrowSize;
490490
491 SmallVector<unsigned, 4> PartRegs;
491 SmallVector<Register, 4> PartRegs;
492492 for (int I = 0; I != NumParts; ++I) {
493493 unsigned Offset = I * NarrowSize;
494494 auto K = MIRBuilder.buildConstant(NarrowTy,
498498
499499 LLT LeftoverTy;
500500 unsigned LeftoverBits = TotalSize - NumParts * NarrowSize;
501 SmallVector<unsigned, 1> LeftoverRegs;
501 SmallVector<Register, 1> LeftoverRegs;
502502 if (LeftoverBits != 0) {
503503 LeftoverTy = LLT::scalar(LeftoverBits);
504504 auto K = MIRBuilder.buildConstant(
521521 // Expand in terms of carry-setting/consuming G_ADDE instructions.
522522 int NumParts = SizeOp0 / NarrowTy.getSizeInBits();
523523
524 SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs;
524 SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
525525 extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
526526 extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);
527527
554554
555555 int NumParts = SizeOp0 / NarrowTy.getSizeInBits();
556556
557 SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs;
557 SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
558558 extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src1Regs);
559559 extractParts(MI.getOperand(2).getReg(), NarrowTy, NumParts, Src2Regs);
560560
760760
761761 // Use concat_vectors if the result is a multiple of the number of elements.
762762 if (NumParts * OldElts == NewElts) {
763 SmallVector<unsigned, 8> Parts;
763 SmallVector<Register, 8> Parts;
764764 Parts.push_back(MO.getReg());
765765
766766 unsigned ImpDef = MIRBuilder.buildUndef(OldTy).getReg(0);
784784 if (TypeIdx != 1)
785785 return UnableToLegalize;
786786
787 unsigned DstReg = MI.getOperand(0).getReg();
787 Register DstReg = MI.getOperand(0).getReg();
788788 LLT DstTy = MRI.getType(DstReg);
789789 if (!DstTy.isScalar())
790790 return UnableToLegalize;
794794 unsigned PartSize = DstTy.getSizeInBits() / NumSrc;
795795
796796 unsigned Src1 = MI.getOperand(1).getReg();
797 unsigned ResultReg = MIRBuilder.buildZExt(DstTy, Src1)->getOperand(0).getReg();
797 Register ResultReg = MIRBuilder.buildZExt(DstTy, Src1)->getOperand(0).getReg();
798798
799799 for (unsigned I = 2; I != NumOps; ++I) {
800800 const unsigned Offset = (I - 1) * PartSize;
801801
802 unsigned SrcReg = MI.getOperand(I).getReg();
802 Register SrcReg = MI.getOperand(I).getReg();
803803 assert(MRI.getType(SrcReg) == LLT::scalar(PartSize));
804804
805805 auto ZextInput = MIRBuilder.buildZExt(DstTy, SrcReg);
806806
807 unsigned NextResult = I + 1 == NumOps ? DstReg :
807 Register NextResult = I + 1 == NumOps ? DstReg :
808808 MRI.createGenericVirtualRegister(DstTy);
809809
810810 auto ShiftAmt = MIRBuilder.buildConstant(DstTy, Offset);
824824 return UnableToLegalize;
825825
826826 unsigned NumDst = MI.getNumOperands() - 1;
827 unsigned SrcReg = MI.getOperand(NumDst).getReg();
827 Register SrcReg = MI.getOperand(NumDst).getReg();
828828 LLT SrcTy = MRI.getType(SrcReg);
829829 if (!SrcTy.isScalar())
830830 return UnableToLegalize;
831831
832 unsigned Dst0Reg = MI.getOperand(0).getReg();
832 Register Dst0Reg = MI.getOperand(0).getReg();
833833 LLT DstTy = MRI.getType(Dst0Reg);
834834 if (!DstTy.isScalar())
835835 return UnableToLegalize;
860860 LegalizerHelper::LegalizeResult
861861 LegalizerHelper::widenScalarExtract(MachineInstr &MI, unsigned TypeIdx,
862862 LLT WideTy) {
863 unsigned DstReg = MI.getOperand(0).getReg();
864 unsigned SrcReg = MI.getOperand(1).getReg();
863 Register DstReg = MI.getOperand(0).getReg();
864 Register SrcReg = MI.getOperand(1).getReg();
865865 LLT SrcTy = MRI.getType(SrcReg);
866866
867867 LLT DstTy = MRI.getType(DstReg);
16161616
16171617 LegalizerHelper::LegalizeResult LegalizerHelper::fewerElementsVectorImplicitDef(
16181618 MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy) {
1619 SmallVector<unsigned, 2> DstRegs;
1619 SmallVector<Register, 2> DstRegs;
16201620
16211621 unsigned NarrowSize = NarrowTy.getSizeInBits();
16221622 unsigned DstReg = MI.getOperand(0).getReg();
17011701 return Legalized;
17021702 }
17031703
1704 SmallVector<unsigned, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
1704 SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
17051705
17061706 extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, Src0Regs);
17071707
17721772
17731773 SmallVector NewInsts;
17741774
1775 SmallVector DstRegs, LeftoverDstRegs;
1776 SmallVector<unsigned, 4> PartRegs, LeftoverRegs;
1775 SmallVector<Register, 4> DstRegs, LeftoverDstRegs;
1776 SmallVector PartRegs, LeftoverRegs;
17771777
17781778 for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
17791779 LLT LeftoverTy;
18601860 NarrowTy1 = SrcTy.getElementType();
18611861 }
18621862
1863 SmallVector<unsigned, 4> SrcRegs, DstRegs;
1863 SmallVector<Register, 4> SrcRegs, DstRegs;
18641864 extractParts(SrcReg, NarrowTy1, NumParts, SrcRegs);
18651865
18661866 for (unsigned I = 0; I < NumParts; ++I) {
19231923 CmpInst::Predicate Pred
19241924 = static_cast(MI.getOperand(1).getPredicate());
19251925
1926 SmallVector<unsigned, 2> Src1Regs, Src2Regs, DstRegs;
1926 SmallVector<Register, 2> Src1Regs, Src2Regs, DstRegs;
19271927 extractParts(MI.getOperand(2).getReg(), NarrowTy1, NumParts, Src1Regs);
19281928 extractParts(MI.getOperand(3).getReg(), NarrowTy1, NumParts, Src2Regs);
19291929
19521952 LegalizerHelper::LegalizeResult
19531953 LegalizerHelper::fewerElementsVectorSelect(MachineInstr &MI, unsigned TypeIdx,
19541954 LLT NarrowTy) {
1955 unsigned DstReg = MI.getOperand(0).getReg();
1956 unsigned CondReg = MI.getOperand(1).getReg();
1955 Register DstReg = MI.getOperand(0).getReg();
1956 Register CondReg = MI.getOperand(1).getReg();
19571957
19581958 unsigned NumParts = 0;
19591959 LLT NarrowTy0, NarrowTy1;
19981998 }
19991999 }
20002000
2001 SmallVector<unsigned, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
2001 SmallVector<Register, 2> DstRegs, Src0Regs, Src1Regs, Src2Regs;
20022002 if (CondTy.isVector())
20032003 extractParts(MI.getOperand(1).getReg(), NarrowTy1, NumParts, Src0Regs);
20042004
20062006 extractParts(MI.getOperand(3).getReg(), NarrowTy0, NumParts, Src2Regs);
20072007
20082008 for (unsigned i = 0; i < NumParts; ++i) {
2009 unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
2009 Register DstReg = MRI.createGenericVirtualRegister(NarrowTy0);
20102010 MIRBuilder.buildSelect(DstReg, CondTy.isVector() ? Src0Regs[i] : CondReg,
20112011 Src1Regs[i], Src2Regs[i]);
20122012 DstRegs.push_back(DstReg);
20372037 if (NumParts < 0)
20382038 return UnableToLegalize;
20392039
2040 SmallVector<unsigned, 4> DstRegs, LeftoverDstRegs;
2040 SmallVector<Register, 4> DstRegs, LeftoverDstRegs;
20412041 SmallVector NewInsts;
20422042
20432043 const int TotalNumParts = NumParts + NumLeftover;
20452045 // Insert the new phis in the result block first.
20462046 for (int I = 0; I != TotalNumParts; ++I) {
20472047 LLT Ty = I < NumParts ? NarrowTy : LeftoverTy;
2048 unsigned PartDstReg = MRI.createGenericVirtualRegister(Ty);
2048 Register PartDstReg = MRI.createGenericVirtualRegister(Ty);
20492049 NewInsts.push_back(MIRBuilder.buildInstr(TargetOpcode::G_PHI)
20502050 .addDef(PartDstReg));
20512051 if (I < NumParts)
20582058 MIRBuilder.setInsertPt(*MBB, MBB->getFirstNonPHI());
20592059 insertParts(DstReg, PhiTy, NarrowTy, DstRegs, LeftoverTy, LeftoverDstRegs);
20602060
2061 SmallVector<unsigned, 4> PartRegs, LeftoverRegs;
2061 SmallVector<Register, 4> PartRegs, LeftoverRegs;
20622062
20632063 // Insert code to extract the incoming values in each predecessor block.
20642064 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
21042104 return UnableToLegalize;
21052105
21062106 bool IsLoad = MI.getOpcode() == TargetOpcode::G_LOAD;
2107 unsigned ValReg = MI.getOperand(0).getReg();
2108 unsigned AddrReg = MI.getOperand(1).getReg();
2107 Register ValReg = MI.getOperand(0).getReg();
2108 Register AddrReg = MI.getOperand(1).getReg();
21092109 LLT ValTy = MRI.getType(ValReg);
21102110
21112111 int NumParts = -1;
21122112 int NumLeftover = -1;
21132113 LLT LeftoverTy;
2114 SmallVector<unsigned, 8> NarrowRegs, NarrowLeftoverRegs;
2114 SmallVector<Register, 8> NarrowRegs, NarrowLeftoverRegs;
21152115 if (IsLoad) {
21162116 std::tie(NumParts, NumLeftover) = getNarrowTypeBreakDown(ValTy, NarrowTy, LeftoverTy);
21172117 } else {
21332133 // is a load, return the new registers in ValRegs. For a store, each elements
21342134 // of ValRegs should be PartTy. Returns the next offset that needs to be
21352135 // handled.
2136 auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl<unsigned> &ValRegs,
2136 auto splitTypePieces = [=](LLT PartTy, SmallVectorImpl<Register> &ValRegs,
21372137 unsigned Offset) -> unsigned {
21382138 MachineFunction &MF = MIRBuilder.getMF();
21392139 unsigned PartSize = PartTy.getSizeInBits();
21412141 Offset += PartSize, ++Idx) {
21422142 unsigned ByteSize = PartSize / 8;
21432143 unsigned ByteOffset = Offset / 8;
2144 unsigned NewAddrReg = 0;
2144 Register NewAddrReg;
21452145
21462146 MIRBuilder.materializeGEP(NewAddrReg, AddrReg, OffsetTy, ByteOffset);
21472147
21492149 MF.getMachineMemOperand(MMO, ByteOffset, ByteSize);
21502150
21512151 if (IsLoad) {
2152 unsigned Dst = MRI.createGenericVirtualRegister(PartTy);
2152 Register Dst = MRI.createGenericVirtualRegister(PartTy);
21532153 ValRegs.push_back(Dst);
21542154 MIRBuilder.buildLoad(Dst, NewAddrReg, *NewMMO);
21552155 } else {
24002400 auto IsShort = MIRBuilder.buildICmp(ICmpInst::ICMP_ULT, CondTy, Amt, NewBits);
24012401 auto IsZero = MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, CondTy, Amt, Zero);
24022402
2403 unsigned ResultRegs[2];
2403 Register ResultRegs[2];
24042404 switch (MI.getOpcode()) {
24052405 case TargetOpcode::G_SHL: {
24062406 // Short: ShAmt < NewBitSize
25552555 }
25562556 }
25572557
2558 void LegalizerHelper::multiplyRegisters(SmallVectorImpl &DstRegs,
2559 ArrayRef Src1Regs,
2560 ArrayRef Src2Regs,
2558 void LegalizerHelper::multiplyRegisters(SmallVectorImpl &DstRegs,
2559 ArrayRef Src1Regs,
2560 ArrayRef Src2Regs,
25612561 LLT NarrowTy) {
25622562 MachineIRBuilder &B = MIRBuilder;
25632563 unsigned SrcParts = Src1Regs.size();
25692569 DstRegs[DstIdx] = FactorSum;
25702570
25712571 unsigned CarrySumPrevDstIdx;
2572 SmallVector<unsigned, 4> Factors;
2572 SmallVector<Register, 4> Factors;
25732573
25742574 for (DstIdx = 1; DstIdx < DstParts; DstIdx++) {
25752575 // Collect low parts of muls for DstIdx.
26202620
26212621 LegalizerHelper::LegalizeResult
26222622 LegalizerHelper::narrowScalarMul(MachineInstr &MI, LLT NarrowTy) {
2623 unsigned DstReg = MI.getOperand(0).getReg();
2624 unsigned Src1 = MI.getOperand(1).getReg();
2625 unsigned Src2 = MI.getOperand(2).getReg();
2623 Register DstReg = MI.getOperand(0).getReg();
2624 Register Src1 = MI.getOperand(1).getReg();
2625 Register Src2 = MI.getOperand(2).getReg();
26262626
26272627 LLT Ty = MRI.getType(DstReg);
26282628 if (Ty.isVector())
26392639 bool IsMulHigh = MI.getOpcode() == TargetOpcode::G_UMULH;
26402640 unsigned DstTmpParts = NumDstParts * (IsMulHigh ? 2 : 1);
26412641
2642 SmallVector<unsigned, 2> Src1Parts, Src2Parts, DstTmpRegs;
2642 SmallVector<Register, 2> Src1Parts, Src2Parts, DstTmpRegs;
26432643 extractParts(Src1, NarrowTy, NumSrcParts, Src1Parts);
26442644 extractParts(Src2, NarrowTy, NumSrcParts, Src2Parts);
26452645 DstTmpRegs.resize(DstTmpParts);
26462646 multiplyRegisters(DstTmpRegs, Src1Parts, Src2Parts, NarrowTy);
26472647
26482648 // Take only high half of registers if this is high mul.
2649 ArrayRef<unsigned> DstRegs(
2649 ArrayRef<Register> DstRegs(
26502650 IsMulHigh ? &DstTmpRegs[DstTmpParts / 2] : &DstTmpRegs[0], NumDstParts);
26512651 MIRBuilder.buildMerge(DstReg, DstRegs);
26522652 MI.eraseFromParent();
26682668 return UnableToLegalize;
26692669 int NumParts = SizeOp1 / NarrowSize;
26702670
2671 SmallVector<unsigned, 2> SrcRegs, DstRegs;
2671 SmallVector<Register, 2> SrcRegs, DstRegs;
26722672 SmallVector Indexes;
26732673 extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);
26742674
27352735
27362736 int NumParts = SizeOp0 / NarrowSize;
27372737
2738 SmallVector<unsigned, 2> SrcRegs, DstRegs;
2738 SmallVector<Register, 2> SrcRegs, DstRegs;
27392739 SmallVector Indexes;
27402740 extractParts(MI.getOperand(1).getReg(), NarrowTy, NumParts, SrcRegs);
27412741
28012801
28022802 assert(MI.getNumOperands() == 3 && TypeIdx == 0);
28032803
2804 SmallVector DstRegs, DstLeftoverRegs;
2805 SmallVector Src0Regs, Src0LeftoverRegs;
2806 SmallVector<unsigned, 4> Src1Regs, Src1LeftoverRegs;
2804 SmallVector<Register, 4> DstRegs, DstLeftoverRegs;
2805 SmallVector Src0Regs, Src0LeftoverRegs;
2806 SmallVector Src1Regs, Src1LeftoverRegs;
28072807 LLT LeftoverTy;
28082808 if (!extractParts(MI.getOperand(1).getReg(), DstTy, NarrowTy, LeftoverTy,
28092809 Src0Regs, Src0LeftoverRegs))
28482848 unsigned DstReg = MI.getOperand(0).getReg();
28492849 LLT DstTy = MRI.getType(DstReg);
28502850
2851 SmallVector DstRegs, DstLeftoverRegs;
2852 SmallVector Src1Regs, Src1LeftoverRegs;
2853 SmallVector<unsigned, 4> Src2Regs, Src2LeftoverRegs;
2851 SmallVector<Register, 4> DstRegs, DstLeftoverRegs;
2852 SmallVector Src1Regs, Src1LeftoverRegs;
2853 SmallVector Src2Regs, Src2LeftoverRegs;
28542854 LLT LeftoverTy;
28552855 if (!extractParts(MI.getOperand(2).getReg(), DstTy, NarrowTy, LeftoverTy,
28562856 Src1Regs, Src1LeftoverRegs))
209209 }
210210
211211 Optional
212 MachineIRBuilder::materializeGEP(unsigned &Res, unsigned Op0,
212 MachineIRBuilder::materializeGEP(Register &Res, Register Op0,
213213 const LLT &ValueTy, uint64_t Value) {
214214 assert(Res == 0 && "Res is a result argument");
215215 assert(ValueTy.isScalar() && "invalid offset type");
505505 return Extract;
506506 }
507507
508 void MachineIRBuilder::buildSequence(unsigned Res, ArrayRef> Ops,
508 void MachineIRBuilder::buildSequence(Register Res, ArrayRef> Ops,
509509 ArrayRef Indices) {
510510 #ifndef NDEBUG
511511 assert(Ops.size() == Indices.size() && "incompatible args");
534534 return;
535535 }
536536
537 unsigned ResIn = getMRI()->createGenericVirtualRegister(ResTy);
537 Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
538538 buildUndef(ResIn);
539539
540540 for (unsigned i = 0; i < Ops.size(); ++i) {
541 unsigned ResOut = i + 1 == Ops.size()
541 Register ResOut = i + 1 == Ops.size()
542542 ? Res
543543 : getMRI()->createGenericVirtualRegister(ResTy);
544544 buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
551551 }
552552
553553 MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
554 ArrayRef<unsigned> Ops) {
554 ArrayRef<Register> Ops) {
555555 // Unfortunately to convert from ArrayRef to ArrayRef,
556556 // we need some temporary storage for the DstOp objects. Here we use a
557557 // sufficiently large SmallVector to not go through the heap.
571571 MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
572572 const SrcOp &Op) {
573573 unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
574 SmallVector<unsigned, 8> TmpVec;
574 SmallVector<Register, 8> TmpVec;
575575 for (unsigned I = 0; I != NumReg; ++I)
576576 TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
577577 return buildUnmerge(TmpVec, Op);
578578 }
579579
580 MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<unsigned> Res,
580 MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
581581 const SrcOp &Op) {
582582 // Unfortunately to convert from ArrayRef to ArrayRef,
583583 // we need some temporary storage for the DstOp objects. Here we use a
587587 }
588588
589589 MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
590 ArrayRef<unsigned> Ops) {
590 ArrayRef<Register> Ops) {
591591 // Unfortunately to convert from ArrayRef to ArrayRef,
592592 // we need some temporary storage for the DstOp objects. Here we use a
593593 // sufficiently large SmallVector to not go through the heap.
603603
604604 MachineInstrBuilder
605605 MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
606 ArrayRef<unsigned> Ops) {
606 ArrayRef<Register> Ops) {
607607 // Unfortunately to convert from ArrayRef to ArrayRef,
608608 // we need some temporary storage for the DstOp objects. Here we use a
609609 // sufficiently large SmallVector to not go through the heap.
612612 }
613613
614614 MachineInstrBuilder
615 MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<unsigned> Ops) {
615 MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
616616 // Unfortunately to convert from ArrayRef to ArrayRef,
617617 // we need some temporary storage for the DstOp objects. Here we use a
618618 // sufficiently large SmallVector to not go through the heap.
620620 return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
621621 }
622622
623 MachineInstrBuilder MachineIRBuilder::buildInsert(unsigned Res, unsigned Src,
624 unsigned Op, unsigned Index) {
623 MachineInstrBuilder MachineIRBuilder::buildInsert(Register Res, Register Src,
624 Register Op, unsigned Index) {
625625 assert(Index + getMRI()->getType(Op).getSizeInBits() <=
626626 getMRI()->getType(Res).getSizeInBits() &&
627627 "insertion past the end of a register");
639639 }
640640
641641 MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
642 ArrayRef<unsigned> ResultRegs,
642 ArrayRef<Register> ResultRegs,
643643 bool HasSideEffects) {
644644 auto MIB =
645645 buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
6969
7070 // If @MI is a DBG_VALUE with debug value described by a defined
7171 // register, returns the number of this register. In the other case, returns 0.
72 static unsigned isDbgValueDescribedByReg(const MachineInstr &MI) {
72 static Register isDbgValueDescribedByReg(const MachineInstr &MI) {
7373 assert(MI.isDebugValue() && "expected a DBG_VALUE");
7474 assert(MI.getNumOperands() == 4 && "malformed DBG_VALUE");
7575 // If location of variable is described using a register (directly
7676 // or indirectly), this register is always a first operand.
77 return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : 0;
77 return MI.getOperand(0).isReg() ? MI.getOperand(0).getReg() : Register();
7878 }
7979
8080 namespace {
341341 switch (MO.getType()) {
342342 case MachineOperand::MO_Register:
343343 // Register operands don't have target flags.
344 return hash_combine(MO.getType(), MO.getReg(), MO.getSubReg(), MO.isDef());
344 return hash_combine(MO.getType(), (unsigned)MO.getReg(), MO.getSubReg(), MO.isDef());
345345 case MachineOperand::MO_Immediate:
346346 return hash_combine(MO.getType(), MO.getTargetFlags(), MO.getImm());
347347 case MachineOperand::MO_CImmediate:
153153 /// createVirtualRegister - Create and return a new virtual register in the
154154 /// function with the specified register class.
155155 ///
156 unsigned
156 Register
157157 MachineRegisterInfo::createVirtualRegister(const TargetRegisterClass *RegClass,
158158 StringRef Name) {
159159 assert(RegClass && "Cannot create register without RegClass!");
168168 return Reg;
169169 }
170170
171 unsigned MachineRegisterInfo::cloneVirtualRegister(unsigned VReg,
171 Register MachineRegisterInfo::cloneVirtualRegister(Register VReg,
172172 StringRef Name) {
173173 unsigned Reg = createIncompleteVirtualRegister(Name);
174174 VRegInfo[Reg].first = VRegInfo[VReg].first;
183183 VRegToType[VReg] = Ty;
184184 }
185185
186 unsigned
186 Register
187187 MachineRegisterInfo::createGenericVirtualRegister(LLT Ty, StringRef Name) {
188188 // New virtual register number.
189189 unsigned Reg = createIncompleteVirtualRegister(Name);
28732873 if (!Instr.isFullCopy())
28742874 continue;
28752875 // Look for the other end of the copy.
2876 unsigned OtherReg = Instr.getOperand(0).getReg();
2876 Register OtherReg = Instr.getOperand(0).getReg();
28772877 if (OtherReg == Reg) {
28782878 OtherReg = Instr.getOperand(1).getReg();
28792879 if (OtherReg == Reg)
28802880 continue;
28812881 }
28822882 // Get the current assignment.
2883 unsigned OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg)
2883 Register OtherPhysReg = TargetRegisterInfo::isPhysicalRegister(OtherReg)
28842884 ? OtherReg
28852885 : VRM->getPhys(OtherReg);
28862886 // Push the collected information.
78787878
78797879 for (; NumRegs; --NumRegs, ++I) {
78807880 assert(I != RC->end() && "Ran out of registers to allocate!");
7881 auto R = (AssignedReg) ? *I : RegInfo.createVirtualRegister(RC);
7881 Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
78827882 Regs.push_back(R);
78837883 }
78847884
569569 for (unsigned i = 0, e = FuncInfo->ArgDbgValues.size(); i != e; ++i) {
570570 MachineInstr *MI = FuncInfo->ArgDbgValues[e-i-1];
571571 bool hasFI = MI->getOperand(0).isFI();
572 unsigned Reg =
572 Register Reg =
573573 hasFI ? TRI.getFrameRegister(*MF) : MI->getOperand(0).getReg();
574574 if (TargetRegisterInfo::isPhysicalRegister(Reg))
575575 EntryMBB->insert(EntryMBB->begin(), MI);
4141 }
4242
4343 void SwiftErrorValueTracking::setCurrentVReg(const MachineBasicBlock *MBB,
44 const Value *Val, unsigned VReg) {
44 const Value *Val, Register VReg) {
4545 VRegDefMap[std::make_pair(MBB, Val)] = VReg;
4646 }
4747
160160 auto UUseIt = VRegUpwardsUse.find(Key);
161161 auto VRegDefIt = VRegDefMap.find(Key);
162162 bool UpwardsUse = UUseIt != VRegUpwardsUse.end();
163 unsigned UUseVReg = UpwardsUse ? UUseIt->second : 0;
163 Register UUseVReg = UpwardsUse ? UUseIt->second : Register();
164164 bool DownwardDef = VRegDefIt != VRegDefMap.end();
165165 assert(!(UpwardsUse && !DownwardDef) &&
166166 "We can't have an upwards use but no downwards def");
237237 // destination virtual register number otherwise we generate a new one.
238238 auto &DL = MF->getDataLayout();
239239 auto const *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
240 unsigned PHIVReg =
240 Register PHIVReg =
241241 UpwardsUse ? UUseVReg : MF->getRegInfo().createVirtualRegister(RC);
242242 MachineInstrBuilder PHI =
243243 BuildMI(*MBB, MBB->getFirstNonPHI(), DLoc,
162162 assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
163163 "This only knows how to commute register operands so far");
164164
165 unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
166 unsigned Reg1 = MI.getOperand(Idx1).getReg();
167 unsigned Reg2 = MI.getOperand(Idx2).getReg();
165 Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
166 Register Reg1 = MI.getOperand(Idx1).getReg();
167 Register Reg2 = MI.getOperand(Idx2).getReg();
168168 unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
169169 unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
170170 unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
231231
232232 bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
233233 const Value *Val,
234 ArrayRef VRegs,
235 unsigned SwiftErrorVReg) const {
234 ArrayRef VRegs,
235 Register SwiftErrorVReg) const {
236236 auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
237237 assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
238238 "Return value without a vreg");
351351
352352 bool AArch64CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
353353 const Function &F,
354 ArrayRef<unsigned> VRegs) const {
354 ArrayRef<Register> VRegs) const {
355355 MachineFunction &MF = MIRBuilder.getMF();
356356 MachineBasicBlock &MBB = MIRBuilder.getMBB();
357357 MachineRegisterInfo &MRI = MF.getRegInfo();
426426 const MachineOperand &Callee,
427427 const ArgInfo &OrigRet,
428428 ArrayRef OrigArgs,
429 unsigned SwiftErrorVReg) const {
429 Register SwiftErrorVReg) const {
430430 MachineFunction &MF = MIRBuilder.getMF();
431431 const Function &F = MF.getFunction();
432432 MachineRegisterInfo &MRI = MF.getRegInfo();
494494 SplitArgs.clear();
495495
496496 SmallVector RegOffsets;
497 SmallVector<unsigned, 8> SplitRegs;
497 SmallVector<Register, 8> SplitRegs;
498498 splitToValueTypes(OrigRet, SplitArgs, DL, MRI, F.getCallingConv(),
499499 [&](unsigned Reg, uint64_t Offset) {
500500 RegOffsets.push_back(Offset);
3333 AArch64CallLowering(const AArch64TargetLowering &TLI);
3434
3535 bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
36 ArrayRef VRegs,
37 unsigned SwiftErrorVReg) const override;
36 ArrayRef VRegs,
37 Register SwiftErrorVReg) const override;
3838
3939 bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
40 ArrayRef<unsigned> VRegs) const override;
40 ArrayRef<Register> VRegs) const override;
4141
4242 bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
4343 const MachineOperand &Callee, const ArgInfo &OrigRet,
4444 ArrayRef OrigArgs,
45 unsigned SwiftErrorVReg) const override;
45 Register SwiftErrorVReg) const override;
4646
4747 bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
4848 const MachineOperand &Callee, const ArgInfo &OrigRet,
211211 struct LoadInfo {
212212 LoadInfo() = default;
213213
214 unsigned DestReg = 0;
215 unsigned BaseReg = 0;
214 Register DestReg;
215 Register BaseReg;
216216 int BaseRegIdx = -1;
217217 const MachineOperand *OffsetOpnd = nullptr;
218218 bool IsPrePost = false;
646646 return None;
647647
648648 LoadInfo LI;
649 LI.DestReg = DestRegIdx == -1 ? 0 : MI.getOperand(DestRegIdx).getReg();
649 LI.DestReg = DestRegIdx == -1 ? Register() : MI.getOperand(DestRegIdx).getReg();
650650 LI.BaseReg = BaseReg;
651651 LI.BaseRegIdx = BaseRegIdx;
652652 LI.OffsetOpnd = OffsetIdx == -1 ? nullptr : &MI.getOperand(OffsetIdx);
10171017 MovZ->addOperand(MF, MachineOperand::CreateImm(0));
10181018 constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);
10191019
1020 auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags, unsigned Offset,
1021 unsigned ForceDstReg) {
1022 unsigned DstReg = ForceDstReg
1020 auto BuildMovK = [&](Register SrcReg, unsigned char Flags, unsigned Offset,
1021 Register ForceDstReg) {
1022 Register DstReg = ForceDstReg
10231023 ? ForceDstReg
10241024 : MRI.createVirtualRegister(&AArch64::GPR64RegClass);
10251025 auto MovI = MIB.buildInstr(AArch64::MOVKXi).addDef(DstReg).addUse(SrcReg);
378378 bool IsCopy = MI->isCopy();
379379 bool IsMoveImm = MI->isMoveImmediate();
380380 if (IsCopy || IsMoveImm) {
381 MCPhysReg DefReg = MI->getOperand(0).getReg();
382 MCPhysReg SrcReg = IsCopy ? MI->getOperand(1).getReg() : 0;
381 Register DefReg = MI->getOperand(0).getReg();
382 Register SrcReg = IsCopy ? MI->getOperand(1).getReg() : Register();
383383 int64_t SrcImm = IsMoveImm ? MI->getOperand(1).getImm() : 0;
384384 if (!MRI->isReserved(DefReg) &&
385385 ((IsCopy && (SrcReg == AArch64::XZR || SrcReg == AArch64::WZR)) ||
278278 return false;
279279 }
280280
281 unsigned
281 Register
282282 AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
283283 const AArch64FrameLowering *TFI = getFrameLowering(MF);
284284 return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP;
112112 unsigned getBaseRegister() const;
113113
114114 // Debug information queries.
115 unsigned getFrameRegister(const MachineFunction &MF) const override;
115 Register getFrameRegister(const MachineFunction &MF) const override;
116116
117117 unsigned getRegPressureLimit(const TargetRegisterClass *RC,
118118 MachineFunction &MF) const override;
6868
6969 bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
7070 const Value *Val,
71 ArrayRef<unsigned> VRegs) const {
71 ArrayRef<Register> VRegs) const {
7272
7373 MachineFunction &MF = MIRBuilder.getMF();
7474 MachineRegisterInfo &MRI = MF.getRegInfo();
8080 return true;
8181 }
8282
83 unsigned VReg = VRegs[0];
83 Register VReg = VRegs[0];
8484
8585 const Function &F = MF.getFunction();
8686 auto &DL = F.getParent()->getDataLayout();
137137 void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &MIRBuilder,
138138 Type *ParamTy, uint64_t Offset,
139139 unsigned Align,
140 unsigned DstReg) const {
140 Register DstReg) const {
141141 MachineFunction &MF = MIRBuilder.getMF();
142142 const Function &F = MF.getFunction();
143143 const DataLayout &DL = F.getParent()->getDataLayout();
144144 PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
145145 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
146146 unsigned TypeSize = DL.getTypeStoreSize(ParamTy);
147 unsigned PtrReg = lowerParameterPtr(MIRBuilder, ParamTy, Offset);
147 Register PtrReg = lowerParameterPtr(MIRBuilder, ParamTy, Offset);
148148
149149 MachineMemOperand *MMO =
150150 MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad |
194194
195195 bool AMDGPUCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
196196 const Function &F,
197 ArrayRef<unsigned> VRegs) const {
197 ArrayRef<Register> VRegs) const {
198198 // AMDGPU_GS and AMDGP_HS are not supported yet.
199199 if (F.getCallingConv() == CallingConv::AMDGPU_GS ||
200200 F.getCallingConv() == CallingConv::AMDGPU_HS)
2626
2727 void lowerParameter(MachineIRBuilder &MIRBuilder, Type *ParamTy,
2828 uint64_t Offset, unsigned Align,
29 unsigned DstReg) const;
29 Register DstReg) const;
3030
3131 public:
3232 AMDGPUCallLowering(const AMDGPUTargetLowering &TLI);
3333
3434 bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
35 ArrayRef<unsigned> VRegs) const override;
35 ArrayRef<Register> VRegs) const override;
3636 bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
37 ArrayRef<unsigned> VRegs) const override;
37 ArrayRef<Register> VRegs) const override;
3838 static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg);
3939 static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg);
4040 };
790790 4,
791791 MinAlign(64, StructOffset));
792792
793 unsigned LoadResult = MRI.createGenericVirtualRegister(S32);
794 unsigned LoadAddr = AMDGPU::NoRegister;
793 Register LoadResult = MRI.createGenericVirtualRegister(S32);
794 Register LoadAddr;
795795
796796 MIRBuilder.materializeGEP(LoadAddr, QueuePtr, LLT::scalar(64), StructOffset);
797797 MIRBuilder.buildLoad(LoadResult, LoadAddr, *MMO);
805805
806806 MIRBuilder.setInstr(MI);
807807
808 unsigned Dst = MI.getOperand(0).getReg();
809 unsigned Src = MI.getOperand(1).getReg();
808 Register Dst = MI.getOperand(0).getReg();
809 Register Src = MI.getOperand(1).getReg();
810810
811811 LLT DstTy = MRI.getType(Dst);
812812 LLT SrcTy = MRI.getType(Src);
374374
375375 void AMDGPURegisterBankInfo::split64BitValueForMapping(
376376 MachineIRBuilder &B,
377 SmallVector<unsigned, 2> &Regs,
377 SmallVector<Register, 2> &Regs,
378378 LLT HalfTy,
379379 unsigned Reg) const {
380380 assert(HalfTy.getSizeInBits() == 32);
395395 }
396396
397397 /// Replace the current type each register in \p Regs has with \p NewTy
398 static void setRegsToType(MachineRegisterInfo &MRI, ArrayRef<unsigned> Regs,
398 static void setRegsToType(MachineRegisterInfo &MRI, ArrayRef<Register> Regs,
399399 LLT NewTy) {
400400 for (unsigned Reg : Regs) {
401401 assert(MRI.getType(Reg).getSizeInBits() == NewTy.getSizeInBits());
444444
445445 // Use a set to avoid extra readfirstlanes in the case where multiple operands
446446 // are the same register.
447 SmallSet<unsigned, 4> SGPROperandRegs;
447 SmallSet<Register, 4> SGPROperandRegs;
448448 for (unsigned Op : OpIndices) {
449449 assert(MI.getOperand(Op).isUse());
450450 unsigned Reg = MI.getOperand(Op).getReg();
458458 return;
459459
460460 MachineIRBuilder B(MI);
461 SmallVector ResultRegs;
462 SmallVector InitResultRegs;
463 SmallVector<unsigned, 4> PhiRegs;
461 SmallVector<Register, 4> ResultRegs;
462 SmallVector InitResultRegs;
463 SmallVector PhiRegs;
464464 for (MachineOperand &Def : MI.defs()) {
465465 LLT ResTy = MRI.getType(Def.getReg());
466466 const RegisterBank *DefBank = getRegBank(Def.getReg(), MRI, *TRI);
574574 }
575575 } else {
576576 LLT S32 = LLT::scalar(32);
577 SmallVector<unsigned, 8> ReadlanePieces;
577 SmallVector<Register, 8> ReadlanePieces;
578578
579579 // The compares can be done as 64-bit, but the extract needs to be done
580580 // in 32-bit pieces.
731731
732732 LLT HalfTy = getHalfSizedType(DstTy);
733733
734 SmallVector DefRegs(OpdMapper.getVRegs(0));
735 SmallVector Src0Regs(OpdMapper.getVRegs(1));
736 SmallVector Src1Regs(OpdMapper.getVRegs(2));
737 SmallVector<unsigned, 2> Src2Regs(OpdMapper.getVRegs(3));
734 SmallVector<Register, 2> DefRegs(OpdMapper.getVRegs(0));
735 SmallVector Src0Regs(OpdMapper.getVRegs(1));
736 SmallVector Src1Regs(OpdMapper.getVRegs(2));
737 SmallVector Src2Regs(OpdMapper.getVRegs(3));
738738
739739 // All inputs are SGPRs, nothing special to do.
740740 if (DefRegs.empty()) {
780780 break;
781781
782782 LLT HalfTy = getHalfSizedType(DstTy);
783 SmallVector DefRegs(OpdMapper.getVRegs(0));
784 SmallVector Src0Regs(OpdMapper.getVRegs(1));
785 SmallVector<unsigned, 2> Src1Regs(OpdMapper.getVRegs(2));
783 SmallVector<Register, 2> DefRegs(OpdMapper.getVRegs(0));
784 SmallVector Src0Regs(OpdMapper.getVRegs(1));
785 SmallVector Src1Regs(OpdMapper.getVRegs(2));
786786
787787 // All inputs are SGPRs, nothing special to do.
788788 if (DefRegs.empty()) {
1212 #ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUREGISTERBANKINFO_H
1313 #define LLVM_LIB_TARGET_AMDGPU_AMDGPUREGISTERBANKINFO_H
1414
15 #include "llvm/CodeGen/Register.h"
1516 #include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
1617
1718 #define GET_REGBANK_DECLARATIONS
5354 /// Split 64-bit value \p Reg into two 32-bit halves and populate them into \p
5455 /// Regs. This appropriately sets the regbank of the new registers.
5556 void split64BitValueForMapping(MachineIRBuilder &B,
56 SmallVector<unsigned, 2> &Regs,
57 SmallVector<Register, 2> &Regs,
5758 LLT HalfTy,
5859 unsigned Reg) const;
5960
8181 }
8282 }
8383
84 unsigned SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
84 Register SIRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
8585 const SIFrameLowering *TFI =
8686 MF.getSubtarget().getFrameLowering();
8787 const SIMachineFunctionInfo *FuncInfo = MF.getInfo();
185185 // Does MII and MIJ share the same pred_sel ?
186186 int OpI = TII->getOperandIdx(MII->getOpcode(), R600::OpName::pred_sel),
187187 OpJ = TII->getOperandIdx(MIJ->getOpcode(), R600::OpName::pred_sel);
188 unsigned PredI = (OpI > -1)?MII->getOperand(OpI).getReg():0,
189 PredJ = (OpJ > -1)?MIJ->getOperand(OpJ).getReg():0;
188 Register PredI = (OpI > -1)?MII->getOperand(OpI).getReg() : Register(),
189 PredJ = (OpJ > -1)?MIJ->getOperand(OpJ).getReg() : Register();
190190 if (PredI != PredJ)
191191 return false;
192192 if (SUJ->isSucc(SUI)) {
6666 return &CalleeSavedReg;
6767 }
6868
69 unsigned R600RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
69 Register R600RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
7070 return R600::NoRegister;
7171 }
7272
2525
2626 BitVector getReservedRegs(const MachineFunction &MF) const override;
2727 const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
28 unsigned getFrameRegister(const MachineFunction &MF) const override;
28 Register getFrameRegister(const MachineFunction &MF) const override;
2929
3030 /// get the HW encoding for a register's channel.
3131 unsigned getHWRegChan(unsigned reg) const;
184184 assert(SaveExec.getSubReg() == AMDGPU::NoSubRegister &&
185185 Cond.getSubReg() == AMDGPU::NoSubRegister);
186186
187 unsigned SaveExecReg = SaveExec.getReg();
187 Register SaveExecReg = SaveExec.getReg();
188188
189189 MachineOperand &ImpDefSCC = MI.getOperand(4);
190190 assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());
196196
197197 // Add an implicit def of exec to discourage scheduling VALU after this which
198198 // will interfere with trying to form s_and_saveexec_b64 later.
199 unsigned CopyReg = SimpleIf ? SaveExecReg
199 Register CopyReg = SimpleIf ? SaveExecReg
200200 : MRI->createVirtualRegister(BoolRC);
201201 MachineInstr *CopyExec =
202202 BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg)
265265 MachineBasicBlock &MBB = *MI.getParent();
266266 const DebugLoc &DL = MI.getDebugLoc();
267267
268 unsigned DstReg = MI.getOperand(0).getReg();
268 Register DstReg = MI.getOperand(0).getReg();
269269 assert(MI.getOperand(0).getSubReg() == AMDGPU::NoSubRegister);
270270
271271 bool ExecModified = MI.getOperand(3).getImm() != 0;
274274 // We are running before TwoAddressInstructions, and si_else's operands are
275275 // tied. In order to correctly tie the registers, split this into a copy of
276276 // the src like it does.
277 unsigned CopyReg = MRI->createVirtualRegister(BoolRC);
277 Register CopyReg = MRI->createVirtualRegister(BoolRC);
278278 MachineInstr *CopyExec =
279279 BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), CopyReg)
280280 .add(MI.getOperand(1)); // Saved EXEC
281281
282282 // This must be inserted before phis and any spill code inserted before the
283283 // else.
284 unsigned SaveReg = ExecModified ?
284 Register SaveReg = ExecModified ?
285285 MRI->createVirtualRegister(BoolRC) : DstReg;
286286 MachineInstr *OrSaveExec =
287287 BuildMI(MBB, Start, DL, TII->get(OrSaveExecOpc), SaveReg)
720720 if (SpillToSMEM && OnlyToVGPR)
721721 return false;
722722
723 unsigned FrameReg = getFrameRegister(*MF);
723 Register FrameReg = getFrameRegister(*MF);
724724
725725 assert(SpillToVGPR || (SuperReg != MFI->getStackPtrOffsetReg() &&
726726 SuperReg != MFI->getFrameOffsetReg() &&
913913 unsigned EltSize = 4;
914914 unsigned ScalarLoadOp;
915915
916 unsigned FrameReg = getFrameRegister(*MF);
916 Register FrameReg = getFrameRegister(*MF);
917917
918918 const TargetRegisterClass *RC = getPhysRegClass(SuperReg);
919919 if (SpillToSMEM && isSGPRClass(RC)) {
10621062 MachineOperand &FIOp = MI->getOperand(FIOperandNum);
10631063 int Index = MI->getOperand(FIOperandNum).getIndex();
10641064
1065 unsigned FrameReg = getFrameRegister(*MF);
1065 Register FrameReg = getFrameRegister(*MF);
10661066
10671067 switch (MI->getOpcode()) {
10681068 // SGPR register spill
11531153 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
11541154
11551155 bool IsCopy = MI->getOpcode() == AMDGPU::V_MOV_B32_e32;
1156 unsigned ResultReg = IsCopy ?
1156 Register ResultReg = IsCopy ?
11571157 MI->getOperand(0).getReg() :
11581158 MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
11591159
6969 return 100;
7070 }
7171
72 unsigned getFrameRegister(const MachineFunction &MF) const override;
72 Register getFrameRegister(const MachineFunction &MF) const override;
7373
7474 bool canRealignStack(const MachineFunction &MF) const override;
7575 bool requiresRegisterScavenging(const MachineFunction &Fn) const override;
423423 bool IsStore = Ldst->mayStore();
424424 bool IsLoad = Ldst->mayLoad();
425425
426 unsigned ValReg = IsLoad ? Ldst->getOperand(0).getReg() : 0;
426 Register ValReg = IsLoad ? Ldst->getOperand(0).getReg() : Register();
427427 for (; MI != ME && MI != End; ++MI) {
428428 if (MI->isDebugValue())
429429 continue;
185185
186186 // Special handling of DBG_VALUE instructions.
187187 if (MI.isDebugValue()) {
188 unsigned FrameReg = getFrameRegister(MF);
188 Register FrameReg = getFrameRegister(MF);
189189 MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
190190 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
191191 return;
218218 ObjSize, RS, SPAdj);
219219 }
220220
221 unsigned ARCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
221 Register ARCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
222222 const ARCFrameLowering *TFI = getFrameLowering(MF);
223223 return TFI->hasFP(MF) ? ARC::FP : ARC::SP;
224224 }
4545 CallingConv::ID CC) const override;
4646
4747 // Debug information queries.
48 unsigned getFrameRegister(const MachineFunction &MF) const override;
48 Register getFrameRegister(const MachineFunction &MF) const override;
4949
5050 //! Return whether to emit frame moves
5151 static bool needsFrameMoves(const MachineFunction &MF);
425425 || needsStackRealignment(MF);
426426 }
427427
428 unsigned
428 Register
429429 ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
430430 const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
431431 const ARMFrameLowering *TFI = getFrameLowering(MF);
785785 int PIdx = MI.findFirstPredOperandIdx();
786786 ARMCC::CondCodes Pred = (PIdx == -1)
787787 ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
788 unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
788 Register PredReg = (PIdx == -1) ? Register() : MI.getOperand(PIdx+1).getReg();
789789 if (Offset == 0)
790790 // Must be addrmode4/6.
791791 MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false, false, false);
172172 bool cannotEliminateFrame(const MachineFunction &MF) const;
173173
174174 // Debug information queries.
175 unsigned getFrameRegister(const MachineFunction &MF) const override;
175 Register getFrameRegister(const MachineFunction &MF) const override;
176176 unsigned getBaseRegister() const { return BasePtr; }
177177
178178 bool isLowRegister(unsigned Reg) const;
150150 assert(VA.isRegLoc() && "Value should be in reg");
151151 assert(NextVA.isRegLoc() && "Value should be in reg");
152152
153 unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
153 Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
154154 MRI.createGenericVirtualRegister(LLT::scalar(32))};
155155 MIRBuilder.buildUnmerge(NewRegs, Arg.Reg);
156156
231231 /// Lower the return value for the already existing \p Ret. This assumes that
232232 /// \p MIRBuilder's insertion point is correct.
233233 bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
234 const Value *Val, ArrayRef<unsigned> VRegs,
234 const Value *Val, ArrayRef<Register> VRegs,
235235 MachineInstrBuilder &Ret) const {
236236 if (!Val)
237237 // Nothing to do here.
256256 ArgInfo CurArgInfo(VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx));
257257 setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
258258
259 SmallVector<unsigned, 4> Regs;
259 SmallVector<Register, 4> Regs;
260260 splitToValueTypes(CurArgInfo, SplitVTs, MF,
261 [&](unsigned Reg) { Regs.push_back(Reg); });
261 [&](Register Reg) { Regs.push_back(Reg); });
262262 if (Regs.size() > 1)
263263 MIRBuilder.buildUnmerge(Regs, VRegs[i]);
264264 }
272272
273273 bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
274274 const Value *Val,
275 ArrayRef<unsigned> VRegs) const {
275 ArrayRef<Register> VRegs) const {
276276 assert(!Val == VRegs.empty() && "Return value without a vreg");
277277
278278 auto const &ST = MIRBuilder.getMF().getSubtarget();
385385 assert(VA.isRegLoc() && "Value should be in reg");
386386 assert(NextVA.isRegLoc() && "Value should be in reg");
387387
388 unsigned NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
388 Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
389389 MRI.createGenericVirtualRegister(LLT::scalar(32))};
390390
391391 assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
420420
421421 bool ARMCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
422422 const Function &F,
423 ArrayRef<unsigned> VRegs) const {
423 ArrayRef<Register> VRegs) const {
424424 auto &TLI = *getTLI();
425425 auto Subtarget = TLI.getSubtarget();
426426
452452 AssignFn);
453453
454454 SmallVector<ArgInfo, 8> ArgInfos;
455 SmallVector<unsigned, 4> SplitRegs;
455 SmallVector<Register, 4> SplitRegs;
456456 unsigned Idx = 0;
457457 for (auto &Arg : F.args()) {
458458 ArgInfo AInfo(VRegs[Idx], Arg.getType());
461461 SplitRegs.clear();
462462
463463 splitToValueTypes(AInfo, ArgInfos, MF,
464 [&](unsigned Reg) { SplitRegs.push_back(Reg); });
464 [&](Register Reg) { SplitRegs.push_back(Reg); });
465465
466466 if (!SplitRegs.empty())
467467 MIRBuilder.buildMerge(VRegs[Idx], SplitRegs);
567567 if (Arg.Flags.isByVal())
568568 return false;
569569
570 SmallVector<unsigned, 8> Regs;
570 SmallVector<Register, 8> Regs;
571571 splitToValueTypes(Arg, ArgInfos, MF,
572572 [&](unsigned Reg) { Regs.push_back(Reg); });
573573
588588 return false;
589589
590590 ArgInfos.clear();
591 SmallVector<unsigned, 8> SplitRegs;
591 SmallVector<Register, 8> SplitRegs;
592592 splitToValueTypes(OrigRet, ArgInfos, MF,
593 [&](unsigned Reg) { SplitRegs.push_back(Reg); });
593 [&](Register Reg) { SplitRegs.push_back(Reg); });
594594
595595 auto RetAssignFn = TLI.CCAssignFnForReturn(CallConv, IsVarArg);
596596 CallReturnHandler RetHandler(MIRBuilder, MRI, MIB, RetAssignFn);
3232 ARMCallLowering(const ARMTargetLowering &TLI);
3333
3434 bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
35 ArrayRef<unsigned> VRegs) const override;
35 ArrayRef<Register> VRegs) const override;
3636
3737 bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
38 ArrayRef<unsigned> VRegs) const override;
38 ArrayRef<Register> VRegs) const override;
3939
4040 bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
4141 const MachineOperand &Callee, const ArgInfo &OrigRet,
4343
4444 private:
4545 bool lowerReturnVal(MachineIRBuilder &MIRBuilder, const Value *Val,
46 ArrayRef<unsigned> VRegs,
46 ArrayRef<Register> VRegs,
4747 MachineInstrBuilder &Ret) const;
4848
4949 using SplitArgTy = std::function;
120120 }
121121 }
122122
123 unsigned BPFRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
123 Register BPFRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
124124 return BPF::R10;
125125 }
3131 unsigned FIOperandNum,
3232 RegScavenger *RS = nullptr) const override;
3333
34 unsigned getFrameRegister(const MachineFunction &MF) const override;
34 Register getFrameRegister(const MachineFunction &MF) const override;
3535 };
3636 }
3737
253253 MI.isMetaInstruction();
254254 }
255255
256 static unsigned UseReg(const MachineOperand& MO) {
257 return MO.isReg() ? MO.getReg() : 0;
256 static Register UseReg(const MachineOperand& MO) {
257 return MO.isReg() ? MO.getReg() : Register();
258258 }
259259
260260 /// isSafeToMoveTogether - Returns true if it is safe to move I1 next to I2 such
302302 std::advance(It2, MaxX);
303303 MachineInstr &Def1 = *It1, &Def2 = *It2;
304304 MachineOperand *Src1 = &Def1.getOperand(2), *Src2 = &Def2.getOperand(2);
305 unsigned SR1 = Src1->isReg() ? Src1->getReg() : 0;
306 unsigned SR2 = Src2->isReg() ? Src2->getReg() : 0;
305 Register SR1 = Src1->isReg() ? Src1->getReg() : Register();
306 Register SR2 = Src2->isReg() ? Src2->getReg() : Register();
307307 bool Failure = false, CanUp = true, CanDown = true;
308308 for (unsigned X = MinX+1; X < MaxX; X++) {
309309 const DefUseInfo &DU = DUM.lookup(X);
5050
5151 RegisterSubReg(unsigned r = 0, unsigned s = 0) : R(r), S(s) {}
5252 RegisterSubReg(const MachineOperand &MO) : R(MO.getReg()), S(MO.getSubReg()) {}
53 RegisterSubReg(const Register &Reg) : R(Reg), S(0) {}
5354
5455 bool operator== (const RegisterSubReg &Reg) const {
5556 return R == Reg.R && S == Reg.S;
285285 }
286286
287287
288 unsigned HexagonRegisterInfo::getFrameRegister(const MachineFunction
288 Register HexagonRegisterInfo::getFrameRegister(const MachineFunction
289289 &MF) const {
290290 const HexagonFrameLowering *TFI = getFrameLowering(MF);
291291 if (TFI->hasFP(MF))
6565
6666 // Debug information queries.
6767 unsigned getRARegister() const;
68 unsigned getFrameRegister(const MachineFunction &MF) const override;
68 Register getFrameRegister(const MachineFunction &MF) const override;
6969 unsigned getFrameRegister() const;
7070 unsigned getStackRegister() const;
7171
257257
258258 unsigned LanaiRegisterInfo::getRARegister() const { return Lanai::RCA; }
259259
260 unsigned
260 Register
261261 LanaiRegisterInfo::getFrameRegister(const MachineFunction & /*MF*/) const {
262262 return Lanai::FP;
263263 }
264264
265 unsigned LanaiRegisterInfo::getBaseRegister() const { return Lanai::R14; }
265 Register LanaiRegisterInfo::getBaseRegister() const { return Lanai::R14; }
266266
267267 const uint32_t *
268268 LanaiRegisterInfo::getCallPreservedMask(const MachineFunction & /*MF*/,
4141
4242 // Debug information queries.
4343 unsigned getRARegister() const;
44 unsigned getFrameRegister(const MachineFunction &MF) const override;
45 unsigned getBaseRegister() const;
44 Register getFrameRegister(const MachineFunction &MF) const override;
45 Register getBaseRegister() const;
4646 bool hasBasePointer(const MachineFunction &MF) const;
4747
4848 int getDwarfRegNum(unsigned RegNum, bool IsEH) const;
153153 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
154154 }
155155
156 unsigned MSP430RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
156 Register MSP430RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
157157 const MSP430FrameLowering *TFI = getFrameLowering(MF);
158158 return TFI->hasFP(MF) ? MSP430::FP : MSP430::SP;
159159 }
3636 RegScavenger *RS = nullptr) const override;
3737
3838 // Debug information queries.
39 unsigned getFrameRegister(const MachineFunction &MF) const override;
39 Register getFrameRegister(const MachineFunction &MF) const override;
4040 };
4141
4242 } // end namespace llvm
2323 MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
2424 : CallLowering(&TLI) {}
2525
26 bool MipsCallLowering::MipsHandler::assign(unsigned VReg, const CCValAssign &VA,
26 bool MipsCallLowering::MipsHandler::assign(Register VReg, const CCValAssign &VA,
2727 const EVT &VT) {
2828 if (VA.isRegLoc()) {
2929 assignValueToReg(VReg, VA, VT);
3535 return true;
3636 }
3737
38 bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<unsigned> VRegs,
38 bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<Register> VRegs,
3939 ArrayRef<CCValAssign> ArgLocs,
4040 unsigned ArgLocsStartIndex,
4141 const EVT &VT) {
4646 }
4747
4848 void MipsCallLowering::MipsHandler::setLeastSignificantFirst(
49 SmallVectorImpl<unsigned> &VRegs) {
49 SmallVectorImpl<Register> &VRegs) {
5050 if (!MIRBuilder.getMF().getDataLayout().isLittleEndian())
5151 std::reverse(VRegs.begin(), VRegs.end());
5252 }
5353
5454 bool MipsCallLowering::MipsHandler::handle(
5555 ArrayRef<CCValAssign> ArgLocs, ArrayRef<ArgInfo> Args) {
56 SmallVector<unsigned, 4> VRegs;
56 SmallVector<Register, 4> VRegs;
5757 unsigned SplitLength;
5858 const Function &F = MIRBuilder.getMF().getFunction();
5959 const DataLayout &DL = F.getParent()->getDataLayout();
8989 : MipsHandler(MIRBuilder, MRI) {}
9090
9191 private:
92 void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
92 void assignValueToReg(Register ValVReg, const CCValAssign &VA,
9393 const EVT &VT) override;
9494
9595 unsigned getStackAddress(const CCValAssign &VA,
9696 MachineMemOperand *&MMO) override;
9797
98 void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;
99
100 bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
98 void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;
99
100 bool handleSplit(SmallVectorImpl<Register> &VRegs,
101101 ArrayRef ArgLocs, unsigned ArgLocsStartIndex,
102 unsigned ArgsReg, const EVT &VT) override;
102 Register ArgsReg, const EVT &VT) override;
103103
104104 virtual void markPhysRegUsed(unsigned PhysReg) {
105105 MIRBuilder.getMBB().addLiveIn(PhysReg);
128128
129129 } // end anonymous namespace
130130
131 void IncomingValueHandler::assignValueToReg(unsigned ValVReg,
131 void IncomingValueHandler::assignValueToReg(Register ValVReg,
132132 const CCValAssign &VA,
133133 const EVT &VT) {
134134 const MipsSubtarget &STI =
193193 return AddrReg;
194194 }
195195
196 void IncomingValueHandler::assignValueToAddress(unsigned ValVReg,
196 void IncomingValueHandler::assignValueToAddress(Register ValVReg,
197197 const CCValAssign &VA) {
198198 if (VA.getLocInfo() == CCValAssign::SExt ||
199199 VA.getLocInfo() == CCValAssign::ZExt ||
200200 VA.getLocInfo() == CCValAssign::AExt) {
201 unsigned LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
201 Register LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32));
202202 buildLoad(LoadReg, VA);
203203 MIRBuilder.buildTrunc(ValVReg, LoadReg);
204204 } else
205205 buildLoad(ValVReg, VA);
206206 }
207207
208 bool IncomingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
208 bool IncomingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
209209 ArrayRef<CCValAssign> ArgLocs,
210210 unsigned ArgLocsStartIndex,
211 unsigned ArgsReg, const EVT &VT) {
211 Register ArgsReg, const EVT &VT) {
212212 if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
213213 return false;
214214 setLeastSignificantFirst(VRegs);
224224 : MipsHandler(MIRBuilder, MRI), MIB(MIB) {}
225225
226226 private:
227 void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
227 void assignValueToReg(Register ValVReg, const CCValAssign &VA,
228228 const EVT &VT) override;
229229
230230 unsigned getStackAddress(const CCValAssign &VA,
231231 MachineMemOperand *&MMO) override;
232232
233 void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override;
234
235 bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
233 void assignValueToAddress(Register ValVReg, const CCValAssign &VA) override;
234
235 bool handleSplit(SmallVectorImpl<Register> &VRegs,
236236 ArrayRef ArgLocs, unsigned ArgLocsStartIndex,
237 unsigned ArgsReg, const EVT &VT) override;
238
239 unsigned extendRegister(unsigned ValReg, const CCValAssign &VA);
237 Register ArgsReg, const EVT &VT) override;
238
239 unsigned extendRegister(Register ValReg, const CCValAssign &VA);
240240
241241 MachineInstrBuilder &MIB;
242242 };
243243 } // end anonymous namespace
244244
245 void OutgoingValueHandler::assignValueToReg(unsigned ValVReg,
245 void OutgoingValueHandler::assignValueToReg(Register ValVReg,
246246 const CCValAssign &VA,
247247 const EVT &VT) {
248 unsigned PhysReg = VA.getLocReg();
248 Register PhysReg = VA.getLocReg();
249249 const MipsSubtarget &STI =
250250 static_cast<const MipsSubtarget &>(MIRBuilder.getMF().getSubtarget());
251251
286286
287287 LLT p0 = LLT::pointer(0, 32);
288288 LLT s32 = LLT::scalar(32);
289 unsigned SPReg = MRI.createGenericVirtualRegister(p0);
289 Register SPReg = MRI.createGenericVirtualRegister(p0);
290290 MIRBuilder.buildCopy(SPReg, Mips::SP);
291291
292 unsigned OffsetReg = MRI.createGenericVirtualRegister(s32);
292 Register OffsetReg = MRI.createGenericVirtualRegister(s32);
293293 unsigned Offset = VA.getLocMemOffset();
294294 MIRBuilder.buildConstant(OffsetReg, Offset);
295295
296 unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
296 Register AddrReg = MRI.createGenericVirtualRegister(p0);
297297 MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
298298
299299 MachinePointerInfo MPO =
305305 return AddrReg;
306306 }
307307
308 void OutgoingValueHandler::assignValueToAddress(unsigned ValVReg,
308 void OutgoingValueHandler::assignValueToAddress(Register ValVReg,
309309 const CCValAssign &VA) {
310310 MachineMemOperand *MMO;
311 unsigned Addr = getStackAddress(VA, MMO);
311 Register Addr = getStackAddress(VA, MMO);
312312 unsigned ExtReg = extendRegister(ValVReg, VA);
313313 MIRBuilder.buildStore(ExtReg, Addr, *MMO);
314314 }
315315
316 unsigned OutgoingValueHandler::extendRegister(unsigned ValReg,
316 unsigned OutgoingValueHandler::extendRegister(Register ValReg,
317317 const CCValAssign &VA) {
318318 LLT LocTy{VA.getLocVT()};
319319 switch (VA.getLocInfo()) {
320320 case CCValAssign::SExt: {
321 unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
321 Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
322322 MIRBuilder.buildSExt(ExtReg, ValReg);
323323 return ExtReg;
324324 }
325325 case CCValAssign::ZExt: {
326 unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
326 Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
327327 MIRBuilder.buildZExt(ExtReg, ValReg);
328328 return ExtReg;
329329 }
330330 case CCValAssign::AExt: {
331 unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy);
331 Register ExtReg = MRI.createGenericVirtualRegister(LocTy);
332332 MIRBuilder.buildAnyExt(ExtReg, ValReg);
333333 return ExtReg;
334334 }
341341 llvm_unreachable("unable to extend register");
342342 }
343343
344 bool OutgoingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs,
344 bool OutgoingValueHandler::handleSplit(SmallVectorImpl<Register> &VRegs,
345345 ArrayRef<CCValAssign> ArgLocs,
346346 unsigned ArgLocsStartIndex,
347 unsigned ArgsReg, const EVT &VT) {
347 Register ArgsReg, const EVT &VT) {
348348 MIRBuilder.buildUnmerge(VRegs, ArgsReg);
349349 setLeastSignificantFirst(VRegs);
350350 if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex, VT))
395395
396396 bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
397397 const Value *Val,
398 ArrayRef<unsigned> VRegs) const {
398 ArrayRef<Register> VRegs) const {
399399
400400 MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA);
401401
443443
444444 bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
445445 const Function &F,
446 ArrayRef<unsigned> VRegs) const {
446 ArrayRef<Register> VRegs) const {
447447
448448 // Quick exit if there aren't any args.
449449 if (F.arg_empty())
3333 ArrayRef Args);
3434
3535 protected:
36 bool assignVRegs(ArrayRef<unsigned> VRegs, ArrayRef<CCValAssign> ArgLocs,
36 bool assignVRegs(ArrayRef<Register> VRegs, ArrayRef<CCValAssign> ArgLocs,
3737 unsigned ArgLocsStartIndex, const EVT &VT);
3838
39 void setLeastSignificantFirst(SmallVectorImpl<unsigned> &VRegs);
39 void setLeastSignificantFirst(SmallVectorImpl<Register> &VRegs);
4040
4141 MachineIRBuilder &MIRBuilder;
4242 MachineRegisterInfo &MRI;
4343
4444 private:
45 bool assign(unsigned VReg, const CCValAssign &VA, const EVT &VT);
45 bool assign(Register VReg, const CCValAssign &VA, const EVT &VT);
4646
4747 virtual unsigned getStackAddress(const CCValAssign &VA,
4848 MachineMemOperand *&MMO) = 0;
4949
50 virtual void assignValueToReg(unsigned ValVReg, const CCValAssign &VA,
50 virtual void assignValueToReg(Register ValVReg, const CCValAssign &VA,
5151 const EVT &VT) = 0;
5252
53 virtual void assignValueToAddress(unsigned ValVReg,
53 virtual void assignValueToAddress(Register ValVReg,
5454 const CCValAssign &VA) = 0;
5555
56 virtual bool handleSplit(SmallVectorImpl<unsigned> &VRegs,
56 virtual bool handleSplit(SmallVectorImpl<Register> &VRegs,
5757 ArrayRef<CCValAssign> ArgLocs,
58 unsigned ArgLocsStartIndex, unsigned ArgsReg,
58 unsigned ArgLocsStartIndex, Register ArgsReg,
5959 const EVT &VT) = 0;
6060 };
6161
6262 MipsCallLowering(const MipsTargetLowering &TLI);
6363
6464 bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
65 ArrayRef<unsigned> VRegs) const override;
65 ArrayRef<Register> VRegs) const override;
6666
6767 bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
68 ArrayRef<unsigned> VRegs) const override;
68 ArrayRef<Register> VRegs) const override;
6969
7070 bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
7171 const MachineOperand &Callee, const ArgInfo &OrigRet,
276276 eliminateFI(MI, FIOperandNum, FrameIndex, stackSize, spOffset);
277277 }
278278
279 unsigned MipsRegisterInfo::
279 Register MipsRegisterInfo::
280280 getFrameRegister(const MachineFunction &MF) const {
281281 const MipsSubtarget &Subtarget = MF.getSubtarget();
282282 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
6868 bool canRealignStack(const MachineFunction &MF) const override;
6969
7070 /// Debug information queries.
71 unsigned getFrameRegister(const MachineFunction &MF) const override;
71 Register getFrameRegister(const MachineFunction &MF) const override;
7272
7373 /// Return GPR register class.
7474 virtual const TargetRegisterClass *intRegClass(unsigned Size) const = 0;
37623762
37633763 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
37643764 DebugLoc DL = MI.getDebugLoc();
3765 unsigned Fd = MI.getOperand(0).getReg();
3766 unsigned Ws = MI.getOperand(1).getReg();
3765 Register Fd = MI.getOperand(0).getReg();
3766 Register Ws = MI.getOperand(1).getReg();
37673767
37683768 MachineRegisterInfo &RegInfo = BB->getParent()->getRegInfo();
37693769 const TargetRegisterClass *GPRRC =
37713771 unsigned MTC1Opc = IsFGR64onMips64
37723772 ? Mips::DMTC1
37733773 : (IsFGR64onMips32 ? Mips::MTC1_D64 : Mips::MTC1);
3774 unsigned COPYOpc = IsFGR64onMips64 ? Mips::COPY_S_D : Mips::COPY_S_W;
3775
3776 unsigned Wtemp = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
3777 unsigned WPHI = Wtemp;
3774 Register COPYOpc = IsFGR64onMips64 ? Mips::COPY_S_D : Mips::COPY_S_W;
3775
3776 Register Wtemp = RegInfo.createVirtualRegister(&Mips::MSA128WRegClass);
3777 Register WPHI = Wtemp;
37783778
37793779 BuildMI(*BB, MI, DL, TII->get(Mips::FEXUPR_W), Wtemp).addReg(Ws);
37803780 if (IsFGR64) {
37833783 }
37843784
37853785 // Perform the safety regclass copy mentioned above.
3786 unsigned Rtemp = RegInfo.createVirtualRegister(GPRRC);
3787 unsigned FPRPHI = IsFGR64onMips32
3786 Register Rtemp = RegInfo.createVirtualRegister(GPRRC);
3787 Register FPRPHI = IsFGR64onMips32
37883788 ? RegInfo.createVirtualRegister(&Mips::FGR64RegClass)
37893789 : Fd;
37903790 BuildMI(*BB, MI, DL, TII->get(COPYOpc), Rtemp).addReg(WPHI).addImm(0);
37913791 BuildMI(*BB, MI, DL, TII->get(MTC1Opc), FPRPHI).addReg(Rtemp);
37923792
37933793 if (IsFGR64onMips32) {
3794 unsigned Rtemp2 = RegInfo.createVirtualRegister(GPRRC);
3794 Register Rtemp2 = RegInfo.createVirtualRegister(GPRRC);
37953795 BuildMI(*BB, MI, DL, TII->get(Mips::COPY_S_W), Rtemp2)
37963796 .addReg(WPHI)
37973797 .addImm(1);
125125 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
126126 }
127127
128 unsigned NVPTXRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
128 Register NVPTXRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
129129 return NVPTX::VRFrame;
130130 }
4141 unsigned FIOperandNum,
4242 RegScavenger *RS = nullptr) const override;
4343
44 unsigned getFrameRegister(const MachineFunction &MF) const override;
44 Register getFrameRegister(const MachineFunction &MF) const override;
4545
4646 ManagedStringPool *getStrPool() const {
4747 return const_cast<ManagedStringPool *>(&ManagedStrPool);
24472447 /// Returns true if we should use a direct load into vector instruction
24482448 /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
24492449 static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
2450
2450
24512451 // If there are any other uses other than scalar to vector, then we should
24522452 // keep it as a scalar load -> direct move pattern to prevent multiple
24532453 // loads.
51085108 // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
51095109 // no way to mark dependencies as implicit here.
51105110 // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
5111 if (!isPatchPoint)
5111 if (!isPatchPoint)
51125112 Ops.push_back(DAG.getRegister(isPPC64 ? PPC::X2
51135113 : PPC::R2, PtrVT));
51145114 }
70867086 // undefined):
70877087 // < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
70887088 // < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7089 //
7089 //
70907090 // The same operation in little-endian ordering will be:
70917091 // to
70927092 //
98389838 BifID = Intrinsic::ppc_altivec_vmaxsh;
98399839 else if (VT == MVT::v16i8)
98409840 BifID = Intrinsic::ppc_altivec_vmaxsb;
9841
9841
98429842 return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
98439843 }
98449844
1011810118 MachineFunction *F = BB->getParent();
1011910119 MachineFunction::iterator It = ++BB->getIterator();
1012010120
10121 unsigned dest = MI.getOperand(0).getReg();
10122 unsigned ptrA = MI.getOperand(1).getReg();
10123 unsigned ptrB = MI.getOperand(2).getReg();
10124 unsigned incr = MI.getOperand(3).getReg();
10121 Register dest = MI.getOperand(0).getReg();
10122 Register ptrA = MI.getOperand(1).getReg();
10123 Register ptrB = MI.getOperand(2).getReg();
10124 Register incr = MI.getOperand(3).getReg();
1012510125 DebugLoc dl = MI.getDebugLoc();
1012610126
1012710127 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
1013710137 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
1013810138
1013910139 MachineRegisterInfo &RegInfo = F->getRegInfo();
10140 unsigned TmpReg = (!BinOpcode) ? incr :
10140 Register TmpReg = (!BinOpcode) ? incr :
1014110141 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
1014210142 : &PPC::GPRCRegClass);
1014310143
1024510245 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
1024610246 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
1024710247
10248 unsigned PtrReg = RegInfo.createVirtualRegister(RC);
10249 unsigned Shift1Reg = RegInfo.createVirtualRegister(GPRC);
10250 unsigned ShiftReg =
10248 Register PtrReg = RegInfo.createVirtualRegister(RC);
10249 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
10250 Register ShiftReg =
1025110251 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
10252 unsigned Incr2Reg = RegInfo.createVirtualRegister(GPRC);
10253 unsigned MaskReg = RegInfo.createVirtualRegister(GPRC);
10254 unsigned Mask2Reg = RegInfo.createVirtualRegister(GPRC);
10255 unsigned Mask3Reg = RegInfo.createVirtualRegister(GPRC);
10256 unsigned Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
10257 unsigned Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
10258 unsigned Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
10259 unsigned TmpDestReg = RegInfo.createVirtualRegister(GPRC);
10260 unsigned Ptr1Reg;
10261 unsigned TmpReg =
10252 Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
10253 Register MaskReg = RegInfo.createVirtualRegister(GPRC);
10254 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
10255 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
10256 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
10257 Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
10258 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
10259 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
10260 Register Ptr1Reg;
10261 Register TmpReg =
1026210262 (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
1026310263
1026410264 // thisMBB:
1106011060 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
1106111061 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
1106211062
11063 unsigned PtrReg = RegInfo.createVirtualRegister(RC);
11064 unsigned Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11065 unsigned ShiftReg =
11063 Register PtrReg = RegInfo.createVirtualRegister(RC);
11064 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11065 Register ShiftReg =
1106611066 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11067 unsigned NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
11068 unsigned NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
11069 unsigned OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
11070 unsigned OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
11071 unsigned MaskReg = RegInfo.createVirtualRegister(GPRC);
11072 unsigned Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11073 unsigned Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11074 unsigned Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11075 unsigned Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11076 unsigned TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11077 unsigned Ptr1Reg;
11078 unsigned TmpReg = RegInfo.createVirtualRegister(GPRC);
11079 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
11067 Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
11068 Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
11069 Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
11070 Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
11071 Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11072 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11073 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11074 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11075 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11076 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11077 Register Ptr1Reg;
11078 Register TmpReg = RegInfo.createVirtualRegister(GPRC);
11079 Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
1108011080 // thisMBB:
1108111081 // ...
1108211082 // fallthrough --> loopMBB
1127211272 // Save FPSCR value.
1127311273 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
1127411274
11275 // The floating point rounding mode is in the bits 62:63 of FPCSR, and has
11275 // The floating point rounding mode is in the bits 62:63 of FPCSR, and has
1127611276 // the following settings:
1127711277 // 00 Round to nearest
1127811278 // 01 Round to 0
1129211292
1129311293 // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg
1129411294 // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
11295 // If the target doesn't have DirectMove, we should use stack to do the
11295 // If the target doesn't have DirectMove, we should use stack to do the
1129611296 // conversion, because the target doesn't have the instructions like mtvsrd
1129711297 // or mfvsrd to do this conversion directly.
1129811298 auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
1133811338 MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
1133911339 MFI.getObjectAlignment(FrameIdx));
1134011340
11341 // Load from the stack where SrcReg is stored, and save to DestReg,
11342 // so we have done the RegClass conversion from RegClass::SrcReg to
11341 // Load from the stack where SrcReg is stored, and save to DestReg,
11342 // so we have done the RegClass conversion from RegClass::SrcReg to
1134311343 // RegClass::DestReg.
1134411344 BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
1134511345 .addImm(0)
1134911349 };
1135011350
1135111351 unsigned OldFPSCRReg = MI.getOperand(0).getReg();
11352
11352
1135311353 // Save FPSCR value.
1135411354 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
1135511355
1135611356 // When the operand is gprc register, use two least significant bits of the
11357 // register and mtfsf instruction to set the bits 62:63 of FPSCR.
11358 //
11359 // copy OldFPSCRTmpReg, OldFPSCRReg
11357 // register and mtfsf instruction to set the bits 62:63 of FPSCR.
11358 //
11359 // copy OldFPSCRTmpReg, OldFPSCRReg
1136011360 // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
1136111361 // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
1136211362 // copy NewFPSCRReg, NewFPSCRTmpReg
1136611366 unsigned OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
1136711367
1136811368 copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
11369
11369
1137011370 unsigned ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
1137111371 unsigned ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
1137211372
1379013790 }
1379113791 case ISD::BUILD_VECTOR:
1379213792 return DAGCombineBuildVector(N, DCI);
13793 case ISD::ABS:
13793 case ISD::ABS:
1379413794 return combineABS(N, DCI);
13795 case ISD::VSELECT:
13795 case ISD::VSELECT:
1379613796 return combineVSelect(N, DCI);
1379713797 }
1379813798
1389013890
1389113891 if (!DisableInnermostLoopAlign32) {
1389213892 // If the nested loop is an innermost loop, prefer to a 32-byte alignment,
13893 // so that we can decrease cache misses and branch-prediction misses.
13893 // so that we can decrease cache misses and branch-prediction misses.
1389413894 // Actual alignment of the loop will depend on the hotness check and other
1389513895 // logic in alignBlocks.
13896 if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
13896 if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
1389713897 return 5;
1389813898 }
1389913899
1430914309 if (CModel == CodeModel::Small || CModel == CodeModel::Large)
1431014310 return true;
1431114311
14312 // JumpTable and BlockAddress are accessed as got-indirect.
14312 // JumpTable and BlockAddress are accessed as got-indirect.
1431314313 if (isa(GA) || isa(GA))
1431414314 return true;
1431514315
390390 // Swap op1/op2
391391 assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
392392 "Only the operands 1 and 2 can be swapped in RLSIMI/RLWIMIo.");
393 unsigned Reg0 = MI.getOperand(0).getReg();
394 unsigned Reg1 = MI.getOperand(1).getReg();
395 unsigned Reg2 = MI.getOperand(2).getReg();
393 Register Reg0 = MI.getOperand(0).getReg();
394 Register Reg1 = MI.getOperand(1).getReg();
395 Register Reg2 = MI.getOperand(2).getReg();
396396 unsigned SubReg1 = MI.getOperand(1).getSubReg();
397397 unsigned SubReg2 = MI.getOperand(2).getSubReg();
398398 bool Reg1IsKill = MI.getOperand(1).isKill();
420420
421421 if (NewMI) {
422422 // Create a new instruction.
423 unsigned Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg();
423 Register Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg();
424424 bool Reg0IsDead = MI.getOperand(0).isDead();
425425 return BuildMI(MF, MI.getDebugLoc(), MI.getDesc())
426426 .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
23992399 return &*It;
24002400 }
24012401 break;
2402 } else if (It->readsRegister(Reg, &getRegisterInfo()))
2402 } else if (It->readsRegister(Reg, &getRegisterInfo()))
24032403 // If we see another use of this reg between the def and the MI,
24042404 // we want to flat it so the def isn't deleted.
24052405 SeenIntermediateUse = true;
32173217 }
32183218 }
32193219
3220 // Check if the 'MI' that has the index OpNoForForwarding
3220 // Check if the 'MI' that has the index OpNoForForwarding
32213221 // meets the requirement described in the ImmInstrInfo.
32223222 bool PPCInstrInfo::isUseMIElgibleForForwarding(MachineInstr &MI,
32233223 const ImmInstrInfo &III,
32633263 MachineOperand *&RegMO) const {
32643264 unsigned Opc = DefMI.getOpcode();
32653265 if (Opc != PPC::ADDItocL && Opc != PPC::ADDI && Opc != PPC::ADDI8)
3266 return false;
3266 return false;
32673267
32683268 assert(DefMI.getNumOperands() >= 3 &&
32693269 "Add inst must have at least three operands");
34353435 // Otherwise, it is Constant Pool Index(CPI) or Global,
34363436 // which is relocation in fact. We need to replace the special zero
34373437 // register with ImmMO.
3438 // Before that, we need to fixup the target flags for imm.
3438 // Before that, we need to fixup the target flags for imm.
34393439 // For some reason, we miss to set the flag for the ImmMO if it is CPI.
34403440 if (DefMI.getOpcode() == PPC::ADDItocL)
34413441 ImmMO->setTargetFlags(PPCII::MO_TOC_LO);
11131113 MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false, false, true);
11141114 }
11151115
1116 unsigned PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
1116 Register PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
11171117 const PPCFrameLowering *TFI = getFrameLowering(MF);
11181118
11191119 if (!TM.isPPC64())
11221122 return TFI->hasFP(MF) ? PPC::X31 : PPC::X1;
11231123 }
11241124
1125 unsigned PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const {
1125 Register PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const {
11261126 const PPCSubtarget &Subtarget = MF.getSubtarget();
11271127 if (!hasBasePointer(MF))
11281128 return getFrameRegister(MF);
131131 int64_t Offset) const override;
132132
133133 // Debug information queries.
134 unsigned getFrameRegister(const MachineFunction &MF) const override;
134 Register getFrameRegister(const MachineFunction &MF) const override;
135135
136136 // Base pointer (stack realignment) support.
137 unsigned getBaseRegister(const MachineFunction &MF) const;
137 Register getBaseRegister(const MachineFunction &MF) const;
138138 bool hasBasePointer(const MachineFunction &MF) const;
139139
140140 /// stripRegisterPrefix - This method strips the character prefix from a
123123 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
124124 }
125125
126 unsigned RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
126 Register RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
127127 const TargetFrameLowering *TFI = getFrameLowering(MF);
128128 return TFI->hasFP(MF) ? RISCV::X8 : RISCV::X2;
129129 }
3838 unsigned FIOperandNum,
3939 RegScavenger *RS = nullptr) const override;
4040
41 unsigned getFrameRegister(const MachineFunction &MF) const override;
41 Register getFrameRegister(const MachineFunction &MF) const override;
4242
4343 bool requiresRegisterScavenging(const MachineFunction &MF) const override {
4444 return true;
211211
212212 }
213213
214 unsigned SparcRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
214 Register SparcRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
215215 return SP::I6;
216216 }
217217
3737 int SPAdj, unsigned FIOperandNum,
3838 RegScavenger *RS = nullptr) const override;
3939
40 unsigned getFrameRegister(const MachineFunction &MF) const override;
40 Register getFrameRegister(const MachineFunction &MF) const override;
4141
4242 bool canRealignStack(const MachineFunction &MF) const override;
4343
524524 // SrcReg2 is the register if the source operand is a register,
525525 // 0 if the source operand is immediate, and the base register
526526 // if the source operand is memory (index is not supported).
527 unsigned SrcReg = Compare.getOperand(0).getReg();
528 unsigned SrcReg2 =
529 Compare.getOperand(1).isReg() ? Compare.getOperand(1).getReg() : 0;
527 Register SrcReg = Compare.getOperand(0).getReg();
528 Register SrcReg2 =
529 Compare.getOperand(1).isReg() ? Compare.getOperand(1).getReg() : Register();
530530 MachineBasicBlock::iterator MBBI = Compare, MBBE = Branch;
531531 for (++MBBI; MBBI != MBBE; ++MBBI)
532532 if (MBBI->modifiesRegister(SrcReg, TRI) ||
62486248 }
62496249
62506250 // Force base value Base into a register before MI. Return the register.
6251 static unsigned forceReg(MachineInstr &MI, MachineOperand &Base,
6251 static Register forceReg(MachineInstr &MI, MachineOperand &Base,
62526252 const SystemZInstrInfo *TII) {
62536253 if (Base.isReg())
62546254 return Base.getReg();
62576257 MachineFunction &MF = *MBB->getParent();
62586258 MachineRegisterInfo &MRI = MF.getRegInfo();
62596259
6260 unsigned Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
6260 Register Reg = MRI.createVirtualRegister(&SystemZ::ADDR64BitRegClass);
62616261 BuildMI(*MBB, MI, MI.getDebugLoc(), TII->get(SystemZ::LA), Reg)
62626262 .add(Base)
62636263 .addImm(0)
65416541 MachineOperand Base = earlyUseOperand(MI.getOperand(1));
65426542 int64_t Disp = MI.getOperand(2).getImm();
65436543 MachineOperand Src2 = earlyUseOperand(MI.getOperand(3));
6544 unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
6545 unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
6544 Register BitShift = IsSubWord ? MI.getOperand(4).getReg() : Register();
6545 Register NegBitShift = IsSubWord ? MI.getOperand(5).getReg() : Register();
65466546 DebugLoc DL = MI.getDebugLoc();
65476547 if (IsSubWord)
65486548 BitSize = MI.getOperand(6).getImm();
65606560 assert(LOpcode && CSOpcode && "Displacement out of range");
65616561
65626562 // Create virtual registers for temporary results.
6563 unsigned OrigVal = MRI.createVirtualRegister(RC);
6564 unsigned OldVal = MRI.createVirtualRegister(RC);
6565 unsigned NewVal = (BinOpcode || IsSubWord ?
6563 Register OrigVal = MRI.createVirtualRegister(RC);
6564 Register OldVal = MRI.createVirtualRegister(RC);
6565 Register NewVal = (BinOpcode || IsSubWord ?
65666566 MRI.createVirtualRegister(RC) : Src2.getReg());
6567 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
6568 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
6567 Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
6568 Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
65696569
65706570 // Insert a basic block for the main loop.
65716571 MachineBasicBlock *StartMBB = MBB;
66586658 unsigned Dest = MI.getOperand(0).getReg();
66596659 MachineOperand Base = earlyUseOperand(MI.getOperand(1));
66606660 int64_t Disp = MI.getOperand(2).getImm();
6661 unsigned Src2 = MI.getOperand(3).getReg();
6662 unsigned BitShift = (IsSubWord ? MI.getOperand(4).getReg() : 0);
6663 unsigned NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : 0);
6661 Register Src2 = MI.getOperand(3).getReg();
6662 Register BitShift = (IsSubWord ? MI.getOperand(4).getReg() : Register());
6663 Register NegBitShift = (IsSubWord ? MI.getOperand(5).getReg() : Register());
66646664 DebugLoc DL = MI.getDebugLoc();
66656665 if (IsSubWord)
66666666 BitSize = MI.getOperand(6).getImm();
66786678 assert(LOpcode && CSOpcode && "Displacement out of range");
66796679
66806680 // Create virtual registers for temporary results.
6681 unsigned OrigVal = MRI.createVirtualRegister(RC);
6682 unsigned OldVal = MRI.createVirtualRegister(RC);
6683 unsigned NewVal = MRI.createVirtualRegister(RC);
6684 unsigned RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
6685 unsigned RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
6686 unsigned RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
6681 Register OrigVal = MRI.createVirtualRegister(RC);
6682 Register OldVal = MRI.createVirtualRegister(RC);
6683 Register NewVal = MRI.createVirtualRegister(RC);
6684 Register RotatedOldVal = (IsSubWord ? MRI.createVirtualRegister(RC) : OldVal);
6685 Register RotatedAltVal = (IsSubWord ? MRI.createVirtualRegister(RC) : Src2);
6686 Register RotatedNewVal = (IsSubWord ? MRI.createVirtualRegister(RC) : NewVal);
66876687
66886688 // Insert 3 basic blocks for the loop.
66896689 MachineBasicBlock *StartMBB = MBB;
69666966 if (MI.getNumExplicitOperands() > 5) {
69676967 bool HaveSingleBase = DestBase.isIdenticalTo(SrcBase);
69686968
6969 uint64_t StartCountReg = MI.getOperand(5).getReg();
6970 uint64_t StartSrcReg = forceReg(MI, SrcBase, TII);
6971 uint64_t StartDestReg = (HaveSingleBase ? StartSrcReg :
6969 Register StartCountReg = MI.getOperand(5).getReg();
6970 Register StartSrcReg = forceReg(MI, SrcBase, TII);
6971 Register StartDestReg = (HaveSingleBase ? StartSrcReg :
69726972 forceReg(MI, DestBase, TII));
69736973
69746974 const TargetRegisterClass *RC = &SystemZ::ADDR64BitRegClass;
6975 uint64_t ThisSrcReg = MRI.createVirtualRegister(RC);
6976 uint64_t ThisDestReg = (HaveSingleBase ? ThisSrcReg :
6975 Register ThisSrcReg = MRI.createVirtualRegister(RC);
6976 Register ThisDestReg = (HaveSingleBase ? ThisSrcReg :
69776977 MRI.createVirtualRegister(RC));
6978 uint64_t NextSrcReg = MRI.createVirtualRegister(RC);
6979 uint64_t NextDestReg = (HaveSingleBase ? NextSrcReg :
6978 Register NextSrcReg = MRI.createVirtualRegister(RC);
6979 Register NextDestReg = (HaveSingleBase ? NextSrcReg :
69806980 MRI.createVirtualRegister(RC));
69816981
69826982 RC = &SystemZ::GR64BitRegClass;
6983 uint64_t ThisCountReg = MRI.createVirtualRegister(RC);
6984 uint64_t NextCountReg = MRI.createVirtualRegister(RC);
6983 Register ThisCountReg = MRI.createVirtualRegister(RC);
6984 Register NextCountReg = MRI.createVirtualRegister(RC);
69856985
69866986 MachineBasicBlock *StartMBB = MBB;
69876987 MachineBasicBlock *DoneMBB = splitBlockBefore(MI, MBB);
11781178 MemOpcode = -1;
11791179 else {
11801180 assert(NumOps == 3 && "Expected two source registers.");
1181 unsigned DstReg = MI.getOperand(0).getReg();
1182 unsigned DstPhys =
1181 Register DstReg = MI.getOperand(0).getReg();
1182 Register DstPhys =
11831183 (TRI->isVirtualRegister(DstReg) ? VRM->getPhys(DstReg) : DstReg);
1184 unsigned SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()
1184 Register SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()
11851185 : ((OpNum == 1 && MI.isCommutable())
11861186 ? MI.getOperand(2).getReg()
1187 : 0));
1187 : Register()));
11881188 if (DstPhys && !SystemZ::GRH32BitRegClass.contains(DstPhys) && SrcReg &&
11891189 TRI->isVirtualRegister(SrcReg) && DstPhys == VRM->getPhys(SrcReg))
11901190 NeedsCommute = (OpNum == 1);
163163 continue;
164164
165165 auto tryAddHint = [&](const MachineOperand *MO) -> void {
166 unsigned Reg = MO->getReg();
167 unsigned PhysReg = isPhysicalRegister(Reg) ? Reg : VRM->getPhys(Reg);
166 Register Reg = MO->getReg();
167 Register PhysReg = isPhysicalRegister(Reg) ? Reg : VRM->getPhys(Reg);
168168 if (PhysReg) {
169169 if (MO->getSubReg())
170170 PhysReg = getSubReg(PhysReg, MO->getSubReg());
398398 return true;
399399 }
400400
401 unsigned
401 Register
402402 SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
403403 const SystemZFrameLowering *TFI = getFrameLowering(MF);
404404 return TFI->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D;
8282 const TargetRegisterClass *NewRC,
8383 LiveIntervals &LIS) const override;
8484
85 unsigned getFrameRegister(const MachineFunction &MF) const override;
85 Register getFrameRegister(const MachineFunction &MF) const override;
8686 };
8787
8888 } // end namespace llvm
6565 assert(MFI.getObjectSize(FrameIndex) != 0 &&
6666 "We assume that variable-sized objects have already been lowered, "
6767 "and don't use FrameIndex operands.");
68 unsigned FrameRegister = getFrameRegister(MF);
68 Register FrameRegister = getFrameRegister(MF);
6969
7070 // If this is the address operand of a load or store, make it relative to SP
7171 // and fold the frame offset directly in.
129129 MI.getOperand(FIOperandNum).ChangeToRegister(FIRegOperand, /*IsDef=*/false);
130130 }
131131
132 unsigned
132 Register
133133 WebAssemblyRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
134134 static const unsigned Regs[2][2] = {
135135 /* !isArch64Bit isArch64Bit */
3838 RegScavenger *RS = nullptr) const override;
3939
4040 // Debug information queries.
41 unsigned getFrameRegister(const MachineFunction &MF) const override;
41 Register getFrameRegister(const MachineFunction &MF) const override;
4242
4343 const TargetRegisterClass *
4444 getPointerRegClass(const MachineFunction &MF,
7474 return true;
7575 }
7676
77 SmallVector<unsigned, 8> SplitRegs;
77 SmallVector<Register, 8> SplitRegs;
7878
7979 EVT PartVT = TLI.getRegisterType(Context, VT);
8080 Type *PartTy = PartVT.getTypeForEVT(Context);
181181
182182 bool X86CallLowering::lowerReturn(
183183 MachineIRBuilder &MIRBuilder, const Value *Val,
184 ArrayRef<unsigned> VRegs) const {
184 ArrayRef<Register> VRegs) const {
185185 assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
186186 "Return value without a vreg");
187187 auto MIB = MIRBuilder.buildInstrNoInsert(X86::RET).addImm(0);
204204 ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)};
205205 setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
206206 if (!splitToValueTypes(CurArgInfo, SplitArgs, DL, MRI,
207 [&](ArrayRef<unsigned> Regs) {
207 [&](ArrayRef<Register> Regs) {
208208 MIRBuilder.buildUnmerge(Regs, VRegs[i]);
209209 }))
210210 return false;
320320
321321 bool X86CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
322322 const Function &F,
323 ArrayRef<unsigned> VRegs) const {
323 ArrayRef<Register> VRegs) const {
324324 if (F.arg_empty())
325325 return true;
326326
348348 ArgInfo OrigArg(VRegs[Idx], Arg.getType());
349349 setArgFlags(OrigArg, Idx + AttributeList::FirstArgIndex, DL, F);
350350 if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
351 [&](ArrayRef<unsigned> Regs) {
351 [&](ArrayRef<Register> Regs) {
352352 MIRBuilder.buildMerge(VRegs[Idx], Regs);
353353 }))
354354 return false;
408408 return false;
409409
410410 if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
411 [&](ArrayRef<unsigned> Regs) {
411 [&](ArrayRef<Register> Regs) {
412412 MIRBuilder.buildUnmerge(Regs, OrigArg.Reg);
413413 }))
414414 return false;
451451
452452 if (OrigRet.Reg) {
453453 SplitArgs.clear();
454 SmallVector<unsigned, 8> NewRegs;
454 SmallVector<Register, 8> NewRegs;
455455
456456 if (!splitToValueTypes(OrigRet, SplitArgs, DL, MRI,
457 [&](ArrayRef<unsigned> Regs) {
457 [&](ArrayRef<Register> Regs) {
458458 NewRegs.assign(Regs.begin(), Regs.end());
459459 }))
460460 return false;
2828 X86CallLowering(const X86TargetLowering &TLI);
2929
3030 bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val,
31 ArrayRef<unsigned> VRegs) const override;
31 ArrayRef<Register> VRegs) const override;
3232
3333 bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F,
34 ArrayRef<unsigned> VRegs) const override;
34 ArrayRef<Register> VRegs) const override;
3535
3636 bool lowerCall(MachineIRBuilder &MIRBuilder, CallingConv::ID CallConv,
3737 const MachineOperand &Callee, const ArgInfo &OrigRet,
3939
4040 private:
4141 /// A function of this type is used to perform value split action.
42 using SplitArgTy = std::functionunsigned>)>;
42 using SplitArgTy = std::functionRegister>)>;
4343
4444 bool splitToValueTypes(const ArgInfo &OrigArgInfo,
4545 SmallVectorImpl &SplitArgs,
583583 // registers. For the prolog expansion we use RAX, RCX and RDX.
584584 MachineRegisterInfo &MRI = MF.getRegInfo();
585585 const TargetRegisterClass *RegClass = &X86::GR64RegClass;
586 const unsigned SizeReg = InProlog ? (unsigned)X86::RAX
586 const Register SizeReg = InProlog ? X86::RAX
587587 : MRI.createVirtualRegister(RegClass),
588 ZeroReg = InProlog ? (unsigned)X86::RCX
588 ZeroReg = InProlog ? X86::RCX
589