GlobalISel: implement low-level type with just size & vector lanes. This should be all the low-level instruction selection needs to determine how to implement an operation, with the remaining context taken from the opcode (e.g. G_ADD vs G_FADD) or other flags not based on type (e.g. fast-math). git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@276158 91177308-0d34-0410-b5e6-96231b3b80d8 Tim Northover 3 years ago
21 changed file(s) with 397 addition(s) and 187 deletion(s).
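Before the per-file diff, a minimal sketch of the new type model (illustrative only, not part of the patch): LLT records just total size and lane count, so the factories below are all that is needed to describe a value, and the integer/float distinction lives in the opcode. It assumes the llvm/CodeGen/LowLevelType.h header added below.

// Sketch using the LLT factories added by this commit.
#include "llvm/CodeGen/LowLevelType.h"
using namespace llvm;

void lltExamples() {
  LLT S32 = LLT::scalar(32);      // both i32 and float lower to s32
  LLT V4S16 = LLT::vector(4, 16); // <4 x i16> and <4 x half> lower to <4 x s16>
  LLT Label = LLT::unsized();     // labels, block addresses, etc.
  // Integer vs. floating-point addition is distinguished by the opcode
  // (G_ADD vs. G_FADD), not by the type; both operate on plain s32 values.
  (void)S32; (void)V4S16; (void)Label;
}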
1616 #include "llvm/CodeGen/GlobalISel/Types.h"
1717
1818 #include "llvm/CodeGen/MachineBasicBlock.h"
19 #include "llvm/CodeGen/LowLevelType.h"
1920 #include "llvm/IR/DebugLoc.h"
2021
2122 namespace llvm {
9596 /// \pre Ty == nullptr or isPreISelGenericOpcode(Opcode)
9697 ///
9798 /// \return The newly created instruction.
98 MachineInstr *buildInstr(unsigned Opcode, Type *Ty);
99 MachineInstr *buildInstr(unsigned Opcode, LLT Ty);
99100
100101 /// Build and insert = \p Opcode [\p Ty] \p BB.
101102 ///
103104 /// \pre Ty == nullptr or isPreISelGenericOpcode(Opcode)
104105 ///
105106 /// \return The newly created instruction.
106 MachineInstr *buildInstr(unsigned Opcode, Type *Ty, MachineBasicBlock &BB);
107 MachineInstr *buildInstr(unsigned Opcode, LLT Ty, MachineBasicBlock &BB);
107108
108109 /// Build and insert \p Res = \p Opcode [\p Ty] \p Op0, \p Op1.
109110 ///
111112 /// \pre Ty == nullptr or isPreISelGenericOpcode(Opcode)
112113 ///
113114 /// \return The newly created instruction.
114 MachineInstr *buildInstr(unsigned Opcode, Type *Ty, unsigned Res,
115 MachineInstr *buildInstr(unsigned Opcode, LLT Ty, unsigned Res,
115116 unsigned Op0, unsigned Op1);
116117
117118 /// Build and insert \p Res = \p Opcode \p Op0, \p Op1.
290290 /// Total number of register banks.
291291 unsigned NumRegBanks;
292292
293 /// Mapping from MVT::SimpleValueType to register banks.
294 std::unique_ptr<const RegisterBank *[]> VTToRegBank;
295
296293 /// Create a RegisterBankInfo that can accommodate up to \p NumRegBanks
297294 /// RegisterBank instances.
298295 ///
324321 /// It also adjusts the size of the register bank to reflect the maximal
325322 /// size of a value that can be held in that register bank.
326323 ///
327 /// If \p AddTypeMapping is true, this method also records what types can
328 /// be mapped to \p ID. Although this done by default, targets may want to
329 /// disable it, espicially if a given type may be mapped on different
330 /// register bank. Indeed, in such case, this method only records the
331 /// first register bank where the type matches.
332 /// This information is only used to provide default mapping
333 /// (see getInstrMappingImpl).
334 ///
335324 /// \note This method does *not* add the super classes of \p RCId.
336325 /// The rationale is if \p ID covers the registers of \p RCId, that
337326 /// does not necessarily mean that \p ID covers the set of registers
342331 ///
343332 /// \todo TableGen should just generate the BitSet vector for us.
344333 void addRegBankCoverage(unsigned ID, unsigned RCId,
345 const TargetRegisterInfo &TRI,
346 bool AddTypeMapping = true);
334 const TargetRegisterInfo &TRI);
347335
348336 /// Get the register bank identified by \p ID.
349337 RegisterBank &getRegBank(unsigned ID) {
350338 assert(ID < getNumRegBanks() && "Accessing an unknown register bank");
351339 return RegBanks[ID];
352 }
353
354 /// Get the register bank that has been recorded to cover \p SVT.
355 const RegisterBank *getRegBankForType(MVT::SimpleValueType SVT) const {
356 if (!VTToRegBank)
357 return nullptr;
358 assert(SVT < MVT::SimpleValueType::LAST_VALUETYPE && "Out-of-bound access");
359 return VTToRegBank.get()[SVT];
360 }
361
362 /// Record \p RegBank as the register bank that covers \p SVT.
363 /// If a record was already set for \p SVT, the mapping is not
364 /// updated, unless \p Force == true
365 ///
366 /// \post if getRegBankForType(SVT)\@pre == nullptr then
367 /// getRegBankForType(SVT) == &RegBank
368 /// \post if Force == true then getRegBankForType(SVT) == &RegBank
369 void recordRegBankForType(const RegisterBank &RegBank,
370 MVT::SimpleValueType SVT, bool Force = false) {
371 if (!VTToRegBank) {
372 VTToRegBank.reset(
373 new const RegisterBank *[MVT::SimpleValueType::LAST_VALUETYPE]);
374 std::fill(&VTToRegBank[0],
375 &VTToRegBank[MVT::SimpleValueType::LAST_VALUETYPE], nullptr);
376 }
377 assert(SVT < MVT::SimpleValueType::LAST_VALUETYPE && "Out-of-bound access");
378 // If we want to override the mapping or the mapping does not exits yet,
379 // set the register bank for SVT.
380 if (Force || !getRegBankForType(SVT))
381 VTToRegBank.get()[SVT] = &RegBank;
382340 }
383341
384342 /// Try to get the mapping of \p MI.
0 //== llvm/CodeGen/GlobalISel/LowLevelType.h -------------------- -*- C++ -*-==//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// Implement a low-level type suitable for MachineInstr level instruction
10 /// selection.
11 ///
12 /// For a type attached to a MachineInstr, we only care about 2 details: total
13 /// size and the number of vector lanes (if any). Accordingly, there are 3
14 /// possible valid type-kinds:
15 ///
16 /// * `unsized` for labels etc
17 /// * `sN` for scalars and aggregates
18 /// * `<N x sM>` for vectors, which must have at least 2 elements.
19 ///
20 /// Other information required for correct selection is expected to be carried
21 /// by the opcode, or non-type flags. For example the distinction between G_ADD
22 /// and G_FADD for int/float or fast-math flags.
23 //
24 //===----------------------------------------------------------------------===//
25
26 #ifndef LLVM_CODEGEN_GLOBALISEL_LOWLEVELTYPE_H
27 #define LLVM_CODEGEN_GLOBALISEL_LOWLEVELTYPE_H
28
29 #include <cassert>
30 #include "llvm/ADT/DenseMapInfo.h"
31 #include "llvm/CodeGen/ValueTypes.h"
32
33 namespace llvm {
34
35 class LLVMContext;
36 class Type;
37 class raw_ostream;
38
39 class LLT {
40 public:
41 enum TypeKind : uint16_t {
42 Invalid,
43 Scalar,
44 Vector,
45 Unsized,
46 };
47
48 /// \brief get a low-level scalar or aggregate "bag of bits".
49 static LLT scalar(unsigned SizeInBits) {
50 return LLT{Scalar, 1, SizeInBits};
51 }
52
53 /// \brief get a low-level vector of some number of elements and element
54 /// width. \p NumElements must be at least 2.
55 static LLT vector(uint16_t NumElements, unsigned ScalarSizeInBits) {
56 assert(NumElements > 1 && "invalid number of vector elements");
57 return LLT{Vector, NumElements, ScalarSizeInBits};
58 }
59
60 /// \brief get a low-level vector of some number of elements and element
61 /// type
62 static LLT vector(uint16_t NumElements, LLT ScalarTy) {
63 assert(NumElements > 1 && "invalid number of vector elements");
64 assert(ScalarTy.isScalar() && "invalid vector element type");
65 return LLT{Vector, NumElements, ScalarTy.getSizeInBits()};
66 }
67
68 /// \brief get an unsized but valid low-level type (e.g. for a label).
69 static LLT unsized() {
70 return LLT{Unsized, 1, 0};
71 }
72
73 explicit LLT(TypeKind Kind, uint16_t NumElements, unsigned ScalarSizeInBits)
74 : ScalarSize(ScalarSizeInBits), NumElements(NumElements), Kind(Kind) {
75 assert((Kind != Vector || NumElements > 1) &&
76 "invalid number of vector elements");
77 }
78
79 explicit LLT() : ScalarSize(0), NumElements(0), Kind(Invalid) {}
80
81 /// \brief construct a low-level type based on an LLVM type.
82 explicit LLT(const Type &Ty);
83
84 bool isValid() const { return Kind != Invalid; }
85
86 bool isScalar() const { return Kind == Scalar; }
87
88 bool isVector() const { return Kind == Vector; }
89
90 bool isSized() const { return Kind == Scalar || Kind == Vector; }
91
92 /// \brief Returns the number of elements in a vector LLT. Must only be called
93 /// on vector types.
94 uint16_t getNumElements() const {
95 assert(isVector() && "cannot get number of elements on scalar/aggregate");
96 return NumElements;
97 }
98
99 /// \brief Returns the total size of the type. Must only be called on sized
100 /// types.
101 unsigned getSizeInBits() const {
102 assert(isSized() && "attempt to get size of unsized type");
103 return ScalarSize * NumElements;
104 }
105
106 unsigned getScalarSizeInBits() const {
107 assert(isSized() && "cannot get size of this type");
108 return ScalarSize;
109 }
110
111 /// \brief Returns the vector's element type. Only valid for vector types.
112 LLT getElementType() const {
113 assert(isVector() && "cannot get element type of scalar/aggregate");
114 return scalar(ScalarSize);
115 }
116
117 /// \brief get a low-level type with half the size of the original, by halving
118 /// the size of the scalar type involved. For example `s32` will become
119 /// `s16`, `<2 x s32>` will become `<2 x s16>`.
120 LLT halfScalarSize() const {
121 assert(isSized() && "cannot change size of this type");
122 return LLT{Kind, NumElements, ScalarSize / 2};
123 }
124
125 /// \brief get a low-level type with twice the size of the original, by
126 /// doubling the size of the scalar type involved. For example `s32` will
127 /// become `s64`, `<2 x s32>` will become `<2 x s64>`.
128 LLT doubleScalarSize() const {
129 assert(isSized() && "cannot change size of this type");
130 return LLT{Kind, NumElements, ScalarSize * 2};
131 }
132
133 /// \brief get a low-level type with half the size of the original, by halving
134 /// the number of vector elements of the scalar type involved. The source must
135 /// be a vector type with an even number of elements. For example `<4 x
136 /// s32>` will become `<2 x s32>`, `<2 x s32>` will become `s32`.
137 LLT halfElements() const {
138 assert(isVector() && NumElements % 2 == 0 && "cannot half odd vector");
139 if (NumElements == 2)
140 return scalar(ScalarSize);
141
142 return LLT{Vector, static_cast<uint16_t>(NumElements / 2), ScalarSize};
143 }
144
145 /// \brief get a low-level type with twice the size of the original, by
146 /// doubling the number of vector elements of the scalar type involved. The
147 /// source must be a vector type. For example `<2 x s32>` will become `<4 x
148 /// s32>`. Doubling the number of elements in sN produces <2 x sN>.
149 LLT doubleElements() const {
150 return LLT{Vector, static_cast<uint16_t>(NumElements * 2), ScalarSize};
151 }
152
153 void print(raw_ostream &OS) const;
154
155 bool operator ==(const LLT &RHS) const {
156 return Kind == RHS.Kind && ScalarSize == RHS.ScalarSize &&
157 NumElements == RHS.NumElements;
158 }
159
160 friend struct DenseMapInfo<LLT>;
161 private:
162 unsigned ScalarSize;
163 uint16_t NumElements;
164 TypeKind Kind;
165 };
166
167 template<> struct DenseMapInfo<LLT> {
168 static inline LLT getEmptyKey() {
169 return LLT{LLT::Invalid, 0, -1u};
170 }
171 static inline LLT getTombstoneKey() {
172 return LLT{LLT::Invalid, 0, -2u};
173 }
174 static inline unsigned getHashValue(const LLT &Ty) {
175 uint64_t Val = ((uint64_t)Ty.ScalarSize << 32) |
176 ((uint64_t)Ty.NumElements << 16) | (uint64_t)Ty.Kind;
177 return DenseMapInfo<uint64_t>::getHashValue(Val);
178 }
179 static bool isEqual(const LLT &LHS, const LLT &RHS) {
180 return LHS == RHS;
181 }
182 };
183
184 }
185
186 #endif
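A short usage sketch of the helpers defined in the header above (illustrative, not part of the patch): the narrowToFit policy is hypothetical, and the DenseMap use relies on the DenseMapInfo<LLT> specialization just shown.

#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/LowLevelType.h"
using namespace llvm;

// Hypothetical narrowing policy: halve the lane count of vectors first,
// then the scalar width, until the total size fits in RegSize bits.
static LLT narrowToFit(LLT Ty, unsigned RegSize) {
  while (Ty.isSized() && Ty.getSizeInBits() > RegSize) {
    if (Ty.isVector())
      Ty = Ty.halfElements();   // <4 x s32> -> <2 x s32> -> s32
    else
      Ty = Ty.halfScalarSize(); // s64 -> s32
  }
  return Ty;
}

void useLLTAsMapKey() {
  // DenseMapInfo<LLT> makes LLT directly usable as a DenseMap key.
  DenseMap<LLT, unsigned> ActionFor;
  ActionFor[LLT::scalar(64)] = 0;
  ActionFor[LLT::vector(2, 32)] = 1;
  (void)narrowToFit(LLT::vector(4, 32), 64); // yields <2 x s32>
}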
2222 #include "llvm/ADT/iterator_range.h"
2323 #include "llvm/Analysis/AliasAnalysis.h"
2424 #include "llvm/CodeGen/MachineOperand.h"
25 #ifdef LLVM_BUILD_GLOBAL_ISEL
26 #include "llvm/CodeGen/LowLevelType.h"
27 #endif
2528 #include "llvm/IR/DebugLoc.h"
2629 #include "llvm/IR/InlineAsm.h"
2730 #include "llvm/MC/MCInstrDesc.h"
3841 class TargetInstrInfo;
3942 class TargetRegisterClass;
4043 class TargetRegisterInfo;
41 #ifdef LLVM_BUILD_GLOBAL_ISEL
42 class Type;
43 #endif
4444 class MachineFunction;
4545 class MachineMemOperand;
4646
107107
108108 #ifdef LLVM_BUILD_GLOBAL_ISEL
109109 /// Type of the instruction in case of a generic opcode.
110 /// \invariant This must be nullptr is getOpcode() is not
110 /// \invariant This must be LLT{} if getOpcode() is not
111111 /// in the range of generic opcodes.
112 Type *Ty;
112 LLT Ty;
113113 #endif
114114
115115 MachineInstr(const MachineInstr&) = delete;
188188
189189 /// Set the type of the instruction.
190190 /// \pre getOpcode() is in the range of the generic opcodes.
191 void setType(Type *Ty);
192 Type *getType() const;
191 void setType(LLT Ty);
192 LLT getType() const;
193193
194194 /// Return true if MI is in a bundle (but not the first MI in a bundle).
195195 ///
4747 LiveVariables.cpp
4848 LLVMTargetMachine.cpp
4949 LocalStackSlotAllocation.cpp
50 LowLevelType.cpp
5051 LowerEmuTLS.cpp
5152 MachineBasicBlock.cpp
5253 MachineBlockFrequencyInfo.cpp
6868 unsigned Op0 = getOrCreateVReg(*Inst.getOperand(0));
6969 unsigned Op1 = getOrCreateVReg(*Inst.getOperand(1));
7070 unsigned Res = getOrCreateVReg(Inst);
71 MIRBuilder.buildInstr(Opcode, Inst.getType(), Res, Op0, Op1);
71 MIRBuilder.buildInstr(Opcode, LLT{*Inst.getType()}, Res, Op0, Op1);
7272 return true;
7373 }
7474
8787 if (BrInst.isUnconditional()) {
8888 const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getOperand(0));
8989 MachineBasicBlock &TgtBB = getOrCreateBB(BrTgt);
90 MIRBuilder.buildInstr(TargetOpcode::G_BR, BrTgt.getType(), TgtBB);
90 MIRBuilder.buildInstr(TargetOpcode::G_BR, LLT{*BrTgt.getType()}, TgtBB);
9191 } else {
9292 assert(0 && "Not yet implemented");
9393 }
5555 //------------------------------------------------------------------------------
5656 // Build instruction variants.
5757 //------------------------------------------------------------------------------
58 MachineInstr *MachineIRBuilder::buildInstr(unsigned Opcode, Type *Ty) {
58 MachineInstr *MachineIRBuilder::buildInstr(unsigned Opcode, LLT Ty) {
5959 MachineInstr *NewMI = BuildMI(getMF(), DL, getTII().get(Opcode));
60 if (Ty) {
60 if (Ty.isValid()) {
6161 assert(isPreISelGenericOpcode(Opcode) &&
6262 "Only generic instruction can have a type");
6363 NewMI->setType(Ty);
7070
7171 MachineInstr *MachineIRBuilder::buildInstr(unsigned Opcode, unsigned Res,
7272 unsigned Op0, unsigned Op1) {
73 return buildInstr(Opcode, nullptr, Res, Op0, Op1);
73 return buildInstr(Opcode, LLT{}, Res, Op0, Op1);
7474 }
7575
76 MachineInstr *MachineIRBuilder::buildInstr(unsigned Opcode, Type *Ty,
76 MachineInstr *MachineIRBuilder::buildInstr(unsigned Opcode, LLT Ty,
7777 unsigned Res, unsigned Op0,
7878 unsigned Op1) {
7979 MachineInstr *NewMI = buildInstr(Opcode, Ty);
8686
8787 MachineInstr *MachineIRBuilder::buildInstr(unsigned Opcode, unsigned Res,
8888 unsigned Op0) {
89 MachineInstr *NewMI = buildInstr(Opcode, nullptr);
89 MachineInstr *NewMI = buildInstr(Opcode, LLT{});
9090 MachineInstrBuilder(getMF(), NewMI).addReg(Res, RegState::Define).addReg(Op0);
9191 return NewMI;
9292 }
9393
9494 MachineInstr *MachineIRBuilder::buildInstr(unsigned Opcode) {
95 return buildInstr(Opcode, nullptr);
95 return buildInstr(Opcode, LLT{});
9696 }
9797
98 MachineInstr *MachineIRBuilder::buildInstr(unsigned Opcode, Type *Ty,
98 MachineInstr *MachineIRBuilder::buildInstr(unsigned Opcode, LLT Ty,
9999 MachineBasicBlock &BB) {
100100 MachineInstr *NewMI = buildInstr(Opcode, Ty);
101101 MachineInstrBuilder(getMF(), NewMI).addMBB(&BB);
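For context, a hedged sketch of how a caller such as the IRTranslator uses the LLT-taking overloads above. It assumes MIRBuilder is an already-configured MachineIRBuilder, Res/Op0/Op1 are virtual registers created elsewhere, and the include paths are those current at this revision; it is not part of the patch.

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Target/TargetOpcodes.h"
using namespace llvm;

void emitBinOp(MachineIRBuilder &MIRBuilder, const Instruction &Inst,
               unsigned Res, unsigned Op0, unsigned Op1) {
  // The IR type is collapsed to size + lane count; the opcode alone says
  // what kind of addition this is.
  MIRBuilder.buildInstr(TargetOpcode::G_ADD, LLT{*Inst.getType()},
                        Res, Op0, Op1);
}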
6464 }
6565
6666 void RegisterBankInfo::addRegBankCoverage(unsigned ID, unsigned RCId,
67 const TargetRegisterInfo &TRI,
68 bool AddTypeMapping) {
67 const TargetRegisterInfo &TRI) {
6968 RegisterBank &RB = getRegBank(ID);
7069 unsigned NbOfRegClasses = TRI.getNumRegClasses();
7170
9695
9796 // Remember the biggest size in bits.
9897 MaxSize = std::max(MaxSize, CurRC.getSize() * 8);
99
100 // If we have been asked to record the type supported by this
101 // register bank, do it now.
102 if (AddTypeMapping)
103 for (MVT::SimpleValueType SVT :
104 make_range(CurRC.vt_begin(), CurRC.vt_end()))
105 recordRegBankForType(getRegBank(ID), SVT);
10698
10799 // Walk through all sub register classes and push them into the worklist.
108100 bool First = true;
239231 // the register bank from the encoding constraints.
240232 CurRegBank = getRegBankFromConstraints(MI, OpIdx, TII, TRI);
241233 if (!CurRegBank) {
242 // Check if we can deduce the register bank from the type of
243 // the instruction.
244 Type *MITy = MI.getType();
245 if (MITy)
246 CurRegBank = getRegBankForType(
247 MVT::getVT(MITy, /*HandleUnknown*/ true).SimpleTy);
248 if (!CurRegBank)
249 // Use the current assigned register bank.
250 // That may not make much sense though.
251 CurRegBank = AltRegBank;
252 if (!CurRegBank) {
253 // All our attempts failed, give up.
254 CompleteMapping = false;
255
256 if (!isCopyLike)
257 // MI does not carry enough information to guess the mapping.
258 return InstructionMapping();
259
260 // For copies, we want to keep interating to find a register
261 // bank for the other operands if we did not find one yet.
262 if (RegBank)
263 break;
264 continue;
265 }
234 // All our attempts failed, give up.
235 CompleteMapping = false;
236
237 if (!isCopyLike)
238 // MI does not carry enough information to guess the mapping.
239 return InstructionMapping();
240
241 // For copies, we want to keep iterating to find a register
242 // bank for the other operands if we did not find one yet.
243 if (RegBank)
244 break;
245 continue;
266246 }
267247 }
268248 RegBank = CurRegBank;
0 //===-- llvm/CodeGen/GlobalISel/LowLevelType.cpp --------------------------===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file This file implements the more header-heavy bits of the LLT class to
10 /// avoid polluting users' namespaces.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "llvm/CodeGen/LowLevelType.h"
15 #include "llvm/IR/DerivedTypes.h"
16 #include "llvm/Support/raw_ostream.h"
17 using namespace llvm;
18
19 LLT::LLT(const Type &Ty) {
20 if (auto VTy = dyn_cast<VectorType>(&Ty)) {
21 ScalarSize = VTy->getElementType()->getPrimitiveSizeInBits();
22 NumElements = VTy->getNumElements();
23 Kind = NumElements == 1 ? Scalar : Vector;
24 } else if (Ty.isSized()) {
25 // Aggregates are no different from real scalars as far as GlobalISel is
26 // concerned.
27 Kind = Scalar;
28 ScalarSize = Ty.getPrimitiveSizeInBits();
29 NumElements = 1;
30 } else {
31 Kind = Unsized;
32 ScalarSize = NumElements = 0;
33 }
34 }
35
36 void LLT::print(raw_ostream &OS) const {
37 if (isVector())
38 OS << "<" << NumElements << " x s" << ScalarSize << ">";
39 else if (isSized())
40 OS << "s" << ScalarSize;
41 else if (isValid())
42 OS << "unsized";
43 else
44 llvm_unreachable("trying to print an invalid type");
45 }
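Expected behaviour of the Type-to-LLT collapse and the printer above, as a sketch (assumes an LLVMContext is available and uses the element-type/count form of VectorType::get current at this revision; not part of the patch).

#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

void printSomeLLTs(LLVMContext &Ctx) {
  LLT{*Type::getInt32Ty(Ctx)}.print(errs());   // prints "s32"
  errs() << '\n';
  LLT{*Type::getDoubleTy(Ctx)}.print(errs());  // prints "s64"
  errs() << '\n';
  LLT{*VectorType::get(Type::getFloatTy(Ctx), 2)}.print(errs()); // "<2 x s32>"
  errs() << '\n';
}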
172172 return C;
173173 }
174174
175 static Cursor maybeLexIntegerType(Cursor C, MIToken &Token) {
176 if (C.peek() != 'i' || !isdigit(C.peek(1)))
177 return None;
175 static Cursor maybeLexIntegerOrScalarType(Cursor C, MIToken &Token) {
176 if ((C.peek() != 'i' && C.peek() != 's') || !isdigit(C.peek(1)))
177 return None;
178 char Kind = C.peek();
178179 auto Range = C;
179180 C.advance(); // Skip 'i' or 's'
180181 while (isdigit(C.peek()))
181182 C.advance();
182 Token.reset(MIToken::IntegerType, Range.upto(C));
183 Token.reset(Kind == 'i' ? MIToken::IntegerType : MIToken::ScalarType,
184 Range.upto(C));
183185 return C;
184186 }
185187
565567 return C.remaining();
566568 }
567569
568 if (Cursor R = maybeLexIntegerType(C, Token))
570 if (Cursor R = maybeLexIntegerOrScalarType(C, Token))
569571 return R.remaining();
570572 if (Cursor R = maybeLexMachineBasicBlock(C, Token, ErrorCallback))
571573 return R.remaining();
101101 NamedRegister,
102102 MachineBasicBlockLabel,
103103 MachineBasicBlock,
104 ScalarType,
104105 StackObject,
105106 FixedStackObject,
106107 NamedGlobalValue,
129129 bool parseIRConstant(StringRef::iterator Loc, StringRef Source,
130130 const Constant *&C);
131131 bool parseIRConstant(StringRef::iterator Loc, const Constant *&C);
132 bool parseIRType(StringRef::iterator Loc, StringRef Source, unsigned &Read,
133 Type *&Ty);
134 // \p MustBeSized defines whether or not \p Ty must be sized.
135 bool parseIRType(StringRef::iterator Loc, Type *&Ty, bool MustBeSized = true);
132 bool parseLowLevelType(StringRef::iterator Loc, LLT &Ty,
133 bool MustBeSized = true);
136134 bool parseTypedImmediateOperand(MachineOperand &Dest);
137135 bool parseFPImmediateOperand(MachineOperand &Dest);
138136 bool parseMBBReference(MachineBasicBlock *&MBB);
596594 if (Token.isError() || parseInstruction(OpCode, Flags))
597595 return true;
598596
599 Type *Ty = nullptr;
597 LLT Ty{};
600598 if (isPreISelGenericOpcode(OpCode)) {
601599 // For generic opcode, a type is mandatory.
602600 auto Loc = Token.location();
603 if (parseIRType(Loc, Ty))
601 if (parseLowLevelType(Loc, Ty))
604602 return true;
605603 }
606604
659657 // TODO: Check for extraneous machine operands.
660658 MI = MF.CreateMachineInstr(MCID, DebugLocation, /*NoImplicit=*/true);
661659 MI->setFlags(Flags);
662 if (Ty)
660 if (Ty.isValid())
663661 MI->setType(Ty);
664662 for (const auto &Operand : Operands)
665663 MI->addOperand(MF, Operand.Operand);
10271025 return false;
10281026 }
10291027
1030 bool MIParser::parseIRType(StringRef::iterator Loc, StringRef StringValue,
1031 unsigned &Read, Type *&Ty) {
1032 auto Source = StringValue.str(); // The source has to be null terminated.
1033 SMDiagnostic Err;
1034 Ty = parseTypeAtBeginning(Source.c_str(), Read, Err,
1035 *MF.getFunction()->getParent(), &PFS.IRSlots);
1036 if (!Ty)
1037 return error(Loc + Err.getColumnNo(), Err.getMessage());
1038 return false;
1039 }
1040
1041 bool MIParser::parseIRType(StringRef::iterator Loc, Type *&Ty,
1042 bool MustBeSized) {
1043 // At this point we enter in the IR world, i.e., to get the correct type,
1044 // we need to hand off the whole string, not just the current token.
1045 // E.g., <4 x i64> would give '<' as a token and there is not much
1046 // the IR parser can do with that.
1047 unsigned Read = 0;
1048 if (parseIRType(Loc, StringRef(Loc), Read, Ty))
1049 return true;
1050 // The type must be sized, otherwise there is not much the backend
1051 // can do with it.
1052 if (MustBeSized && !Ty->isSized())
1053 return error("expected a sized type");
1054 // The next token is Read characters from the Loc.
1055 // However, the current location is not Loc, but Loc + the length of Token.
1056 // Therefore, subtract the length of Token (range().end() - Loc) to the
1057 // number of characters to skip before the next token.
1058 lex(Read - (Token.range().end() - Loc));
1028 bool MIParser::parseLowLevelType(StringRef::iterator Loc, LLT &Ty,
1029 bool MustBeSized) {
1030 if (Token.is(MIToken::Identifier) && Token.stringValue() == "unsized") {
1031 if (MustBeSized)
1032 return error(Loc, "expected sN or for sized GlobalISel type");
1033 lex();
1034 Ty = LLT::unsized();
1035 return false;
1036 } else if (Token.is(MIToken::ScalarType)) {
1037 Ty = LLT::scalar(APSInt(Token.range().drop_front()).getZExtValue());
1038 lex();
1039 return false;
1040 }
1041
1042 // Now we're looking for a vector.
1043 if (Token.isNot(MIToken::less))
1044 return error(Loc, "expected unsized, sN or for GlobalISel type");
1045 lex();
1046
1047 if (Token.isNot(MIToken::IntegerLiteral))
1048 return error(Loc, "expected for vctor type");
1049 uint64_t NumElements = Token.integerValue().getZExtValue();
1050 lex();
1051
1052 if (Token.isNot(MIToken::Identifier) || Token.stringValue() != "x")
1053 return error(Loc, "expected '' for vector type");
1054 lex();
1055
1056 if (Token.isNot(MIToken::ScalarType))
1057 return error(Loc, "expected '' for vector type");
1058 uint64_t ScalarSize = APSInt(Token.range().drop_front()).getZExtValue();
1059 lex();
1060
1061 if (Token.isNot(MIToken::greater))
1062 return error(Loc, "expected '' for vector type");
1063 lex();
1064
1065 Ty = LLT::vector(NumElements, ScalarSize);
10591066 return false;
10601067 }
10611068
564564 OS << "frame-setup ";
565565 OS << TII->getName(MI.getOpcode());
566566 if (isPreISelGenericOpcode(MI.getOpcode())) {
567 assert(MI.getType() && "Generic instructions must have a type");
567 assert(MI.getType().isValid() && "Generic instructions must have a type");
568568 OS << ' ';
569 MI.getType()->print(OS, /*IsForDebug*/ false, /*NoDetails*/ true);
569 MI.getType().print(OS);
570570 }
571571 if (I < E)
572572 OS << ' ';
655655 debugLoc(std::move(dl))
656656 #ifdef LLVM_BUILD_GLOBAL_ISEL
657657 ,
658 Ty(nullptr)
658 Ty(LLT{})
659659 #endif
660660 {
661661 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
679679 MemRefs(MI.MemRefs), debugLoc(MI.getDebugLoc())
680680 #ifdef LLVM_BUILD_GLOBAL_ISEL
681681 ,
682 Ty(nullptr)
682 Ty(LLT{})
683683 #endif
684684 {
685685 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor");
709709 // The proper implementation is WIP and is tracked here:
710710 // PR26576.
711711 #ifndef LLVM_BUILD_GLOBAL_ISEL
712 void MachineInstr::setType(Type *Ty) {}
713
714 Type *MachineInstr::getType() const { return nullptr; }
712 void MachineInstr::setType(LLT Ty) {}
713
714 LLT MachineInstr::getType() const { return LLT{}; }
715715
716716 #else
717 void MachineInstr::setType(Type *Ty) {
718 assert((!Ty || isPreISelGenericOpcode(getOpcode())) &&
717 void MachineInstr::setType(LLT Ty) {
718 assert((!Ty.isValid() || isPreISelGenericOpcode(getOpcode())) &&
719719 "Non generic instructions are not supposed to be typed");
720720 this->Ty = Ty;
721721 }
722722
723 Type *MachineInstr::getType() const { return Ty; }
723 LLT MachineInstr::getType() const { return Ty; }
724724 #endif // LLVM_BUILD_GLOBAL_ISEL
725725
726726 /// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
17231723 else
17241724 OS << "UNKNOWN";
17251725
1726 if (getType()) {
1726 if (getType().isValid()) {
17271727 OS << ' ';
1728 getType()->print(OS, /*IsForDebug*/ false, /*NoDetails*/ true);
1728 getType().print(OS);
17291729 OS << ' ';
17301730 }
17311731
154154 void AArch64RegisterBankInfo::applyMappingImpl(
155155 const OperandsMapper &OpdMapper) const {
156156 switch (OpdMapper.getMI().getOpcode()) {
157 case TargetOpcode::G_ADD:
157158 case TargetOpcode::G_OR: {
158159 // Those ID must match getInstrAlternativeMappings.
159160 assert((OpdMapper.getInstrMapping().getID() == 1 ||
165166 llvm_unreachable("Don't know how to handle that operation");
166167 }
167168 }
169
170 RegisterBankInfo::InstructionMapping
171 AArch64RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
172 RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
173 if (Mapping.isValid())
174 return Mapping;
175
176 // As a top-level guess, vectors go in FPRs, scalars in GPRs. Obviously this
177 // won't work for normal floating-point types (or NZCV). When such
178 // instructions exist we'll need to look at the MI's opcode.
179 LLT Ty = MI.getType();
180 unsigned BankID;
181 if (Ty.isVector())
182 BankID = AArch64::FPRRegBankID;
183 else
184 BankID = AArch64::GPRRegBankID;
185
186 Mapping = InstructionMapping{1, 1, MI.getNumOperands()};
187 int Size = Ty.isSized() ? Ty.getSizeInBits() : 0;
188 for (unsigned Idx = 0; Idx < MI.getNumOperands(); ++Idx)
189 Mapping.setOperandMapping(Idx, Size, getRegBank(BankID));
190
191 return Mapping;
192 }
6363 /// Alternative in the sense different from getInstrMapping.
6464 InstructionMappings
6565 getInstrAlternativeMappings(const MachineInstr &MI) const override;
66
67 InstructionMapping getInstrMapping(const MachineInstr &MI) const override;
6668 };
6769 } // End llvm namespace.
6870 #endif
88 ; CHECK: name: addi64
99 ; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
1010 ; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = COPY %x1
11 ; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_ADD i64 [[ARG1]], [[ARG2]]
11 ; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_ADD s64 [[ARG1]], [[ARG2]]
1212 ; CHECK-NEXT: %x0 = COPY [[RES]]
1313 ; CHECK-NEXT: RET_ReallyLR implicit %x0
1414 define i64 @addi64(i64 %arg1, i64 %arg2) {
2727 ; CHECK-NEXT: successors: %[[END:[0-9a-zA-Z._-]+]]({{0x[a-f0-9]+ / 0x[a-f0-9]+}} = 100.00%)
2828 ;
2929 ; Check that we emit the correct branch.
30 ; CHECK: G_BR label %[[END]]
30 ; CHECK: G_BR unsized %[[END]]
3131 ;
3232 ; Check that end contains the return instruction.
3333 ; CHECK: [[END]]:
4242 ; CHECK: name: ori64
4343 ; CHECK: [[ARG1:%[0-9]+]](64) = COPY %x0
4444 ; CHECK-NEXT: [[ARG2:%[0-9]+]](64) = COPY %x1
45 ; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_OR i64 [[ARG1]], [[ARG2]]
45 ; CHECK-NEXT: [[RES:%[0-9]+]](64) = G_OR s64 [[ARG1]], [[ARG2]]
4646 ; CHECK-NEXT: %x0 = COPY [[RES]]
4747 ; CHECK-NEXT: RET_ReallyLR implicit %x0
4848 define i64 @ori64(i64 %arg1, i64 %arg2) {
5353 ; CHECK: name: ori32
5454 ; CHECK: [[ARG1:%[0-9]+]](32) = COPY %w0
5555 ; CHECK-NEXT: [[ARG2:%[0-9]+]](32) = COPY %w1
56 ; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_OR i32 [[ARG1]], [[ARG2]]
56 ; CHECK-NEXT: [[RES:%[0-9]+]](32) = G_OR s32 [[ARG1]], [[ARG2]]
5757 ; CHECK-NEXT: %w0 = COPY [[RES]]
5858 ; CHECK-NEXT: RET_ReallyLR implicit %w0
5959 define i32 @ori32(i32 %arg1, i32 %arg2) {
6767 body: |
6868 bb.0.entry:
6969 liveins: %x0
70 ; CHECK: %0(32) = G_ADD i32 %x0
71 %0(32) = G_ADD i32 %x0, %x0
70 ; CHECK: %0(32) = G_ADD s32 %w0
71 %0(32) = G_ADD s32 %w0, %w0
7272 ...
7373
7474 ---
8484 body: |
8585 bb.0.entry:
8686 liveins: %d0
87 ; CHECK: %0(32) = G_ADD <2 x i32> %d0
88 %0(32) = G_ADD <2 x i32> %d0, %d0
87 ; CHECK: %0(64) = G_ADD <2 x s32> %d0
88 %0(64) = G_ADD <2 x s32> %d0, %d0
8989 ...
9090
9191 ---
106106 liveins: %s0, %x0
107107 ; CHECK: %0(32) = COPY %s0
108108 ; CHECK-NEXT: %2(32) = COPY %0
109 ; CHECK-NEXT: %1(32) = G_ADD i32 %2, %x0
109 ; CHECK-NEXT: %1(32) = G_ADD s32 %2, %w0
110110 %0(32) = COPY %s0
111 %1(32) = G_ADD i32 %0, %x0
111 %1(32) = G_ADD s32 %0, %w0
112112 ...
113113
114114 # Check that we repair the assignment for %0 differently for both uses.
128128 ; CHECK: %0(32) = COPY %s0
129129 ; CHECK-NEXT: %2(32) = COPY %0
130130 ; CHECK-NEXT: %3(32) = COPY %0
131 ; CHECK-NEXT: %1(32) = G_ADD i32 %2, %3
131 ; CHECK-NEXT: %1(32) = G_ADD s32 %2, %3
132132 %0(32) = COPY %s0
133 %1(32) = G_ADD i32 %0, %0
133 %1(32) = G_ADD s32 %0, %0
134134 ...
135135
136136 ---
151151 bb.0.entry:
152152 liveins: %w0
153153 ; CHECK: %0(32) = COPY %w0
154 ; CHECK-NEXT: %2(32) = G_ADD i32 %0, %w0
154 ; CHECK-NEXT: %2(32) = G_ADD s32 %0, %w0
155155 ; CHECK-NEXT: %1(32) = COPY %2
156156 %0(32) = COPY %w0
157 %1(32) = G_ADD i32 %0, %w0
157 %1(32) = G_ADD s32 %0, %w0
158158 ...
159159
160160 ---
186186
187187 bb.1.then:
188188 successors: %bb.2.end
189 %3(32) = G_ADD i32 %0, %0
189 %3(32) = G_ADD s32 %0, %0
190190
191191 bb.2.end:
192192 %4(32) = PHI %0, %bb.0.entry, %3, %bb.1.then
210210 liveins: %w0, %s0
211211 ; CHECK: %0(32) = COPY %w0
212212 ; CHECK-NEXT: %2(32) = COPY %s0
213 ; CHECK-NEXT: %1(32) = G_ADD i32 %0, %2
213 ; CHECK-NEXT: %1(32) = G_ADD s32 %0, %2
214214 %0(32) = COPY %w0
215 %1(32) = G_ADD i32 %0, %s0
215 %1(32) = G_ADD s32 %0, %s0
216216 ...
217217
218218 ---
228228 bb.0.entry:
229229 liveins: %w0
230230 ; CHECK: %0(32) = COPY %w0
231 ; CHECK-NEXT: %1(32) = G_ADD i32 %0, %0
231 ; CHECK-NEXT: %1(32) = G_ADD s32 %0, %0
232232 ; CHECK-NEXT: %s0 = COPY %1
233233 %0(32) = COPY %w0
234 %s0 = G_ADD i32 %0, %0
234 %s0 = G_ADD s32 %0, %0
235235 ...
236236
237237 ---
270270 ; FAST-NEXT: %3(64) = COPY %0
271271 ; FAST-NEXT: %4(64) = COPY %1
272272 ; The mapping of G_OR is on FPR.
273 ; FAST-NEXT: %2(64) = G_OR <2 x i32> %3, %4
273 ; FAST-NEXT: %2(64) = G_OR <2 x s32> %3, %4
274274
275275 ; Greedy mode remapped the instruction on the GPR bank.
276 ; GREEDY-NEXT: %2(64) = G_OR <2 x i32> %0, %1
276 ; GREEDY-NEXT: %2(64) = G_OR <2 x s32> %0, %1
277277 %0(64) = COPY %x0
278278 %1(64) = COPY %x1
279 %2(64) = G_OR <2 x i32> %0, %1
279 %2(64) = G_OR <2 x s32> %0, %1
280280 ...
281281
282282 ---
316316 ; FAST-NEXT: %3(64) = COPY %0
317317 ; FAST-NEXT: %4(64) = COPY %1
318318 ; The mapping of G_OR is on FPR.
319 ; FAST-NEXT: %2(64) = G_OR <2 x i32> %3, %4
319 ; FAST-NEXT: %2(64) = G_OR <2 x s32> %3, %4
320320
321321 ; Greedy mode remapped the instruction on the GPR bank.
322 ; GREEDY-NEXT: %3(64) = G_OR <2 x i32> %0, %1
322 ; GREEDY-NEXT: %3(64) = G_OR <2 x s32> %0, %1
323323 ; We need to keep %2 into FPR because we do not know anything about it.
324324 ; GREEDY-NEXT: %2(64) = COPY %3
325325 %0(64) = COPY %x0
326326 %1(64) = COPY %x1
327 %2(64) = G_OR <2 x i32> %0, %1
328 ...
327 %2(64) = G_OR <2 x s32> %0, %1
328 ...
44
55 ; Tests for add.
66 ; CHECK: name: addi32
7 ; CHECK: G_ADD i32
7 ; CHECK: G_ADD s32
88 define i32 @addi32(i32 %arg1, i32 %arg2) {
99 %res = add i32 %arg1, %arg2
1010 ret i32 %res
99 body: |
1010 bb.0.entry:
1111 liveins: %edi
12 ; CHECK: [[@LINE+1]]:16: expected a sized type
13 %0 = G_ADD %opaque %edi, %edi
12 ; CHECK: [[@LINE+1]]:16: expected sN or <N x sM> for sized GlobalISel type
13 %0 = G_ADD unsized %edi, %edi
1414 ...
3232 body: |
3333 bb.0.entry:
3434 liveins: %edi
35 ; CHECK: %0(32) = G_ADD i32 %edi
36 %0(32) = G_ADD i32 %edi, %edi
37 ; CHECK: %1(64) = G_ADD <2 x i32> %edi
38 %1(64) = G_ADD <2 x i32> %edi, %edi
39 ; CHECK: %2(64) = G_ADD <2 x i32> %edi
40 %2(64) = G_ADD %type_alias %edi, %edi
35 ; CHECK: %0(32) = G_ADD s32 %edi
36 %0(32) = G_ADD s32 %edi, %edi
37 ; CHECK: %1(64) = G_ADD <2 x s32> %edi
38 %1(64) = G_ADD <2 x s32> %edi, %edi
39 ; CHECK: %2(64) = G_ADD s64 %edi
40 %2(64) = G_ADD s64 %edi, %edi
4141 ; G_ADD is actually not a valid operand for structure type,
4242 ; but that is the only one we have for now for testing.
43 ; CHECK: %3(64) = G_ADD { i32, i32 } %edi
44 %3(64) = G_ADD {i32, i32} %edi, %edi
45 ; CHECK: %4(48) = G_ADD %structure_alias %edi
46 %4(48) = G_ADD %structure_alias %edi, %edi
43 ; CHECK: %3(64) = G_ADD s64 %edi
44 %3(64) = G_ADD s64 %edi, %edi
45 ; CHECK: %4(48) = G_ADD s48 %edi
46 %4(48) = G_ADD s48 %edi, %edi
4747 ...