llvm.org GIT mirror llvm / 43dec7d
[AMDGPU, PowerPC, TableGen] Fix some Clang-tidy modernize and Include What You Use warnings; other minor fixes (NFC).
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@289282 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Eugene Zelenko
16 changed files with 264 additions and 170 deletions.
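The hunks below apply the same handful of clang-tidy modernize and include-what-you-use patterns across the 16 files: defaulted special member functions instead of empty bodies, nullptr instead of 0/NULL, default member initializers, explicit includes of what each file actually uses, and descriptive comments on namespace closings and include guards. As a rough, hypothetical sketch (the header, class, and member names here are invented, not taken from the commit), a typical header changes along these lines:

//===- ExampleLowering.h - hypothetical before/after sketch -----*- C++ -*-===//
//
// Illustrative only; nothing in this file comes from the commit itself.
//
//===----------------------------------------------------------------------===//

#ifndef EXAMPLE_LOWERING_H
#define EXAMPLE_LOWERING_H

#include <cstdint>   // IWYU: include exactly what this header uses
#include <vector>

namespace llvm {

class ExampleLowering {
public:
  ExampleLowering() = default;            // was: ExampleLowering() { }

  // Was: `virtual ~ExampleLowering();` with an empty out-of-line definition.
  virtual ~ExampleLowering() = default;   // modernize-use-equals-default

  // Was: `const uint8_t *Data;` set to 0 in every constructor.
  const uint8_t *Data = nullptr;          // nullptr + default member initializer

  std::vector<std::vector<int>> Table;    // was: std::vector<std::vector<int> >
};

} // end namespace llvm

#endif // EXAMPLE_LOWERING_H

The corresponding .cpp changes drop the now-empty out-of-line destructors and reorder/add includes so each translation unit names its own dependencies, as the per-file hunks that follow show.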
99 // Interface to describe a layout of a stack frame on a AMDGPU target machine.
1010 //
1111 //===----------------------------------------------------------------------===//
12
1213 #include "AMDGPUFrameLowering.h"
1314 #include "AMDGPURegisterInfo.h"
1415 #include "AMDGPUSubtarget.h"
15
16 #include "llvm/CodeGen/MachineFunction.h"
1617 #include "llvm/CodeGen/MachineFrameInfo.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/IR/Instructions.h"
18 #include "llvm/Support/MathExtras.h"
1919
2020 using namespace llvm;
2121 AMDGPUFrameLowering::AMDGPUFrameLowering(StackDirection D, unsigned StackAl,
2222 int LAO, unsigned TransAl)
2323 : TargetFrameLowering(D, StackAl, LAO, TransAl) { }
2424
25 AMDGPUFrameLowering::~AMDGPUFrameLowering() { }
25 AMDGPUFrameLowering::~AMDGPUFrameLowering() = default;
2626
2727 unsigned AMDGPUFrameLowering::getStackWidth(const MachineFunction &MF) const {
28
2928 // XXX: Hardcoding to 1 for now.
3029 //
3130 // I think the StackWidth should stored as metadata associated with the
10099
101100 return OffsetBytes / (getStackWidth(MF) * 4);
102101 }
103
1010 /// \brief Interface to describe a layout of a stack frame on an AMDGPU target.
1111 //
1212 //===----------------------------------------------------------------------===//
13
1314 #ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUFRAMELOWERING_H
1415 #define LLVM_LIB_TARGET_AMDGPU_AMDGPUFRAMELOWERING_H
1516
2627 public:
2728 AMDGPUFrameLowering(StackDirection D, unsigned StackAl, int LAO,
2829 unsigned TransAl = 1);
29 virtual ~AMDGPUFrameLowering();
30 ~AMDGPUFrameLowering() override;
3031
3132 /// \returns The number of 32-bit sub-registers that are used when storing
3233 /// values to the stack.
3940 return false;
4041 }
4142 };
42 } // namespace llvm
43 #endif
43
44 } // end namespace llvm
45
46 #endif // LLVM_LIB_TARGET_AMDGPU_AMDGPUFRAMELOWERING_H
1111 //
1212 //===----------------------------------------------------------------------===//
1313
14 #include "AMDGPU.h"
1415 #include "AMDGPUInstrInfo.h"
15 #include "AMDGPUIntrinsicInfo.h"
16 #include "AMDGPURegisterInfo.h"
1617 #include "AMDGPUISelLowering.h" // For AMDGPUISD
1718 #include "AMDGPUSubtarget.h"
19 #include "SIDefines.h"
20 #include "SIInstrInfo.h"
21 #include "SIRegisterInfo.h"
1822 #include "SIISelLowering.h"
1923 #include "SIMachineFunctionInfo.h"
24 #include "llvm/ADT/APInt.h"
25 #include "llvm/ADT/SmallVector.h"
26 #include "llvm/ADT/StringRef.h"
2027 #include "llvm/Analysis/ValueTracking.h"
2128 #include "llvm/CodeGen/FunctionLoweringInfo.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/PseudoSourceValue.h"
29 #include "llvm/CodeGen/ISDOpcodes.h"
30 #include "llvm/CodeGen/MachineFunction.h"
31 #include "llvm/CodeGen/MachineRegisterInfo.h"
32 #include "llvm/CodeGen/MachineValueType.h"
2433 #include "llvm/CodeGen/SelectionDAG.h"
2534 #include "llvm/CodeGen/SelectionDAGISel.h"
26 #include "llvm/IR/DiagnosticInfo.h"
35 #include "llvm/CodeGen/SelectionDAGNodes.h"
36 #include "llvm/CodeGen/ValueTypes.h"
37 #include "llvm/IR/BasicBlock.h"
38 #include "llvm/IR/Instruction.h"
39 #include "llvm/MC/MCInstrDesc.h"
40 #include "llvm/Support/Casting.h"
41 #include "llvm/Support/CodeGen.h"
42 #include "llvm/Support/ErrorHandling.h"
43 #include "llvm/Support/MathExtras.h"
44 #include
45 #include
46 #include
47 #include
2748
2849 using namespace llvm;
2950
3051 namespace llvm {
52
3153 class R600InstrInfo;
32 }
54
55 } // end namespace llvm
3356
3457 //===----------------------------------------------------------------------===//
3558 // Instruction Selector Implementation
4770 public:
4871 explicit AMDGPUDAGToDAGISel(TargetMachine &TM, CodeGenOpt::Level OptLevel)
4972 : SelectionDAGISel(TM, OptLevel) {}
50
51 virtual ~AMDGPUDAGToDAGISel();
73 ~AMDGPUDAGToDAGISel() override = default;
74
5275 bool runOnMachineFunction(MachineFunction &MF) override;
5376 void Select(SDNode *N) override;
5477 StringRef getPassName() const override;
148171 // Include the pieces autogenerated from the target description.
149172 #include "AMDGPUGenDAGISel.inc"
150173 };
174
151175 } // end anonymous namespace
152176
153177 /// \brief This pass converts a legalized DAG into a AMDGPU-specific
160184 bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
161185 Subtarget = &MF.getSubtarget();
162186 return SelectionDAGISel::runOnMachineFunction(MF);
163 }
164
165 AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
166187 }
167188
168189 bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N) const {
901922 Ptr = N2;
902923 VAddr = N3;
903924 } else {
904
905925 // (add N0, C1) -> offset
906926 VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
907927 Ptr = N0;
12041224
12051225 bool AMDGPUDAGToDAGISel::SelectSMRD(SDValue Addr, SDValue &SBase,
12061226 SDValue &Offset, bool &Imm) const {
1207
12081227 SDLoc SL(Addr);
12091228 if (CurDAG->isBaseWithConstantOffset(Addr)) {
12101229 SDValue N0 = Addr.getOperand(0);
14501469 CurDAG->SelectNodeTo(N, AMDGPU::S_CBRANCH_VCCNZ, MVT::Other,
14511470 N->getOperand(2), // Basic Block
14521471 VCC.getValue(0));
1453 return;
14541472 }
14551473
14561474 // This is here because there isn't a way to use the generated sub0_sub1 as the
15211539
15221540 bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
15231541 SDValue &SrcMods) const {
1524
15251542 unsigned Mods = 0;
15261543
15271544 Src = In;
2222 #include "SIISelLowering.h"
2323 #include "SIFrameLowering.h"
2424 #include "Utils/AMDGPUBaseInfo.h"
25 #include "llvm/ADT/Triple.h"
2526 #include "llvm/CodeGen/GlobalISel/GISelAccessor.h"
27 #include "llvm/CodeGen/MachineFunction.h"
2628 #include "llvm/CodeGen/SelectionDAGTargetInfo.h"
27 #include "llvm/Target/TargetSubtargetInfo.h"
29 #include "llvm/MC/MCInstrItineraries.h"
30 #include "llvm/Support/MathExtras.h"
31 #include
32 #include
33 #include
34 #include
2835
2936 #define GET_SUBTARGETINFO_HEADER
3037 #include "AMDGPUGenSubtargetInfo.inc"
3138
3239 namespace llvm {
3340
34 class SIMachineFunctionInfo;
3541 class StringRef;
3642
3743 class AMDGPUSubtarget : public AMDGPUGenSubtargetInfo {
124130 public:
125131 AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
126132 const TargetMachine &TM);
127 virtual ~AMDGPUSubtarget();
133 ~AMDGPUSubtarget() override;
134
128135 AMDGPUSubtarget &initializeSubtargetDependencies(const Triple &TT,
129136 StringRef GPU, StringRef FS);
130137
594601 unsigned getMaxNumSGPRs() const;
595602 };
596603
597 } // End namespace llvm
598
599 #endif
604 } // end namespace llvm
605
606 #endif // LLVM_LIB_TARGET_AMDGPU_AMDGPUSUBTARGET_H
1616
1717 #include "AMDGPUIntrinsicInfo.h"
1818 #include "AMDGPUSubtarget.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/StringMap.h"
21 #include "llvm/ADT/StringRef.h"
22 #include "llvm/Analysis/TargetTransformInfo.h"
23 #include "llvm/Support/CodeGen.h"
24 #include "llvm/Target/TargetMachine.h"
25 #include
1926
2027 namespace llvm {
2128
3643 StringRef FS, TargetOptions Options,
3744 Optional RM, CodeModel::Model CM,
3845 CodeGenOpt::Level OL);
39 ~AMDGPUTargetMachine();
46 ~AMDGPUTargetMachine() override;
4047
4148 const AMDGPUSubtarget *getSubtargetImpl() const;
4249 const AMDGPUSubtarget *getSubtargetImpl(const Function &) const override = 0;
9097 const SISubtarget *getSubtargetImpl(const Function &) const override;
9198 };
9299
93 } // End namespace llvm
100 } // end namespace llvm
94101
95 #endif
102 #endif // LLVM_LIB_TARGET_AMDGPU_AMDGPUTARGETMACHINE_H
1414 #include "Utils/AMDKernelCodeTUtils.h"
1515 #include "Utils/AMDGPUAsmUtils.h"
1616 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/APInt.h"
1818 #include "llvm/ADT/SmallBitVector.h"
1919 #include "llvm/ADT/SmallString.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/StringRef.h"
2022 #include "llvm/ADT/StringSwitch.h"
2123 #include "llvm/ADT/Twine.h"
2224 #include "llvm/CodeGen/MachineValueType.h"
2325 #include "llvm/MC/MCContext.h"
2426 #include "llvm/MC/MCExpr.h"
2527 #include "llvm/MC/MCInst.h"
28 #include "llvm/MC/MCInstrDesc.h"
2629 #include "llvm/MC/MCInstrInfo.h"
2730 #include "llvm/MC/MCParser/MCAsmLexer.h"
2831 #include "llvm/MC/MCParser/MCAsmParser.h"
32 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
2933 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
3034 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
3135 #include "llvm/MC/MCRegisterInfo.h"
3236 #include "llvm/MC/MCStreamer.h"
3337 #include "llvm/MC/MCSubtargetInfo.h"
34 #include "llvm/MC/MCSymbolELF.h"
38 #include "llvm/MC/MCSymbol.h"
39 #include "llvm/Support/Casting.h"
3540 #include "llvm/Support/Debug.h"
3641 #include "llvm/Support/ELF.h"
37 #include "llvm/Support/SourceMgr.h"
42 #include "llvm/Support/ErrorHandling.h"
43 #include "llvm/Support/MathExtras.h"
44 #include "llvm/Support/raw_ostream.h"
45 #include "llvm/Support/SMLoc.h"
3846 #include "llvm/Support/TargetRegistry.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/Support/MathExtras.h"
47 #include
48 #include
49 #include
50 #include
51 #include
52 #include
53 #include
54 #include
55 #include
4156
4257 using namespace llvm;
4358 using namespace llvm::AMDGPU;
4560 namespace {
4661
4762 class AMDGPUAsmParser;
48 struct OptionalOperand;
4963
5064 enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
5165
364378 return S->getSymbol().getName();
365379 }
366380
367
368381 StringRef getToken() const {
369382 assert(isToken());
370383
714727 //bool ProcessInstruction(MCInst &Inst);
715728
716729 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
717 OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
718 OperandVector &Operands,
719 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
720 bool (*ConvertResult)(int64_t&) = 0);
721 OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
722 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
723 OperandMatchResultTy parseStringWithPrefix(StringRef Prefix, StringRef &Value);
730 OperandMatchResultTy
731 parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
732 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
733 bool (*ConvertResult)(int64_t &) = nullptr);
734 OperandMatchResultTy
735 parseNamedBit(const char *Name, OperandVector &Operands,
736 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
737 OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
738 StringRef &Value);
724739
725740 OperandMatchResultTy parseImm(OperandVector &Operands);
726741 OperandMatchResultTy parseRegOrImm(OperandVector &Operands);
807822 bool (*ConvertResult)(int64_t&);
808823 };
809824
825 } // end anonymous namespace
826
810827 // May be called with integer type with equivalent bitwidth.
811828 static const fltSemantics *getFltSemantics(MVT VT) {
812829 switch (VT.getSizeInBits()) {
819836 default:
820837 llvm_unreachable("unsupported fp type");
821838 }
822 }
823
824839 }
825840
826841 //===----------------------------------------------------------------------===//
15551570 }
15561571
15571572 bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
1558
15591573 uint32_t Major;
15601574 uint32_t Minor;
15611575 uint32_t Stepping;
15721586 return false;
15731587 }
15741588
1575
15761589 if (ParseDirectiveMajorMinor(Major, Minor))
15771590 return true;
15781591
16231636 }
16241637
16251638 bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
1626
16271639 amd_kernel_code_t Header;
16281640 AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());
16291641
16301642 while (true) {
1631
16321643 // Lex EndOfStatement. This is in a while loop, because lexing a comment
16331644 // will set the current token to EndOfStatement.
16341645 while(getLexer().is(AsmToken::EndOfStatement))
19972008
19982009 void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
19992010 const OperandVector &Operands) {
2000
20012011 OptionalImmIndexMap OptionalIdx;
20022012
20032013 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
20212031 }
20222032
20232033 void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
2024
20252034 std::map OptionalIdx;
20262035 bool GDSOnly = false;
20272036
25162525 }
25172526 Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
25182527 }
2519 } while (0);
2528 } while (false);
25202529 }
25212530 break;
25222531 }
32233232 if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa) {
32243233 // V_NOP_sdwa has no optional sdwa arguments
32253234 switch (BasicInstType) {
3226 case SIInstrFlags::VOP1: {
3235 case SIInstrFlags::VOP1:
32273236 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
32283237 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
32293238 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
32303239 break;
3231 }
3232 case SIInstrFlags::VOP2: {
3240
3241 case SIInstrFlags::VOP2:
32333242 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
32343243 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
32353244 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
32363245 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
32373246 break;
3238 }
3239 case SIInstrFlags::VOPC: {
3247
3248 case SIInstrFlags::VOPC:
32403249 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
32413250 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
32423251 break;
3243 }
3252
32443253 default:
32453254 llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
32463255 }
32673276 #define GET_REGISTER_MATCHER
32683277 #define GET_MATCHER_IMPLEMENTATION
32693278 #include "AMDGPUGenAsmMatcher.inc"
3270
32713279
32723280 // This fuction should be defined after auto-generated include so that we have
32733281 // MatchClassKind enum defined
1717
1818 #include "llvm/ADT/ArrayRef.h"
1919 #include "llvm/MC/MCDisassembler/MCDisassembler.h"
20 #include "llvm/MC/MCDisassembler/MCRelocationInfo.h"
2021 #include "llvm/MC/MCDisassembler/MCSymbolizer.h"
22 #include
23 #include
24 #include
2125
2226 namespace llvm {
2327
3943 AMDGPUDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx) :
4044 MCDisassembler(STI, Ctx) {}
4145
42 ~AMDGPUDisassembler() {}
46 ~AMDGPUDisassembler() override = default;
4347
4448 DecodeStatus getInstruction(MCInst &MI, uint64_t &Size,
4549 ArrayRef Bytes, uint64_t Address,
5155 MCOperand createRegOperand(unsigned RegClassID, unsigned Val) const;
5256 MCOperand createSRegOperand(unsigned SRegClassID, unsigned Val) const;
5357
54 MCOperand errOperand(unsigned V, const llvm::Twine& ErrMsg) const;
58 MCOperand errOperand(unsigned V, const Twine& ErrMsg) const;
5559
5660 DecodeStatus tryDecodeInst(const uint8_t* Table,
5761 MCInst &MI,
8185 OPW_LAST_,
8286 OPW_FIRST_ = OPW32
8387 };
88
8489 unsigned getVgprClassId(const OpWidthTy Width) const;
8590 unsigned getSgprClassId(const OpWidthTy Width) const;
8691 unsigned getTtmpClassId(const OpWidthTy Width) const;
117122 uint64_t Address) override;
118123 };
119124
120 } // namespace llvm
125 } // end namespace llvm
121126
122 #endif //LLVM_LIB_TARGET_AMDGPU_DISASSEMBLER_AMDGPUDISASSEMBLER_H
127 #endif // LLVM_LIB_TARGET_AMDGPU_DISASSEMBLER_AMDGPUDISASSEMBLER_H
1818 R600FrameLowering(StackDirection D, unsigned StackAl, int LAO,
1919 unsigned TransAl = 1) :
2020 AMDGPUFrameLowering(D, StackAl, LAO, TransAl) {}
21 virtual ~R600FrameLowering();
21 ~R600FrameLowering() override;
2222
23 void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const {}
24 void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const {}
23 void emitPrologue(MachineFunction &MF,
24 MachineBasicBlock &MBB) const override {}
25 void emitEpilogue(MachineFunction &MF,
26 MachineBasicBlock &MBB) const override {}
2527 };
2628
27 }
29 } // end namespace llvm
2830
29 #endif
31 #endif // LLVM_LIB_TARGET_AMDGPU_R600FRAMELOWERING_H
1616 #include "AMDGPUIntrinsicInfo.h"
1717 #include "AMDGPUSubtarget.h"
1818 #include "R600Defines.h"
19 #include "R600FrameLowering.h"
1920 #include "R600InstrInfo.h"
2021 #include "R600MachineFunctionInfo.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "Utils/AMDGPUBaseInfo.h"
23 #include "llvm/ADT/APFloat.h"
24 #include "llvm/ADT/APInt.h"
25 #include "llvm/ADT/ArrayRef.h"
26 #include "llvm/ADT/DenseMap.h"
27 #include "llvm/ADT/SmallVector.h"
2228 #include "llvm/CodeGen/CallingConvLower.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
29 #include "llvm/CodeGen/DAGCombine.h"
30 #include "llvm/CodeGen/ISDOpcodes.h"
31 #include "llvm/CodeGen/MachineBasicBlock.h"
32 #include "llvm/CodeGen/MachineFunction.h"
33 #include "llvm/CodeGen/MachineInstr.h"
2434 #include "llvm/CodeGen/MachineInstrBuilder.h"
35 #include "llvm/CodeGen/MachineMemOperand.h"
2536 #include "llvm/CodeGen/MachineRegisterInfo.h"
37 #include "llvm/CodeGen/MachineValueType.h"
2638 #include "llvm/CodeGen/SelectionDAG.h"
27 #include "llvm/IR/Argument.h"
28 #include "llvm/IR/Function.h"
39 #include "llvm/IR/Constants.h"
40 #include "llvm/IR/DerivedTypes.h"
41 #include "llvm/Support/Casting.h"
42 #include "llvm/Support/Compiler.h"
43 #include "llvm/Support/ErrorHandling.h"
44 #include
45 #include
46 #include
47 #include
48 #include
2949
3050 using namespace llvm;
3151
7090 setLoadExtAction(ISD::EXTLOAD, MVT::v4i32, MVT::v4i1, Expand);
7191 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i1, Expand);
7292 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i1, Expand);
73
7493
7594 setOperationAction(ISD::STORE, MVT::i8, Custom);
7695 setOperationAction(ISD::STORE, MVT::i32, Custom);
191210
192211 setSchedulingPreference(Sched::Source);
193212
194
195213 setTargetDAGCombine(ISD::FP_ROUND);
196214 setTargetDAGCombine(ISD::FP_TO_SINT);
197215 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
213231 MachineBasicBlock *
214232 R600TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
215233 MachineBasicBlock *BB) const {
216 MachineFunction * MF = BB->getParent();
234 MachineFunction *MF = BB->getParent();
217235 MachineRegisterInfo &MRI = MF->getRegInfo();
218236 MachineBasicBlock::iterator I = MI;
219237 const R600InstrInfo *TII = getSubtarget()->getInstrInfo();
280298 .bitcastToAPInt()
281299 .getZExtValue());
282300 break;
301
283302 case AMDGPU::MOV_IMM_I32:
284303 TII->buildMovImm(*BB, I, MI.getOperand(0).getReg(),
285304 MI.getOperand(1).getImm());
286305 break;
306
287307 case AMDGPU::MOV_IMM_GLOBAL_ADDR: {
288308 //TODO: Perhaps combine this instruction with the next if possible
289309 auto MIB = TII->buildDefaultInstruction(
293313 MIB->getOperand(Idx) = MI.getOperand(1);
294314 break;
295315 }
316
296317 case AMDGPU::CONST_COPY: {
297318 MachineInstr *NewMI = TII->buildDefaultInstruction(
298319 *BB, MI, AMDGPU::MOV, MI.getOperand(0).getReg(), AMDGPU::ALU_CONST);
303324
304325 case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
305326 case AMDGPU::RAT_WRITE_CACHELESS_64_eg:
306 case AMDGPU::RAT_WRITE_CACHELESS_128_eg: {
327 case AMDGPU::RAT_WRITE_CACHELESS_128_eg:
307328 BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
308329 .addOperand(MI.getOperand(0))
309330 .addOperand(MI.getOperand(1))
310331 .addImm(isEOP(I)); // Set End of program bit
311332 break;
312 }
313 case AMDGPU::RAT_STORE_TYPED_eg: {
333
334 case AMDGPU::RAT_STORE_TYPED_eg:
314335 BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
315336 .addOperand(MI.getOperand(0))
316337 .addOperand(MI.getOperand(1))
317338 .addOperand(MI.getOperand(2))
318339 .addImm(isEOP(I)); // Set End of program bit
319340 break;
320 }
341
321342 case AMDGPU::BRANCH:
322343 BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::JUMP))
323344 .addOperand(MI.getOperand(0));
618639
619640 SDValue R600TargetLowering::vectorToVerticalVector(SelectionDAG &DAG,
620641 SDValue Vector) const {
621
622642 SDLoc DL(Vector);
623643 EVT VecVT = Vector.getValueType();
624644 EVT EltVT = VecVT.getVectorElementType();
625645 SmallVector Args;
626646
627 for (unsigned i = 0, e = VecVT.getVectorNumElements();
628 i != e; ++i) {
647 for (unsigned i = 0, e = VecVT.getVectorNumElements(); i != e; ++i) {
629648 Args.push_back(DAG.getNode(
630649 ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vector,
631650 DAG.getConstant(i, DL, getVectorIdxTy(DAG.getDataLayout()))));
636655
637656 SDValue R600TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
638657 SelectionDAG &DAG) const {
639
640658 SDLoc DL(Op);
641659 SDValue Vector = Op.getOperand(0);
642660 SDValue Index = Op.getOperand(1);
670688 SDValue R600TargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
671689 SDValue Op,
672690 SelectionDAG &DAG) const {
673
674691 GlobalAddressSDNode *GSD = cast(Op);
675692 if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
676693 return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);
11291146
11301147 // Private AS needs special fixes
11311148 if (Align < MemVT.getStoreSize() && (AS != AMDGPUAS::PRIVATE_ADDRESS) &&
1132 !allowsMisalignedMemoryAccesses(MemVT, AS, Align, NULL)) {
1149 !allowsMisalignedMemoryAccesses(MemVT, AS, Align, nullptr)) {
11331150 return expandUnalignedStore(StoreNode, DAG);
11341151 }
11351152
17091726 return BuildVector;
17101727 }
17111728
1712
17131729 //===----------------------------------------------------------------------===//
17141730 // Custom DAG Optimizations
17151731 //===----------------------------------------------------------------------===//
20202036 case AMDGPU::MOV_IMM_F32: {
20212037 unsigned ImmReg = AMDGPU::ALU_LITERAL_X;
20222038 uint64_t ImmValue = 0;
2023
20242039
20252040 if (Src.getMachineOpcode() == AMDGPU::MOV_IMM_F32) {
20262041 ConstantFPSDNode *FPC = dyn_cast(Src.getOperand(0));
1515 #define LLVM_LIB_TARGET_AMDGPU_R600MACHINESCHEDULER_H
1616
1717 #include "llvm/CodeGen/MachineScheduler.h"
18 #include
1819
1920 using namespace llvm;
2021
2425 struct R600RegisterInfo;
2526
2627 class R600SchedStrategy final : public MachineSchedStrategy {
27 const ScheduleDAGMILive *DAG;
28 const R600InstrInfo *TII;
29 const R600RegisterInfo *TRI;
30 MachineRegisterInfo *MRI;
28 const ScheduleDAGMILive *DAG = nullptr;
29 const R600InstrInfo *TII = nullptr;
30 const R600RegisterInfo *TRI = nullptr;
31 MachineRegisterInfo *MRI = nullptr;
3132
3233 enum InstKind {
3334 IDAlu,
6566 int OccupedSlotsMask;
6667
6768 public:
68 R600SchedStrategy() :
69 DAG(nullptr), TII(nullptr), TRI(nullptr), MRI(nullptr) {
70 }
71
72 virtual ~R600SchedStrategy() {}
69 R600SchedStrategy() = default;
70 ~R600SchedStrategy() override = default;
7371
7472 void initialize(ScheduleDAGMI *dag) override;
7573 SUnit *pickNode(bool &IsTopNode) override;
9694 void MoveUnits(std::vector &QSrc, std::vector &QDst);
9795 };
9896
99 } // namespace llvm
97 } // end namespace llvm
10098
101 #endif /* R600MACHINESCHEDULER_H_ */
99 #endif // LLVM_LIB_TARGET_AMDGPU_R600MACHINESCHEDULER_H
1212 #include "AMDGPUFrameLowering.h"
1313
1414 namespace llvm {
15
1516 class SIInstrInfo;
1617 class SIMachineFunctionInfo;
1718 class SIRegisterInfo;
2223 SIFrameLowering(StackDirection D, unsigned StackAl, int LAO,
2324 unsigned TransAl = 1) :
2425 AMDGPUFrameLowering(D, StackAl, LAO, TransAl) {}
25 ~SIFrameLowering() override {}
26 ~SIFrameLowering() override = default;
2627
2728 void emitPrologue(MachineFunction &MF,
2829 MachineBasicBlock &MBB) const override;
5758 void emitDebuggerPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const;
5859 };
5960
60 }
61 } // end namespace llvm
6162
62 #endif
63 #endif // LLVM_LIB_TARGET_AMDGPU_SIFRAMELOWERING_H
None //===-- SIMachineScheduler.h - SI Scheduler Interface -*- C++ -*-------===//
0 //===-- SIMachineScheduler.h - SI Scheduler Interface -----------*- C++ -*-===//
11 //
22 // The LLVM Compiler Infrastructure
33 //
1515 #define LLVM_LIB_TARGET_AMDGPU_SIMACHINESCHEDULER_H
1616
1717 #include "SIInstrInfo.h"
18 #include "llvm/CodeGen/MachineBasicBlock.h"
1819 #include "llvm/CodeGen/MachineScheduler.h"
1920 #include "llvm/CodeGen/RegisterPressure.h"
20
21 using namespace llvm;
21 #include "llvm/CodeGen/ScheduleDAG.h"
22 #include
23 #include
24 #include
25 #include
26 #include
27 #include
2228
2329 namespace llvm {
2430
9298 public:
9399 SIScheduleBlock(SIScheduleDAGMI *DAG, SIScheduleBlockCreator *BC,
94100 unsigned ID):
95 DAG(DAG), BC(BC), SUnits(), TopReadySUs(), ScheduledSUnits(),
96 TopRPTracker(TopPressure), Scheduled(false),
97 HighLatencyBlock(false), ID(ID),
98 Preds(), Succs(), NumHighLatencySuccessors(0) {};
99
100 ~SIScheduleBlock() {};
101 DAG(DAG), BC(BC), TopRPTracker(TopPressure), Scheduled(false),
102 HighLatencyBlock(false), ID(ID), NumHighLatencySuccessors(0) {}
103
104 ~SIScheduleBlock() = default;
101105
102106 unsigned getID() const { return ID; }
103107
145149
146150 bool isScheduled() { return Scheduled; }
147151
148
149152 // Needs the block to be scheduled inside
150153 // TODO: find a way to compute it.
151154 std::vector &getInternalAdditionnalRegUsage() {
160163 private:
161164 struct SISchedCandidate : SISchedulerCandidate {
162165 // The best SUnit candidate.
163 SUnit *SU;
166 SUnit *SU = nullptr;
164167
165168 unsigned SGPRUsage;
166169 unsigned VGPRUsage;
168171 unsigned LowLatencyOffset;
169172 bool HasLowLatencyNonWaitedParent;
170173
171 SISchedCandidate()
172 : SU(nullptr) {}
174 SISchedCandidate() = default;
173175
174176 bool isValid() const { return SU; }
175177
340342 SIScheduleBlockScheduler(SIScheduleDAGMI *DAG,
341343 SISchedulerBlockSchedulerVariant Variant,
342344 SIScheduleBlocks BlocksStruct);
343 ~SIScheduleBlockScheduler() {};
344
345 std::vector getBlocks() { return BlocksScheduled; };
346
347 unsigned getVGPRUsage() { return maxVregUsage; };
348 unsigned getSGPRUsage() { return maxSregUsage; };
345 ~SIScheduleBlockScheduler() = default;
346
347 std::vector getBlocks() { return BlocksScheduled; }
348
349 unsigned getVGPRUsage() { return maxVregUsage; }
350 unsigned getSGPRUsage() { return maxSregUsage; }
349351
350352 private:
351353 struct SIBlockSchedCandidate : SISchedulerCandidate {
352354 // The best Block candidate.
353 SIScheduleBlock *Block;
355 SIScheduleBlock *Block = nullptr;
354356
355357 bool IsHighLatency;
356358 int VGPRUsageDiff;
359361 unsigned LastPosHighLatParentScheduled;
360362 unsigned Height;
361363
362 SIBlockSchedCandidate()
363 : Block(nullptr) {}
364 SIBlockSchedCandidate() = default;
364365
365366 bool isValid() const { return Block; }
366367
408409 SIScheduleBlockCreator BlockCreator;
409410
410411 public:
411 SIScheduler(SIScheduleDAGMI *DAG) : DAG(DAG), BlockCreator(DAG) {};
412
413 ~SIScheduler() {};
412 SIScheduler(SIScheduleDAGMI *DAG) : DAG(DAG), BlockCreator(DAG) {}
413
414 ~SIScheduler() = default;
414415
415416 struct SIScheduleBlockResult
416417 scheduleVariant(SISchedulerBlockCreatorVariant BlockVariant,
444445 }
445446
446447 MachineBasicBlock *getBB() { return BB; }
447 MachineBasicBlock::iterator getCurrentTop() { return CurrentTop; };
448 MachineBasicBlock::iterator getCurrentBottom() { return CurrentBottom; };
448 MachineBasicBlock::iterator getCurrentTop() { return CurrentTop; }
449 MachineBasicBlock::iterator getCurrentBottom() { return CurrentBottom; }
449450 LiveIntervals *getLIS() { return LIS; }
450451 MachineRegisterInfo *getMRI() { return &MRI; }
451452 const TargetRegisterInfo *getTRI() { return TRI; }
452 SUnit& getEntrySU() { return EntrySU; };
453 SUnit& getExitSU() { return ExitSU; };
453 SUnit& getEntrySU() { return EntrySU; }
454 SUnit& getExitSU() { return ExitSU; }
454455
455456 void restoreSULinksLeft();
456457
458459 _Iterator End,
459460 unsigned &VgprUsage,
460461 unsigned &SgprUsage);
462
461463 std::set getInRegs() {
462464 std::set InRegs;
463465 for (const auto &RegMaskPair : RPTracker.getPressure().LiveInRegs) {
464466 InRegs.insert(RegMaskPair.RegUnit);
465467 }
466468 return InRegs;
467 };
469 }
468470
469471 unsigned getVGPRSetID() const { return VGPRSetID; }
470472 unsigned getSGPRSetID() const { return SGPRSetID; }
485487 std::vector BottomUpIndex2SU;
486488 };
487489
488 } // namespace llvm
489
490 #endif /* SIMACHINESCHEDULER_H_ */
490 } // end namespace llvm
491
492 #endif // LLVM_LIB_TARGET_AMDGPU_SIMACHINESCHEDULER_H
5252 #include "AMDGPUSubtarget.h"
5353 #include "SIInstrInfo.h"
5454 #include "SIMachineFunctionInfo.h"
55 #include "llvm/ADT/DenseMap.h"
56 #include "llvm/ADT/SmallVector.h"
57 #include "llvm/ADT/StringRef.h"
58 #include "llvm/CodeGen/LiveInterval.h"
59 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
60 #include "llvm/CodeGen/MachineBasicBlock.h"
5561 #include "llvm/CodeGen/MachineFunction.h"
5662 #include "llvm/CodeGen/MachineFunctionPass.h"
63 #include "llvm/CodeGen/MachineInstr.h"
5764 #include "llvm/CodeGen/MachineInstrBuilder.h"
65 #include "llvm/CodeGen/MachineOperand.h"
5866 #include "llvm/CodeGen/MachineRegisterInfo.h"
67 #include "llvm/CodeGen/SlotIndexes.h"
68 #include "llvm/IR/CallingConv.h"
69 #include "llvm/IR/DebugLoc.h"
70 #include "llvm/MC/MCRegisterInfo.h"
71 #include "llvm/Pass.h"
72 #include "llvm/Support/Debug.h"
73 #include "llvm/Support/raw_ostream.h"
74 #include "llvm/Target/TargetRegisterInfo.h"
75 #include
76 #include
5977
6078 using namespace llvm;
6179
7088
7189 struct PrintState {
7290 public:
91 int State;
92
7393 explicit PrintState(int State) : State(State) {}
74
75 int State;
7694 };
7795
7896 static raw_ostream &operator<<(raw_ostream &OS, const PrintState &PS) {
102120 MachineBasicBlock *MBB = nullptr;
103121 MachineInstr *MI = nullptr;
104122
105 WorkItem() {}
123 WorkItem() = default;
106124 WorkItem(MachineBasicBlock *MBB) : MBB(MBB) {}
107125 WorkItem(MachineInstr *MI) : MI(MI) {}
108126 };
161179 }
162180 };
163181
164 } // End anonymous namespace
182 } // end anonymous namespace
165183
166184 char SIWholeQuadMode::ID = 0;
167185
242242 return PPC::get_VSPLTI_elt(N, 1, *CurDAG);
243243 }]>;
244244 def vecspltisb : PatLeaf<(build_vector), [{
245 return PPC::get_VSPLTI_elt(N, 1, *CurDAG).getNode() != 0;
245 return PPC::get_VSPLTI_elt(N, 1, *CurDAG).getNode() != nullptr;
246246 }], VSPLTISB_get_imm>;
247247
248248 // VSPLTISH_get_imm xform function: convert build_vector to VSPLTISH imm.
250250 return PPC::get_VSPLTI_elt(N, 2, *CurDAG);
251251 }]>;
252252 def vecspltish : PatLeaf<(build_vector), [{
253 return PPC::get_VSPLTI_elt(N, 2, *CurDAG).getNode() != 0;
253 return PPC::get_VSPLTI_elt(N, 2, *CurDAG).getNode() != nullptr;
254254 }], VSPLTISH_get_imm>;
255255
256256 // VSPLTISW_get_imm xform function: convert build_vector to VSPLTISW imm.
258258 return PPC::get_VSPLTI_elt(N, 4, *CurDAG);
259259 }]>;
260260 def vecspltisw : PatLeaf<(build_vector), [{
261 return PPC::get_VSPLTI_elt(N, 4, *CurDAG).getNode() != 0;
261 return PPC::get_VSPLTI_elt(N, 4, *CurDAG).getNode() != nullptr;
262262 }], VSPLTISW_get_imm>;
263263
264264 //===----------------------------------------------------------------------===//
1919 //===----------------------------------------------------------------------===//
2020
2121 #define DEBUG_TYPE "ppc-loop-preinc-prep"
22
2223 #include "PPC.h"
24 #include "PPCSubtarget.h"
2325 #include "PPCTargetMachine.h"
2426 #include "llvm/ADT/DepthFirstIterator.h"
25 #include "llvm/ADT/STLExtras.h"
27 #include "llvm/ADT/SmallPtrSet.h"
2628 #include "llvm/ADT/SmallSet.h"
27 #include "llvm/ADT/Statistic.h"
28 #include "llvm/Analysis/CodeMetrics.h"
29 #include "llvm/Analysis/InstructionSimplify.h"
29 #include "llvm/ADT/SmallVector.h"
3030 #include "llvm/Analysis/LoopInfo.h"
3131 #include "llvm/Analysis/ScalarEvolution.h"
3232 #include "llvm/Analysis/ScalarEvolutionExpander.h"
3333 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
34 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/IR/BasicBlock.h"
3535 #include "llvm/IR/CFG.h"
3636 #include "llvm/IR/Dominators.h"
37 #include "llvm/IR/Function.h"
37 #include "llvm/IR/Instruction.h"
38 #include "llvm/IR/Instructions.h"
3839 #include "llvm/IR/IntrinsicInst.h"
3940 #include "llvm/IR/Module.h"
41 #include "llvm/IR/Value.h"
42 #include "llvm/Pass.h"
43 #include "llvm/Support/Casting.h"
4044 #include "llvm/Support/CommandLine.h"
4145 #include "llvm/Support/Debug.h"
4246 #include "llvm/Transforms/Scalar.h"
4347 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
4448 #include "llvm/Transforms/Utils/Local.h"
4549 #include "llvm/Transforms/Utils/LoopUtils.h"
46 #include "llvm/Transforms/Utils/ValueMapper.h"
50 #include
51 #include
52 #include
53
4754 using namespace llvm;
4855
4956 // By default, we limit this to creating 16 PHIs (which is a little over half
5360 cl::desc("Potential PHI threshold for PPC preinc loop prep"));
5461
5562 namespace llvm {
63
5664 void initializePPCLoopPreIncPrepPass(PassRegistry&);
57 }
65
66 } // end namespace llvm
5867
5968 namespace {
6069
6170 class PPCLoopPreIncPrep : public FunctionPass {
6271 public:
6372 static char ID; // Pass ID, replacement for typeid
73
6474 PPCLoopPreIncPrep() : FunctionPass(ID), TM(nullptr) {
6575 initializePPCLoopPreIncPrepPass(*PassRegistry::getPassRegistry());
6676 }
8898 ScalarEvolution *SE;
8999 bool PreserveLCSSA;
90100 };
91 }
101
102 } // end anonymous namespace
92103
93104 char PPCLoopPreIncPrep::ID = 0;
94105 static const char *name = "Prepare loop for pre-inc. addressing modes";
102113 }
103114
104115 namespace {
116
105117 struct BucketElement {
106118 BucketElement(const SCEVConstant *O, Instruction *I) : Offset(O), Instr(I) {}
107119 BucketElement(Instruction *I) : Offset(nullptr), Instr(I) {}
117129 const SCEV *BaseSCEV;
118130 SmallVector Elements;
119131 };
120 }
132
133 } // end anonymous namespace
121134
122135 static bool IsPtrInBounds(Value *BasePtr) {
123136 Value *StrippedBasePtr = BasePtr;
139152 return IMemI->getArgOperand(0);
140153 }
141154
142 return 0;
155 return nullptr;
143156 }
144157
145158 bool PPCLoopPreIncPrep::runOnFunction(Function &F) {
393406 Instruction *PtrIP = dyn_cast(Ptr);
394407 if (PtrIP && isa(NewBasePtr) &&
395408 cast(NewBasePtr)->getParent() == PtrIP->getParent())
396 PtrIP = 0;
409 PtrIP = nullptr;
397410 else if (isa(PtrIP))
398411 PtrIP = &*PtrIP->getParent()->getFirstInsertionPt();
399412 else if (!PtrIP)
436449
437450 return MadeChange;
438451 }
439
1313 #include "CodeGenTarget.h"
1414 #include "CodeGenSchedule.h"
1515 #include "llvm/ADT/SmallPtrSet.h"
16 #include "llvm/ADT/STLExtras.h"
1617 #include "llvm/ADT/StringExtras.h"
18 #include "llvm/ADT/StringRef.h"
1719 #include "llvm/MC/MCInstrItineraries.h"
1820 #include "llvm/MC/MCSchedule.h"
1921 #include "llvm/MC/SubtargetFeature.h"
2628 #include
2729 #include
2830 #include
31 #include
2932 #include
3033 #include
3134 #include
4144 // The SchedClassDesc table indexes into a global write resource table, write
4245 // latency table, and read advance table.
4346 struct SchedClassTables {
44 std::vector > ProcSchedClasses;
47 std::vector> ProcSchedClasses;
4548 std::vector WriteProcResources;
4649 std::vector WriteLatencies;
4750 std::vector WriterNames;
8083 Record *ItinData,
8184 std::string &ItinString, unsigned NOperandCycles);
8285 void EmitStageAndOperandCycleData(raw_ostream &OS,
83 std::vector >
86 std::vector>
8487 &ProcItinLists);
8588 void EmitItineraries(raw_ostream &OS,
86 std::vector >
89 std::vector>
8790 &ProcItinLists);
8891 void EmitProcessorProp(raw_ostream &OS, const Record *R, StringRef Name,
8992 char Separator);
356359 //
357360 void SubtargetEmitter::
358361 EmitStageAndOperandCycleData(raw_ostream &OS,
359 std::vector >
362 std::vector>
360363 &ProcItinLists) {
361
362364 // Multiple processor models may share an itinerary record. Emit it once.
363365 SmallPtrSet ItinsDefSet;
364366
497499 int NumUOps = ItinData ? ItinData->getValueAsInt("NumMicroOps") : 0;
498500 InstrItinerary Intinerary = { NumUOps, FindStage, FindStage + NStages,
499501 FindOperandCycle,
500 FindOperandCycle + NOperandCycles};
502 FindOperandCycle + NOperandCycles };
501503
502504 // Inject - empty slots will be 0, 0
503505 ItinList[SchedClassIdx] = Intinerary;
529531 //
530532 void SubtargetEmitter::
531533 EmitItineraries(raw_ostream &OS,
532 std::vector > &ProcItinLists) {
533
534 std::vector> &ProcItinLists) {
534535 // Multiple processor models may share an itinerary record. Emit it once.
535536 SmallPtrSet ItinsDefSet;
536537
537538 // For each processor's machine model
538 std::vector >::iterator
539 std::vector>::iterator
539540 ProcItinListsIter = ProcItinLists.begin();
540541 for (CodeGenSchedModels::ProcIter PI = SchedModels.procModelBegin(),
541542 PE = SchedModels.procModelEnd(); PI != PE; ++PI, ++ProcItinListsIter) {
12391240 << "#endif\n";
12401241
12411242 if (SchedModels.hasItineraries()) {
1242 std::vector > ProcItinLists;
1243 std::vector> ProcItinLists;
12431244 // Emit the stage data
12441245 EmitStageAndOperandCycleData(OS, ProcItinLists);
12451246 EmitItineraries(OS, ProcItinLists);
14231424 << Target << "WriteProcResTable, "
14241425 << Target << "WriteLatencyTable, "
14251426 << Target << "ReadAdvanceTable, ";
1427 OS << '\n'; OS.indent(22);
14261428 if (SchedModels.hasItineraries()) {
1427 OS << '\n'; OS.indent(22);
14281429 OS << Target << "Stages, "
14291430 << Target << "OperandCycles, "
14301431 << Target << "ForwardingPaths";
14311432 } else
1432 OS << "0, 0, 0";
1433 OS << "nullptr, nullptr, nullptr";
14331434 OS << ");\n}\n\n";
14341435
14351436 OS << "} // end namespace llvm\n\n";
15091510 << Target << "OperandCycles, "
15101511 << Target << "ForwardingPaths";
15111512 } else
1512 OS << "0, 0, 0";
1513 OS << "nullptr, nullptr, nullptr";
15131514 OS << ") {}\n\n";
15141515
15151516 EmitSchedModelHelpers(ClassName, OS);