//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for SI+ GPUs.
//
//===----------------------------------------------------------------------===//

15#include "AMDGPUTargetMachine.h"
16#include "AMDGPU.h"
17#include "AMDGPUAliasAnalysis.h"
21#include "AMDGPUIGroupLP.h"
22#include "AMDGPUISelDAGToDAG.h"
23#include "AMDGPUMacroFusion.h"
24#include "AMDGPURegBankSelect.h"
25#include "AMDGPUSplitModule.h"
30#include "GCNSchedStrategy.h"
31#include "GCNVOPDUtils.h"
32#include "R600.h"
34#include "R600TargetMachine.h"
36#include "SIMachineScheduler.h"
48#include "llvm/CodeGen/Passes.h"
51#include "llvm/IR/IntrinsicsAMDGPU.h"
52#include "llvm/IR/PassManager.h"
58#include "llvm/Transforms/IPO.h"
68#include <optional>
69
70using namespace llvm;
71using namespace llvm::PatternMatch;
72

namespace {
class SGPRRegisterRegAlloc : public RegisterRegAllocBase<SGPRRegisterRegAlloc> {
public:
  SGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

class VGPRRegisterRegAlloc : public RegisterRegAllocBase<VGPRRegisterRegAlloc> {
public:
  VGPRRegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
      : RegisterRegAllocBase(N, D, C) {}
};

static bool onlyAllocateSGPRs(const TargetRegisterInfo &TRI,
                              const TargetRegisterClass &RC) {
  return static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(&RC);
}

static bool onlyAllocateVGPRs(const TargetRegisterInfo &TRI,
                              const TargetRegisterClass &RC) {
  return !static_cast<const SIRegisterInfo &>(TRI).isSGPRClass(&RC);
}

/// -{sgpr|vgpr}-regalloc=... command line option.
static FunctionPass *useDefaultRegisterAllocator() { return nullptr; }

/// A dummy default pass factory indicates whether the register allocator is
/// overridden on the command line.
static llvm::once_flag InitializeDefaultSGPRRegisterAllocatorFlag;
static llvm::once_flag InitializeDefaultVGPRRegisterAllocatorFlag;

static SGPRRegisterRegAlloc
defaultSGPRRegAlloc("default",
                    "pick SGPR register allocator based on -O option",
                    useDefaultRegisterAllocator);

static cl::opt<SGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<SGPRRegisterRegAlloc>>
SGPRRegAlloc("sgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for SGPRs"));

static cl::opt<VGPRRegisterRegAlloc::FunctionPassCtor, false,
               RegisterPassParser<VGPRRegisterRegAlloc>>
VGPRRegAlloc("vgpr-regalloc", cl::Hidden, cl::init(&useDefaultRegisterAllocator),
             cl::desc("Register allocator to use for VGPRs"));

static void initializeDefaultSGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = SGPRRegAlloc;
    SGPRRegisterRegAlloc::setDefault(SGPRRegAlloc);
  }
}

static void initializeDefaultVGPRRegisterAllocatorOnce() {
  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();

  if (!Ctor) {
    Ctor = VGPRRegAlloc;
    VGPRRegisterRegAlloc::setDefault(VGPRRegAlloc);
  }
}

static FunctionPass *createBasicSGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createGreedySGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateSGPRs);
}

static FunctionPass *createFastSGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

static FunctionPass *createBasicVGPRRegisterAllocator() {
  return createBasicRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createGreedyVGPRRegisterAllocator() {
  return createGreedyRegisterAllocator(onlyAllocateVGPRs);
}

static FunctionPass *createFastVGPRRegisterAllocator() {
  return createFastRegisterAllocator(onlyAllocateVGPRs, true);
}

static SGPRRegisterRegAlloc basicRegAllocSGPR(
    "basic", "basic register allocator", createBasicSGPRRegisterAllocator);
static SGPRRegisterRegAlloc greedyRegAllocSGPR(
    "greedy", "greedy register allocator", createGreedySGPRRegisterAllocator);

static SGPRRegisterRegAlloc fastRegAllocSGPR(
    "fast", "fast register allocator", createFastSGPRRegisterAllocator);

static VGPRRegisterRegAlloc basicRegAllocVGPR(
    "basic", "basic register allocator", createBasicVGPRRegisterAllocator);
static VGPRRegisterRegAlloc greedyRegAllocVGPR(
    "greedy", "greedy register allocator", createGreedyVGPRRegisterAllocator);

static VGPRRegisterRegAlloc fastRegAllocVGPR(
    "fast", "fast register allocator", createFastVGPRRegisterAllocator);
} // end anonymous namespace
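
// Example (illustrative, not from the upstream file): with the registrations
// above, the split allocators can be selected independently on the llc
// command line, e.g.
//   llc -mtriple=amdgcn -sgpr-regalloc=greedy -vgpr-regalloc=fast in.ll
// Leaving either option at "default" defers to the -O level through the
// call_once initializers above.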

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool>
OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden,
            cl::desc("Run pre-RA exec mask optimizations"),
            cl::init(true));

static cl::opt<bool>
    LowerCtorDtor("amdgpu-lower-global-ctor-dtor",
                  cl::desc("Lower GPU ctor / dtors to globals on the device."),
                  cl::init(true), cl::Hidden);

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> RemoveIncompatibleFunctions(
    "amdgpu-enable-remove-incompatible-functions", cl::Hidden,
    cl::desc("Enable removal of functions when they "
             "use features not supported by the target GPU"),
    cl::init(true));

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(true));

// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run late CFG structurizer
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

// Disable structurizer-based control-flow lowering in order to test convergence
// control tokens. This should eventually be replaced by the wave-transform.
static cl::opt<bool, true> DisableStructurizer(
    "amdgpu-disable-structurizer",
    cl::desc("Disable structurizer for experiments; produces unusable code"),
    cl::location(AMDGPUTargetMachine::DisableStructurizer), cl::ReallyHidden);

// Enable lib calls simplifications
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableRegReassign(
  "amdgpu-reassign-regs",
  cl::desc("Enable register reassign optimizations on gfx10+"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> OptVGPRLiveRange(
    "amdgpu-opt-vgpr-liverange",
    cl::desc("Enable VGPR liverange optimizations for if-else structure"),
    cl::init(true), cl::Hidden);

static cl::opt<ScanOptions> AMDGPUAtomicOptimizerStrategy(
    "amdgpu-atomic-optimizer-strategy",
    cl::desc("Select DPP or Iterative strategy for scan"),
    cl::init(ScanOptions::Iterative),
    cl::values(
        clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"),
        clEnumValN(ScanOptions::Iterative, "Iterative",
                   "Use Iterative approach for scan"),
        clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")));

// Enable Mode register optimization
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);

// Enable GFX11.5+ s_singleuse_vdst insertion
static cl::opt<bool>
    EnableInsertSingleUseVDST("amdgpu-enable-single-use-vdst",
                              cl::desc("Enable s_singleuse_vdst insertion"),
                              cl::init(false), cl::Hidden);

// Enable GFX11+ s_delay_alu insertion
static cl::opt<bool>
    EnableInsertDelayAlu("amdgpu-enable-delay-alu",
                         cl::desc("Enable s_delay_alu insertion"),
                         cl::init(true), cl::Hidden);

// Enable GFX11+ VOPD
static cl::opt<bool>
    EnableVOPD("amdgpu-enable-vopd",
               cl::desc("Enable VOPD, dual issue of VALU in wave32"),
               cl::init(true), cl::Hidden);

// Option is used in lit tests to prevent deadcoding of patterns inspected.
static cl::opt<bool>
EnableDCEInRA("amdgpu-dce-in-ra",
    cl::init(true), cl::Hidden,
    cl::desc("Enable machine DCE inside regalloc"));

static cl::opt<bool> EnableSetWavePriority("amdgpu-set-wave-priority",
                                           cl::desc("Adjust wave priority"),
                                           cl::init(false), cl::Hidden);

static cl::opt<bool> EnableScalarIRPasses(
  "amdgpu-scalar-ir-passes",
  cl::desc("Enable scalar IR passes"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableStructurizerWorkarounds(
    "amdgpu-enable-structurizer-workarounds",
    cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool, true> EnableLowerModuleLDS(
    "amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"),
    cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePreRAOptimizations(
    "amdgpu-enable-pre-ra-optimizations",
    cl::desc("Enable Pre-RA optimizations pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePromoteKernelArguments(
    "amdgpu-enable-promote-kernel-arguments",
    cl::desc("Enable promotion of flat kernel pointer arguments to global"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> EnableImageIntrinsicOptimizer(
    "amdgpu-enable-image-intrinsic-optimizer",
    cl::desc("Enable image intrinsic optimizer pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool>
    EnableLoopPrefetch("amdgpu-loop-prefetch",
                       cl::desc("Enable loop data prefetch on AMDGPU"),
                       cl::Hidden, cl::init(false));

static cl::opt<bool> EnableMaxIlpSchedStrategy(
    "amdgpu-enable-max-ilp-scheduling-strategy",
    cl::desc("Enable scheduling strategy to maximize ILP for a single wave."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> EnableRewritePartialRegUses(
    "amdgpu-enable-rewrite-partial-reg-uses",
    cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnableHipStdPar(
  "amdgpu-enable-hipstdpar",
  cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false),
  cl::Hidden);

extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheR600Target());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  // The long list of initialize*Pass(*PassRegistry::getPassRegistry()) calls
  // that registers each individual AMDGPU/R600 codegen pass was elided from
  // this listing.
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return std::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}

static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  DAG->addMutation(createAMDGPUExportClusteringDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createGCNMaxILPMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
      new GCNScheduleDAGMILive(C, std::make_unique<GCNMaxILPSchedStrategy>(C));
  DAG->addMutation(createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::Initial));
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  auto DAG = new GCNIterativeScheduler(
      C, GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
                                   GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  auto DAG = new GCNIterativeScheduler(C,
                                       GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
    GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp",
                           createGCNMaxILPMachineScheduler);

static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry(
    "gcn-iterative-max-occupancy-experimental",
    "Run GCN scheduler to maximize occupancy (experimental)",
    createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry GCNMinRegSchedRegistry(
    "gcn-iterative-minreg",
    "Run GCN iterative scheduler for minimal register usage (experimental)",
    createMinRegScheduler);

static MachineSchedRegistry GCNILPSchedRegistry(
    "gcn-iterative-ilp",
    "Run GCN iterative scheduler for ILP scheduling (experimental)",
    createIterativeILPMachineScheduler);
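
// Example (illustrative): MachineSchedRegistry entries are selected through
// the generic machine-scheduler flag, so the schedulers above can be chosen
// by name, e.g.
//   llc -mtriple=amdgcn -misched=gcn-max-occupancy in.ll
//   llc -mtriple=amdgcn -misched=gcn-iterative-ilp in.ll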

static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat. 160-bit non-integral fat buffer pointers that include a 128-bit
  // buffer descriptor and a 32-bit offset, which are indexed by 32-bit values
  // (address space 7), and 128-bit non-integral buffer resources (address
  // space 8) which cannot be non-trivially accessed by LLVM memory operations
  // like getelementptr.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-p7:160:256:256:32-p8:128:128-p9:192:256:256:32-i64:64-v16:16-v24:32-"
         "v32:32-v48:64-v96:"
         "128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-"
         "G1-ni:7:8:9";
}
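
// Reading the GCN data layout above: a pointer component such as
// "p7:160:256:256:32" declares address space 7 pointers as 160 bits in
// memory with 256-bit ABI and preferred alignment and a 32-bit index width,
// matching the 128-bit buffer descriptor plus 32-bit offset described in the
// comment; "ni:7:8:9" marks address spaces 7, 8 and 9 as non-integral.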

static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT,
                                               StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  // Need to default to a target with flat support for HSA.
  if (TT.getArch() == Triple::amdgcn)
    return TT.getOS() == Triple::AMDHSA ? "generic-hsa" : "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(std::optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         const TargetOptions &Options,
                                         std::optional<Reloc::Model> RM,
                                         std::optional<CodeModel::Model> CM,
                                         CodeGenOptLevel OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                        FS, Options, getEffectiveRelocModel(RM),
                        getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
      TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
  if (TT.getArch() == Triple::amdgcn) {
    if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize64"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave64));
    else if (getMCSubtargetInfo()->checkFeatures("+wavefrontsize32"))
      MRI.reset(llvm::createGCNMCRegisterInfo(AMDGPUDwarfFlavour::Wave32));
  }
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;
bool AMDGPUTargetMachine::EnableLowerModuleLDS = true;
bool AMDGPUTargetMachine::DisableStructurizer = false;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.isValid() ? GPUAttr.getValueAsString() : getTargetCPU();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.isValid() ? FSAttr.getValueAsString()
                          : getTargetFeatureString();
}

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || F->getName().starts_with("__asan_") ||
           F->getName().starts_with("__sanitizer_") ||
           AMDGPU::isEntryFunctionCC(F->getCallingConv());

  GV.removeDeadConstantUsers();
  return !GV.use_empty();
}
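
// Example (illustrative IR): with -amdgpu-internalize-symbols, a module such
// as
//   define amdgpu_kernel void @k() { ... }  ; entry calling convention
//   define void @helper() { ... }           ; ordinary device function
// keeps @k externally visible (entry functions are preserved by the predicate
// above), while @helper is internalized and, if unused, removed by the
// GlobalDCE run that follows internalization.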

void AMDGPUTargetMachine::registerDefaultAliasAnalyses(AAManager &AAM) {
  AAM.registerFunctionAnalysis<AMDGPUAA>();
}

static Expected<ScanOptions>
parseAMDGPUAtomicOptimizerStrategy(StringRef Params) {
  if (Params.empty())
    return ScanOptions::Iterative;
  Params.consume_front("strategy=");
  auto Result = StringSwitch<std::optional<ScanOptions>>(Params)
                    .Case("dpp", ScanOptions::DPP)
                    .Cases("iterative", "", ScanOptions::Iterative)
                    .Case("none", ScanOptions::None)
                    .Default(std::nullopt);
  if (Result)
    return *Result;
  return make_error<StringError>("invalid parameter", inconvertibleErrorCode());
}
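
// Example (hedged; new pass manager pass-parameter syntax): the parser above
// consumes the parameter string of the pass name, e.g.
//   opt -passes='amdgpu-atomic-optimizer<strategy=dpp>' in.ll
// An empty parameter list selects the iterative strategy.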

Error AMDGPUTargetMachine::buildCodeGenPipeline(
    ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut,
    CodeGenFileType FileType, const CGPassBuilderOption &Opts,
    PassInstrumentationCallbacks *PIC) {
  AMDGPUCodeGenPassBuilder CGPB(*this, Opts, PIC);
  return CGPB.buildPipeline(MPM, Out, DwoOut, FileType);
}

void AMDGPUTargetMachine::registerPassBuilderCallbacks(
    PassBuilder &PB, bool PopulateClassToPassNames) {

#define GET_PASS_REGISTRY "AMDGPUPassRegistry.def"
#include "llvm/Passes/TargetPassRegistry.inc"

  PB.registerPipelineStartEPCallback(
      [](ModulePassManager &PM, OptimizationLevel Level) {
        FunctionPassManager FPM;
        PM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
        if (EnableHipStdPar)
          PM.addPass(HipStdParAcceleratorCodeSelectionPass());
      });

  PB.registerPipelineEarlySimplificationEPCallback(
      [](ModulePassManager &PM, OptimizationLevel Level) {
        PM.addPass(AMDGPUPrintfRuntimeBindingPass());

        if (Level == OptimizationLevel::O0)
          return;

        PM.addPass(AMDGPUUnifyMetadataPass());

        if (InternalizeSymbols) {
          PM.addPass(InternalizePass(mustPreserveGV));
          PM.addPass(GlobalDCEPass());
        }

        if (EarlyInlineAll && !EnableFunctionCalls)
          PM.addPass(AMDGPUAlwaysInlinePass());
      });

  PB.registerPeepholeEPCallback(
      [](FunctionPassManager &FPM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FPM.addPass(AMDGPUUseNativeCallsPass());
        if (EnableLibCallSimplify)
          FPM.addPass(AMDGPUSimplifyLibCallsPass());
      });

  PB.registerCGSCCOptimizerLateEPCallback(
      [this](CGSCCPassManager &PM, OptimizationLevel Level) {
        if (Level == OptimizationLevel::O0)
          return;

        FunctionPassManager FPM;

        // Add promote kernel arguments pass to the opt pipeline right before
        // infer address spaces which is needed to do actual address space
        // rewriting.
        if (Level.getSpeedupLevel() > OptimizationLevel::O1.getSpeedupLevel() &&
            EnablePromoteKernelArguments)
          FPM.addPass(AMDGPUPromoteKernelArgumentsPass());

        // Add infer address spaces pass to the opt pipeline after inlining
        // but before SROA to increase SROA opportunities.
        FPM.addPass(InferAddressSpacesPass());

        // This should run after inlining to have any chance of doing
        // anything, and before other cleanup optimizations.
        FPM.addPass(AMDGPULowerKernelAttributesPass());

        if (Level != OptimizationLevel::O0) {
          // Promote alloca to vector before SROA and loop unroll. If we
          // manage to eliminate allocas before unroll we may choose to unroll
          // less.
          FPM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
        }

        PM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
      });

  PB.registerFullLinkTimeOptimizationLastEPCallback(
      [this](ModulePassManager &PM, OptimizationLevel Level) {
        // We want to support the -lto-partitions=N option as "best effort".
        // For that, we need to lower LDS earlier in the pipeline before the
        // module is partitioned for codegen.
        if (EnableLowerModuleLDS)
          PM.addPass(AMDGPULowerModuleLDSPass(*this));
      });
}

int64_t AMDGPUTargetMachine::getNullPointerValue(unsigned AddrSpace) {
  return (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
          AddrSpace == AMDGPUAS::PRIVATE_ADDRESS ||
          AddrSpace == AMDGPUAS::REGION_ADDRESS)
             ? -1
             : 0;
}
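
// Example: because private/local/region null is all-ones, IR such as
//   %p = addrspacecast ptr null to ptr addrspace(5)
// must materialize 0xFFFFFFFF for %p, whereas flat and global null pointers
// keep the all-zero bit pattern.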

bool AMDGPUTargetMachine::isNoopAddrSpaceCast(unsigned SrcAS,
                                              unsigned DestAS) const {
  return AMDGPU::isFlatGlobalAddrSpace(SrcAS) &&
         AMDGPU::isFlatGlobalAddrSpace(DestAS);
}

unsigned AMDGPUTargetMachine::getAssumedAddrSpace(const Value *V) const {
  const auto *LD = dyn_cast<LoadInst>(V);
  if (!LD)
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;

  // It must be a generic pointer loaded.
  assert(V->getType()->isPointerTy() &&
         V->getType()->getPointerAddressSpace() == AMDGPUAS::FLAT_ADDRESS);

  const auto *Ptr = LD->getPointerOperand();
  if (Ptr->getType()->getPointerAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
    return AMDGPUAS::UNKNOWN_ADDRESS_SPACE;
  // For a generic pointer loaded from the constant memory, it could be assumed
  // as a global pointer since the constant memory is only populated on the
  // host side. As implied by the offload programming model, only global
  // pointers could be referenced on the host side.
  return AMDGPUAS::GLOBAL_ADDRESS;
}
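
// Example (illustrative IR): a flat pointer loaded from constant memory, e.g.
//   %p = load ptr, ptr addrspace(4) %q
// is assumed to be a global pointer, which lets InferAddressSpaces rewrite
// users of %p into addrspace(1) accesses.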

std::pair<const Value *, unsigned>
AMDGPUTargetMachine::getPredicatedAddrSpace(const Value *V) const {
  if (auto *II = dyn_cast<IntrinsicInst>(V)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::amdgcn_is_shared:
      return std::pair(II->getArgOperand(0), AMDGPUAS::LOCAL_ADDRESS);
    case Intrinsic::amdgcn_is_private:
      return std::pair(II->getArgOperand(0), AMDGPUAS::PRIVATE_ADDRESS);
    default:
      break;
    }
    return std::pair(nullptr, -1);
  }
  // Check the global pointer predication based on
  // (!is_shared(p) && !is_private(p)). Note that logic 'and' is commutative and
  // the order of 'is_shared' and 'is_private' is not significant.
  Value *Ptr;
  if (match(
          const_cast<Value *>(V),
          m_c_And(m_Not(m_Intrinsic<Intrinsic::amdgcn_is_shared>(m_Value(Ptr))),
                  m_Not(m_Intrinsic<Intrinsic::amdgcn_is_private>(
                      m_Deferred(Ptr))))))
    return std::pair(Ptr, AMDGPUAS::GLOBAL_ADDRESS);

  return std::pair(nullptr, -1);
}
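
// Example (illustrative IR for the pattern matched above):
//   %s  = call i1 @llvm.amdgcn.is.shared(ptr %p)
//   %ns = xor i1 %s, true
//   %pv = call i1 @llvm.amdgcn.is.private(ptr %p)
//   %np = xor i1 %pv, true
//   %g  = and i1 %ns, %np
// When %g is known true, %p may be treated as a global (addrspace(1)) pointer.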

unsigned
AMDGPUTargetMachine::getAddressSpaceForPseudoSourceKind(unsigned Kind) const {
  switch (Kind) {
  case PseudoSourceValue::Stack:
  case PseudoSourceValue::FixedStack:
    return AMDGPUAS::PRIVATE_ADDRESS;
  case PseudoSourceValue::ConstantPool:
  case PseudoSourceValue::GOT:
  case PseudoSourceValue::JumpTable:
  case PseudoSourceValue::GlobalValueCallEntry:
  case PseudoSourceValue::ExternalSymbolCallEntry:
    return AMDGPUAS::CONSTANT_ADDRESS;
  }
  return AMDGPUAS::FLAT_ADDRESS;
}

bool AMDGPUTargetMachine::splitModule(
    Module &M, unsigned NumParts,
    function_ref<void(std::unique_ptr<Module> MPart)> ModuleCallback) const {
  splitAMDGPUModule(*this, M, NumParts, ModuleCallback);
  return true;
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   const TargetOptions &Options,
                                   std::optional<Reloc::Model> RM,
                                   std::optional<CodeModel::Model> CM,
                                   CodeGenOptLevel OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

const TargetSubtargetInfo *
GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = std::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) const {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

std::unique_ptr<CSEConfigBase> llvm::AMDGPUPassConfig::getCSEConfig() const {
  return getStandardCSEConfigForOpt(TM->getOptLevel());
}

namespace {

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
      : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
    substitutePass(&PostRASchedulerID, &PostMachineSchedulerID);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  ScheduleDAGInstrs *
  createPostMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMI *DAG = new GCNPostScheduleDAGMILive(
        C, std::make_unique<PostGenericScheduler>(C),
        /*RemoveKillFlags=*/true);
    const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    if (ST.shouldClusterStores())
      DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(ST.createFillMFMAShadowMutation(DAG->TII));
    DAG->addMutation(
        createIGroupLPDAGMutation(AMDGPU::SchedulingPhase::PostRA));
    if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
      DAG->addMutation(createVOPDPairingMutation());
    return DAG;
  }

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  void addPreLegalizeMachineIR() override;
  bool addLegalizeMachineIR() override;
  void addPreRegBankSelect() override;
  bool addRegBankSelect() override;
  void addPreGlobalInstructionSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc() override;
  void addOptimizedRegAlloc() override;

  FunctionPass *createSGPRAllocPass(bool Optimized);
  FunctionPass *createVGPRAllocPass(bool Optimized);
  FunctionPass *createRegAllocPass(bool Optimized) override;

  bool addRegAssignAndRewriteFast() override;
  bool addRegAssignAndRewriteOptimized() override;

  void addPreRegAlloc() override;
  bool addPreRewrite() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

AMDGPUPassConfig::AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
  // Exceptions and StackMaps are not supported, so these passes will never do
  // anything.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  // Garbage collection is not supported.
  disablePass(&GCLoweringID);
  disablePass(&ShadowStackGCLoweringID);
}

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOptLevel::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  if (isPassEnabled(EnableLoopPrefetch, CodeGenOptLevel::Aggressive))
    addPass(createLoopDataPrefetchPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR creates common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  Triple::ArchType Arch = TM.getTargetTriple().getArch();
  if (RemoveIncompatibleFunctions && Arch == Triple::amdgcn)
    addPass(createAMDGPURemoveIncompatibleFunctionsPass(&TM));

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAMDGPUPrintfRuntimeBinding());
  if (LowerCtorDtor)
    addPass(createAMDGPUCtorDtorLoweringLegacyPass());

  if (isPassEnabled(EnableImageIntrinsicOptimizer))
    addPass(createAMDGPUImageIntrinsicOptimizerPass(&TM));

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (Arch == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  // Runs before PromoteAlloca so the latter can account for function uses
  if (EnableLowerModuleLDS) {
    addPass(createAMDGPULowerModuleLDSLegacyPass(&TM));
  }

  // AMDGPUAttributor infers lack of llvm.amdgcn.lds.kernel.id calls, so run
  // after their introduction
  if (TM.getOptLevel() > CodeGenOptLevel::None)
    addPass(createAMDGPUAttributorLegacyPass());

  if (TM.getOptLevel() > CodeGenOptLevel::None)
    addPass(createInferAddressSpacesPass());

  // Run atomic optimizer before Atomic Expand
  if ((TM.getTargetTriple().getArch() == Triple::amdgcn) &&
      (TM.getOptLevel() >= CodeGenOptLevel::Less) &&
      (AMDGPUAtomicOptimizerStrategy != ScanOptions::None)) {
    addPass(createAMDGPUAtomicOptimizerPass(AMDGPUAtomicOptimizerStrategy));
  }

  addPass(createAtomicExpandLegacyPass());

  if (TM.getOptLevel() > CodeGenOptLevel::None) {
    addPass(createAMDGPUPromoteAlloca());

    if (isPassEnabled(EnableScalarIRPasses))
      addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
      }));
    }

    if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
      // TODO: May want to move later or split into an early and late one.
      addPass(createAMDGPUCodeGenPreparePass());
    }

    // Try to hoist loop invariant parts of divisions AMDGPUCodeGenPrepare may
    // have expanded.
    if (TM.getOptLevel() > CodeGenOptLevel::Less)
      addPass(createLICMPass());
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  // %0 = add %a, %b
  // %1 = add %b, %a
  //
  // and
  //
  // %0 = shl nsw %a, 2
  // %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (isPassEnabled(EnableScalarIRPasses))
    addEarlyCSEOrGVNPass();
}

void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
    // FIXME: This pass adds 2 hacky attributes that can be replaced with an
    // analysis, and should be removed.
    addPass(createAMDGPUAnnotateKernelFeaturesPass());
  }

  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  if (TM->getTargetTriple().getArch() == Triple::amdgcn) {
    // This lowering has been placed after codegenprepare to take advantage of
    // address mode matching (which is why it isn't put with the LDS lowerings).
    // It could be placed anywhere before uniformity annotations (an analysis
    // that it changes by splitting up fat pointers into their components)
    // but has been put before switch lowering and CFG flattening so that those
    // passes can run on the more optimized control flow this pass creates in
    // many cases.
    //
    // FIXME: This should ideally be put after the LoadStoreVectorizer.
    // However, due to some annoying facts about ResourceUsageAnalysis,
    // (especially as exercised in the resource-usage-dead-function test),
    // we need all the function passes codegenprepare all the way through
    // said resource usage analysis to run on the call graph produced
    // before codegenprepare runs (because codegenprepare will knock some
    // nodes out of the graph, which leads to function-level passes not
    // being run on them, which causes crashes in the resource usage analysis).
    addPass(createAMDGPULowerBufferFatPointersPass());
    // In accordance with the above FIXME, manually force all the
    // function-level passes into a CGSCCPassManager.
    addPass(new DummyCGSCCPass());
  }

  TargetPassConfig::addCodeGenPrepare();

  if (isPassEnabled(EnableLoadStoreVectorizer))
    addPass(createLoadStoreVectorizerPass());

  // LowerSwitch pass may introduce unreachable blocks that can
  // cause unexpected behavior for subsequent passes. Placing it
  // here seems better, as these blocks will get cleaned up by
  // UnreachableBlockElim inserted next in the pass flow.
  addPass(createLowerSwitchPass());
}

bool AMDGPUPassConfig::addPreISel() {
  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

llvm::ScheduleDAGInstrs *
AMDGPUPassConfig::createMachineScheduler(MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  ScheduleDAGMILive *DAG = createGenericSchedLive(C);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  if (ST.shouldClusterStores())
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

MachineFunctionInfo *R600TargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return R600MachineFunctionInfo::create<R600MachineFunctionInfo>(
      Allocator, F, static_cast<const R600Subtarget *>(STI));
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
    MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);

  if (EnableMaxIlpSchedStrategy)
    return createGCNMaxILPMachineScheduler(C);

  return createGCNMaxOccupancyMachineScheduler(C);
}

bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createAMDGPULateCodeGenPreparePass());

  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createSinkingPass());

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize && !DisableStructurizer) {
    if (EnableStructurizerWorkarounds) {
      addPass(createFixIrreduciblePass());
      addPass(createUnifyLoopExitsPass());
    }
    addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
  }
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize && !DisableStructurizer) {
    addPass(createSIAnnotateControlFlowPass());
    // TODO: Move this right after structurizeCFG to avoid extra divergence
    // analysis. This depends on stopping SIAnnotateControlFlow from making
    // control flow modifications.
    addPass(createAMDGPURewriteUndefForPHILegacyPass());
  }
  addPass(createLCSSAPass());

  if (TM->getOptLevel() > CodeGenOptLevel::Less)
    addPass(&AMDGPUPerfHintAnalysisID);

  return false;
}

void GCNPassConfig::addMachineSSAOptimization() {
  TargetPassConfig::addMachineSSAOptimization();

  // We want to fold operands after PeepholeOptimizer has run (or as part of
  // it), because it will eliminate extra copies making it easier to fold the
  // real source operand. We want to eliminate dead instructions after, so that
  // we see fewer uses of the copies. We then need to clean up the dead
  // instructions leftover after the operands are folded as well.
  //
  // XXX - Can we get away without running DeadMachineInstructionElim again?
  addPass(&SIFoldOperandsID);
  if (EnableDPPCombine)
    addPass(&GCNDPPCombineID);
  addPass(&SILoadStoreOptimizerID);
  if (isPassEnabled(EnableSDWAPeephole)) {
    addPass(&SIPeepholeSDWAID);
    addPass(&EarlyMachineLICMID);
    addPass(&MachineCSEID);
    addPass(&SIFoldOperandsID);
  }
  addPass(&DeadMachineInstructionElimID);
  addPass(createSIShrinkInstructionsPass());
}

bool GCNPassConfig::addILPOpts() {
  if (EnableEarlyIfConversion)
    addPass(&EarlyIfConverterID);

  TargetPassConfig::addILPOpts();
  return false;
}

bool GCNPassConfig::addInstSelector() {
  AMDGPUPassConfig::addInstSelector();
  addPass(&SIFixSGPRCopiesID);
  addPass(createSILowerI1CopiesPass());
  return false;
}

bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator(getOptLevel()));
  return false;
}

void GCNPassConfig::addPreLegalizeMachineIR() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPUPreLegalizeCombiner(IsOptNone));
  addPass(new Localizer());
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

void GCNPassConfig::addPreRegBankSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPUPostLegalizeCombiner(IsOptNone));
  addPass(createAMDGPUGlobalISelDivergenceLoweringPass());
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new AMDGPURegBankSelect());
  return false;
}

void GCNPassConfig::addPreGlobalInstructionSelect() {
  bool IsOptNone = getOptLevel() == CodeGenOptLevel::None;
  addPass(createAMDGPURegBankCombiner(IsOptNone));
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect(getOptLevel()));
  return false;
}

void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
}

void GCNPassConfig::addFastRegAlloc() {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID);

  insertPass(&TwoAddressInstructionPassID, &SIWholeQuadModeID);

  TargetPassConfig::addFastRegAlloc();
}

void GCNPassConfig::addOptimizedRegAlloc() {
  // Allow the scheduler to run before SIWholeQuadMode inserts exec manipulation
  // instructions that cause scheduling barriers.
  insertPass(&MachineSchedulerID, &SIWholeQuadModeID);

  if (OptExecMaskPreRA)
    insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);

  if (EnableRewritePartialRegUses)
    insertPass(&RenameIndependentSubregsID, &GCNRewritePartialRegUsesID);

  if (isPassEnabled(EnablePreRAOptimizations))
    insertPass(&MachineSchedulerID, &GCNPreRAOptimizationsID);

  // This is not an essential optimization and it has a noticeable impact on
  // compilation time, so we only enable it from O2.
  if (TM->getOptLevel() > CodeGenOptLevel::Less)
    insertPass(&MachineSchedulerID, &SIFormMemoryClausesID);

  // FIXME: when an instruction has a Killed operand, and the instruction is
  // inside a bundle, seems only the BUNDLE instruction appears as the Kills of
  // the register in LiveVariables, this would trigger a failure in verifier,
  // we should fix it and enable the verifier.
  if (OptVGPRLiveRange)
    insertPass(&LiveVariablesID, &SIOptimizeVGPRLiveRangeID);
  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID);

  if (EnableDCEInRA)
    insertPass(&DetectDeadLanesID, &DeadMachineInstructionElimID);

  TargetPassConfig::addOptimizedRegAlloc();
}

bool GCNPassConfig::addPreRewrite() {
  addPass(&SILowerWWMCopiesID);
  if (EnableRegReassign)
    addPass(&GCNNSAReassignID);
  return true;
}

FunctionPass *GCNPassConfig::createSGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultSGPRRegisterAllocatorFlag,
                  initializeDefaultSGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = SGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyRegisterAllocator(onlyAllocateSGPRs);

  return createFastRegisterAllocator(onlyAllocateSGPRs, false);
}

FunctionPass *GCNPassConfig::createVGPRAllocPass(bool Optimized) {
  // Initialize the global default.
  llvm::call_once(InitializeDefaultVGPRRegisterAllocatorFlag,
                  initializeDefaultVGPRRegisterAllocatorOnce);

  RegisterRegAlloc::FunctionPassCtor Ctor = VGPRRegisterRegAlloc::getDefault();
  if (Ctor != useDefaultRegisterAllocator)
    return Ctor();

  if (Optimized)
    return createGreedyVGPRRegisterAllocator();

  return createFastVGPRRegisterAllocator();
}

FunctionPass *GCNPassConfig::createRegAllocPass(bool Optimized) {
  llvm_unreachable("should not be used");
}

static const char RegAllocOptNotSupportedMessage[] =
    "-regalloc not supported with amdgcn. Use -sgpr-regalloc and -vgpr-regalloc";

bool GCNPassConfig::addRegAssignAndRewriteFast() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(&GCNPreRALongBranchRegID);

  addPass(createSGPRAllocPass(false));

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);
  addPass(&SIPreAllocateWWMRegsID);

  addPass(createVGPRAllocPass(false));

  addPass(&SILowerWWMCopiesID);
  return true;
}

bool GCNPassConfig::addRegAssignAndRewriteOptimized() {
  if (!usingDefaultRegAlloc())
    report_fatal_error(RegAllocOptNotSupportedMessage);

  addPass(&GCNPreRALongBranchRegID);

  addPass(createSGPRAllocPass(true));

  // Commit allocated register changes. This is mostly necessary because too
  // many things rely on the use lists of the physical registers, such as the
  // verifier. This is only necessary with allocators which use LiveIntervals,
  // since FastRegAlloc does the replacements itself.
  addPass(createVirtRegRewriter(false));

  // Equivalent of PEI for SGPRs.
  addPass(&SILowerSGPRSpillsID);
  addPass(&SIPreAllocateWWMRegsID);

  addPass(createVGPRAllocPass(true));

  addPreRewrite();
  addPass(&VirtRegRewriterID);

  addPass(&AMDGPUMarkLastScratchLoadID);

  return true;
}

void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();
}

void GCNPassConfig::addPreSched2() {
  if (TM->getOptLevel() > CodeGenOptLevel::None)
    addPass(createSIShrinkInstructionsPass());
  addPass(&SIPostRABundlerID);
}

void GCNPassConfig::addPreEmitPass() {
  if (isPassEnabled(EnableVOPD, CodeGenOptLevel::Less))
    addPass(&GCNCreateVOPDID);
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());

  addPass(createSIModeRegisterPass());

  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIInsertHardClausesID);

  addPass(&SILateBranchLoweringPassID);
  if (isPassEnabled(EnableSetWavePriority, CodeGenOptLevel::Less))
    addPass(createAMDGPUSetWavePriorityPass());
  if (getOptLevel() > CodeGenOptLevel::None)
    addPass(&SIPreEmitPeepholeID);
  // The hazard recognizer that runs as part of the post-ra scheduler does not
  // guarantee to be able to handle all hazards correctly. This is because if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  addPass(&PostRAHazardRecognizerID);

  if (isPassEnabled(EnableInsertSingleUseVDST, CodeGenOptLevel::Less))
    addPass(&AMDGPUInsertSingleUseVDSTID);

  if (isPassEnabled(EnableInsertDelayAlu, CodeGenOptLevel::Less))
    addPass(&AMDGPUInsertDelayAluID);

  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}

void GCNTargetMachine::registerMachineRegisterInfoCallback(
    MachineFunction &MF) const {
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MF.getRegInfo().addDelegate(MFI);
}

MachineFunctionInfo *GCNTargetMachine::createMachineFunctionInfo(
    BumpPtrAllocator &Allocator, const Function &F,
    const TargetSubtargetInfo *STI) const {
  return SIMachineFunctionInfo::create<SIMachineFunctionInfo>(
      Allocator, F, static_cast<const GCNSubtarget *>(STI));
}

yaml::MachineFunctionInfo *GCNTargetMachine::createDefaultFuncInfoYAML() const {
  return new yaml::SIMachineFunctionInfo();
}

yaml::MachineFunctionInfo *
GCNTargetMachine::convertFuncInfoToYAML(const MachineFunction &MF) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return new yaml::SIMachineFunctionInfo(
      *MFI, *MF.getSubtarget<GCNSubtarget>().getRegisterInfo(), MF);
}

bool GCNTargetMachine::parseMachineFunctionInfo(
    const yaml::MachineFunctionInfo &MFI_, PerFunctionMIParsingState &PFS,
    SMDiagnostic &Error, SMRange &SourceRange) const {
  const yaml::SIMachineFunctionInfo &YamlMFI =
      static_cast<const yaml::SIMachineFunctionInfo &>(MFI_);
  MachineFunction &MF = PFS.MF;
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();

  if (MFI->initializeBaseYamlFields(YamlMFI, MF, PFS, Error, SourceRange))
    return true;

  if (MFI->Occupancy == 0) {
    // Fixup the subtarget dependent default value.
    MFI->Occupancy = ST.computeOccupancy(MF.getFunction(), MFI->getLDSSize());
  }

  auto parseRegister = [&](const yaml::StringValue &RegName, Register &RegVal) {
    Register TempReg;
    if (parseNamedRegisterReference(PFS, TempReg, RegName.Value, Error)) {
      SourceRange = RegName.SourceRange;
      return true;
    }
    RegVal = TempReg;

    return false;
  };

  auto parseOptionalRegister = [&](const yaml::StringValue &RegName,
                                   Register &RegVal) {
    return !RegName.Value.empty() && parseRegister(RegName, RegVal);
  };

  if (parseOptionalRegister(YamlMFI.VGPRForAGPRCopy, MFI->VGPRForAGPRCopy))
    return true;

  if (parseOptionalRegister(YamlMFI.SGPRForEXECCopy, MFI->SGPRForEXECCopy))
    return true;

  if (parseOptionalRegister(YamlMFI.LongBranchReservedReg,
                            MFI->LongBranchReservedReg))
    return true;

  auto diagnoseRegisterClass = [&](const yaml::StringValue &RegName) {
    // Create a diagnostic for the register string literal.
    const MemoryBuffer &Buffer =
        *PFS.SM->getMemoryBuffer(PFS.SM->getMainFileID());
    Error = SMDiagnostic(*PFS.SM, SMLoc(), Buffer.getBufferIdentifier(), 1,
                         RegName.Value.size(), SourceMgr::DK_Error,
                         "incorrect register class for field", RegName.Value,
                         std::nullopt, std::nullopt);
    SourceRange = RegName.SourceRange;
    return true;
  };

  if (parseRegister(YamlMFI.ScratchRSrcReg, MFI->ScratchRSrcReg) ||
      parseRegister(YamlMFI.FrameOffsetReg, MFI->FrameOffsetReg) ||
      parseRegister(YamlMFI.StackPtrOffsetReg, MFI->StackPtrOffsetReg))
    return true;

  if (MFI->ScratchRSrcReg != AMDGPU::PRIVATE_RSRC_REG &&
      !AMDGPU::SGPR_128RegClass.contains(MFI->ScratchRSrcReg)) {
    return diagnoseRegisterClass(YamlMFI.ScratchRSrcReg);
  }

  if (MFI->FrameOffsetReg != AMDGPU::FP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->FrameOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.FrameOffsetReg);
  }

  if (MFI->StackPtrOffsetReg != AMDGPU::SP_REG &&
      !AMDGPU::SGPR_32RegClass.contains(MFI->StackPtrOffsetReg)) {
    return diagnoseRegisterClass(YamlMFI.StackPtrOffsetReg);
  }

  for (const auto &YamlReg : YamlMFI.WWMReservedRegs) {
    Register ParsedReg;
    if (parseRegister(YamlReg, ParsedReg))
      return true;

    MFI->reserveWWMRegister(ParsedReg);
  }
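
  // Example (hedged) of the MIR YAML consumed above, following the field
  // names used by existing MIR tests:
  //
  //   machineFunctionInfo:
  //     scratchRSrcReg:    '$sgpr0_sgpr1_sgpr2_sgpr3'
  //     frameOffsetReg:    '$sgpr33'
  //     stackPtrOffsetReg: '$sgpr32'
  //     wwmReservedRegs:
  //       - '$vgpr2'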

  auto parseAndCheckArgument = [&](const std::optional<yaml::SIArgument> &A,
                                   const TargetRegisterClass &RC,
                                   ArgDescriptor &Arg, unsigned UserSGPRs,
                                   unsigned SystemSGPRs) {
    // Skip parsing if it's not present.
    if (!A)
      return false;

    if (A->IsRegister) {
      Register Reg;
      if (parseNamedRegisterReference(PFS, Reg, A->RegisterName.Value, Error)) {
        SourceRange = A->RegisterName.SourceRange;
        return true;
      }
      if (!RC.contains(Reg))
        return diagnoseRegisterClass(A->RegisterName);
      Arg = ArgDescriptor::createRegister(Reg);
    } else
      Arg = ArgDescriptor::createStack(A->StackOffset);
    // Check and apply the optional mask.
    if (A->Mask)
      Arg = ArgDescriptor::createArg(Arg, *A->Mask);

    MFI->NumUserSGPRs += UserSGPRs;
    MFI->NumSystemSGPRs += SystemSGPRs;
    return false;
  };

  if (YamlMFI.ArgInfo &&
      (parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentBuffer,
                             AMDGPU::SGPR_128RegClass,
                             MFI->ArgInfo.PrivateSegmentBuffer, 4, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchPtr,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchPtr,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->QueuePtr, AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.QueuePtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->KernargSegmentPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.KernargSegmentPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->DispatchID,
                             AMDGPU::SReg_64RegClass, MFI->ArgInfo.DispatchID,
                             2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->FlatScratchInit,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.FlatScratchInit, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentSize,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentSize, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->LDSKernelId,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.LDSKernelId, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDX,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDX,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDY,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDY,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupIDZ,
                             AMDGPU::SGPR_32RegClass, MFI->ArgInfo.WorkGroupIDZ,
                             0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkGroupInfo,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.WorkGroupInfo, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->PrivateSegmentWaveByteOffset,
                             AMDGPU::SGPR_32RegClass,
                             MFI->ArgInfo.PrivateSegmentWaveByteOffset, 0, 1) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitArgPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitArgPtr, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->ImplicitBufferPtr,
                             AMDGPU::SReg_64RegClass,
                             MFI->ArgInfo.ImplicitBufferPtr, 2, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDX,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDX, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDY,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDY, 0, 0) ||
       parseAndCheckArgument(YamlMFI.ArgInfo->WorkItemIDZ,
                             AMDGPU::VGPR_32RegClass,
                             MFI->ArgInfo.WorkItemIDZ, 0, 0)))
    return true;

  if (ST.hasIEEEMode())
    MFI->Mode.IEEE = YamlMFI.Mode.IEEE;
  if (ST.hasDX10ClampMode())
    MFI->Mode.DX10Clamp = YamlMFI.Mode.DX10Clamp;

  // FIXME: Move proper support for denormal-fp-math into base MachineFunction
  MFI->Mode.FP32Denormals.Input = YamlMFI.Mode.FP32InputDenormals
                                      ? DenormalMode::IEEE
                                      : DenormalMode::PreserveSign;
  MFI->Mode.FP32Denormals.Output = YamlMFI.Mode.FP32OutputDenormals
                                       ? DenormalMode::IEEE
                                       : DenormalMode::PreserveSign;

  MFI->Mode.FP64FP16Denormals.Input = YamlMFI.Mode.FP64FP16InputDenormals
                                          ? DenormalMode::IEEE
                                          : DenormalMode::PreserveSign;
  MFI->Mode.FP64FP16Denormals.Output = YamlMFI.Mode.FP64FP16OutputDenormals
                                           ? DenormalMode::IEEE
                                           : DenormalMode::PreserveSign;

  return false;
}
static cl::opt< bool > EnableEarlyIfConversion("aarch64-enable-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(true))
This is the AMGPU address space based alias analysis pass.
Defines an instruction selector for the AMDGPU target.
static cl::opt< bool > EnableDCEInRA("amdgpu-dce-in-ra", cl::init(true), cl::Hidden, cl::desc("Enable machine DCE inside regalloc"))
static cl::opt< bool, true > EnableLowerModuleLDS("amdgpu-enable-lower-module-lds", cl::desc("Enable lower module lds pass"), cl::location(AMDGPUTargetMachine::EnableLowerModuleLDS), cl::init(true), cl::Hidden)
static MachineSchedRegistry SISchedRegistry("si", "Run SI's custom scheduler", createSIMachineScheduler)
static ScheduleDAGInstrs * createIterativeILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EarlyInlineAll("amdgpu-early-inline-all", cl::desc("Inline all functions early"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableLowerKernelArguments("amdgpu-ir-lower-kernel-arguments", cl::desc("Lower kernel argument loads in IR pass"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createGCNMaxILPMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableSDWAPeephole("amdgpu-sdwa-peephole", cl::desc("Enable SDWA peepholer"), cl::init(true))
static MachineSchedRegistry GCNMinRegSchedRegistry("gcn-iterative-minreg", "Run GCN iterative scheduler for minimal register usage (experimental)", createMinRegScheduler)
static cl::opt< bool > EnableImageIntrinsicOptimizer("amdgpu-enable-image-intrinsic-optimizer", cl::desc("Enable image intrinsic optimizer pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableSIModeRegisterPass("amdgpu-mode-register", cl::desc("Enable mode register pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableDPPCombine("amdgpu-dpp-combine", cl::desc("Enable DPP combiner"), cl::init(true))
static MachineSchedRegistry IterativeGCNMaxOccupancySchedRegistry("gcn-iterative-max-occupancy-experimental", "Run GCN scheduler to maximize occupancy (experimental)", createIterativeGCNMaxOccupancyMachineScheduler)
static cl::opt< bool > EnableSetWavePriority("amdgpu-set-wave-priority", cl::desc("Adjust wave priority"), cl::init(false), cl::Hidden)
static cl::opt< bool > LowerCtorDtor("amdgpu-lower-global-ctor-dtor", cl::desc("Lower GPU ctor / dtors to globals on the device."), cl::init(true), cl::Hidden)
static cl::opt< bool, true > DisableStructurizer("amdgpu-disable-structurizer", cl::desc("Disable structurizer for experiments; produces unusable code"), cl::location(AMDGPUTargetMachine::DisableStructurizer), cl::ReallyHidden)
static cl::opt< bool > OptExecMaskPreRA("amdgpu-opt-exec-mask-pre-ra", cl::Hidden, cl::desc("Run pre-RA exec mask optimizations"), cl::init(true))
static cl::opt< bool > EnablePromoteKernelArguments("amdgpu-enable-promote-kernel-arguments", cl::desc("Enable promotion of flat kernel pointer arguments to global"), cl::Hidden, cl::init(true))
static cl::opt< bool > EnableRewritePartialRegUses("amdgpu-enable-rewrite-partial-reg-uses", cl::desc("Enable rewrite partial reg uses pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableLibCallSimplify("amdgpu-simplify-libcall", cl::desc("Enable amdgpu library simplifications"), cl::init(true), cl::Hidden)
static MachineSchedRegistry GCNMaxILPSchedRegistry("gcn-max-ilp", "Run GCN scheduler to maximize ilp", createGCNMaxILPMachineScheduler)
static cl::opt< bool > InternalizeSymbols("amdgpu-internalize-symbols", cl::desc("Enable elimination of non-kernel functions and unused globals"), cl::init(false), cl::Hidden)
static LLVM_READNONE StringRef getGPUOrDefault(const Triple &TT, StringRef GPU)
static Reloc::Model getEffectiveRelocModel(std::optional< Reloc::Model > RM)
static cl::opt< bool > EnableStructurizerWorkarounds("amdgpu-enable-structurizer-workarounds", cl::desc("Enable workarounds for the StructurizeCFG pass"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden, cl::desc("Enable AMDGPU Alias Analysis"), cl::init(true))
static Expected< ScanOptions > parseAMDGPUAtomicOptimizerStrategy(StringRef Params)
static ScheduleDAGInstrs * createMinRegScheduler(MachineSchedContext *C)
static cl::opt< bool, true > LateCFGStructurize("amdgpu-late-structurize", cl::desc("Enable late CFG structurization"), cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG), cl::Hidden)
static cl::opt< bool > EnableHipStdPar("amdgpu-enable-hipstdpar", cl::desc("Enable HIP Standard Parallelism Offload support"), cl::init(false), cl::Hidden)
static cl::opt< bool > EnableInsertDelayAlu("amdgpu-enable-delay-alu", cl::desc("Enable s_delay_alu insertion"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnableLoadStoreVectorizer("amdgpu-load-store-vectorizer", cl::desc("Enable load store vectorizer"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableMaxIlpSchedStrategy("amdgpu-enable-max-ilp-scheduling-strategy", cl::desc("Enable scheduling strategy to maximize ILP for a single wave."), cl::Hidden, cl::init(false))
static bool mustPreserveGV(const GlobalValue &GV)
Predicate for Internalize pass.
static cl::opt< bool > EnableLoopPrefetch("amdgpu-loop-prefetch", cl::desc("Enable loop data prefetch on AMDGPU"), cl::Hidden, cl::init(false))
LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAMDGPUTarget()
static cl::opt< bool > EnableInsertSingleUseVDST("amdgpu-enable-single-use-vdst", cl::desc("Enable s_singleuse_vdst insertion"), cl::init(false), cl::Hidden)
static cl::opt< bool > RemoveIncompatibleFunctions("amdgpu-enable-remove-incompatible-functions", cl::Hidden, cl::desc("Enable removal of functions when they" "use features not supported by the target GPU"), cl::init(true))
static cl::opt< bool > EnableScalarIRPasses("amdgpu-scalar-ir-passes", cl::desc("Enable scalar IR passes"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableRegReassign("amdgpu-reassign-regs", cl::desc("Enable register reassign optimizations on gfx10+"), cl::init(true), cl::Hidden)
static cl::opt< bool > OptVGPRLiveRange("amdgpu-opt-vgpr-liverange", cl::desc("Enable VGPR liverange optimizations for if-else structure"), cl::init(true), cl::Hidden)
static ScheduleDAGInstrs * createSIMachineScheduler(MachineSchedContext *C)
static cl::opt< bool > EnablePreRAOptimizations("amdgpu-enable-pre-ra-optimizations", cl::desc("Enable Pre-RA optimizations pass"), cl::init(true), cl::Hidden)
static cl::opt< ScanOptions > AMDGPUAtomicOptimizerStrategy("amdgpu-atomic-optimizer-strategy", cl::desc("Select DPP or Iterative strategy for scan"), cl::init(ScanOptions::Iterative), cl::values(clEnumValN(ScanOptions::DPP, "DPP", "Use DPP operations for scan"), clEnumValN(ScanOptions::Iterative, "Iterative", "Use Iterative approach for scan"), clEnumValN(ScanOptions::None, "None", "Disable atomic optimizer")))
static cl::opt< bool > EnableVOPD("amdgpu-enable-vopd", cl::desc("Enable VOPD, dual issue of VALU in wave32"), cl::init(true), cl::Hidden)
static cl::opt< bool > EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden, cl::desc("Run early if-conversion"), cl::init(false))
static ScheduleDAGInstrs * createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C)
static MachineSchedRegistry GCNILPSchedRegistry("gcn-iterative-ilp", "Run GCN iterative scheduler for ILP scheduling (experimental)", createIterativeILPMachineScheduler)
static cl::opt< bool > ScalarizeGlobal("amdgpu-scalarize-global-loads", cl::desc("Enable global load scalarization"), cl::init(true), cl::Hidden)
static const char RegAllocOptNotSupportedMessage[]
static MachineSchedRegistry GCNMaxOccupancySchedRegistry("gcn-max-occupancy", "Run GCN scheduler to maximize occupancy", createGCNMaxOccupancyMachineScheduler)
The AMDGPU TargetMachine interface definition for hw codegen targets.
This file declares the AMDGPU-specific subclass of TargetLoweringObjectFile.
This file a TargetTransformInfo::Concept conforming object specific to the AMDGPU target machine.
Provides passes to inlining "always_inline" functions.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
This header provides classes for managing passes over SCCs of the call graph.
Provides analysis for continuously CSEing during GISel passes.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Definition: CommandLine.h:693
#define LLVM_READNONE
Definition: Compiler.h:220
#define LLVM_EXTERNAL_VISIBILITY
Definition: Compiler.h:135
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
This file defines the class GCNIterativeScheduler, which uses an iterative approach to find a best sc...
This file provides the interface for LLVM's Global Value Numbering pass which eliminates fully redund...
AcceleratorCodeSelection - Identify all functions reachable from a kernel, removing those that are un...
This file declares the IRTranslator pass.
#define RegName(no)
static LVOptions Options
Definition: LVOptions.cpp:25
static std::string computeDataLayout()
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define P(N)
ModulePassManager MPM
const char LLVMTargetMachineRef TM
PassInstrumentationCallbacks PIC
PassBuilder PB(Machine, PassOpts->PTO, std::nullopt, &PIC)
This header defines various interfaces for pass management in LLVM.
The AMDGPU TargetMachine interface definition for hw codegen targets.
Basic Register Allocator
This file describes the interface of the MachineFunctionPass responsible for assigning the generic vi...
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
SI Machine Scheduler interface.
static FunctionPass * useDefaultRegisterAllocator()
-regalloc=... command line option.
Target-Independent Code Generator Pass Configuration Options pass.
static std::unique_ptr< TargetLoweringObjectFile > createTLOF()
A manager for alias analyses.
void registerFunctionAnalysis()
Register a specific AA result.
void addAAResult(AAResultT &AAResult)
Register a specific AA result.
Legacy wrapper pass to provide the AMDGPUAAResult object.
Analysis pass providing a never-invalidated alias analysis result.
AMDGPUTargetMachine & getAMDGPUTargetMachine() const
std::unique_ptr< CSEConfigBase > getCSEConfig() const override
Returns the CSEConfig object to use for the current optimization level.
ScheduleDAGInstrs * createMachineScheduler(MachineSchedContext *C) const override
Create an instance of ScheduleDAGInstrs to be run within the standard MachineScheduler pass for this ...
bool isPassEnabled(const cl::opt< bool > &Opt, CodeGenOptLevel Level=CodeGenOptLevel::Default) const
Check if a pass is enabled given Opt option.
bool addPreISel() override
Methods with trivial inline returns are convenient points in the common codegen pass pipeline where t...
bool addInstSelector() override
addInstSelector - This method should install an instruction selector pass, which converts from LLVM c...
bool addGCPasses() override
addGCPasses - Add late codegen passes that analyze code for garbage collection.
AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
void addIRPasses() override
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
void addCodeGenPrepare() override
Add pass to prepare the LLVM IR for code generation.
static int64_t getNullPointerValue(unsigned AddrSpace)
Get the integer value of a null pointer in the given address space.
unsigned getAddressSpaceForPseudoSourceKind(unsigned Kind) const override
getAddressSpaceForPseudoSourceKind - Given the kind of memory (e.g.
const TargetSubtargetInfo * getSubtargetImpl() const
void registerDefaultAliasAnalyses(AAManager &) override
Allow the target to register alias analyses with the AAManager for use with the new pass manager.
std::pair< const Value *, unsigned > getPredicatedAddrSpace(const Value *V) const override
If the specified predicate checks whether a generic pointer falls within a specified address space,...
StringRef getFeatureString(const Function &F) const
AMDGPUTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL)
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override
Returns true if a cast between SrcAS and DestAS is a noop.
Error buildCodeGenPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType, const CGPassBuilderOption &Opts, PassInstrumentationCallbacks *PIC) override
void registerPassBuilderCallbacks(PassBuilder &PB, bool PopulateClassToPassNames) override
Allow the target to modify the pass pipeline.
bool splitModule(Module &M, unsigned NumParts, function_ref< void(std::unique_ptr< Module > MPart)> ModuleCallback) const override
Entry point for module splitting.
StringRef getGPUName(const Function &F) const
unsigned getAssumedAddrSpace(const Value *V) const override
If the specified generic pointer could be assumed as a pointer to a specific address space,...
StringRef getValueAsString() const
Return the attribute's value as a string.
Definition: Attributes.cpp:349
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:193
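A usage sketch of these Attribute accessors, given a Function &F, in the spirit of the getGPUName/getFeatureString helpers above (the attribute name is only illustrative):
    // Only a valid (present) attribute has a meaningful string value.
    llvm::Attribute Attr = F.getFnAttribute("target-features");
    llvm::StringRef FS =
        Attr.isValid() ? Attr.getValueAsString() : llvm::StringRef();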
Allocate memory in an ever growing pool, as if by bump-pointer.
Definition: Allocator.h:66
Error buildPipeline(ModulePassManager &MPM, raw_pwrite_stream &Out, raw_pwrite_stream *DwoOut, CodeGenFileType FileType) const
void removeDeadConstantUsers() const
If there are any dead constant users dangling off of this constant, remove them.
Definition: Constants.cpp:723
This pass is required by interprocedural register allocation.
Lightweight error class with error context and mandatory checking.
Definition: Error.h:160
Tagged union holding either a T or a Error.
Definition: Error.h:474
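A minimal sketch of the Error/Expected idiom these two classes implement (parseWaveSize and consume are hypothetical):
    llvm::Expected<unsigned> parseWaveSize(llvm::StringRef S) {
      unsigned V;
      if (S.getAsInteger(10, V)) // returns true on parse failure
        return llvm::createStringError(llvm::inconvertibleErrorCode(),
                                       "invalid wave size");
      return V;
    }

    // Checking is mandatory: either take the value or consume the Error.
    if (llvm::Expected<unsigned> WS = parseWaveSize("64"))
      consume(*WS);                           // hypothetical consumer
    else
      llvm::report_fatal_error(WS.takeError());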
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:311
const SIRegisterInfo * getRegisterInfo() const override
Definition: GCNSubtarget.h:269
TargetTransformInfo getTargetTransformInfo(const Function &F) const override
Get a TargetTransformInfo implementation for the target.
void registerMachineRegisterInfoCallback(MachineFunction &MF) const override
bool parseMachineFunctionInfo(const yaml::MachineFunctionInfo &, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange) const override
Parse out the target's MachineFunctionInfo from the YAML representation.
yaml::MachineFunctionInfo * convertFuncInfoToYAML(const MachineFunction &MF) const override
Allocate and initialize an instance of the YAML representation of the MachineFunctionInfo.
yaml::MachineFunctionInfo * createDefaultFuncInfoYAML() const override
Allocate and return a default initialized instance of the YAML representation for the MachineFunction...
TargetPassConfig * createPassConfig(PassManagerBase &PM) override
Create a pass configuration object to be used by addPassToEmitX methods for generating a pipeline of ...
GCNTargetMachine(const Target &T, const Triple &TT, StringRef CPU, StringRef FS, const TargetOptions &Options, std::optional< Reloc::Model > RM, std::optional< CodeModel::Model > CM, CodeGenOptLevel OL, bool JIT)
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
Pass to remove unused function declarations.
Definition: GlobalDCE.h:36
This pass is responsible for selecting generic machine instructions to target-specific instructions.
A pass that internalizes all functions and variables other than those that must be preserved accordin...
Definition: Internalize.h:34
This class describes a target machine that is implemented with the LLVM target-independent code gener...
This pass implements the localization mechanism described at the top of this file.
Definition: Localizer.h:43
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
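Typical AMDGPU use of getInfo, given a MachineFunction &MF, fetching the target-private per-function state (a sketch):
    // SIMachineFunctionInfo derives from MachineFunctionInfo.
    SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();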
void addDelegate(Delegate *delegate)
MachineSchedRegistry provides a selection of available machine instruction schedulers.
This interface provides simple read-only access to a block of memory, and provides simple methods for...
Definition: MemoryBuffer.h:51
virtual StringRef getBufferIdentifier() const
Return an identifier for this buffer, typically the filename it was read from.
Definition: MemoryBuffer.h:76
A Module instance is used to store all the information related to an LLVM module.
Definition: Module.h:65
static const OptimizationLevel O0
Disable as many optimizations as possible.
unsigned getSpeedupLevel() const
static const OptimizationLevel O1
Optimize quickly without destroying debuggability.
This class provides access to building LLVM's passes.
Definition: PassBuilder.h:104
void registerPipelineEarlySimplificationEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:475
void registerPipelineStartEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:466
void registerPeepholeEPCallback(const std::function< void(FunctionPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:400
void registerCGSCCOptimizerLateEPCallback(const std::function< void(CGSCCPassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:445
void registerFullLinkTimeOptimizationLastEPCallback(const std::function< void(ModulePassManager &, OptimizationLevel)> &C)
Register a callback for a default optimizer pipeline extension point.
Definition: PassBuilder.h:511
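A sketch of wiring a pass into one of these extension points from registerPassBuilderCallbacks, given a PassBuilder &PB (GlobalDCEPass is a stand-in here, not necessarily what the target registers):
    PB.registerPipelineStartEPCallback(
        [](llvm::ModulePassManager &MPM, llvm::OptimizationLevel Level) {
          if (Level != llvm::OptimizationLevel::O0)
            MPM.addPass(llvm::GlobalDCEPass()); // illustrative pass choice
        });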
This class manages callback registration, and provides a way for PassInstrumentation to pass ...
LLVM_ATTRIBUTE_MINSIZE void addPass(PassT &&Pass)
Definition: PassManager.h:249
PassRegistry - This class manages the registration and initialization of the pass subsystem as appli...
Definition: PassRegistry.h:37
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at applicatio...
Pass interface - Implemented by all 'passes'.
Definition: Pass.h:94
MachineFunctionInfo * createMachineFunctionInfo(BumpPtrAllocator &Allocator, const Function &F, const TargetSubtargetInfo *STI) const override
Create the target's instance of MachineFunctionInfo.
RegisterPassParser class - Handle the addition of new machine passes.
RegisterRegAllocBase class - Track the registration of register allocators.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
This class keeps track of the SPI_SP_INPUT_ADDR config register, which tells the hardware which inter...
bool initializeBaseYamlFields(const yaml::SIMachineFunctionInfo &YamlMFI, const MachineFunction &MF, PerFunctionMIParsingState &PFS, SMDiagnostic &Error, SMRange &SourceRange)
Instances of this class encapsulate one diagnostic report, allowing printing to a raw_ostream as a ca...
Definition: SourceMgr.h:281
Represents a location in source code.
Definition: SMLoc.h:23
Represents a range in source code.
Definition: SMLoc.h:48
A ScheduleDAG for scheduling lists of MachineInstr.
ScheduleDAGMILive is an implementation of ScheduleDAGInstrs that schedules machine instructions while...
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
void addMutation(std::unique_ptr< ScheduleDAGMutation > Mutation)
Add a postprocessing step to the DAG builder.
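A sketch of the createMachineScheduler pattern these entries describe: create the generic live-interval scheduler, then attach target DAG mutations (close to, but simpler than, what the AMDGPU pass config installs):
    ScheduleDAGInstrs *
    createMachineScheduler(MachineSchedContext *C) const override {
      ScheduleDAGMILive *DAG = createGenericSchedLive(C);
      // Cluster adjacent loads and fuse instruction pairs the HW likes.
      DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
      DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
      return DAG;
    }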
const TargetInstrInfo * TII
Target instruction information.
Definition: ScheduleDAG.h:557
const TargetRegisterInfo * TRI
Target processor register info.
Definition: ScheduleDAG.h:558
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better...
Definition: SmallString.h:26
void append(StringRef RHS)
Append from a StringRef.
Definition: SmallString.h:68
unsigned getMainFileID() const
Definition: SourceMgr.h:132
const MemoryBuffer * getMemoryBuffer(unsigned i) const
Definition: SourceMgr.h:125
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:50
constexpr bool empty() const
empty - Check if the string is empty.
Definition: StringRef.h:134
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition: StringRef.h:628
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:69
R Default(T Value)
Definition: StringSwitch.h:182
StringSwitch & Cases(StringLiteral S0, StringLiteral S1, T Value)
Definition: StringSwitch.h:90
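A usage sketch combining Case, Cases, and Default, given a StringRef Name (the mappings are illustrative, not the target's actual ones):
    unsigned AS = llvm::StringSwitch<unsigned>(Name)
                      .Case("global", AMDGPUAS::GLOBAL_ADDRESS)
                      .Cases("local", "lds", AMDGPUAS::LOCAL_ADDRESS)
                      .Default(AMDGPUAS::FLAT_ADDRESS);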
Triple TargetTriple
Triple string, CPU name, and target feature strings the TargetMachine instance is created with.
Definition: TargetMachine.h:95
const Triple & getTargetTriple() const
const MCSubtargetInfo * getMCSubtargetInfo() const
StringRef getTargetFeatureString() const
StringRef getTargetCPU() const
std::unique_ptr< const MCSubtargetInfo > STI
void resetTargetOptions(const Function &F) const
Reset the target options based on the function's attributes.
std::unique_ptr< const MCRegisterInfo > MRI
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
Target-Independent Code Generator Pass Configuration Options.
LLVMTargetMachine * TM
virtual void addCodeGenPrepare()
Add pass to prepare the LLVM IR for code generation.
virtual bool addILPOpts()
Add passes that optimize instruction level parallelism for out-of-order targets.
virtual void addPostRegAlloc()
This method may be implemented by targets that want to run passes after register allocation pass pipe...
CodeGenOptLevel getOptLevel() const
virtual void addOptimizedRegAlloc()
addOptimizedRegAlloc - Add passes related to register allocation.
virtual void addIRPasses()
Add common target configurable passes that perform LLVM IR to IR transforms following machine indepen...
virtual void addFastRegAlloc()
addFastRegAlloc - Add the minimum set of target-independent passes that are required for fast registe...
virtual void addMachineSSAOptimization()
addMachineSSAOptimization - Add standard passes that optimize machine instructions in SSA form.
void disablePass(AnalysisID PassID)
Allow the target to disable a specific standard pass by default.
AnalysisID addPass(AnalysisID PassID)
Utilities for targets to add passes to the pass manager.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:44
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:373
LLVM Value Representation.
Definition: Value.h:74
bool use_empty() const
Definition: Value.h:344
An efficient, type-erasing, non-owning reference to a callable.
PassManagerBase - An abstract interface to allow code to add passes to a pass manager without having ...
An abstract base class for streams implementations that also support a pwrite operation.
Definition: raw_ostream.h:445
Interfaces for registering analysis passes, producing common pass manager configurations,...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ REGION_ADDRESS
Address space for region memory. (GDS)
@ LOCAL_ADDRESS
Address space for local memory.
@ CONSTANT_ADDRESS
Address space for constant memory (VTX2).
@ FLAT_ADDRESS
Address space for flat memory.
@ GLOBAL_ADDRESS
Address space for global memory (RAT0, VTX0).
@ PRIVATE_ADDRESS
Address space for private memory.
bool isFlatGlobalAddrSpace(unsigned AS)
Definition: AMDGPU.h:415
bool isEntryFunctionCC(CallingConv::ID CC)
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match()...
Definition: PatternMatch.h:893
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
Definition: PatternMatch.h:92
BinaryOp_match< cst_pred_ty< is_all_ones >, ValTy, Instruction::Xor, true > m_Not(const ValTy &V)
Matches a 'Not' as 'xor V, -1' or 'xor -1, V'.
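A sketch of these matchers working together, given a Value *V: bind X, then require the other operand of a commutative 'and' to be ~X (names are illustrative):
    using namespace llvm::PatternMatch;
    llvm::Value *X = nullptr;
    // Matches "and A, (xor A, -1)" with operands in either order;
    // m_Deferred reuses the X bound by m_Value in the same match().
    if (match(V, m_c_And(m_Value(X), m_Not(m_Deferred(X))))) {
      // V computes X & ~X.
    }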
@ ReallyHidden
Definition: CommandLine.h:139
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
Definition: CommandLine.h:718
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:470
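A sketch of a hidden command-line option assembled from these helpers (the flag itself is hypothetical):
    static llvm::cl::opt<bool> EnableFoo(
        "amdgpu-enable-foo", // hypothetical flag name
        llvm::cl::desc("Enable the (hypothetical) foo transform"),
        llvm::cl::init(false), llvm::cl::ReallyHidden);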
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
FunctionPass * createFlattenCFGPass()
void initializeSIFormMemoryClausesPass(PassRegistry &)
char & SIPreAllocateWWMRegsID
FunctionPass * createFastRegisterAllocator()
FastRegisterAllocation Pass - This pass register allocates as fast as possible.
char & EarlyMachineLICMID
This pass performs loop invariant code motion on machine instructions.
ImmutablePass * createAMDGPUAAWrapperPass()
char & PostRAHazardRecognizerID
PostRAHazardRecognizer - This pass runs the post-ra hazard recognizer.
FunctionPass * createAMDGPUSetWavePriorityPass()
void initializeAMDGPUInsertSingleUseVDSTPass(PassRegistry &)
Pass * createLCSSAPass()
Definition: LCSSA.cpp:506
void initializeGCNCreateVOPDPass(PassRegistry &)
ModulePass * createAMDGPUOpenCLEnqueuedBlockLoweringPass()
char & GCNPreRAOptimizationsID
char & GCLoweringID
GCLowering Pass - Used by gc.root to perform its default lowering operations.
void initializeGCNPreRAOptimizationsPass(PassRegistry &)
Pass * createLoadStoreVectorizerPass()
Create a legacy pass manager instance of the LoadStoreVectorizer pass.
void initializeGCNRewritePartialRegUsesPass(llvm::PassRegistry &)
void initializeAMDGPUAttributorLegacyPass(PassRegistry &)
char & SIPostRABundlerID
FunctionPass * createSIModeRegisterPass()
FunctionPass * createGreedyRegisterAllocator()
Greedy register allocation pass - This pass implements a global register allocator for optimized buil...
void initializeAMDGPUAAWrapperPassPass(PassRegistry &)
ModulePass * createAMDGPULowerBufferFatPointersPass()
void initializeR600ClauseMergePassPass(PassRegistry &)
void initializeSIModeRegisterPass(PassRegistry &)
ModulePass * createAMDGPUCtorDtorLoweringLegacyPass()
void initializeSIOptimizeVGPRLiveRangePass(PassRegistry &)
ModuleToFunctionPassAdaptor createModuleToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
Definition: PassManager.h:916
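A sketch of the adaptor in use, running a function pass from a module pipeline (EarlyCSEPass picked purely for illustration):
    llvm::ModulePassManager MPM;
    MPM.addPass(llvm::createModuleToFunctionPassAdaptor(llvm::EarlyCSEPass()));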
void initializeAMDGPULateCodeGenPreparePass(PassRegistry &)
void initializeAMDGPURewriteUndefForPHILegacyPass(PassRegistry &)
FunctionPass * createAMDGPUPreLegalizeCombiner(bool IsOptNone)
char & GCNRewritePartialRegUsesID
FunctionPass * createAMDGPUPostLegalizeCombiner(bool IsOptNone)
void initializeAMDGPUAnnotateUniformValuesPass(PassRegistry &)
std::error_code inconvertibleErrorCode()
The value returned by this function can be returned from convertToErrorCode for Error values where no...
Definition: Error.cpp:90
void initializeSIShrinkInstructionsPass(PassRegistry &)
char & SIFoldOperandsID
void initializeGCNPreRALongBranchRegPass(PassRegistry &)
char & SILoadStoreOptimizerID
std::unique_ptr< ScheduleDAGMutation > createIGroupLPDAGMutation(AMDGPU::SchedulingPhase Phase)
Phase specifies whether or not this is a reentry into the IGroupLPDAGMutation.
void initializeAMDGPUDAGToDAGISelLegacyPass(PassRegistry &)
FunctionPass * createNaryReassociatePass()
char & PatchableFunctionID
This pass implements the "patchable-function" attribute.
char & PostRASchedulerID
PostRAScheduler - This pass performs post register allocation scheduling.
void initializeR600ExpandSpecialInstrsPassPass(PassRegistry &)
void initializeR600PacketizerPass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createVOPDPairingMutation()
ModulePass * createAMDGPUAlwaysInlinePass(bool GlobalOpt=true)
void initializeSIPreEmitPeepholePass(PassRegistry &)
char & SILowerWWMCopiesID
void initializeSIFixVGPRCopiesPass(PassRegistry &)
void initializeAMDGPUGlobalISelDivergenceLoweringPass(PassRegistry &)
std::unique_ptr< CSEConfigBase > getStandardCSEConfigForOpt(CodeGenOptLevel Level)
Definition: CSEInfo.cpp:79
Target & getTheR600Target()
The target for R600 GPUs.
char & MachineSchedulerID
MachineScheduler - This pass schedules machine instructions.
Pass * createStructurizeCFGPass(bool SkipUniformRegions=false)
When SkipUniformRegions is true the structurizer will not structurize regions that only contain uniform...
void initializeAMDGPURemoveIncompatibleFunctionsPass(PassRegistry &)
void initializeSILowerWWMCopiesPass(PassRegistry &)
void initializeGCNNSAReassignPass(PassRegistry &)
char & PostMachineSchedulerID
PostMachineScheduler - This pass schedules machine instructions postRA.
void initializeSIInsertWaitcntsPass(PassRegistry &)
char & AMDGPUInsertSingleUseVDSTID
Pass * createLICMPass()
Definition: LICM.cpp:379
ScheduleDAGMILive * createGenericSchedLive(MachineSchedContext *C)
Create the standard converging machine scheduler.
char & SIFormMemoryClausesID
void initializeAMDGPULowerModuleLDSLegacyPass(PassRegistry &)
void initializeAMDGPUCtorDtorLoweringLegacyPass(PassRegistry &)
void initializeAMDGPURegBankCombinerPass(PassRegistry &)
void initializeSILoadStoreOptimizerPass(PassRegistry &)
void initializeSILateBranchLoweringPass(PassRegistry &)
void initializeSIPeepholeSDWAPass(PassRegistry &)
char & AMDGPUUnifyDivergentExitNodesID
FunctionPass * createAMDGPUAtomicOptimizerPass(ScanOptions ScanStrategy)
char & ShadowStackGCLoweringID
ShadowStackGCLowering - Implements the custom lowering mechanism used by the shadow stack GC.
char & GCNNSAReassignID
void initializeAMDGPURewriteOutArgumentsPass(PassRegistry &)
void initializeAMDGPUExternalAAWrapperPass(PassRegistry &)
void initializeAMDGPULowerKernelArgumentsPass(PassRegistry &)
char & AMDGPUPerfHintAnalysisID
char & SILowerSGPRSpillsID
CodeModel::Model getEffectiveCodeModel(std::optional< CodeModel::Model > CM, CodeModel::Model Default)
Helper method for getting the code model, returning Default if CM does not have a value.
char & SILateBranchLoweringPassID
char & BranchRelaxationPassID
BranchRelaxation - This pass replaces branches that need to jump further than is supported by a branc...
FunctionPass * createSinkingPass()
Definition: Sink.cpp:277
CGSCCToFunctionPassAdaptor createCGSCCToFunctionPassAdaptor(FunctionPassT &&Pass, bool EagerlyInvalidate=false, bool NoRerun=false)
A function to deduce a function pass type and wrap it in the templated adaptor.
FunctionPass * createSIShrinkInstructionsPass()
void initializeAMDGPUAnnotateKernelFeaturesPass(PassRegistry &)
CodeGenFileType
These enums are meant to be passed into addPassesToEmitFile to indicate what type of file to emit,...
Definition: CodeGen.h:83
void initializeSIPostRABundlerPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaToVectorPass(PassRegistry &)
Pass * createAMDGPUAttributorLegacyPass()
void initializeSIWholeQuadModePass(PassRegistry &)
std::unique_ptr< ScheduleDAGMutation > createStoreClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to store clustering.
FunctionPass * createLoopDataPrefetchPass()
FunctionPass * createAMDGPULowerKernelArgumentsPass()
char & AMDGPUInsertDelayAluID
Pass * createAMDGPUAnnotateKernelFeaturesPass()
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:159
std::unique_ptr< ScheduleDAGMutation > createAMDGPUMacroFusionDAGMutation()
Note that you have to add: DAG.addMutation(createAMDGPUMacroFusionDAGMutation()); to AMDGPUPassConfig...
char & StackMapLivenessID
StackMapLiveness - This pass analyses the register live-out set of stackmap/patchpoint intrinsics and...
char & SIOptimizeVGPRLiveRangeID
FunctionPass * createUnifyLoopExitsPass()
char & SIOptimizeExecMaskingPreRAID
FunctionPass * createFixIrreduciblePass()
char & FuncletLayoutID
This pass lays out funclets contiguously.
void initializeSIInsertHardClausesPass(PassRegistry &)
char & DetectDeadLanesID
This pass adds dead/undef flags after analyzing subregister lanes.
void initializeAMDGPUPostLegalizerCombinerPass(PassRegistry &)
CodeGenOptLevel
Code generation optimization level.
Definition: CodeGen.h:54
void initializeSIAnnotateControlFlowPass(PassRegistry &)
ModulePass * createAMDGPUPrintfRuntimeBinding()
void initializeSIMemoryLegalizerPass(PassRegistry &)
Pass * createAlwaysInlinerLegacyPass(bool InsertLifetime=true)
Create a legacy pass manager instance of a pass to inline and remove functions marked as "always_inli...
void initializeR600ControlFlowFinalizerPass(PassRegistry &)
void initializeAMDGPUImageIntrinsicOptimizerPass(PassRegistry &)
FunctionPass * createAMDGPUAnnotateUniformValues()
ModulePass * createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM=nullptr)
void initializeAMDGPUPreLegalizerCombinerPass(PassRegistry &)
FunctionPass * createAMDGPUPromoteAlloca()
FunctionPass * createSeparateConstOffsetFromGEPPass(bool LowerGEP=false)
char & EarlyIfConverterID
EarlyIfConverter - This pass performs if-conversion on SSA form by inserting cmov instructions.
char & SIPreEmitPeepholeID
ModulePass * createAMDGPURemoveIncompatibleFunctionsPass(const TargetMachine *)
FunctionPass * createSILowerI1CopiesPass()
void initializeGCNRegPressurePrinterPass(PassRegistry &)
void initializeAMDGPUArgumentUsageInfoPass(PassRegistry &)
FunctionPass * createBasicRegisterAllocator()
BasicRegisterAllocation Pass - This pass implements a degenerate global register allocator using the ...
void initializeGlobalISel(PassRegistry &)
Initialize all passes linked into the GlobalISel library.
Definition: GlobalISel.cpp:17
void initializeSIPreAllocateWWMRegsPass(PassRegistry &)
ModulePass * createR600OpenCLImageTypeLoweringPass()
FunctionPass * createAMDGPUCodeGenPreparePass()
FunctionPass * createAMDGPUISelDag(TargetMachine &TM, CodeGenOptLevel OptLevel)
This pass converts a legalized DAG into an AMDGPU-specific DAG, ready for instruction scheduling.
Target & getTheGCNTarget()
The target for GCN GPUs.
void initializeAMDGPUAtomicOptimizerPass(PassRegistry &)
char & MachineCSEID
MachineCSE - This pass performs global CSE on machine instructions.
Definition: MachineCSE.cpp:165
char & GCNDPPCombineID
FunctionPass * createAMDGPURegBankCombiner(bool IsOptNone)
char & SIWholeQuadModeID
std::unique_ptr< ScheduleDAGMutation > createLoadClusterDAGMutation(const TargetInstrInfo *TII, const TargetRegisterInfo *TRI, bool ReorderWhileClustering=false)
If ReorderWhileClustering is set to true, no attempt will be made to reduce reordering due to load clustering.
void initializeSIOptimizeExecMaskingPreRAPass(PassRegistry &)
void initializeAMDGPUMarkLastScratchLoadPass(PassRegistry &)
char & LiveVariablesID
LiveVariables pass - This pass computes the set of blocks in which each variable is live and sets mac...
void initializeAMDGPUCodeGenPreparePass(PassRegistry &)
FunctionPass * createGVNPass(bool NoMemDepAnalysis=false)
Create a legacy GVN pass.
Definition: GVN.cpp:3396
FunctionPass * createAMDGPURewriteUndefForPHILegacyPass()
void call_once(once_flag &flag, Function &&F, Args &&... ArgList)
Execute the function specified as a parameter once.
Definition: Threading.h:87
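A sketch of the once_flag/call_once pairing, which makes initialization idempotent under concurrency (the body is illustrative):
    static llvm::once_flag InitFlag;
    llvm::call_once(InitFlag, [] {
      // Runs at most once, even if several threads reach this point.
    });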
void initializeSILowerSGPRSpillsPass(PassRegistry &)
void initializeAMDGPULowerKernelAttributesPass(PassRegistry &)
char & SIInsertHardClausesID
FunctionPass * createAMDGPUMachineCFGStructurizerPass()
void initializeAMDGPUResourceUsageAnalysisPass(PassRegistry &)
void initializeSIFixSGPRCopiesPass(PassRegistry &)
char & GCNCreateVOPDID
FunctionPass * createInferAddressSpacesPass(unsigned AddressSpace=~0u)
char & VirtRegRewriterID
VirtRegRewriter pass.
Definition: VirtRegMap.cpp:227
void initializeSILowerI1CopiesPass(PassRegistry &)
char & SILowerControlFlowID
FunctionPass * createLowerSwitchPass()
FunctionPass * createVirtRegRewriter(bool ClearVirtRegs=true)
Definition: VirtRegMap.cpp:645
void initializeR600VectorRegMergerPass(PassRegistry &)
ImmutablePass * createExternalAAWrapperPass(std::function< void(Pass &, Function &, AAResults &)> Callback)
A wrapper pass around a callback which can be used to populate the AAResults in the AAResultsWrapperP...
void initializeSIOptimizeExecMaskingPass(PassRegistry &)
FunctionPass * createAMDGPUGlobalISelDivergenceLoweringPass()
FunctionPass * createSIMemoryLegalizerPass()
void initializeSIFoldOperandsPass(PassRegistry &)
void initializeSILowerControlFlowPass(PassRegistry &)
char & SIPeepholeSDWAID
void splitAMDGPUModule(const AMDGPUTargetMachine &TM, Module &M, unsigned N, function_ref< void(std::unique_ptr< Module > MPart)> ModuleCallback)
Splits the module M into N linkable partitions.
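A usage sketch for this splitting entry point, given a target machine TM and a Module M (the callback body is hypothetical):
    splitAMDGPUModule(TM, M, /*N=*/4,
                      [&](std::unique_ptr<llvm::Module> MPart) {
                        // Hand each partition to its own codegen job.
                      });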
char & SIFixVGPRCopiesID
char & TwoAddressInstructionPassID
TwoAddressInstruction - This pass reduces two-address instructions to use two operands.
void initializeAMDGPURegBankSelectPass(PassRegistry &)
FunctionPass * createAtomicExpandLegacyPass()
AtomicExpandPass - At IR level this pass replaces atomic instructions with __atomic_* library calls,...
MCRegisterInfo * createGCNMCRegisterInfo(AMDGPUDwarfFlavour DwarfFlavour)
FunctionPass * createStraightLineStrengthReducePass()
FunctionPass * createAMDGPUImageIntrinsicOptimizerPass(const TargetMachine *)
void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry &)
void initializeAMDGPULowerBufferFatPointersPass(PassRegistry &)
FunctionPass * createSIInsertWaitcntsPass()
FunctionPass * createEarlyCSEPass(bool UseMemorySSA=false)
Definition: EarlyCSE.cpp:1932
void initializeGCNDPPCombinePass(PassRegistry &)
char & PHIEliminationID
PHIElimination - This pass eliminates machine instruction PHI nodes by inserting copy instructions.
bool parseNamedRegisterReference(PerFunctionMIParsingState &PFS, Register &Reg, StringRef Src, SMDiagnostic &Error)
Definition: MIParser.cpp:3631
FunctionPass * createAMDGPULateCodeGenPreparePass()
char & AMDGPUMarkLastScratchLoadID
char & RenameIndependentSubregsID
This pass detects subregister lanes in a virtual register that are used independently of other lanes ...
std::unique_ptr< ScheduleDAGMutation > createAMDGPUExportClusteringDAGMutation()
void initializeAMDGPUPrintfRuntimeBindingPass(PassRegistry &)
void initializeAMDGPUPromoteAllocaPass(PassRegistry &)
void initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(PassRegistry &)
void initializeAMDGPUInsertDelayAluPass(PassRegistry &)
char & SIOptimizeExecMaskingID
void initializeAMDGPUUnifyMetadataPass(PassRegistry &)
char & SIFixSGPRCopiesID
FunctionPass * createSIAnnotateControlFlowPass()
Create the annotation pass.
void initializeAMDGPUAlwaysInlinePass(PassRegistry &)
char & DeadMachineInstructionElimID
DeadMachineInstructionElim - This pass removes dead machine instructions.
char & GCNPreRALongBranchRegID
void initializeAMDGPUPromoteKernelArgumentsPass(PassRegistry &)
static ArgDescriptor createStack(unsigned Offset, unsigned Mask=~0u)
static ArgDescriptor createArg(const ArgDescriptor &Arg, unsigned Mask)
static ArgDescriptor createRegister(Register Reg, unsigned Mask=~0u)
DenormalModeKind Input
Denormal treatment kind for floating point instruction inputs in the default floating-point environme...
@ PreserveSign
The sign of a flushed-to-zero number is preserved in the sign of 0.
@ IEEE
IEEE-754 denormal numbers preserved.
DenormalModeKind Output
Denormal flushing mode for floating point instruction results in the default floating point environme...
MachineFunctionInfo - This class can be derived from and used by targets to hold private target-speci...
MachineSchedContext provides enough context from the MachineScheduler pass for the target to instanti...
RegisterTargetMachine - Helper template for registering a target machine implementation,...
bool DX10Clamp
Used by the vector ALU to force DX10-style treatment of NaNs: when set, clamp NaN to zero; otherwise,...
DenormalMode FP64FP16Denormals
If this is set, neither input nor output denormals are flushed for both f64 and f16/v2f16 instructions...
bool IEEE
Floating point opcodes that support exception flag gathering, and that quiet and propagate signaling NaN inputs...
DenormalMode FP32Denormals
If this is set, neither input nor output denormals are flushed for most f32 instructions.
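A sketch composing a DenormalMode from the kinds above: flush results to zero with the sign preserved while keeping inputs IEEE (an arbitrary combination, chosen only to show the two fields):
    llvm::DenormalMode Mode(/*Output=*/llvm::DenormalMode::PreserveSign,
                            /*Input=*/llvm::DenormalMode::IEEE);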
The llvm::once_flag structure.
Definition: Threading.h:68
Targets should override this in a way that mirrors the implementation of llvm::MachineFunctionInfo.
SmallVector< StringValue > WWMReservedRegs
std::optional< SIArgumentInfo > ArgInfo
A wrapper around std::string which contains a source range that's being set during parsing.