37#define GET_CC_REGISTER_LISTS
38#include "AArch64GenCallingConv.inc"
39#define GET_REGINFO_TARGET_DESC
40#include "AArch64GenRegisterInfo.inc"
53 unsigned &RegToUseForCFI)
const {
54 if (AArch64::PPRRegClass.
contains(Reg))
57 if (AArch64::ZPRRegClass.
contains(Reg)) {
58 RegToUseForCFI = getSubReg(Reg, AArch64::dsub);
59 for (
int I = 0; CSR_AArch64_AAPCS_SaveList[
I]; ++
I) {
60 if (CSR_AArch64_AAPCS_SaveList[
I] == RegToUseForCFI)
72 assert(MF &&
"Invalid MachineFunction pointer.");
77 return CSR_AArch64_NoRegs_SaveList;
79 return CSR_AArch64_AllRegs_SaveList;
82 return CSR_Win_AArch64_Arm64EC_Thunk_SaveList;
90 return CSR_Win_AArch64_CFGuard_Check_SaveList;
95 Attribute::SwiftError))
96 return CSR_Win_AArch64_AAPCS_SwiftError_SaveList;
98 return CSR_Win_AArch64_AAPCS_SwiftTail_SaveList;
99 return CSR_Win_AArch64_AAPCS_SaveList;
102 return CSR_AArch64_AAVPCS_SaveList;
104 return CSR_AArch64_SVE_AAPCS_SaveList;
108 "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
109 "only supported to improve calls to SME ACLE save/restore/disable-za "
110 "functions, and is not intended to be used beyond that scope.");
114 "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
115 "only supported to improve calls to SME ACLE __arm_sme_state "
116 "and is not intended to be used beyond that scope.");
120 Attribute::SwiftError))
121 return CSR_AArch64_AAPCS_SwiftError_SaveList;
123 return CSR_AArch64_AAPCS_SwiftTail_SaveList;
125 return CSR_AArch64_RT_MostRegs_SaveList;
127 return CSR_AArch64_RT_AllRegs_SaveList;
131 return CSR_AArch64_AAPCS_X18_SaveList;
133 return CSR_AArch64_SVE_AAPCS_SaveList;
134 return CSR_AArch64_AAPCS_SaveList;
139 assert(MF &&
"Invalid MachineFunction pointer.");
141 "Invalid subtarget for getDarwinCalleeSavedRegs");
145 "Calling convention CFGuard_Check is unsupported on Darwin.");
147 return CSR_Darwin_AArch64_AAVPCS_SaveList;
150 "Calling convention SVE_VectorCall is unsupported on Darwin.");
154 "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0 is "
155 "only supported to improve calls to SME ACLE save/restore/disable-za "
156 "functions, and is not intended to be used beyond that scope.");
160 "Calling convention AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2 is "
161 "only supported to improve calls to SME ACLE __arm_sme_state "
162 "and is not intended to be used beyond that scope.");
165 ? CSR_Darwin_AArch64_CXX_TLS_PE_SaveList
166 : CSR_Darwin_AArch64_CXX_TLS_SaveList;
170 Attribute::SwiftError))
171 return CSR_Darwin_AArch64_AAPCS_SwiftError_SaveList;
173 return CSR_Darwin_AArch64_AAPCS_SwiftTail_SaveList;
175 return CSR_Darwin_AArch64_RT_MostRegs_SaveList;
177 return CSR_Darwin_AArch64_RT_AllRegs_SaveList;
179 return CSR_Darwin_AArch64_AAPCS_Win64_SaveList;
180 return CSR_Darwin_AArch64_AAPCS_SaveList;
185 assert(MF &&
"Invalid MachineFunction pointer.");
188 return CSR_Darwin_AArch64_CXX_TLS_ViaCopy_SaveList;
199 for (
size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
201 UpdatedCSRs.
push_back(AArch64::GPR64commonRegClass.getRegister(i));
211 unsigned Idx)
const {
213 if (RC == &AArch64::GPR32allRegClass &&
Idx == AArch64::hsub)
214 return &AArch64::FPR32RegClass;
215 else if (RC == &AArch64::GPR64allRegClass &&
Idx == AArch64::hsub)
216 return &AArch64::FPR64RegClass;
219 return AArch64GenRegisterInfo::getSubClassWithSubReg(RC,
Idx);
226 "Invalid subtarget for getDarwinCallPreservedMask");
229 return CSR_Darwin_AArch64_CXX_TLS_RegMask;
231 return CSR_Darwin_AArch64_AAVPCS_RegMask;
234 "Calling convention SVE_VectorCall is unsupported on Darwin.");
236 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
238 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
241 "Calling convention CFGuard_Check is unsupported on Darwin.");
246 return CSR_Darwin_AArch64_AAPCS_SwiftError_RegMask;
248 return CSR_Darwin_AArch64_AAPCS_SwiftTail_RegMask;
250 return CSR_Darwin_AArch64_RT_MostRegs_RegMask;
252 return CSR_Darwin_AArch64_RT_AllRegs_RegMask;
253 return CSR_Darwin_AArch64_AAPCS_RegMask;
262 return SCS ? CSR_AArch64_NoRegs_SCS_RegMask : CSR_AArch64_NoRegs_RegMask;
264 return SCS ? CSR_AArch64_AllRegs_SCS_RegMask : CSR_AArch64_AllRegs_RegMask;
274 return SCS ? CSR_AArch64_AAVPCS_SCS_RegMask : CSR_AArch64_AAVPCS_RegMask;
276 return SCS ? CSR_AArch64_SVE_AAPCS_SCS_RegMask
277 : CSR_AArch64_SVE_AAPCS_RegMask;
279 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
281 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2_RegMask;
283 return CSR_Win_AArch64_CFGuard_Check_RegMask;
287 return SCS ? CSR_AArch64_AAPCS_SwiftError_SCS_RegMask
288 : CSR_AArch64_AAPCS_SwiftError_RegMask;
292 return CSR_AArch64_AAPCS_SwiftTail_RegMask;
295 return SCS ? CSR_AArch64_RT_MostRegs_SCS_RegMask
296 : CSR_AArch64_RT_MostRegs_RegMask;
298 return SCS ? CSR_AArch64_RT_AllRegs_SCS_RegMask
299 : CSR_AArch64_RT_AllRegs_RegMask;
302 return SCS ? CSR_AArch64_AAPCS_SCS_RegMask : CSR_AArch64_AAPCS_RegMask;
308 return CSR_AArch64_AAPCS_RegMask;
315 return CSR_Darwin_AArch64_TLS_RegMask;
318 return CSR_AArch64_TLS_ELF_RegMask;
325 memcpy(UpdatedMask, *Mask,
sizeof(UpdatedMask[0]) * RegMaskSize);
327 for (
size_t i = 0; i < AArch64::GPR64commonRegClass.getNumRegs(); ++i) {
330 subregs_inclusive(AArch64::GPR64commonRegClass.getRegister(i))) {
341 return CSR_AArch64_SMStartStop_RegMask;
346 return CSR_AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0_RegMask;
350 return CSR_AArch64_NoRegs_RegMask;
365 return CSR_Darwin_AArch64_AAPCS_ThisReturn_RegMask;
366 return CSR_AArch64_AAPCS_ThisReturn_RegMask;
370 return CSR_AArch64_StackProbe_Windows_RegMask;
373std::optional<std::string>
377 return std::string(
"X19 is used as the frame base pointer register.");
388 for (
unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
394 " is clobbered by asynchronous signals when using Arm64EC.";
406 markSuperRegs(
Reserved, AArch64::WSP);
407 markSuperRegs(
Reserved, AArch64::WZR);
410 markSuperRegs(
Reserved, AArch64::W29);
415 markSuperRegs(
Reserved, AArch64::W13);
416 markSuperRegs(
Reserved, AArch64::W14);
417 markSuperRegs(
Reserved, AArch64::W23);
418 markSuperRegs(
Reserved, AArch64::W24);
419 markSuperRegs(
Reserved, AArch64::W28);
420 for (
unsigned i = AArch64::B16; i <= AArch64::B31; ++i)
424 for (
size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
426 markSuperRegs(
Reserved, AArch64::GPR32commonRegClass.getRegister(i));
430 markSuperRegs(
Reserved, AArch64::W19);
434 markSuperRegs(
Reserved, AArch64::W16);
455 markSuperRegs(
Reserved, AArch64::FPCR);
456 markSuperRegs(
Reserved, AArch64::FPSR);
459 markSuperRegs(
Reserved, AArch64::X27);
460 markSuperRegs(
Reserved, AArch64::X28);
461 markSuperRegs(
Reserved, AArch64::W27);
462 markSuperRegs(
Reserved, AArch64::W28);
473 for (
size_t i = 0; i < AArch64::GPR32commonRegClass.getNumRegs(); ++i) {
475 markSuperRegs(
Reserved, AArch64::GPR32commonRegClass.getRegister(i));
502 " function calls if any of the argument registers is reserved.")});
515 if (PhysReg == AArch64::ZA || PhysReg == AArch64::ZT0)
523 unsigned Kind)
const {
524 return &AArch64::GPR64spRegClass;
529 if (RC == &AArch64::CCRRegClass)
530 return &AArch64::GPR64RegClass;
548 if (hasStackRealignment(MF))
552 if (ST.hasSVE() || ST.isStreaming()) {
587 return HasReg(CC_AArch64_GHC_ArgRegs, Reg);
598 return HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
601 return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
604 return HasReg(CC_AArch64_Win64PCS_Swift_ArgRegs, Reg) ||
605 HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
611 return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
614 return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg) ||
615 HasReg(CC_AArch64_AAPCS_Swift_ArgRegs, Reg);
621 return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg);
624 return HasReg(CC_AArch64_DarwinPCS_ArgRegs, Reg) ||
625 HasReg(CC_AArch64_DarwinPCS_Swift_ArgRegs, Reg);
629 return HasReg(CC_AArch64_DarwinPCS_ILP32_VarArg_ArgRegs, Reg);
630 return HasReg(CC_AArch64_DarwinPCS_VarArg_ArgRegs, Reg);
633 HasReg(CC_AArch64_Win64_VarArg_ArgRegs, Reg);
634 return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
636 return HasReg(CC_AArch64_Win64_CFGuard_Check_ArgRegs, Reg);
642 return HasReg(CC_AArch64_Win64PCS_ArgRegs, Reg);
643 return HasReg(CC_AArch64_AAPCS_ArgRegs, Reg);
650 return TFI->
hasFP(MF) ? AArch64::FP : AArch64::SP;
677 "Expected SVE area to be calculated by this point");
700 for (
unsigned i = 0; !
MI->getOperand(i).isFI(); ++i)
701 assert(i < MI->getNumOperands() &&
702 "Instr doesn't have FrameIndex operand!");
713 if (!
MI->mayLoad() && !
MI->mayStore())
728 int64_t FPOffset =
Offset - 16 * 20;
765 assert(
MI &&
"Unable to get the legal offset for nil instruction.");
779 DL = Ins->getDebugLoc();
785 Register BaseReg =
MRI.createVirtualRegister(&AArch64::GPR64spRegClass);
786 MRI.constrainRegClass(BaseReg,
TII->getRegClass(MCID, 0,
this, MF));
803 while (!
MI.getOperand(i).isFI()) {
805 assert(i <
MI.getNumOperands() &&
"Instr doesn't have FrameIndex operand!");
812 assert(
Done &&
"Unable to resolve frame index!");
826 if (
MI.getOpcode() == AArch64::STGloop ||
827 MI.getOpcode() == AArch64::STZGloop) {
828 assert(FIOperandNum == 3 &&
829 "Wrong frame index operand for STGloop/STZGloop");
830 unsigned Op =
MI.getOpcode() == AArch64::STGloop ? AArch64::STGloop_wback
831 : AArch64::STZGloop_wback;
832 ScratchReg =
MI.getOperand(1).getReg();
833 MI.getOperand(3).ChangeToRegister(ScratchReg,
false,
false,
true);
835 MI.tieOperands(1, 3);
838 MI.getMF()->getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
839 MI.getOperand(FIOperandNum)
840 .ChangeToRegister(ScratchReg,
false,
false,
true);
850 assert(
Offset.getScalable() % 2 == 0 &&
"Invalid frame offset");
856 int64_t VGSized =
Offset.getScalable() / 2;
860 Ops.
append({dwarf::DW_OP_bregx, VG, 0ULL});
863 }
else if (VGSized < 0) {
866 Ops.
append({dwarf::DW_OP_bregx, VG, 0ULL});
873 int SPAdj,
unsigned FIOperandNum,
875 assert(SPAdj == 0 &&
"Unexpected");
884 int FrameIndex =
MI.getOperand(FIOperandNum).getIndex();
890 if (
MI.getOpcode() == TargetOpcode::STACKMAP ||
891 MI.getOpcode() == TargetOpcode::PATCHPOINT ||
892 MI.getOpcode() == TargetOpcode::STATEPOINT) {
898 MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg,
false );
899 MI.getOperand(FIOperandNum + 1).ChangeToImmediate(
Offset.getFixed());
903 if (
MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
907 "Frame offsets with a scalable component are not supported");
913 if (
MI.getOpcode() == AArch64::TAGPstack) {
916 FrameReg =
MI.getOperand(3).getReg();
928 MF, FrameIndex, FrameReg,
false,
true);
937 MI.getOperand(FIOperandNum)
938 .ChangeToRegister(ScratchReg,
false,
false,
true);
941 FrameReg = AArch64::SP;
946 MF, FrameIndex, FrameReg,
false,
true);
954 "Emergency spill slot is out of reach");
969 switch (RC->
getID()) {
972 case AArch64::GPR32RegClassID:
973 case AArch64::GPR32spRegClassID:
974 case AArch64::GPR32allRegClassID:
975 case AArch64::GPR64spRegClassID:
976 case AArch64::GPR64allRegClassID:
977 case AArch64::GPR64RegClassID:
978 case AArch64::GPR32commonRegClassID:
979 case AArch64::GPR64commonRegClassID:
984 case AArch64::FPR8RegClassID:
985 case AArch64::FPR16RegClassID:
986 case AArch64::FPR32RegClassID:
987 case AArch64::FPR64RegClassID:
988 case AArch64::FPR128RegClassID:
991 case AArch64::MatrixIndexGPR32_8_11RegClassID:
992 case AArch64::MatrixIndexGPR32_12_15RegClassID:
995 case AArch64::DDRegClassID:
996 case AArch64::DDDRegClassID:
997 case AArch64::DDDDRegClassID:
998 case AArch64::QQRegClassID:
999 case AArch64::QQQRegClassID:
1000 case AArch64::QQQQRegClassID:
1003 case AArch64::FPR128_loRegClassID:
1004 case AArch64::FPR64_loRegClassID:
1005 case AArch64::FPR16_loRegClassID:
1007 case AArch64::FPR128_0to7RegClassID:
1017 else if (hasStackRealignment(MF))
1030 ((DstRC->
getID() == AArch64::GPR64RegClassID) ||
1031 (DstRC->
getID() == AArch64::GPR64commonRegClassID)) &&
1032 MI->getOperand(0).getSubReg() &&
MI->getOperand(1).getSubReg())
1039 switch (
MI.getOpcode()) {
1040 case AArch64::COALESCER_BARRIER_FPR16:
1041 case AArch64::COALESCER_BARRIER_FPR32:
1042 case AArch64::COALESCER_BARRIER_FPR64:
1043 case AArch64::COALESCER_BARRIER_FPR128:
1059 if (
MI->isCopy() &&
SubReg != DstSubReg &&
1060 (AArch64::ZPRRegClass.hasSubClassEq(DstRC) ||
1061 AArch64::ZPRRegClass.hasSubClassEq(SrcRC))) {
1062 unsigned SrcReg =
MI->getOperand(1).getReg();
1063 if (
any_of(
MRI.def_instructions(SrcReg), IsCoalescerBarrier))
1065 unsigned DstReg =
MI->getOperand(0).getReg();
1066 if (
any_of(
MRI.use_nodbg_instructions(DstReg), IsCoalescerBarrier))
1075 return R == AArch64::VG;
unsigned const MachineRegisterInfo * MRI
static Register createScratchRegisterForInstruction(MachineInstr &MI, unsigned FIOperandNum, const AArch64InstrInfo *TII)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
This file implements the BitVector class.
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
This file contains constants used for implementing Dwarf debug support.
const HexagonInstrInfo * TII
INITIALIZE_PASS(RISCVInsertVSETVLI, DEBUG_TYPE, RISCV_INSERT_VSETVLI_NAME, false, false) char RISCVCoalesceVSETVLI const LiveIntervals * LIS
This file declares the machine register scavenger class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static unsigned getDwarfRegNum(unsigned Reg, const TargetRegisterInfo *TRI)
Go up the super-register chain until we hit a valid dwarf register number.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
StackOffset getNonLocalFrameIndexReference(const MachineFunction &MF, int FI) const override
getNonLocalFrameIndexReference - This method returns the offset used to reference a frame index locat...
bool hasFP(const MachineFunction &MF) const override
hasFP - Return true if the specified function should have a dedicated frame pointer register.
StackOffset resolveFrameIndexReference(const MachineFunction &MF, int FI, Register &FrameReg, bool PreferFP, bool ForSimm) const
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
unsigned getTaggedBasePointerOffset() const
uint64_t getStackSizeSVE() const
bool hasCalculatedStackSizeSVE() const
static const char * getRegisterName(MCRegister Reg, unsigned AltIdx=AArch64::NoRegAltName)
BitVector getStrictlyReservedRegs(const MachineFunction &MF) const
const TargetRegisterClass * getCrossCopyRegClass(const TargetRegisterClass *RC) const override
const uint32_t * getThisReturnPreservedMask(const MachineFunction &MF, CallingConv::ID) const
getThisReturnPreservedMask - Returns a call preserved mask specific to the case that 'returned' is on...
bool isReservedReg(const MachineFunction &MF, MCRegister Reg) const
const uint32_t * getCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const override
const MCPhysReg * getCalleeSavedRegsViaCopy(const MachineFunction *MF) const
BitVector getReservedRegs(const MachineFunction &MF) const override
bool shouldCoalesce(MachineInstr *MI, const TargetRegisterClass *SrcRC, unsigned SubReg, const TargetRegisterClass *DstRC, unsigned DstSubReg, const TargetRegisterClass *NewRC, LiveIntervals &LIS) const override
SrcRC and DstRC will be morphed into NewRC if this returns true.
bool requiresVirtualBaseRegisters(const MachineFunction &MF) const override
const TargetRegisterClass * getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const override
unsigned getRegPressureLimit(const TargetRegisterClass *RC, MachineFunction &MF) const override
Register materializeFrameBaseRegister(MachineBasicBlock *MBB, int FrameIdx, int64_t Offset) const override
Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx at the beginning of the basic ...
const TargetRegisterClass * getPointerRegClass(const MachineFunction &MF, unsigned Kind=0) const override
void UpdateCustomCalleeSavedRegs(MachineFunction &MF) const
bool requiresRegisterScavenging(const MachineFunction &MF) const override
bool isFrameOffsetLegal(const MachineInstr *MI, Register BaseReg, int64_t Offset) const override
void resolveFrameIndex(MachineInstr &MI, Register BaseReg, int64_t Offset) const override
bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override
needsFrameBaseReg - Returns true if the instruction's frame index reference would be better served by...
const uint32_t * getWindowsStackProbePreservedMask() const
Stack probing calls preserve different CSRs to the normal CC.
AArch64RegisterInfo(const Triple &TT)
bool isAnyArgRegReserved(const MachineFunction &MF) const
void emitReservedArgRegCallError(const MachineFunction &MF) const
bool regNeedsCFI(unsigned Reg, unsigned &RegToUseForCFI) const
Return whether the register needs a CFI entry.
bool isStrictlyReservedReg(const MachineFunction &MF, MCRegister Reg) const
bool eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, unsigned FIOperandNum, RegScavenger *RS=nullptr) const override
const uint32_t * getTLSCallPreservedMask() const
const uint32_t * getNoPreservedMask() const override
Register getFrameRegister(const MachineFunction &MF) const override
bool shouldAnalyzePhysregInMachineLoopInfo(MCRegister R) const override
void getOffsetOpcodes(const StackOffset &Offset, SmallVectorImpl< uint64_t > &Ops) const override
const MCPhysReg * getDarwinCalleeSavedRegs(const MachineFunction *MF) const
bool isAsmClobberable(const MachineFunction &MF, MCRegister PhysReg) const override
const uint32_t * SMEABISupportRoutinesCallPreservedMaskFromX0() const
const MCPhysReg * getCalleeSavedRegs(const MachineFunction *MF) const override
Code Generation virtual methods...
const uint32_t * getCustomEHPadPreservedMask(const MachineFunction &MF) const override
unsigned getLocalAddressRegister(const MachineFunction &MF) const
bool hasBasePointer(const MachineFunction &MF) const
const uint32_t * getDarwinCallPreservedMask(const MachineFunction &MF, CallingConv::ID) const
const uint32_t * getSMStartStopCallPreservedMask() const
bool useFPForScavengingIndex(const MachineFunction &MF) const override
bool cannotEliminateFrame(const MachineFunction &MF) const
bool isArgumentRegister(const MachineFunction &MF, MCRegister Reg) const override
void UpdateCustomCallPreservedMask(MachineFunction &MF, const uint32_t **Mask) const
std::optional< std::string > explainReservedReg(const MachineFunction &MF, MCRegister PhysReg) const override
bool requiresFrameIndexScavenging(const MachineFunction &MF) const override
unsigned getBaseRegister() const
bool isTargetWindows() const
bool isTargetDarwin() const
bool isTargetILP32() const
bool isXRegisterReservedForRA(size_t i) const
unsigned getNumXRegisterReserved() const
const AArch64TargetLowering * getTargetLowering() const override
bool isCallingConvWin64(CallingConv::ID CC) const
bool isXRegCustomCalleeSaved(size_t i) const
bool isWindowsArm64EC() const
bool isXRegisterReserved(size_t i) const
bool isTargetLinux() const
bool supportSwiftError() const override
Return true if the target supports swifterror attribute.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
static void appendOffset(SmallVectorImpl< uint64_t > &Ops, int64_t Offset)
Append Ops with operations to apply the Offset.
This class represents an Operation in the Expression.
Diagnostic information for unsupported feature in backend.
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Describe properties that are true of each instruction in the target description file.
bool regsOverlap(MCRegister RegA, MCRegister RegB) const
Returns true if the two registers are equal or alias each other.
Wrapper class representing physical registers. Should be passed by value.
MCSubRegIterator enumerates all sub-registers of Reg.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
bool hasVarSizedObjects() const
This method may be called any time after instruction selection is complete to determine if the stack ...
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool adjustsStack() const
Return true if this function adjusts the stack – e.g., when calling another function.
bool isFrameAddressTaken() const
This method may be called any time after instruction selection is complete to determine if there is a...
int64_t getLocalFrameSize() const
Get the size of the local object blob.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
uint32_t * allocateRegMask()
Allocate and initialize a register mask with NumRegister bits.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
bool hasEHFunclets() const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
Representation of each machine instruction.
MachineOperand class - Representation of each machine instruction operand.
void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
static unsigned getRegMaskSize(unsigned NumRegs)
Returns number of elements needed for a regmask array.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void setCalleeSavedRegs(ArrayRef< MCPhysReg > CSRs)
Sets the updated Callee Saved Registers list.
bool isScavengingFrameIndex(int FI) const
Query whether a frame index is a scavenging frame index.
Wrapper class representing virtual and physical registers.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
int64_t getFixed() const
Returns the fixed component of the stack.
bool DisableFramePointerElim(const MachineFunction &MF) const
DisableFramePointerElim - This returns true if frame pointer elimination optimization should be disab...
unsigned getID() const
Return the register class ID number.
Triple - Helper class for working with autoconf configuration names.
bool isOSDarwin() const
Is this a "Darwin" OS (macOS, iOS, tvOS, watchOS, XROS, or DriverKit).
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
@ MO_TAGGED
MO_TAGGED - With MO_PAGE, indicates that the page includes a memory tag in bits 56-63.
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
void initLLVMToCVRegMapping(MCRegisterInfo *MRI)
@ AArch64_VectorCall
Used between AArch64 Advanced SIMD functions.
@ Swift
Calling convention for Swift.
@ AArch64_SVE_VectorCall
Used between AArch64 SVE functions.
@ CFGuard_Check
Special calling convention on Windows for calling the Control Guard Check ICall function.
@ PreserveMost
Used for runtime calls that preserves most registers.
@ AnyReg
OBSOLETED - Used for stack based JavaScript calls.
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X2
Preserve X2-X15, X19-X29, SP, Z0-Z31, P0-P15.
@ CXX_FAST_TLS
Used for access functions.
@ AArch64_SME_ABI_Support_Routines_PreserveMost_From_X0
Preserve X0-X13, X19-X29, SP, Z0-Z31, P0-P15.
@ GHC
Used by the Glasgow Haskell Compiler (GHC).
@ PreserveAll
Used for runtime calls that preserves (almost) all registers.
@ Fast
Attempts to make calls as fast as possible (e.g.
@ Tail
Attempts to make calls as fast as possible while guaranteeing that tail call optimization can always b...
@ Win64
The C convention as implemented on Windows/x86-64 and AArch64.
@ SwiftTail
This follows the Swift calling convention in how arguments are passed but guarantees tail calls will ...
@ GRAAL
Used by GraalVM. Two additional registers are reserved.
@ ARM64EC_Thunk_X64
Calling convention used in the ARM64EC ABI to implement calls between x64 code and thunks.
@ C
The default llvm calling convention, compatible with C.
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, StackOffset &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int64_t *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
@ AArch64FrameOffsetIsLegal
Offset is legal.
@ AArch64FrameOffsetCanUpdate
Offset can apply, at least partly.
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, unsigned FrameReg, StackOffset &Offset, const AArch64InstrInfo *TII)
rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.