#include "llvm/IR/IntrinsicsPowerPC.h"

#define DEBUG_TYPE "ppc-lowering"

    cl::desc("disable vector permute decomposition"),

    "disable-auto-paired-vec-st",
    cl::desc("disable automatically generated 32-byte paired vector stores"),

    cl::desc("Set minimum number of entries to use a jump table on PPC"));

    cl::desc("max depth when checking alias info in GatherAllAliases()"));

    cl::desc("Set inclusive limit count of TLS local-dynamic access(es) in a "
             "function to use initial-exec"));

          "Number of shuffles lowered to a VPERM or XXPERM");
STATISTIC(NumDynamicAllocaProbed,
          "Number of dynamic stack allocations probed");
  initializeAddrModeMap();

  bool isPPC64 = Subtarget.isPPC64();

  if (!Subtarget.hasEFPU2())

  if (Subtarget.isISA3_0()) {

  if (!Subtarget.hasSPE()) {

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {

  if (Subtarget.useCRBits()) {

    if (isPPC64 || Subtarget.hasFPCVT()) {
                         isPPC64 ? MVT::i64 : MVT::i32);
                         isPPC64 ? MVT::i64 : MVT::i32);
                         isPPC64 ? MVT::i64 : MVT::i32);
                         isPPC64 ? MVT::i64 : MVT::i32);
                         isPPC64 ? MVT::i64 : MVT::i32);
                         isPPC64 ? MVT::i64 : MVT::i32);
                         isPPC64 ? MVT::i64 : MVT::i32);
                         isPPC64 ? MVT::i64 : MVT::i32);

  if (Subtarget.isISA3_0()) {
  if (!Subtarget.hasSPE()) {

  if (Subtarget.hasVSX()) {

  if (Subtarget.hasFSQRT()) {

  if (Subtarget.hasFPRND()) {

  if (Subtarget.hasSPE()) {

  if (Subtarget.hasSPE())

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))

  if (Subtarget.hasFCPSGN()) {

  if (Subtarget.hasFPRND()) {

  if (Subtarget.isISA3_1()) {

  if (Subtarget.isISA3_0()) {

  if (!Subtarget.useCRBits()) {

  if (!Subtarget.useCRBits())

  if (Subtarget.hasFPU()) {

  if (!Subtarget.useCRBits())

  if (Subtarget.hasSPE()) {

  if (Subtarget.hasDirectMove() && isPPC64) {

  if (TM.Options.UnsafeFPMath) {
  if (Subtarget.hasSPE()) {

  if (Subtarget.has64BitSupport()) {

  if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {

  if (Subtarget.hasSPE()) {

  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {

  if (Subtarget.use64BitRegs()) {

  if (Subtarget.has64BitSupport()) {

  if (Subtarget.hasVSX()) {

  if (Subtarget.hasAltivec()) {
    for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {

      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {

      if (Subtarget.hasVSX()) {

      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {

      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))

    if (!Subtarget.hasP8Vector()) {

    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
    if (Subtarget.hasP8Altivec())

    if (Subtarget.hasVSX()) {

    if (Subtarget.hasP8Altivec())

    if (Subtarget.isISA3_1()) {

    if (Subtarget.hasVSX()) {

    if (Subtarget.hasP8Vector()) {

    if (Subtarget.hasDirectMove() && isPPC64) {

    if (TM.Options.UnsafeFPMath) {

    if (Subtarget.hasP8Vector())

    if (Subtarget.hasP8Altivec()) {

    if (Subtarget.isISA3_1())

    if (Subtarget.hasP8Altivec()) {

    if (Subtarget.hasP9Vector()) {

    if (Subtarget.useCRBits()) {
  } else if (Subtarget.hasVSX()) {

    for (MVT VT : {MVT::f32, MVT::f64}) {

  if (Subtarget.hasP9Altivec()) {
    if (Subtarget.isISA3_1()) {

  if (Subtarget.hasP10Vector()) {

  if (Subtarget.pairedVectorMemops()) {

  if (Subtarget.hasMMA()) {
    if (Subtarget.isISAFuture())

  if (Subtarget.has64BitSupport())

  if (Subtarget.isISA3_1())

  if (Subtarget.hasAltivec()) {

  if (Subtarget.hasFPCVT())

  if (Subtarget.useCRBits())

  if (Subtarget.useCRBits()) {
  setLibcallName(RTLIB::MEMCPY, isPPC64 ? "___memmove64" : "___memmove");
  setLibcallName(RTLIB::MEMMOVE, isPPC64 ? "___memmove64" : "___memmove");
  setLibcallName(RTLIB::MEMSET, isPPC64 ? "___memset64" : "___memset");
  setLibcallName(RTLIB::BZERO, isPPC64 ? "___bzero64" : "___bzero");
  if (Subtarget.useCRBits()) {

void PPCTargetLowering::initializeAddrModeMap() {

  if (MaxAlign == MaxMaxAlign)
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 &&
        VTy->getPrimitiveSizeInBits().getFixedValue() >= 256)
      MaxAlign = Align(32);
    else if (VTy->getPrimitiveSizeInBits().getFixedValue() >= 128 &&
      MaxAlign = Align(16);
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)

  if (Subtarget.hasAltivec())
  return Alignment.value();
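  // Worked example for the logic above: with Altivec available, a by-value
  // struct containing a v4i32 member is given 16-byte alignment, while a
  // type holding a 256-bit (or wider) vector member is given 32-byte
  // alignment, subject to the MaxMaxAlign cap.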
  return Subtarget.hasSPE();

    Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const {
  if (!Subtarget.isPPC64() || !Subtarget.hasVSX())

  if (auto *VTy = dyn_cast<VectorType>(VectorTy)) {
    if (VTy->getScalarType()->isIntegerTy()) {
      if (ElemSizeInBits == 32) {
      if (ElemSizeInBits == 64) {
    return "PPCISD::FTSQRT";
    return "PPCISD::FSQRT";
    return "PPCISD::XXSPLTI_SP_TO_DP";
    return "PPCISD::XXSPLTI32DX";
    return "PPCISD::XXPERM";
    return "PPCISD::CALL_RM";
    return "PPCISD::CALL_NOP_RM";
    return "PPCISD::CALL_NOTOC_RM";
    return "PPCISD::BCTRL_RM";
    return "PPCISD::BCTRL_LOAD_TOC_RM";
    return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
    return "PPCISD::ANDI_rec_1_EQ_BIT";
    return "PPCISD::ANDI_rec_1_GT_BIT";
    return "PPCISD::ST_VSR_SCAL_INT";
    return "PPCISD::PADDI_DTPREL";
    return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";
    return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR";
    return "PPCISD::STRICT_FADDRTZ";
    return "PPCISD::STRICT_FCTIDZ";
    return "PPCISD::STRICT_FCTIWZ";
    return "PPCISD::STRICT_FCTIDUZ";
    return "PPCISD::STRICT_FCTIWUZ";
    return "PPCISD::STRICT_FCFID";
    return "PPCISD::STRICT_FCFIDU";
    return "PPCISD::STRICT_FCFIDS";
    return "PPCISD::STRICT_FCFIDUS";
    return "PPCISD::STORE_COND";

  return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

    return CFP->getValueAPF().isZero();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
    return CFP->getValueAPF().isZero();

  return Op < 0 || Op == Val;
  if (ShuffleKind == 0) {
    for (unsigned i = 0; i != 16; ++i)
  } else if (ShuffleKind == 2) {
    for (unsigned i = 0; i != 16; ++i)
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
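  // Illustration: vpkuhum keeps one byte of each halfword. In the unary
  // case (ShuffleKind == 1) a little-endian match keeps the even bytes, so
  // the expected v16i8 mask is <0, 2, 4, ..., 14, 0, 2, ..., 14>; on
  // big-endian (j == 1) the odd bytes are kept instead.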
  if (ShuffleKind == 0) {
    for (unsigned i = 0; i != 16; i += 2)
  } else if (ShuffleKind == 2) {
    for (unsigned i = 0; i != 16; i += 2)
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
  if (!Subtarget.hasP8Vector())

  if (ShuffleKind == 0) {
    for (unsigned i = 0; i != 16; i += 4)
  } else if (ShuffleKind == 2) {
    for (unsigned i = 0; i != 16; i += 4)
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
                    unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)
    for (unsigned j = 0; j != UnitSize; ++j) {
                            LHSStart+j+i*UnitSize) ||
                            RHSStart+j+i*UnitSize))
    if (ShuffleKind == 1)
    else if (ShuffleKind == 2)

    if (ShuffleKind == 1)
    else if (ShuffleKind == 0)

    if (ShuffleKind == 1)
    else if (ShuffleKind == 2)

    if (ShuffleKind == 1)
    else if (ShuffleKind == 0)
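  // Illustration: with UnitSize == 1 on big-endian, a two-input vmrglb
  // interleaves the high halves of the sources, i.e. isVMerge(N, 1, 8, 24)
  // expects the mask <8, 24, 9, 25, ..., 15, 31>. The little-endian cases
  // pass different start offsets because the same hardware instruction then
  // matches the mirrored mask.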
                       unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
                            i*RHSStartValue+j+IndexOffset) ||
                            i*RHSStartValue+j+IndexOffset+8))

    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1)
    else if (ShuffleKind == 2)

    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1)
    else if (ShuffleKind == 0)
  if (N->getValueType(0) != MVT::v16i8)

  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)

  if (i == 16) return -1;

  if (ShiftAmt < i) return -1;

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    for (++i; i != 16; ++i)
  } else if (ShuffleKind == 1) {
    for (++i; i != 16; ++i)

    ShiftAmt = 16 - ShiftAmt;
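  // Illustration: on big-endian a two-input mask of <4, 5, 6, ..., 19> is
  // vsldoi with ShiftAmt == 4: the result is the concatenation of the two
  // sources shifted left by four bytes. Little-endian element numbering
  // runs the other way, hence the "ShiftAmt = 16 - ShiftAmt" correction.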
  EVT VT = N->getValueType(0);
  if (VT == MVT::v2i64 || VT == MVT::v2f64)
    return EltSize == 8 && N->getMaskElt(0) == N->getMaskElt(1);

         EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");

  if (N->getMaskElt(0) % EltSize != 0)

  unsigned ElementBase = N->getMaskElt(0);

  if (ElementBase >= 16)

  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
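  // Illustration: for EltSize == 4, splatting word element 1 corresponds to
  // the byte mask <4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7>. The
  // first EltSize bytes fix ElementBase (here 4) and every later group must
  // repeat them exactly, undefs excepted.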
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16];
  for (unsigned i = 0; i < NumOfElem; ++i) {
    MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {

    for (unsigned int j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
                           unsigned &InsertAtByte, bool &Swap, bool IsLE) {

  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };

  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;

  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;

  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;

  if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
      (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
    ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;

  if (N->getOperand(1).isUndef()) {

    unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
    if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 12 : 0;
    if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 8 : 4;
    if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
      InsertAtByte = IsLE ? 4 : 8;
    if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
      InsertAtByte = IsLE ? 0 : 12;
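  // Illustration: viewing the v16i8 mask as four words, <4, 1, 2, 3> takes
  // word 0 of the second input and drops it into word 0 of the first (the
  // M0 > 3 case above, with the remaining words in place). That maps onto a
  // single xxinsertw, with ShiftElts rotating the source word into the
  // position the instruction inserts from.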
                           bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;

  if (N->getOperand(1).isUndef()) {
    assert(M0 < 4 && "Indexing into an undef vector?");
    if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)

    ShiftElts = IsLE ? (4 - M0) % 4 : M0;

  if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)

  if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {

    ShiftElts = (8 - M0) % 8;
  } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {

    ShiftElts = (4 - M0) % 4;

  if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
  } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
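  // Illustration for the unary case: a word mask of <1, 2, 3, 0> is a
  // one-word rotation of a single input. On big-endian that is
  // ShiftElts == M0 == 1; on little-endian the equivalent xxsldwi shift is
  // (4 - M0) % 4 == 3, exactly as computed above.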
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  for (int i = 0; i < 16; i += Width)
    if (N->getMaskElt(i) != i + Width - 1)
                             bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  unsigned M0 = N->getMaskElt(0) / 8;
  unsigned M1 = N->getMaskElt(8) / 8;
  assert(((M0 | M1) < 4) && "A mask element out of bounds?");

  if (N->getOperand(1).isUndef()) {
    if ((M0 | M1) < 2) {
      DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);

    if (M0 > 1 && M1 < 2) {
    } else if (M0 < 2 && M1 > 1) {

      DM = (((~M1) & 1) << 1) + ((~M0) & 1);

    if (M0 < 2 && M1 > 1) {
    } else if (M0 > 1 && M1 < 2) {

      DM = (M0 << 1) + (M1 & 1);
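  // Illustration: xxpermdi's two-bit DM field picks one doubleword from
  // each source. In the unary little-endian case, a doubleword mask of
  // <1, 0> gives M0 = 1, M1 = 0 and DM = (((~0) & 1) << 1) + ((~1) & 1) = 2,
  // which is the doubleword-swap (xxswapd) pattern.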
  if (VT == MVT::v2i64 || VT == MVT::v2f64)

  return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;

      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))

    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue;

      if (!UniquedVals[Multiple-1].getNode())

      if (!UniquedVals[Multiple-1].getNode())
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))

  unsigned ValSizeInBytes = EltSize;
    Value = CN->getZExtValue();
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = llvm::bit_cast<uint32_t>(CN->getValueAPF().convertToFloat());

  if (ValSizeInBytes < ByteSize) return SDValue();

  if (MaskVal == 0) return SDValue();

  if (SignExtend32<5>(MaskVal) == MaskVal)
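  // Illustration: the SignExtend32<5> round-trip accepts exactly the values
  // a vspltis[bhw] immediate can encode, i.e. -16..15. A v16i8 splat of
  // 0xFE sign-extends to -2 and matches; a splat of 0x7F (127) does not fit
  // in five signed bits and must be materialized some other way.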
  if (!isa<ConstantSDNode>(N))

  Imm = (int16_t)N->getAsZExtVal();
  if (N->getValueType(0) == MVT::i32)
    return Imm == (int32_t)N->getAsZExtVal();

  return Imm == (int64_t)N->getAsZExtVal();
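// Illustration: the truncate-and-compare above is a compact range check.
// For 0x7FFF the int16_t round trip reproduces 32767 and is accepted; for
// 0x8000 it yields -32768 != 32768, so the value is rejected as a signed
// 16-bit immediate.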
  return (~(LHSKnown.Zero | RHSKnown.Zero) == 0);

    if (MemSDNode *Memop = dyn_cast<MemSDNode>(U)) {
      if (Memop->getMemoryVT() == MVT::f64) {
        Base = N.getOperand(0);

  if (!isa<ConstantSDNode>(N))

  Imm = (int64_t)N->getAsZExtVal();
  return isInt<34>(Imm);
      (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))

    Base = N.getOperand(0);
  } else if (N.getOpcode() == ISD::OR) {
        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))

    if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
      Base = N.getOperand(0);

        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
      Base = N.getOperand(0);
    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
      assert(!N.getOperand(1).getConstantOperandVal(1) &&
             "Cannot handle constant offsets yet!");
      Disp = N.getOperand(1).getOperand(0);
      Base = N.getOperand(0);
  } else if (N.getOpcode() == ISD::OR) {
        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
            dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
        Base = N.getOperand(0);
        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
                                 CN->getValueType(0));

    if ((CN->getValueType(0) == MVT::i32 ||
         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
        (!EncodingAlignment ||
         isAligned(*EncodingAlignment, CN->getZExtValue()))) {
      int Addr = (int)CN->getZExtValue();
      unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;

  if (N.getValueType() != MVT::i64)

    Base = N.getOperand(0);

    Base = N.getOperand(0);

      !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Base = N.getOperand(0);

  Ty *PCRelCand = dyn_cast<Ty>(N);

  if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
      isValidPCRelNode<GlobalAddressSDNode>(N) ||
      isValidPCRelNode<JumpTableSDNode>(N) ||
      isValidPCRelNode<BlockAddressSDNode>(N))
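// Note: these selectors implement the classic PowerPC addressing split:
// D-form (base register plus signed 16-bit displacement, with DS-form
// additionally requiring 4-byte-aligned offsets, hence EncodingAlignment),
// X-form (register plus register), and, on ISA 3.1, PC-relative operands,
// which the isValidPCRelNode checks admit for constant pools, globals,
// jump tables and block addresses.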
  EVT MemVT = LD->getMemoryVT();

  if (!ST.hasP8Vector())
  if (!ST.hasP9Vector())

    if (UI.getUse().get().getResNo() == 0 &&

    Ptr = LD->getBasePtr();
    VT = LD->getMemoryVT();
    Alignment = LD->getAlign();
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    Ptr = ST->getBasePtr();
    VT = ST->getMemoryVT();
    Alignment = ST->getAlign();

  if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))

    SDValue Val = cast<StoreSDNode>(N)->getValue();

  if (VT != MVT::i64) {
    if (Alignment < Align(4))

  if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
      isa<ConstantSDNode>(Offset))
                              unsigned &HiOpFlags, unsigned &LoOpFlags,

  const bool Is64Bit = Subtarget.isPPC64();
  EVT VT = Is64Bit ? MVT::i64 : MVT::i32;

  EVT PtrVT = Op.getValueType();

    return getTOCEntry(DAG, SDLoc(CP), GA);

  unsigned MOHiFlag, MOLoFlag;

    return getTOCEntry(DAG, SDLoc(CP), GA);

  EVT PtrVT = Op.getValueType();

    return getTOCEntry(DAG, SDLoc(JT), GA);

  unsigned MOHiFlag, MOLoFlag;

    return getTOCEntry(DAG, SDLoc(GA), GA);

  EVT PtrVT = Op.getValueType();

    return getTOCEntry(DAG, SDLoc(BASDN), GA);

  unsigned MOHiFlag, MOLoFlag;

    return LowerGlobalTLSAddressAIX(Op, DAG);

  return LowerGlobalTLSAddressLinux(Op, DAG);
      if (II->getOpcode() == Instruction::Call)
        if (const CallInst *CI = dyn_cast<const CallInst>(&*II))
          if (Function *CF = CI->getCalledFunction())
            if (CF->isDeclaration() &&
                CF->getIntrinsicID() == Intrinsic::threadlocal_address)
                    dyn_cast<GlobalValue>(II->getOperand(0))) {

  unsigned TLSGVCnt = TLSGV.size();

            << " function is using the TLS-IE model for TLS-LD access.\n");

  bool Is64Bit = Subtarget.isPPC64();

  if (Subtarget.hasAIXShLibTLSModelOpt())

  bool HasAIXSmallLocalExecTLS = Subtarget.hasAIXSmallLocalExecTLS();
  bool HasAIXSmallTLSGlobalAttr = false;

  SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);

    if (GVar->hasAttribute("aix-small-tls"))
      HasAIXSmallTLSGlobalAttr = true;

  if ((HasAIXSmallLocalExecTLS || HasAIXSmallTLSGlobalAttr) &&
      IsTLSLocalExecModel) {

    if (HasAIXSmallLocalExecTLS || HasAIXSmallTLSGlobalAttr)
          "currently only supported on AIX (64-bit mode).");
  bool HasAIXSmallLocalDynamicTLS = Subtarget.hasAIXSmallLocalDynamicTLS();

  if (!Is64Bit && HasAIXSmallLocalDynamicTLS)
        "currently only supported on AIX (64-bit mode).");

  SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);
      dyn_cast_or_null<GlobalVariable>(M->getOrInsertGlobal(
  assert(TLSGV && "Not able to create GV for _$TLSML.");
  SDValue ModuleHandleTOC = getTOCEntry(DAG, dl, ModuleHandleTGA);

  if (HasAIXSmallLocalDynamicTLS) {

  return DAG.getNode(ISD::ADD, dl, PtrVT, ModuleHandle, VariableOffset);

  SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);
  SDValue RegionHandle = getTOCEntry(DAG, dl, RegionHandleTGA);
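  // Note (reading of the surrounding fragments): this is the AIX
  // general-dynamic TLS path, where the region handle and the variable
  // offset are both materialized through TOC entries and combined by the
  // TLS access sequence, while the local-dynamic path above shares a single
  // _$TLSML module handle across all such accesses in the module.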
  bool is64bit = Subtarget.isPPC64();

  if (!TM.isPositionIndependent())

                     PtrVT, GOTPtr, TGA, TGA);
                     PtrVT, TLSAddr, TGA);

  EVT PtrVT = Op.getValueType();

    return getTOCEntry(DAG, DL, GA);

  unsigned MOHiFlag, MOLoFlag;

    return getTOCEntry(DAG, DL, GA);
  bool IsStrict = Op->isStrictFPOpcode();
      cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();

  EVT LHSVT = LHS.getValueType();

  if (LHSVT == MVT::f128) {
    assert(!Subtarget.hasP9Vector() &&
           "SETCC for f128 is already legal under Power9!");

  assert(!IsStrict && "Don't know how to handle STRICT_FSETCC!");

  if (Op.getValueType() == MVT::v2i64) {
    if (LHS.getValueType() == MVT::v2i64) {

      int ShuffV[] = {1, 0, 3, 2};
                                 dl, MVT::v4i32, Shuff, SetCC32));

    if (C->isAllOnes() || C->isZero())

  EVT VT = Op.getValueType();

  EVT VT = Node->getValueType(0);

  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  if (VT == MVT::i64) {

  InChain = OverflowArea.getValue(1);

  InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,

  assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");

  return Op.getOperand(0);

         "Expecting Inline ASM node.");

  if (Op.getOperand(NumOps - 1).getValueType() == MVT::Glue)

    unsigned NumVals = Flags.getNumOperandRegisters();

    switch (Flags.getKind()) {
      for (; NumVals; --NumVals, ++i) {
        Register Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
        if (Reg != PPC::LR && Reg != PPC::LR8)

  bool isPPC64 = (PtrVT == MVT::i64);

  Entry.Ty = IntPtrTy;
  Entry.Node = Trmp;
  Args.push_back(Entry);

  Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
                               isPPC64 ? MVT::i64 : MVT::i32);
  Args.push_back(Entry);

  Entry.Node = FPtr;
  Args.push_back(Entry);
  Entry.Node = Nest;
  Args.push_back(Entry);

  CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(

  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
  return CallResult.second;
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),

  uint64_t FrameOffset = PtrVT.getSizeInBits()/8;

  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);

  SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
  nextOffset += FrameOffset;
  nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);

  return DAG.getStore(thirdStore, dl, FR, nextPtr,

static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
                                PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
                                PPC::F11, PPC::F12, PPC::F13};
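// Per the 64-bit PowerPC ELF and AIX ABIs, F1-F13 are the thirteen
// floating-point argument registers; the argument-passing code below
// indexes this table with the running FPR argument count.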
                                      unsigned PtrByteSize) {
  if (Flags.isByVal())
    ArgSize = Flags.getByValSize();

  if (!Flags.isInConsecutiveRegs())
    ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

                                            unsigned PtrByteSize) {
  Align Alignment(PtrByteSize);

  if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
      ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
      ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
      ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
    Alignment = Align(16);

  if (Flags.isByVal()) {
    auto BVAlign = Flags.getNonZeroByValAlign();
    if (BVAlign > PtrByteSize) {
      if (BVAlign.value() % PtrByteSize != 0)
            "ByVal alignment is not a multiple of the pointer size");

      Alignment = BVAlign;

  if (Flags.isInConsecutiveRegs()) {
    if (Flags.isSplit() && OrigVT != MVT::ppcf128)

                                  unsigned PtrByteSize, unsigned LinkageSize,
                                  unsigned ParamAreaSize, unsigned &ArgOffset,
                                  unsigned &AvailableFPRs,
                                  unsigned &AvailableVRs) {
  bool UseMemory = false;

  ArgOffset = alignTo(ArgOffset, Alignment);

  if (ArgOffset >= LinkageSize + ParamAreaSize)

  if (Flags.isInConsecutiveRegsLast())
    ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

  if (ArgOffset > LinkageSize + ParamAreaSize)

  if (!Flags.isByVal()) {
    if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
      if (AvailableFPRs > 0) {

    if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
        ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
        ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
        ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
      if (AvailableVRs > 0) {
                                 unsigned NumBytes) {

SDValue PPCTargetLowering::LowerFormalArguments(
    return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
    return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
  return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,

SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
  const Align PtrAlign(4);

  CCInfo.AllocateStack(LinkageSize, PtrAlign);

  CCInfo.PreAnalyzeFormalArguments(Ins);

  CCInfo.clearWasPPCF128();
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
        RC = &PPC::GPRCRegClass;
        if (Subtarget.hasP8Vector())
          RC = &PPC::VSSRCRegClass;
        else if (Subtarget.hasSPE())
          RC = &PPC::GPRCRegClass;
          RC = &PPC::F4RCRegClass;
        if (Subtarget.hasVSX())
          RC = &PPC::VSFRCRegClass;
        else if (Subtarget.hasSPE())
          RC = &PPC::GPRCRegClass;
          RC = &PPC::F8RCRegClass;
        RC = &PPC::VRRCRegClass;
        RC = &PPC::VRRCRegClass;
        RC = &PPC::VRRCRegClass;

      if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
        assert(i + 1 < e && "No second half of double precision argument");
                                     ValVT == MVT::i1 ? MVT::i32 : ValVT);
        if (ValVT == MVT::i1)

      ArgOffset += ArgSize - ObjSize;

  CCByValInfo.AllocateStack(CCInfo.getStackSize(), PtrAlign);

  unsigned MinReservedArea = CCByValInfo.getStackSize();
  MinReservedArea = std::max(MinReservedArea, LinkageSize);

      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  const unsigned NumGPArgRegs = std::size(GPArgRegs);

      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
  unsigned NumFPArgRegs = std::size(FPArgRegs);

    int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
        PtrVT.getSizeInBits() / 8, CCInfo.getStackSize(), true));
    for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
        VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);

    for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
        VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);

  if (!MemOps.empty())

                                          const SDLoc &dl) const {
  else if (Flags.isZExt())
SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
         "fastcc not supported on varargs functions");

  unsigned PtrByteSize = 8;

      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
      PPC::X7, PPC::X8, PPC::X9, PPC::X10,

      PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
      PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13

  const unsigned Num_GPR_Regs = std::size(GPR);
  const unsigned Num_VR_Regs = std::size(VR);

  bool HasParameterArea = !isELFv2ABI || isVarArg;
  unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = Num_FPR_Regs;
  unsigned AvailableVRs = Num_VR_Regs;

  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    if (Ins[i].Flags.isNest())
                              PtrByteSize, LinkageSize, ParamAreaSize,
                              NumBytes, AvailableFPRs, AvailableVRs))
      HasParameterArea = true;
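  // Note: under the ELFv2 ABI the caller may omit the parameter save area
  // when every argument fits in registers, so the scan above only forces
  // HasParameterArea once the per-argument check (elided here) reports that
  // some argument would have to live in memory.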
  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    EVT OrigVT = Ins[ArgNo].ArgVT;
    unsigned ArgSize = ObjSize;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();

    unsigned CurArgOffset;

    auto ComputeArgOffset = [&]() {
      ArgOffset = alignTo(ArgOffset, Alignment);
      CurArgOffset = ArgOffset;

      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, Num_GPR_Regs);

    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

      if (HasParameterArea ||
          ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
      if (ObjSize < PtrByteSize) {
        if (!isLittleEndian) {
        if (GPR_idx != Num_GPR_Regs) {
          ArgOffset += PtrByteSize;

      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        if (GPR_idx == Num_GPR_Regs)
          unsigned StoreSizeInBits = std::min(PtrByteSize, (ObjSize - j)) * 8;
      ArgOffset += ArgSize;

      if (Flags.isNest()) {
        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

      if (GPR_idx != Num_GPR_Regs) {
        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
        ArgSize = PtrByteSize;

      if (FPR_idx != Num_FPR_Regs) {
        if (ObjectVT == MVT::f32)
                           Subtarget.hasP8Vector()
                               ? &PPC::VSSRCRegClass
                               : &PPC::F4RCRegClass);
                               ? &PPC::VSFRCRegClass
                               : &PPC::F8RCRegClass);

        if (ObjectVT == MVT::f32) {
          if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))

      ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
      ArgOffset += ArgSize;
      if (Flags.isInConsecutiveRegsLast())
        ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

      if (VR_idx != Num_VR_Regs) {

      if (ObjSize < ArgSize && !isLittleEndian)
        CurArgOffset += ArgSize - ObjSize;

  unsigned MinReservedArea;
  if (HasParameterArea)
    MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
    MinReservedArea = LinkageSize;

  int Depth = ArgOffset;

  for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
       GPR_idx < Num_GPR_Regs; ++GPR_idx) {

  if (!MemOps.empty())
                                    unsigned ParamSize) {
  if (!isTailCall) return 0;

  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;

  if (SPDiff < FI->getTailCallSPDelta())

         "PC Relative callers do not have a TOC and cannot share a TOC Base");

  if (!TM.shouldAssumeDSOLocal(CalleeGV))

  const Function *F = dyn_cast<Function>(CalleeGV);
  const GlobalAlias *Alias = dyn_cast<GlobalAlias>(CalleeGV);
    F = dyn_cast<Function>(GlobalObj);

  if (TM.getFunctionSections() || CalleeGV->hasComdat() ||
      Caller->hasComdat() || CalleeGV->getSection() != Caller->getSection())
  if (const auto *F = dyn_cast<Function>(CalleeGV)) {
    if (F->getSectionPrefix() != Caller->getSectionPrefix())
  const unsigned PtrByteSize = 8;

      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
      PPC::X7, PPC::X8, PPC::X9, PPC::X10,

      PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
      PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13

  const unsigned NumGPRs = std::size(GPR);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = std::size(VR);
  const unsigned ParamAreaSize = NumGPRs * PtrByteSize;

  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = NumFPRs;
  unsigned AvailableVRs = NumVRs;
    if (Param.Flags.isNest()) continue;
                            LinkageSize, ParamAreaSize, NumBytes,
                            AvailableFPRs, AvailableVRs))

  auto CalleeArgEnd = CB.arg_end();
  for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
    const Value *CalleeArg = *CalleeArgIter;
    const Value *CallerArg = &(*CallerArgIter);
    if (CalleeArg == CallerArg)
        isa<UndefValue>(CalleeArg))

  if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
    bool isCalleeExternalSymbol) const {

  if (DisableSCO && !TailCallOpt) return false;

  if (isVarArg) return false;

bool PPCTargetLowering::IsEligibleForTailCallOptimization(

  if (!C) return nullptr;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||

                        (int)C->getZExtValue() >> 2, SDLoc(Op),

struct TailCallArgumentInfo {

  TailCallArgumentInfo() = default;
  for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
    SDValue Arg = TailCallArgs[i].Arg;
    SDValue FIN = TailCallArgs[i].FrameIdxOp;
    int FI = TailCallArgs[i].FrameIdx;
        Chain, dl, Arg, FIN,

                                       int SPDiff, const SDLoc &dl) {
    bool isPPC64 = Subtarget.isPPC64();
    int SlotSize = isPPC64 ? 8 : 4;
    int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
                                      NewRetAddrLoc, true);
    EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
    Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,

                               SDValue Arg, int SPDiff, unsigned ArgOffset,
  int Offset = ArgOffset + SPDiff;
  EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
  TailCallArgumentInfo Info;
  Info.FrameIdxOp = FIN;

SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
  EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
  LROpOut = getReturnAddrFrameIndex(DAG);

  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
                       Flags.getNonZeroByValAlign(), false, false, false,

                          SDValue PtrOff, int SPDiff, unsigned ArgOffset,
                          bool isPPC64,

                                  const SDLoc &dl, int SPDiff,
                                  unsigned NumBytes, SDValue LROp,

  if (!MemOpChains2.empty())
SDValue PPCTargetLowering::LowerCallResult(

  CCRetInfo.AnalyzeCallResult(

  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {

    if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
      Chain = Lo.getValue(1);
      InGlue = Lo.getValue(2);
      Chain = Hi.getValue(1);
      InGlue = Hi.getValue(2);
  auto *G = dyn_cast<GlobalAddressSDNode>(Callee);

                         bool IsStrictFPCall = false) {

  unsigned RetOpc = 0;

  auto *G = dyn_cast<GlobalAddressSDNode>(Callee);

  if (IsStrictFPCall) {

  auto isLocalCallee = [&]() {
           !isa_and_nonnull<GlobalIFunc>(GV);

  const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {

  auto *G = dyn_cast<GlobalAddressSDNode>(Callee);
    const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();

    assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
    return getAIXFuncEntryPointSymbolSDNode(GV);

    const char *SymName = S->getSymbol();
            dyn_cast_or_null<Function>(Mod->getNamedValue(SymName)))
      return getAIXFuncEntryPointSymbolSDNode(F);

  const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) {
    SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data();

  assert(Callee.getNode() && "What no callee?");
         "Expected a CALLSEQ_STARTSDNode.");

  SDValue MTCTROps[] = {Chain, Callee, Glue};
  EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
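  // Note: this is the indirect-call path for function-descriptor ABIs (AIX
  // and 64-bit ELFv1): the descriptor holds the entry point, the callee's
  // TOC pointer and an environment pointer, so the sequence below issues
  // the dependent loads and moves the entry address to CTR for the
  // eventual bctrl.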
  auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()

  const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;

  SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
                                    Alignment, MMOFlags);

      DAG.getLoad(RegVT, dl, LDChain, AddTOC,

      DAG.getLoad(RegVT, dl, LDChain, AddPtr,

         "Nest parameter is not supported on AIX.");

    SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,

  const bool IsPPC64 = Subtarget.isPPC64();

  const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
                                   RegsToPass[i].second.getValueType()));

  assert(Mask && "Missing call preserved mask for calling convention");
SDValue PPCTargetLowering::FinishCall(

  if (!CFlags.IsIndirect)
                             dl, CFlags.HasNest, Subtarget);

  if (CFlags.IsTailCall) {
            cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
           isa<ConstantSDNode>(Callee) ||
           "Expecting a global address, external symbol, absolute value, "
           "register or an indirect tail call when PC Relative calls are "
           "Unexpected call opcode for a tail call.");

  std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
  Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);

  Chain = DAG.getCALLSEQ_END(Chain, NumBytes, BytesCalleePops, Glue, dl);

  return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
  const GlobalValue *CalleeGV = dyn_cast<GlobalValue>(CalleeFunc);

  return isEligibleForTCO(CalleeGV, CalleeCC, CallerCC, CB,
                          CalleeFunc->isVarArg(), Outs, Ins, CallerFunc,

bool PPCTargetLowering::isEligibleForTCO(
    bool isCalleeExternalSymbol) const {

    return IsEligibleForTailCallOptimization_64SVR4(
        CalleeGV, CalleeCC, CallerCC, CB, isVarArg, Outs, Ins, CallerFunc,
        isCalleeExternalSymbol);

  return IsEligibleForTailCallOptimization(CalleeGV, CalleeCC, CallerCC,

  auto *G = dyn_cast<GlobalAddressSDNode>(Callee);

  bool IsCalleeExternalSymbol = isa<ExternalSymbolSDNode>(Callee);

      isEligibleForTCO(GV, CallConv, CallerCC, CB, isVarArg, Outs, Ins,

          isa<GlobalAddressSDNode>(Callee)) &&
         "Callee should be an llvm::Function object.");
             << "\nTCO callee: ");

           "site marked musttail");

  if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
    Callee = LowerGlobalAddress(Callee, DAG);

                         CallConv, isTailCall, isVarArg, isPatchPoint,

    return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,

    return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,

  return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
SDValue PPCTargetLowering::LowerCall_32SVR4(

  const bool IsVarArg = CFlags.IsVarArg;
  const bool IsTailCall = CFlags.IsTailCall;

  const Align PtrAlign(4);

  CCInfo.PreAnalyzeCallOperands(Outs);

  unsigned NumArgs = Outs.size();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Outs[i].VT;

    if (Outs[i].IsFixed) {

      errs() << "Call operand #" << i << " has unhandled type "

  CCInfo.clearWasPPCF128();

  CCByValInfo.AllocateStack(CCInfo.getStackSize(), PtrAlign);

  unsigned NumBytes = CCByValInfo.getStackSize();

  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  bool seenFloatArg = false;

  for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
       ++i, ++RealArgIdx) {

    SDValue Arg = OutVals[RealArgIdx];

    if (Flags.isByVal()) {
      assert((j < ByValArgLocs.size()) && "Index out of bounds!");
      Chain = CallSeqStart = NewCallSeqStart;

    if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
      RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),

  if (!MemOpChains.empty())

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InGlue);

  SDValue Ops[] = { Chain, InGlue };

  return FinishCall(CFlags, dl, DAG, RegsToPass, InGlue, Chain, CallSeqStart,
                    Callee, SPDiff, NumBytes, Ins, InVals, CB);
SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(

  return NewCallSeqStart;

SDValue PPCTargetLowering::LowerCall_64SVR4(

  unsigned NumOps = Outs.size();
  bool IsSibCall = false;

  unsigned PtrByteSize = 8;

  assert(!(IsFastCall && CFlags.IsVarArg) &&
         "fastcc not supported on varargs functions");

  unsigned NumBytes = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
      PPC::X7, PPC::X8, PPC::X9, PPC::X10,

      PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
      PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13

  const unsigned NumGPRs = std::size(GPR);
  const unsigned NumVRs = std::size(VR);

  bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
  if (!HasParameterArea) {
    unsigned ParamAreaSize = NumGPRs * PtrByteSize;
    unsigned AvailableFPRs = NumFPRs;
    unsigned AvailableVRs = NumVRs;
    unsigned NumBytesTmp = NumBytes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (Outs[i].Flags.isNest()) continue;
                                PtrByteSize, LinkageSize, ParamAreaSize,
                                NumBytesTmp, AvailableFPRs, AvailableVRs))
        HasParameterArea = true;
  unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;

    HasParameterArea = false;

  for (unsigned i = 0; i != NumOps; ++i) {
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    if (Flags.isByVal()) {
      NumGPRsUsed += (Flags.getByValSize()+7)/8;
      if (NumGPRsUsed > NumGPRs)
        HasParameterArea = true;

        if (++NumGPRsUsed <= NumGPRs)
        if (++NumVRsUsed <= NumVRs)
        if (++NumVRsUsed <= NumVRs)
        if (++NumFPRsUsed <= NumFPRs)

    HasParameterArea = true;

    NumBytes = alignTo(NumBytes, Alignment);

    if (Flags.isInConsecutiveRegsLast())
      NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

  unsigned NumBytesActuallyUsed = NumBytes;

  if (HasParameterArea)
    NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
    NumBytes = LinkageSize;
  if (CFlags.IsTailCall)

  Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);

  unsigned ArgOffset = LinkageSize;

  for (unsigned i = 0; i != NumOps; ++i) {
    EVT ArgVT = Outs[i].VT;
    EVT OrigVT = Outs[i].ArgVT;

    auto ComputePtrOff = [&]() {
      ArgOffset = alignTo(ArgOffset, Alignment);

      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, NumGPRs);

      Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);

    if (Flags.isByVal()) {
      EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
      if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        ArgOffset += PtrByteSize;

      if (GPR_idx == NumGPRs && Size < 8) {
        if (!isLittleEndian) {
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
        ArgOffset += PtrByteSize;

      if ((NumGPRs - GPR_idx) * PtrByteSize < Size)
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,

      if (Size < 8 && GPR_idx != NumGPRs) {
        if (!isLittleEndian) {
          Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
        ArgOffset += PtrByteSize;

      for (unsigned j=0; j<Size; j+=PtrByteSize) {
        if (GPR_idx != NumGPRs) {
          unsigned LoadSizeInBits = std::min(PtrByteSize, (Size - j)) * 8;
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
          ArgOffset += PtrByteSize;
          ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;

      if (Flags.isNest()) {
        RegsToPass.push_back(std::make_pair(PPC::X11, Arg));

      if (GPR_idx != NumGPRs) {
        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
                         true, CFlags.IsTailCall, false, MemOpChains,
                         TailCallArguments, dl);
        ArgOffset += PtrByteSize;
      ArgOffset += PtrByteSize;
      bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
      bool NeededLoad = false;

      if (FPR_idx != NumFPRs)
        RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));

      if (!NeedGPROrStack)
      else if (GPR_idx != NumGPRs && !IsFastCall) {
      } else if (!Flags.isInConsecutiveRegs()) {
      } else if (ArgOffset % PtrByteSize != 0) {
        if (!isLittleEndian)
      } else if (Flags.isInConsecutiveRegsLast()) {
        if (!isLittleEndian)

        RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));

          !isLittleEndian && !Flags.isInConsecutiveRegs()) {
        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
                         true, CFlags.IsTailCall, false, MemOpChains,
                         TailCallArguments, dl);

      if (!IsFastCall || NeededLoad) {
            Flags.isInConsecutiveRegs()) ? 4 : 8;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;

      if (CFlags.IsVarArg) {
        assert(HasParameterArea &&
               "Parameter area must exist if we have a varargs call.");
        if (VR_idx != NumVRs) {
          RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
        for (unsigned i=0; i<16; i+=PtrByteSize) {
          if (GPR_idx == NumGPRs)
          RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));

      if (VR_idx != NumVRs) {
        RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
        assert(HasParameterArea &&
               "Parameter area must exist to pass an argument in memory.");
                         true, CFlags.IsTailCall, true, MemOpChains,
                         TailCallArguments, dl);

  assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
         "mismatch in size of parameter area");
  (void)NumBytesActuallyUsed;

  if (!MemOpChains.empty())

  if (CFlags.IsIndirect) {
    assert(!CFlags.IsTailCall && "Indirect tail calls not supported");

  if (isELFv2ABI && !CFlags.IsPatchPoint)
    RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));

  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InGlue);

  if (CFlags.IsTailCall && !IsSibCall)

  return FinishCall(CFlags, dl, DAG, RegsToPass, InGlue, Chain, CallSeqStart,
                    Callee, SPDiff, NumBytes, Ins, InVals, CB);
6822 "Required alignment greater than stack alignment.");
6842 return RequiredAlign <= 8;
6847 return RequiredAlign <= 4;
6857 const bool IsPPC64 = Subtarget.
isPPC64();
6859 const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6861 if (ValVT == MVT::f128)
6868 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6869 PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6871 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6872 PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6875 PPC::V2, PPC::V3, PPC::V4, PPC::V5,
6876 PPC::V6, PPC::V7, PPC::V8, PPC::V9,
6877 PPC::V10, PPC::V11, PPC::V12, PPC::V13};
6882 "register width are not supported.");
6888 if (ByValSize == 0) {
6894 const unsigned StackSize =
alignTo(ByValSize, PtrAlign);
6896 for (
const unsigned E =
Offset + StackSize;
Offset < E;
6898 if (
unsigned Reg = State.
AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6916 assert(IsPPC64 &&
"PPC32 should have split i64 values.");
6925 if (
unsigned Reg = State.
AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6945 for (
unsigned I = 0;
I < StoreSize;
I += PtrAlign.
value()) {
6946 if (
unsigned Reg = State.
AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6947 assert(FReg &&
"An FPR should be available when a GPR is reserved.");
6980 const unsigned VecSize = 16;
6981 const Align VecAlign(VecSize);
6998 const unsigned PtrSize = IsPPC64 ? 8 : 4;
7004 while (NextRegIndex != GPRs.
size() &&
7009 assert(Reg &&
"Allocating register unexpectedly failed.");
7022 for (
unsigned I = 0;
I != VecSize;
I += PtrSize)
7034 if (NextRegIndex == GPRs.
size()) {
7043 if (GPRs[NextRegIndex] == PPC::R9) {
7048 const unsigned FirstReg = State.
AllocateReg(PPC::R9);
7049 const unsigned SecondReg = State.
AllocateReg(PPC::R10);
7050 assert(FirstReg && SecondReg &&
7051 "Allocating R9 or R10 unexpectedly failed.");
7065 for (
unsigned I = 0;
I != VecSize;
I += PtrSize) {
7067 assert(Reg &&
"Failed to allocated register for vararg vector argument");
7082 assert((IsPPC64 || SVT != MVT::i64) &&
7083 "i64 should have been split for 32-bit codegen.");
7091 return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7093 return HasP8Vector ? &PPC::VSSRCRegClass : &PPC::F4RCRegClass;
7095 return HasVSX ? &PPC::VSFRCRegClass : &PPC::F8RCRegClass;
7103 return &PPC::VRRCRegClass;
7116 else if (Flags.isZExt())
7126 if (PPC::GPRCRegClass.
contains(Reg)) {
7127 assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
7128 "Reg must be a valid argument register!");
7129 return LASize + 4 * (Reg - PPC::R3);
7132 if (PPC::G8RCRegClass.
contains(Reg)) {
7133 assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
7134 "Reg must be a valid argument register!");
7135 return LASize + 8 * (Reg - PPC::X3);
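// Worked example: in 32-bit mode, PPC::R5 maps to
// LASize + 4 * (R5 - R3) = LASize + 8, the offset of the third GPR's slot
// past the linkage area in the parameter save area.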
SDValue PPCTargetLowering::LowerFormalArguments_AIX(
         "Unexpected calling convention!");

  const bool IsPPC64 = Subtarget.isPPC64();
  const unsigned PtrByteSize = IsPPC64 ? 8 : 4;

  CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
  CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);

    auto HandleMemLoc = [&]() {
      assert((ValSize <= LocSize) &&
             "Object size is larger than size of MemLoc");
      if (LocSize > ValSize)
        CurArgOffset += LocSize - ValSize;
      const bool IsImmutable =

      assert(isVarArg && "Only use custom memloc for vararg.");
      const unsigned OriginalValNo = VA.getValNo();
      (void)OriginalValNo;

      auto HandleCustomVecRegLoc = [&]() {
        assert(I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
               "Missing custom RegLoc.");
               "Unexpected Val type for custom RegLoc.");
               "ValNo mismatch between custom MemLoc and RegLoc.");
                                   Subtarget.hasVSX()));

      HandleCustomVecRegLoc();
      HandleCustomVecRegLoc();

      if (I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom()) {
               "Only 2 custom RegLocs expected for 64-bit codegen.");
        HandleCustomVecRegLoc();
        HandleCustomVecRegLoc();

    const unsigned Size =

    if (Flags.isByVal()) {
      if (Flags.getNonZeroByValAlign() > PtrByteSize)

      const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);

          IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;

      auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
                          CopyFrom.getValue(1), dl, CopyFrom,

      for (; Offset != StackSize && ArgLocs[I].isRegLoc();
               "RegLocs should be for ByVal argument.");

      if (Offset != StackSize) {
               "Expected MemLoc for remaining bytes.");
        assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");

                                   Subtarget.hasVSX()));

  const unsigned MinParameterSaveArea = 8 * PtrByteSize;

  unsigned CallerReservedArea = std::max<unsigned>(
      CCInfo.getStackSize(), LinkageSize + MinParameterSaveArea);

    CallerReservedArea =

  static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
                                     PPC::R7, PPC::R8, PPC::R9, PPC::R10};

  static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
                                     PPC::X7, PPC::X8, PPC::X9, PPC::X10};
  const unsigned NumGPArgRegs = std::size(IsPPC64 ? GPR_64 : GPR_32);

  for (unsigned GPRIndex =
           (CCInfo.getStackSize() - LinkageSize) / PtrByteSize;
       GPRIndex < NumGPArgRegs; ++GPRIndex) {
        IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
                : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);

  if (!MemOps.empty())
SDValue PPCTargetLowering::LowerCall_AIX(
         "Unexpected calling convention!");

  if (CFlags.IsPatchPoint)

  AIXCCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,

  const bool IsPPC64 = Subtarget.isPPC64();
  const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
  CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
  CCInfo.AnalyzeCallOperands(Outs, CC_AIX);

  const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
  const unsigned NumBytes = std::max<unsigned>(
      LinkageSize + MinParameterSaveAreaSize, CCInfo.getStackSize());

  for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
    const unsigned ValNo = ArgLocs[I].getValNo();

    if (Flags.isByVal()) {
      const unsigned ByValSize = Flags.getByValSize();

      auto GetLoad = [&](EVT VT, unsigned LoadOffset) {

      unsigned LoadOffset = 0;

      while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
        LoadOffset += PtrByteSize;
               "Unexpected location for pass-by-value argument.");

      if (LoadOffset == ByValSize)

      assert(ArgLocs[I].getValNo() == ValNo &&
             "Expected additional location for by-value argument.");

      if (ArgLocs[I].isMemLoc()) {
        assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
        Chain = CallSeqStart = createMemcpyOutsideCallSeq(
            CallSeqStart, MemcpyFlags, DAG, dl);

      const unsigned ResidueBytes = ByValSize % PtrByteSize;
      assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
             "Unexpected register residue for by-value argument.");

      for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
                : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
               "Unexpected load emitted during handling of pass-by-value "

        ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,

      assert(CFlags.IsVarArg && "Custom MemLocs only used for Vector args.");

      const unsigned OriginalValNo = VA.getValNo();

      unsigned LoadOffset = 0;
      auto HandleCustomVecRegLoc = [&]() {
        assert(I != E && "Unexpected end of CCValAssigns.");
        assert(ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
               "Expected custom RegLoc.");
               "Custom MemLoc ValNo and custom RegLoc ValNo must match.");
        LoadOffset += PtrByteSize;

      HandleCustomVecRegLoc();
      HandleCustomVecRegLoc();

      if (I != E && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
          ArgLocs[I].getValNo() == OriginalValNo) {
               "Only 2 custom RegLocs expected for 64-bit codegen.");
        HandleCustomVecRegLoc();
        HandleCustomVecRegLoc();

         "Unexpected register handling for calling convention.");
         "Custom register handling only expected for VarArg.");
         "Unexpected custom register for argument!");

  if (!MemOpChains.empty())

  if (CFlags.IsIndirect) {
    assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
    const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
    const unsigned TOCSaveOffset =

  for (auto Reg : RegsToPass) {

  const int SPDiff = 0;
  return FinishCall(CFlags, dl, DAG, RegsToPass, InGlue, Chain, CallSeqStart,
                    Callee, SPDiff, NumBytes, Ins, InVals, CB);
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(

  CCInfo.AnalyzeReturn(Outs,

  for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
    SDValue Arg = OutVals[RealResIdx];

    if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {

  RetOps.push_back(Glue);

SDValue
PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,

  EVT IntVT = Op.getValueType();

  SDValue FPSIdx = getFramePointerFrameIndex(DAG);

  SDValue Ops[2] = {Chain, FPSIdx};

  bool isPPC64 = Subtarget.isPPC64();
  unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;

  bool isPPC64 = Subtarget.isPPC64();

SDValue
PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
  bool isPPC64 = Subtarget.isPPC64();

  SDValue FPSIdx = getFramePointerFrameIndex(DAG);
  SDValue Ops[3] = { Chain, NegSize, FPSIdx };

  bool isPPC64 = Subtarget.isPPC64();

                     Op.getOperand(0), Op.getOperand(1));

                     Op.getOperand(0), Op.getOperand(1));

  if (Op.getValueType().isVector())
    return LowerVectorLoad(Op, DAG);

  assert(Op.getValueType() == MVT::i1 &&
         "Custom lowering only for i1 loads");

                     BasePtr, MVT::i8, MMO);

  if (Op.getOperand(1).getValueType().isVector())
    return LowerVectorStore(Op, DAG);

  assert(Op.getOperand(1).getValueType() == MVT::i1 &&
         "Custom lowering only for i1 stores");

  assert(Op.getValueType() == MVT::i1 &&
         "Custom lowering only for i1 results");
  EVT TrgVT = Op.getValueType();

      !llvm::has_single_bit<uint32_t>(

  if (SrcSize == 256) {

  Op1 = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);

  for (unsigned i = 0; i < TrgNumElts; ++i)

  for (unsigned i = 1; i <= TrgNumElts; ++i)

  for (unsigned i = TrgNumElts; i < WideNumElts; ++i)

  EVT ResVT = Op.getValueType();
  EVT CmpVT = Op.getOperand(0).getValueType();

  SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
  if (!Subtarget.hasP9Vector() && CmpVT == MVT::f128) {

  if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {

    if (LHS.getValueType() == MVT::f32)
    if (LHS.getValueType() == MVT::f32)
    if (LHS.getValueType() == MVT::f32)

    if (Cmp.getValueType() == MVT::f32)
    if (Cmp.getValueType() == MVT::f32)
    if (Cmp.getValueType() == MVT::f32)
    if (Cmp.getValueType() == MVT::f32)
    if (Cmp.getValueType() == MVT::f32)
8316 bool IsStrict =
Op->isStrictFPOpcode();
8322 Flags.setNoFPExcept(
Op->getFlags().hasNoFPExcept());
8325 SDValue Src =
Op.getOperand(IsStrict ? 1 : 0);
8327 MVT DestTy =
Op.getSimpleValueType();
8328 assert(Src.getValueType().isFloatingPoint() &&
8329 (DestTy == MVT::i8 || DestTy == MVT::i16 || DestTy == MVT::i32 ||
8330 DestTy == MVT::i64) &&
8331 "Invalid FP_TO_INT types");
8332 if (Src.getValueType() == MVT::f32) {
8336 DAG.getVTList(MVT::f64, MVT::Other), {Chain, Src}, Flags);
8337 Chain = Src.getValue(1);
8341 if ((DestTy == MVT::i8 || DestTy == MVT::i16) && Subtarget.hasP9Vector())
8342 DestTy = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
8351 assert((IsSigned || Subtarget.hasFPCVT()) &&
8352 "i64 FP_TO_UINT is supported only with FPCVT");
8355 EVT ConvTy = Src.getValueType() == MVT::f128 ? MVT::f128 : MVT::f64;
8359 Conv = DAG.getNode(Opc, dl, DAG.getVTList(ConvTy, MVT::Other), {Chain, Src},
8362 Conv = DAG.getNode(Opc, dl, ConvTy, Src);
8367 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
8369 const SDLoc &dl) const {
8373 bool IsStrict = Op->isStrictFPOpcode();
8376 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
8377 (IsSigned || Subtarget.hasFPCVT());
8379 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
8388 Alignment = Align(4);
8391 SDValue Ops[] = { Chain, Tmp, FIPtr };
8393 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
8395 Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment);
8399 if (Op.getValueType() == MVT::i32 && !i32Stack) {
8408 RLI.Alignment = Alignment;
8416 const SDLoc &dl) const {
8419 if (Op->isStrictFPOpcode())
8426 const SDLoc &dl) const {
8427 bool IsStrict = Op->isStrictFPOpcode();
8430 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8431 EVT SrcVT = Src.getValueType();
8432 EVT DstVT = Op.getValueType();
8435 if (SrcVT == MVT::f128)
8436 return Subtarget.hasP9Vector() ? Op : SDValue();
8440 if (SrcVT == MVT::ppcf128) {
8441 if (DstVT == MVT::i32) {
8446 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8457 {Op.getOperand(0), Lo, Hi}, Flags);
8460 {Res.getValue(1), Res}, Flags);
8466 const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
8490 {Chain, Src, FltOfs}, Flags);
8494 {Chain, Val}, Flags);
8497 dl, DstVT, Sel, DAG.getConstant(0, dl, DstVT), SignMask);
8515 if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
8516 return LowerFP_TO_INTDirectMove(Op, DAG, dl);
8519 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8521 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8522 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8533 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
8538 if (Op->isStrictFPOpcode())
8543 (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
8547 Op.getOperand(0).getValueType())) {
8549 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8554 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
8555 LD->isNonTemporal())
8557 if (LD->getMemoryVT() != MemVT)
8567 RLI.Ptr = LD->getBasePtr();
8568 if (LD->isIndexed() && !LD->getOffset().isUndef()) {
8570 "Non-pre-inc AM on PPC?");
8575 RLI.Chain = LD->getChain();
8576 RLI.MPI = LD->getPointerInfo();
8577 RLI.IsDereferenceable = LD->isDereferenceable();
8578 RLI.IsInvariant = LD->isInvariant();
8579 RLI.Alignment = LD->getAlign();
8580 RLI.AAInfo = LD->getAAInfo();
8581 RLI.Ranges = LD->getRanges();
8583 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
8591 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
8597 SDLoc dl(NewResChain);
8600 NewResChain, DAG.getUNDEF(MVT::Other));
8602 "A new TF really is required here");
8611 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
8612 SDNode *Origin = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0).getNode();
8619 if (!Subtarget.hasP9Vector() &&
8628 if (UI.getUse().get().getResNo() != 0)
8650 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8654 bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT();
8657 EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64;
8658 if (Op->isStrictFPOpcode()) {
8660 Chain = Op.getOperand(0);
8662 DAG.getVTList(ConvTy, MVT::Other), {Chain, Src}, Flags);
8664 return DAG.getNode(ConvOpc, dl, ConvTy, Src);
8672 const SDLoc &dl) const {
8673 assert((Op.getValueType() == MVT::f32 ||
8674 Op.getValueType() == MVT::f64) &&
8675 "Invalid floating point type as target of conversion");
8676 assert(Subtarget.hasFPCVT() &&
8677 "Int to FP conversions with direct moves require FPCVT");
8678 SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0);
8679 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
8701 for (unsigned i = 1; i < NumConcat; ++i)
8708 const SDLoc &dl) const {
8709 bool IsStrict = Op->isStrictFPOpcode();
8710 unsigned Opc = Op.getOpcode();
8711 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8714 "Unexpected conversion type");
8715 assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
8716 "Supports conversions to v2f64/v4f32 only.");
8720 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8723 bool FourEltRes = Op.getValueType() == MVT::v4f32;
8728 MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
8731 for (unsigned i = 0; i < WideNumElts; ++i)
8734 int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
8735 int SaveElts = FourEltRes ? 4 : 2;
8737 for (int i = 0; i < SaveElts; i++)
8738 ShuffV[i * Stride] = i;
8740 for (int i = 1; i <= SaveElts; i++)
8741 ShuffV[i * Stride - 1] = i - 1;
8749 Arrange = DAG.getBitcast(IntermediateVT, Arrange);
8750 EVT ExtVT = Src.getValueType();
8751 if (Subtarget.hasP9Altivec())
8762 {Op.getOperand(0), Extend}, Flags);
8764 return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
8772 bool IsStrict = Op->isStrictFPOpcode();
8773 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8778 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8780 EVT InVT = Src.getValueType();
8781 EVT OutVT = Op.getValueType();
8784 return LowerINT_TO_FPVector(Op, DAG, dl);
8787 if (Op.getValueType() == MVT::f128)
8788 return Subtarget.hasP9Vector() ? Op : SDValue();
8791 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
8794 if (Src.getValueType() == MVT::i1) {
8806 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8807 Subtarget.isPPC64() && Subtarget.hasFPCVT())
8808 return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8810 assert((IsSigned || Subtarget.hasFPCVT()) &&
8811 "UINT_TO_FP is supported only with FPCVT");
8813 if (Src.getValueType() == MVT::i64) {
8825 if (Op.getValueType() == MVT::f32 &&
8826 !Subtarget.hasFPCVT() &&
8867 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
8868 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8869 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8870 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8871 } else if (Subtarget.hasLFIWAX() &&
8872 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
8875 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8876 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8879 Ops, MVT::i32, MMO);
8880 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8881 } else if (Subtarget.hasFPCVT() &&
8882 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
8885 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8886 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8889 Ops, MVT::i32, MMO);
8890 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8891 } else if (((Subtarget.hasLFIWAX() &&
8893 (Subtarget.hasFPCVT() &&
8907 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8908 "Expected an i32 store");
8914 RLI.Alignment = Align(4);
8918 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8919 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8922 dl, DAG.getVTList(MVT::f64, MVT::Other),
8923 Ops, MVT::i32, MMO);
8924 Chain = Bits.getValue(1);
8930 Chain = FP.getValue(1);
8932 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8936 {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8944 assert(Src.getValueType() == MVT::i32 &&
8945 "Unhandled INT_TO_FP type in custom expander!");
8955 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
8958 if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) {
8967 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8968 "Expected an i32 store");
8974 RLI.Alignment = Align(4);
8979 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8980 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8982 DAG.getVTList(MVT::f64, MVT::Other), Ops,
8986 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8989 "i32->FP without LFIWAX supported only on PPC64");
8998 Chain, dl, Ext64, FIdx,
9004 MVT::f64, dl, Chain, FIdx,
9012 Chain = FP.getValue(1);
9013 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
9017 {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
9048 EVT VT = Op.getValueType();
9054 Chain = MFFS.getValue(1);
9068 "Stack slot adjustment is valid only on big endian subtargets!");
9098 EVT VT = Op.getValueType();
9102 VT == Op.getOperand(1).getValueType() &&
9122 SDValue OutOps[] = { OutLo, OutHi };
9127 EVT VT = Op.getValueType();
9131 VT == Op.getOperand(1).getValueType() &&
9151 SDValue OutOps[] = { OutLo, OutHi };
9157 EVT VT = Op.getValueType();
9160 VT == Op.getOperand(1).getValueType() &&
9180 SDValue OutOps[] = { OutLo, OutHi };
9187 EVT VT = Op.getValueType();
9194 EVT AmtVT = Z.getValueType();
9217 static const MVT VTys[] = {
9218 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
9219 };
9221 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
9224 if (Val == ((1LLU << (SplatSize * 8)) - 1)) {
9229 EVT CanonicalVT = VTys[SplatSize-1];
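// The splat-size handling above is compact enough to restate standalone.
// A minimal sketch (not part of this file; names are illustrative): index
// SplatSize-1 selects the canonical splat type -- byte splats are v16i8,
// halfword splats v8i16, word splats v4i32, and the unused index 2 holds
// MVT::Other -- while the all-ones test works for any element width
// because such a splat is exactly (1 << (SplatSize * 8)) - 1.
#include <cstdint>
static const char *canonicalSplatType(unsigned SplatSize) {
  static const char *VTys[] = {"v16i8", "v8i16", "<unused>", "v4i32"};
  return VTys[SplatSize - 1]; // SplatSize is 1, 2 or 4 here
}
static bool isAllOnesSplat(uint64_t Val, unsigned SplatSize) {
  return Val == ((1LLU << (SplatSize * 8)) - 1);
}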
9238 const SDLoc &dl, EVT DestVT = MVT::Other) {
9239 if (DestVT == MVT::Other) DestVT = Op.getValueType();
9248 EVT DestVT = MVT::Other) {
9249 if (DestVT == MVT::Other) DestVT = LHS.getValueType();
9258 EVT DestVT = MVT::Other) {
9261 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
9273 for (unsigned i = 0; i != 16; ++i)
9294 EVT VecVT = V->getValueType(0);
9295 bool RightType = VecVT == MVT::v2f64 ||
9296 (HasP8Vector && VecVT == MVT::v4f32) ||
9297 (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
9301 bool IsSplat = true;
9302 bool IsLoad = false;
9303 SDValue Op0 = V->getOperand(0);
9308 if (V->isConstant())
9310 for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
9311 if (V->getOperand(i).isUndef())
9315 if (V->getOperand(i).getOpcode() == ISD::LOAD ||
9317 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
9319 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
9321 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
9325 if (V->getOperand(i) != Op0 ||
9326 (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
9329 return !(IsSplat && IsLoad);
9338 if ((Op.getValueType() != MVT::f128) ||
9359 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9367 APFloat APFloatToConvert = ArgAPFloat;
9368 bool LosesInfo = true;
9373 ArgAPFloat = APFloatToConvert;
9395 APFloat APFloatToConvert = ArgAPFloat;
9396 bool LosesInfo = true;
9400 return (!LosesInfo && !APFloatToConvert.isDenormal());
9405 LoadSDNode *InputNode = dyn_cast<LoadSDNode>(Op.getOperand(0));
9409 EVT Ty = Op->getValueType(0);
9412 if ((Ty == MVT::v2f64 || Ty == MVT::v4f32 || Ty == MVT::v4i32) &&
9421 if ((Ty == MVT::v8i16 || Ty == MVT::v16i8) && ISD::isEXTLoad(InputNode) &&
9425 if (Ty == MVT::v2i64) {
9428 if (MemVT == MVT::i32) {
9448 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
9451 APInt APSplatBits, APSplatUndef;
9452 unsigned SplatBitSize;
9454 bool BVNIsConstantSplat =
9462 if (BVNIsConstantSplat && (SplatBitSize == 64) &&
9463 Subtarget.hasPrefixInstrs()) {
9466 if ((Op->getValueType(0) == MVT::v2f64) &&
9501 if (!BVNIsConstantSplat || SplatBitSize > 32) {
9508 const SDValue *InputLoad = &Op.getOperand(0);
9513 unsigned MemorySize = LD->getMemoryVT().getScalarSizeInBits();
9514 unsigned ElementSize =
9517 assert(((ElementSize == 2 * MemorySize)
9521 "Unmatched element size and opcode!\n");
9526 unsigned NumUsesOfInputLD = 128 / ElementSize;
9528 if (BVInOp.isUndef())
9543 if (NumUsesOfInputLD == 1 &&
9546 Subtarget.hasLFIWAX()))
9555 Subtarget.isISA3_1() && ElementSize <= 16)
9558 assert(NumUsesOfInputLD > 0 && "No uses of input LD of a build_vector?");
9560 Subtarget.hasVSX()) {
9567 NewOpcode, dl, DAG.getVTList(Op.getValueType(), MVT::Other), Ops,
9568 LD->getMemoryVT(), LD->getMemOperand());
9580 if (Subtarget.hasVSX() && Subtarget.isPPC64() &&
9582 Subtarget.hasP8Vector()))
9589 unsigned SplatSize = SplatBitSize / 8;
9594 if (SplatBits == 0) {
9596 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
9608 if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
9610 Op.getValueType(), DAG, dl);
9612 if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
9617 if (Subtarget.hasP9Vector() && SplatSize == 1)
9622 int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
9623 (32-SplatBitSize));
9624 if (SextVal >= -16 && SextVal <= 15)
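// Standalone sketch of the SextVal computation above: the SplatBitSize-bit
// splat value is shifted to the top of a 32-bit word and arithmetically
// shifted back, which sign-extends it, so the comparison against [-16, 15]
// asks whether a single vspltisb/vspltish/vspltisw immediate reproduces it.
#include <cstdint>
static int32_t signExtendSplat(uint32_t SplatBits, unsigned SplatBitSize) {
  return int32_t(SplatBits << (32 - SplatBitSize)) >> (32 - SplatBitSize);
}
// E.g. signExtendSplat(0xFFF0, 16) == -16 (encodable), while
// signExtendSplat(0x0010, 16) == 16 (one past the immediate range).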
9637 if (SextVal >= -32 && SextVal <= 31) {
9642 EVT VT = (SplatSize == 1 ? MVT::v16i8 :
9643 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
9646 if (VT == Op.getValueType())
9655 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
9669 static const signed char SplatCsts[] = {
9670 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
9671 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
9674 for (unsigned idx = 0; idx < std::size(SplatCsts); ++idx) {
9677 int i = SplatCsts[idx];
9681 unsigned TypeShiftAmt = i & (SplatBitSize-1);
9684 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
9686 static const unsigned IIDs[] = {
9687 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
9688 Intrinsic::ppc_altivec_vslw
9695 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9697 static const unsigned IIDs[] = {
9698 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
9699 Intrinsic::ppc_altivec_vsrw
9706 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
9707 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
9709 static const unsigned IIDs[] = {
9710 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
9711 Intrinsic::ppc_altivec_vrlw
9718 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
9724 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
9730 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
9745 unsigned OpNum = (PFEntry >> 26) & 0x0F;
9746 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
9747 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
9763 if (LHSID == (1*9+2)*9+3) return LHS;
9764 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
9776 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3;
9777 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
9778 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7;
9779 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
9782 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
9783 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
9784 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
9785 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
9788 for (unsigned i = 0; i != 16; ++i)
9789 ShufIdxs[i] = (i&3)+0;
9792 for (unsigned i = 0; i != 16; ++i)
9793 ShufIdxs[i] = (i&3)+4;
9796 for (unsigned i = 0; i != 16; ++i)
9797 ShufIdxs[i] = (i&3)+8;
9800 for (unsigned i = 0; i != 16; ++i)
9801 ShufIdxs[i] = (i&3)+12;
9822 const unsigned BytesInVector = 16;
9827 unsigned ShiftElts = 0, InsertAtByte = 0;
9831 unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
9832 0, 15, 14, 13, 12, 11, 10, 9};
9833 unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9834 1, 2, 3, 4, 5, 6, 7, 8};
9837 int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9849 bool FoundCandidate = false;
9853 unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
9856 for (unsigned i = 0; i < BytesInVector; ++i) {
9857 unsigned CurrentElement = Mask[i];
9860 if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9863 bool OtherElementsInOrder = true;
9866 for (unsigned j = 0; j < BytesInVector; ++j) {
9873 (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
9874 if (Mask[j] != OriginalOrder[j] + MaskOffset) {
9875 OtherElementsInOrder = false;
9882 if (OtherElementsInOrder) {
9889 ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
9890 : BigEndianShifts[CurrentElement & 0xF];
9891 Swap = CurrentElement < BytesInVector;
9893 InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9894 FoundCandidate = true;
9899 if (!FoundCandidate)
9923 const unsigned NumHalfWords = 8;
9924 const unsigned BytesInVector = NumHalfWords * 2;
9933 unsigned ShiftElts = 0, InsertAtByte = 0;
9937 unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9938 unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9941 uint32_t OriginalOrderLow = 0x1234567;
9942 uint32_t OriginalOrderHigh = 0x89ABCDEF;
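// Standalone sketch of the nibble encoding used below: all eight halfword
// indices of the shuffle are packed into one uint32_t, four bits each,
// most significant nibble first, so 0x1234567 is the identity order of
// halfwords 0-7 and 0x89ABCDEF that of halfwords 8-15. Element i is
// recovered with the same (NumHalfWords - 1 - i) * 4 shift the code uses.
#include <cstdint>
static uint32_t packHalfwordMask(const unsigned Idx[8]) {
  uint32_t Mask = 0;
  for (unsigned i = 0; i < 8; ++i)
    Mask |= (Idx[i] & 0xFu) << ((8 - 1 - i) * 4);
  return Mask;
}
static unsigned halfwordAt(uint32_t Mask, unsigned i) {
  return (Mask >> ((8 - 1 - i) * 4)) & 0xF;
}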
9945 for (unsigned i = 0; i < NumHalfWords; ++i) {
9946 unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9963 bool FoundCandidate = false;
9966 for (unsigned i = 0; i < NumHalfWords; ++i) {
9967 unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9977 unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9978 TargetOrder = OriginalOrderLow;
9982 if (MaskOneElt == VINSERTHSrcElem &&
9983 (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9984 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9985 FoundCandidate = true;
9991 (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
9993 if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9995 ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9996 : BigEndianShifts[MaskOneElt & 0x7];
9997 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9998 Swap = MaskOneElt < NumHalfWords;
9999 FoundCandidate = true;
10005 if (!FoundCandidate)
10040 auto ShuffleMask = SVN->getMask();
10055 ShuffleMask = CommutedSV->getMask();
10064 APInt APSplatValue, APSplatUndef;
10065 unsigned SplatBitSize;
10081 if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
10082 (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
10083 ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
10085 else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
10086 (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
10087 ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
10095 for (; SplatBitSize < 32; SplatBitSize <<= 1)
10096 SplatVal |= (SplatVal << SplatBitSize);
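// Standalone sketch of the widening loop above: an 8- or 16-bit splat
// constant is replicated until it fills 32 bits, since XXSPLTI32DX takes a
// word-sized immediate. E.g. 0xAB -> 0xABAB -> 0xABABABAB.
#include <cstdint>
static uint32_t widenSplatTo32(uint32_t SplatVal, unsigned SplatBitSize) {
  for (; SplatBitSize < 32; SplatBitSize <<= 1)
    SplatVal |= (SplatVal << SplatBitSize);
  return SplatVal;
}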
10110 assert(Op.getValueType() == MVT::v1i128 &&
10111 "Only set v1i128 as custom, other type shouldn't reach here!");
10116 if (SHLAmt % 8 == 0) {
10117 std::array<int, 16> Mask;
10118 std::iota(Mask.begin(), Mask.end(), 0);
10119 std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
10148 if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
10149 if (!isa<ShuffleVectorSDNode>(NewShuffle))
10152 SVOp = cast<ShuffleVectorSDNode>(Op);
10153 V1 = Op.getOperand(0);
10154 V2 = Op.getOperand(1);
10156 EVT VT = Op.getValueType();
10159 unsigned ShiftElts, InsertAtByte;
10165 bool IsPermutedLoad = false;
10167 if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
10177 if (IsPermutedLoad) {
10178 assert((isLittleEndian || IsFourByte) &&
10179 "Unexpected size for permuted load on big endian target");
10180 SplatIdx += IsFourByte ? 2 : 1;
10181 assert((SplatIdx < (IsFourByte ? 4 : 2)) &&
10182 "Splat of a value outside of the loaded memory");
10187 if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
10190 Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
10192 Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
10196 if (LD->getValueType(0).getSizeInBits() == (IsFourByte ? 32 : 64))
10209 DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
10212 Ops, LD->getMemoryVT(), LD->getMemOperand());
10221 if (VT == MVT::v2i64 || VT == MVT::v2f64)
10224 if (Subtarget.hasP9Vector() &&
10245 if (Subtarget.hasPrefixInstrs()) {
10247 if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
10248 return SplatInsertNode;
10251 if (Subtarget.hasP9Altivec()) {
10253 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
10256 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
10260 if (Subtarget.hasVSX() &&
10273 if (Subtarget.hasVSX() &&
10286 if (Subtarget.hasP9Vector()) {
10306 if (Subtarget.hasVSX()) {
10327 if (V2.isUndef()) {
10340 (Subtarget.hasP8Altivec() && (
10351 unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
10361 (Subtarget.hasP8Altivec() && (
10372 unsigned PFIndexes[4];
10373 bool isFourElementShuffle = true;
10374 for (unsigned i = 0; i != 4 && isFourElementShuffle;
10376 unsigned EltNo = 8;
10377 for (unsigned j = 0; j != 4; ++j) {
10378 if (PermMask[i * 4 + j] < 0)
10381 unsigned ByteSource = PermMask[i * 4 + j];
10382 if ((ByteSource & 3) != j) {
10383 isFourElementShuffle = false;
10388 EltNo = ByteSource / 4;
10389 } else if (EltNo != ByteSource / 4) {
10390 isFourElementShuffle = false;
10394 PFIndexes[i] = EltNo;
10402 if (isFourElementShuffle) {
10404 unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
10405 PFIndexes[2] * 9 + PFIndexes[3];
10408 unsigned Cost = (PFEntry >> 30);
10428 if (V2.isUndef()) V2 = V1;
10430 return LowerVPERM(Op, DAG, PermMask, VT, V1, V2);
10439 bool NeedSwap = false;
10441 bool isPPC64 = Subtarget.isPPC64();
10443 if (Subtarget.hasVSX() && Subtarget.hasP9Vector() &&
10445 LLVM_DEBUG(dbgs() << "At least one of two input vectors are dead - using "
10446 "XXPERM instead\n");
10452 if ((!isLittleEndian && !V2->hasOneUse() && V1->hasOneUse()) ||
10453 (isLittleEndian && !V1->hasOneUse() && V2->hasOneUse())) {
10455 NeedSwap = !NeedSwap;
10490 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
10492 if (V1HasXXSWAPD) {
10495 else if (SrcElt < 16)
10498 if (V2HasXXSWAPD) {
10501 else if (SrcElt > 15)
10510 for (unsigned j = 0; j != BytesPerElement; ++j)
10511 if (isLittleEndian)
10513 DAG.getConstant(31 - (SrcElt * BytesPerElement + j), dl, MVT::i32));
10516 DAG.getConstant(SrcElt * BytesPerElement + j, dl, MVT::i32));
10519 if (V1HasXXSWAPD) {
10523 if (V2HasXXSWAPD) {
10524 dl = SDLoc(V2->getOperand(0));
10525 V2 = V2->getOperand(0)->getOperand(1);
10528 if (isPPC64 && (V1HasXXSWAPD || V2HasXXSWAPD)) {
10529 if (ValType != MVT::v2f64)
10531 if (V2.getValueType() != MVT::v2f64)
10535 ShufflesHandledWithVPERM++;
10540 dbgs() << "Emitting a XXPERM for the following shuffle:\n";
10542 dbgs() << "Emitting a VPERM for the following shuffle:\n";
10545 dbgs() << "With the following permute control vector:\n";
10550 VPermMask = DAG.getBitcast(MVT::v4i32, VPermMask);
10554 if (isLittleEndian)
10560 VPERMNode = DAG.getBitcast(ValType, VPERMNode);
10572 switch (IntrinsicID) {
10576 case Intrinsic::ppc_altivec_vcmpbfp_p:
10580 case Intrinsic::ppc_altivec_vcmpeqfp_p:
10584 case Intrinsic::ppc_altivec_vcmpequb_p:
10588 case Intrinsic::ppc_altivec_vcmpequh_p:
10592 case Intrinsic::ppc_altivec_vcmpequw_p:
10596 case Intrinsic::ppc_altivec_vcmpequd_p:
10597 if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
10603 case Intrinsic::ppc_altivec_vcmpneb_p:
10604 case Intrinsic::ppc_altivec_vcmpneh_p:
10605 case Intrinsic::ppc_altivec_vcmpnew_p:
10606 case Intrinsic::ppc_altivec_vcmpnezb_p:
10607 case Intrinsic::ppc_altivec_vcmpnezh_p:
10608 case Intrinsic::ppc_altivec_vcmpnezw_p:
10609 if (Subtarget.hasP9Altivec()) {
10610 switch (IntrinsicID) {
10613 case Intrinsic::ppc_altivec_vcmpneb_p:
10616 case Intrinsic::ppc_altivec_vcmpneh_p:
10619 case Intrinsic::ppc_altivec_vcmpnew_p:
10622 case Intrinsic::ppc_altivec_vcmpnezb_p:
10625 case Intrinsic::ppc_altivec_vcmpnezh_p:
10628 case Intrinsic::ppc_altivec_vcmpnezw_p:
10636 case Intrinsic::ppc_altivec_vcmpgefp_p:
10640 case Intrinsic::ppc_altivec_vcmpgtfp_p:
10644 case Intrinsic::ppc_altivec_vcmpgtsb_p:
10648 case Intrinsic::ppc_altivec_vcmpgtsh_p:
10652 case Intrinsic::ppc_altivec_vcmpgtsw_p:
10656 case Intrinsic::ppc_altivec_vcmpgtsd_p:
10657 if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
10663 case Intrinsic::ppc_altivec_vcmpgtub_p:
10667 case Intrinsic::ppc_altivec_vcmpgtuh_p:
10671 case Intrinsic::ppc_altivec_vcmpgtuw_p:
10675 case Intrinsic::ppc_altivec_vcmpgtud_p:
10676 if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
10683 case Intrinsic::ppc_altivec_vcmpequq:
10684 case Intrinsic::ppc_altivec_vcmpgtsq:
10685 case Intrinsic::ppc_altivec_vcmpgtuq:
10686 if (!Subtarget.isISA3_1())
10688 switch (IntrinsicID) {
10691 case Intrinsic::ppc_altivec_vcmpequq:
10694 case Intrinsic::ppc_altivec_vcmpgtsq:
10697 case Intrinsic::ppc_altivec_vcmpgtuq:
10704 case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10705 case Intrinsic::ppc_vsx_xvcmpgedp_p:
10706 case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10707 case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10708 case Intrinsic::ppc_vsx_xvcmpgesp_p:
10709 case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10710 if (Subtarget.hasVSX()) {
10711 switch (IntrinsicID) {
10712 case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10715 case Intrinsic::ppc_vsx_xvcmpgedp_p:
10718 case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10721 case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10724 case Intrinsic::ppc_vsx_xvcmpgesp_p:
10727 case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10737 case Intrinsic::ppc_altivec_vcmpbfp:
10740 case Intrinsic::ppc_altivec_vcmpeqfp:
10743 case Intrinsic::ppc_altivec_vcmpequb:
10746 case Intrinsic::ppc_altivec_vcmpequh:
10749 case Intrinsic::ppc_altivec_vcmpequw:
10752 case Intrinsic::ppc_altivec_vcmpequd:
10753 if (Subtarget.hasP8Altivec())
10758 case Intrinsic::ppc_altivec_vcmpneb:
10759 case Intrinsic::ppc_altivec_vcmpneh:
10760 case Intrinsic::ppc_altivec_vcmpnew:
10761 case Intrinsic::ppc_altivec_vcmpnezb:
10762 case Intrinsic::ppc_altivec_vcmpnezh:
10763 case Intrinsic::ppc_altivec_vcmpnezw:
10764 if (Subtarget.hasP9Altivec())
10765 switch (IntrinsicID) {
10768 case Intrinsic::ppc_altivec_vcmpneb:
10771 case Intrinsic::ppc_altivec_vcmpneh:
10774 case Intrinsic::ppc_altivec_vcmpnew:
10777 case Intrinsic::ppc_altivec_vcmpnezb:
10780 case Intrinsic::ppc_altivec_vcmpnezh:
10783 case Intrinsic::ppc_altivec_vcmpnezw:
10790 case Intrinsic::ppc_altivec_vcmpgefp:
10793 case Intrinsic::ppc_altivec_vcmpgtfp:
10796 case Intrinsic::ppc_altivec_vcmpgtsb:
10799 case Intrinsic::ppc_altivec_vcmpgtsh:
10802 case Intrinsic::ppc_altivec_vcmpgtsw:
10805 case Intrinsic::ppc_altivec_vcmpgtsd:
10806 if (Subtarget.hasP8Altivec())
10811 case Intrinsic::ppc_altivec_vcmpgtub:
10814 case Intrinsic::ppc_altivec_vcmpgtuh:
10817 case Intrinsic::ppc_altivec_vcmpgtuw:
10820 case Intrinsic::ppc_altivec_vcmpgtud:
10821 if (Subtarget.hasP8Altivec())
10826 case Intrinsic::ppc_altivec_vcmpequq_p:
10827 case Intrinsic::ppc_altivec_vcmpgtsq_p:
10828 case Intrinsic::ppc_altivec_vcmpgtuq_p:
10829 if (!Subtarget.isISA3_1())
10831 switch (IntrinsicID) {
10834 case Intrinsic::ppc_altivec_vcmpequq_p:
10837 case Intrinsic::ppc_altivec_vcmpgtsq_p:
10840 case Intrinsic::ppc_altivec_vcmpgtuq_p:
10854 unsigned IntrinsicID = Op.getConstantOperandVal(0);
10858 switch (IntrinsicID) {
10859 case Intrinsic::thread_pointer:
10865 case Intrinsic::ppc_rldimi: {
10866 assert(Subtarget.isPPC64() && "rldimi is only available in 64-bit!");
10870 return Op.getOperand(2);
10871 if (Mask.isAllOnes())
10874 unsigned MB = 0, ME = 0;
10878 if (ME < 63 - SH) {
10881 } else if (ME > 63 - SH) {
10887 {Op.getOperand(2), Src,
10888 DAG.getTargetConstant(63 - ME, dl, MVT::i32),
10889 DAG.getTargetConstant(MB, dl, MVT::i32)}),
10893 case Intrinsic::ppc_rlwimi: {
10896 return Op.getOperand(2);
10897 if (Mask.isAllOnes())
10900 unsigned MB = 0, ME = 0;
10904 PPC::RLWIMI, dl, MVT::i32,
10905 {Op.getOperand(2), Op.getOperand(1), Op.getOperand(3),
10906 DAG.getTargetConstant(MB, dl, MVT::i32),
10907 DAG.getTargetConstant(ME, dl, MVT::i32)}),
10911 case Intrinsic::ppc_rlwnm: {
10912 if (Op.getConstantOperandVal(3) == 0)
10914 unsigned MB = 0, ME = 0;
10919 {Op.getOperand(1), Op.getOperand(2),
10920 DAG.getTargetConstant(MB, dl, MVT::i32),
10921 DAG.getTargetConstant(ME, dl, MVT::i32)}),
10925 case Intrinsic::ppc_mma_disassemble_acc: {
10926 if (Subtarget.isISAFuture()) {
10927 EVT ReturnTypes[] = {MVT::v256i1, MVT::v256i1};
10965 case Intrinsic::ppc_vsx_disassemble_pair: {
10968 if (IntrinsicID == Intrinsic::ppc_mma_disassemble_acc) {
10973 for (int VecNo = 0; VecNo < NumVecs; VecNo++) {
10984 case Intrinsic::ppc_mma_xxmfacc:
10985 case Intrinsic::ppc_mma_xxmtacc: {
10987 if (!Subtarget.isISAFuture())
10998 case Intrinsic::ppc_unpack_longdouble: {
10999 auto *Idx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
11000 assert(Idx && (Idx->getSExtValue() == 0 || Idx->getSExtValue() == 1) &&
11001 "Argument of long double unpack must be 0 or 1!");
11004 Idx->getValueType(0)));
11007 case Intrinsic::ppc_compare_exp_lt:
11008 case Intrinsic::ppc_compare_exp_gt:
11009 case Intrinsic::ppc_compare_exp_eq:
11010 case Intrinsic::ppc_compare_exp_uo: {
11012 switch (IntrinsicID) {
11013 case Intrinsic::ppc_compare_exp_lt:
11016 case Intrinsic::ppc_compare_exp_gt:
11019 case Intrinsic::ppc_compare_exp_eq:
11022 case Intrinsic::ppc_compare_exp_uo:
11028 PPC::SELECT_CC_I4, dl, MVT::i32,
11029 {SDValue(DAG.getMachineNode(PPC::XSCMPEXPDP, dl, MVT::i32,
11030 Op.getOperand(1), Op.getOperand(2)),
11032 DAG.getConstant(1, dl, MVT::i32), DAG.getConstant(0, dl, MVT::i32),
11033 DAG.getTargetConstant(Pred, dl, MVT::i32)}),
11036 case Intrinsic::ppc_test_data_class: {
11037 EVT OpVT = Op.getOperand(1).getValueType();
11038 unsigned CmprOpc = OpVT == MVT::f128 ? PPC::XSTSTDCQP
11039 : (OpVT == MVT::f64 ? PPC::XSTSTDCDP
11043 PPC::SELECT_CC_I4, dl, MVT::i32,
11044 {SDValue(DAG.getMachineNode(CmprOpc, dl, MVT::i32, Op.getOperand(2),
11047 DAG.getConstant(1, dl, MVT::i32), DAG.getConstant(0, dl, MVT::i32),
11048 DAG.getTargetConstant(PPC::PRED_EQ, dl, MVT::i32)}),
11051 case Intrinsic::ppc_fnmsub: {
11052 EVT VT = Op.getOperand(1).getValueType();
11053 if (!Subtarget.hasVSX() || (!Subtarget.hasFloat128() && VT == MVT::f128))
11059 Op.getOperand(2), Op.getOperand(3));
11061 case Intrinsic::ppc_convert_f128_to_ppcf128:
11062 case Intrinsic::ppc_convert_ppcf128_to_f128: {
11063 RTLIB::Libcall LC = IntrinsicID == Intrinsic::ppc_convert_ppcf128_to_f128
11064 ? RTLIB::CONVERT_PPCF128_F128
11065 : RTLIB::CONVERT_F128_PPCF128;
11066 MakeLibCallOptions CallOptions;
11067 std::pair<SDValue, SDValue> Result =
11068 makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(1), CallOptions,
11072 case Intrinsic::ppc_maxfe:
11073 case Intrinsic::ppc_maxfl:
11074 case Intrinsic::ppc_maxfs:
11075 case Intrinsic::ppc_minfe:
11076 case Intrinsic::ppc_minfl:
11077 case Intrinsic::ppc_minfs: {
11078 EVT VT = Op.getValueType();
11081 [VT](const SDUse &Use) { return Use.getValueType() == VT; }) &&
11082 "ppc_[max|min]f[e|l|s] must have uniform type arguments");
11085 if (IntrinsicID == Intrinsic::ppc_minfe ||
11086 IntrinsicID == Intrinsic::ppc_minfl ||
11087 IntrinsicID == Intrinsic::ppc_minfs)
11109 Op.getOperand(1), Op.getOperand(2),
11120 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
11132 switch (Op.getConstantOperandVal(1)) {
11135 BitNo = 0; InvertBit = false;
11138 BitNo = 0; InvertBit = true;
11141 BitNo = 2; InvertBit = false;
11144 BitNo = 2; InvertBit = true;
11166 int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
11168 switch (Op.getConstantOperandVal(ArgStart)) {
11169 case Intrinsic::ppc_cfence: {
11170 assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
11171 SDValue Val = Op.getOperand(ArgStart + 1);
11173 if (Ty == MVT::i128) {
11178 unsigned Opcode = Subtarget.isPPC64() ? PPC::CFENCE8 : PPC::CFENCE;
11179 EVT FTy = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
11203 int VectorIndex = 0;
11216 "Expecting an atomic compare-and-swap here.");
11218 auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
11219 EVT MemVT = AtomicNode->getMemoryVT();
11237 for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
11238 Ops.push_back(AtomicNode->getOperand(i));
11250 EVT MemVT = N->getMemoryVT();
11252 "Expect quadword atomic operations");
11254 unsigned Opc = N->getOpcode();
11262 DAG.getConstant(Intrinsic::ppc_atomic_load_i128, dl, MVT::i32)};
11263 for (int I = 1, E = N->getNumOperands(); I < E; ++I)
11266 Ops, MemVT, N->getMemOperand());
11273 DAG.getNode(ISD::OR, dl, {MVT::i128, MVT::Other}, {ValLo, ValHi});
11283 DAG.getConstant(Intrinsic::ppc_atomic_store_i128, dl, MVT::i32)};
11293 N->getMemOperand());
11305 enum DataClassMask {
11307 DC_NEG_INF = 1 << 4,
11308 DC_POS_INF = 1 << 5,
11309 DC_NEG_ZERO = 1 << 2,
11310 DC_POS_ZERO = 1 << 3,
11311 DC_NEG_SUBNORM = 1,
11312 DC_POS_SUBNORM = 1 << 1,
11315 EVT VT = Op.getValueType();
11317 unsigned TestOp = VT == MVT::f128 ? PPC::XSTSTDCQP
11318 : VT == MVT::f64 ? PPC::XSTSTDCDP
11329 return DAG.getNOT(Dl, Rev, MVT::i1);
11336 TestOp, Dl, MVT::i32,
11338 DC_NEG_ZERO | DC_POS_ZERO |
11339 DC_NEG_SUBNORM | DC_POS_SUBNORM,
11345 DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, Dl, MVT::i1, Rev,
11351 TargetOpcode::EXTRACT_SUBREG, Dl, MVT::i1, Rev,
11356 Sign = DAG.getNOT(Dl, Sign, MVT::i1);
11369 bool IsQuiet = Mask & fcQNan;
11375 if (VT == MVT::f128) {
11379 QuietMask = 0x8000;
11380 } else if (VT == MVT::f64) {
11392 QuietMask = 0x80000;
11393 } else if (VT == MVT::f32) {
11395 QuietMask = 0x400000;
11411 unsigned NativeMask = 0;
11413 NativeMask |= DC_NAN;
11415 NativeMask |= DC_NEG_INF;
11417 NativeMask |= DC_POS_INF;
11419 NativeMask |= DC_NEG_ZERO;
11421 NativeMask |= DC_POS_ZERO;
11423 NativeMask |= DC_NEG_SUBNORM;
11425 NativeMask |= DC_POS_SUBNORM;
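// Standalone sketch of the mapping above from a floating-point class mask
// to the 7-bit immediate of the xststdc family, using the DataClassMask
// bit positions declared earlier. The fc*Bit values below are illustrative
// stand-ins for LLVM's fcNan/fcNegInf/... flags, and DC_NAN is assumed to
// be 1 << 6 (its enumerator is elided from this extract).
enum : unsigned {
  fcNanBit = 1, fcNegInfBit = 2, fcPosInfBit = 4, fcNegZeroBit = 8,
  fcPosZeroBit = 16, fcNegSubnormBit = 32, fcPosSubnormBit = 64,
};
static unsigned toNativeDataClassMask(unsigned Mask) {
  unsigned NativeMask = 0;
  if (Mask & fcNanBit)        NativeMask |= 1 << 6; // DC_NAN (assumed)
  if (Mask & fcNegInfBit)     NativeMask |= 1 << 4; // DC_NEG_INF
  if (Mask & fcPosInfBit)     NativeMask |= 1 << 5; // DC_POS_INF
  if (Mask & fcNegZeroBit)    NativeMask |= 1 << 2; // DC_NEG_ZERO
  if (Mask & fcPosZeroBit)    NativeMask |= 1 << 3; // DC_POS_ZERO
  if (Mask & fcNegSubnormBit) NativeMask |= 1 << 0; // DC_NEG_SUBNORM
  if (Mask & fcPosSubnormBit) NativeMask |= 1 << 1; // DC_POS_SUBNORM
  return NativeMask;
}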
11428 TargetOpcode::EXTRACT_SUBREG, Dl, MVT::i1,
11430 TestOp, Dl, MVT::i32,
11439 assert(Subtarget.hasP9Vector() && "Test data class requires Power9");
11441 uint64_t RHSC = Op.getConstantOperandVal(1);
11466 "Should only be called for ISD::INSERT_VECTOR_ELT");
11470 EVT VT =
Op.getValueType();
11475 if (VT == MVT::v2f64 &&
C)
11478 if (Subtarget.hasP9Vector()) {
11487 if ((VT == MVT::v4f32) && (
V2.getValueType() == MVT::f32) &&
11488 (isa<LoadSDNode>(V2))) {
11493 BitcastLoad,
Op.getOperand(2));
11494 return DAG.
getBitcast(MVT::v4f32, InsVecElt);
11498 if (Subtarget.isISA3_1()) {
11499 if ((VT == MVT::v2i64 || VT == MVT::v2f64) && !Subtarget.
isPPC64())
11503 if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
11504 VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64)
11514 if (VT == MVT::v8i16 || VT == MVT::v16i8) {
11517 unsigned InsertAtElement =
C->getZExtValue();
11518 unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
11520 InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
11534 EVT VT = Op.getValueType();
11536 if (VT != MVT::v256i1 && VT != MVT::v512i1)
11542 assert((VT != MVT::v512i1 || Subtarget.hasMMA()) &&
11543 "Type unsupported without MMA");
11544 assert((VT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
11545 "Type unsupported without paired vector support");
11550 for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
11552 DAG.getLoad(MVT::v16i8, dl, LoadChain, BasePtr,
11562 std::reverse(Loads.begin(), Loads.end());
11563 std::reverse(LoadChains.begin(), LoadChains.end());
11581 EVT StoreVT = Value.getValueType();
11583 if (StoreVT != MVT::v256i1 && StoreVT != MVT::v512i1)
11589 assert((StoreVT != MVT::v512i1 || Subtarget.hasMMA()) &&
11590 "Type unsupported without MMA");
11591 assert((StoreVT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
11592 "Type unsupported without paired vector support");
11595 unsigned NumVecs = 2;
11596 if (StoreVT == MVT::v512i1) {
11597 if (Subtarget.isISAFuture()) {
11598 EVT ReturnTypes[] = {MVT::v256i1, MVT::v256i1};
11600 PPC::DMXXEXTFDMR512, dl, ArrayRef(ReturnTypes, 2), Op.getOperand(1));
11603 Value2 = SDValue(ExtNode, 1);
11608 for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
11611 if (Subtarget.isISAFuture()) {
11621 DAG.getStore(StoreChain, dl, Elt, BasePtr,
11635 if (Op.getValueType() == MVT::v4i32) {
11652 LHS, RHS, DAG, dl, MVT::v4i32);
11655 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
11660 } else if (Op.getValueType() == MVT::v16i8) {
11666 LHS, RHS, DAG, dl, MVT::v8i16);
11671 LHS, RHS, DAG, dl, MVT::v8i16);
11679 for (unsigned i = 0; i != 8; ++i) {
11680 if (isLittleEndian) {
11682 Ops[i*2+1] = 2*i+16;
11685 Ops[i*2+1] = 2*i+1+16;
11688 if (isLittleEndian)
11698 bool IsStrict = Op->isStrictFPOpcode();
11699 if (Op.getOperand(IsStrict ? 1 : 0).getValueType() == MVT::f128 &&
11700 !Subtarget.hasP9Vector())
11710 "Should only be called for ISD::FP_EXTEND");
11714 if (Op.getValueType() != MVT::v2f64 ||
11715 Op.getOperand(0).getValueType() != MVT::v2f32)
11727 "Node should have 2 operands with second one being a constant!");
11739 int DWord = Idx >> 1;
11762 LD->getMemoryVT(), LD->getMemOperand());
11775 LD->getMemoryVT(), LD->getMemOperand());
11786 switch (Op.getOpcode()) {
11815 return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
11841 case ISD::FSHL: return LowerFunnelShift(Op, DAG);
11842 case ISD::FSHR: return LowerFunnelShift(Op, DAG);
11854 return LowerFP_ROUND(Op, DAG);
11867 return LowerINTRINSIC_VOID(Op, DAG);
11869 return LowerBSWAP(Op, DAG);
11871 return LowerATOMIC_CMP_SWAP(Op, DAG);
11873 return LowerATOMIC_LOAD_STORE(Op, DAG);
11875 return LowerIS_FPCLASS(Op, DAG);
11883 switch (N->getOpcode()) {
11885 llvm_unreachable("Do not know how to custom type legalize this operation!");
11902 if (N->getConstantOperandVal(1) != Intrinsic::loop_decrement)
11905 assert(N->getValueType(0) == MVT::i1 &&
11906 "Unexpected result type for CTR decrement intrinsic");
11908 N->getValueType(0));
11918 switch (N->getConstantOperandVal(0)) {
11919 case Intrinsic::ppc_pack_longdouble:
11921 N->getOperand(2), N->getOperand(1)));
11923 case Intrinsic::ppc_maxfe:
11924 case Intrinsic::ppc_minfe:
11925 case Intrinsic::ppc_fnmsub:
11926 case Intrinsic::ppc_convert_f128_to_ppcf128:
11936 EVT VT = N->getValueType(0);
11938 if (VT == MVT::i64) {
11951 if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
11955 Results.push_back(LoweredValue);
11956 if (N->isStrictFPOpcode())
11961 if (!N->getValueType(0).isVector())
12012 if (isa<LoadInst>(Inst))
12016 Intrinsic::ppc_cfence, {Inst->getType()}),
12026 unsigned AtomicSize,
12027 unsigned BinOpcode,
12028 unsigned CmpOpcode,
12029 unsigned CmpPred) const {
12033 auto LoadMnemonic = PPC::LDARX;
12034 auto StoreMnemonic = PPC::STDCX;
12035 switch (AtomicSize) {
12039 LoadMnemonic = PPC::LBARX;
12040 StoreMnemonic = PPC::STBCX;
12041 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
12044 LoadMnemonic = PPC::LHARX;
12045 StoreMnemonic = PPC::STHCX;
12046 assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
12049 LoadMnemonic = PPC::LWARX;
12050 StoreMnemonic = PPC::STWCX;
12053 LoadMnemonic = PPC::LDARX;
12054 StoreMnemonic = PPC::STDCX;
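// Standalone sketch of the mnemonic selection above: each atomic width
// maps to a matching load-reserve/store-conditional pair, and the sub-word
// forms exist only when the subtarget has partword atomics (asserted in
// the lowering itself).
struct LarxStcxPair { const char *Load, *Store; };
static LarxStcxPair pickLarxStcx(unsigned AtomicSize) {
  switch (AtomicSize) {
  case 1:  return {"lbarx", "stbcx."};
  case 2:  return {"lharx", "sthcx."};
  case 4:  return {"lwarx", "stwcx."};
  default: return {"ldarx", "stdcx."};
  }
}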
12070 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
12072 F->insert(It, loopMBB);
12074 F->insert(It, loop2MBB);
12075 F->insert(It, exitMBB);
12081 Register TmpReg = (!BinOpcode) ? incr :
12083 : &PPC::GPRCRegClass);
12108 BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
12115 if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
12117 BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
12145 switch (MI.getOpcode()) {
12149 return TII->isSignExtended(MI.getOperand(1).getReg(),
12150 &MI.getMF()->getRegInfo());
12174 case PPC::EXTSB8_32_64:
12175 case PPC::EXTSB8_rec:
12176 case PPC::EXTSB_rec:
12179 case PPC::EXTSH8_32_64:
12180 case PPC::EXTSH8_rec:
12181 case PPC::EXTSH_rec:
12183 case PPC::EXTSWSLI:
12184 case PPC::EXTSWSLI_32_64:
12185 case PPC::EXTSWSLI_32_64_rec:
12186 case PPC::EXTSWSLI_rec:
12187 case PPC::EXTSW_32:
12188 case PPC::EXTSW_32_64:
12189 case PPC::EXTSW_32_64_rec:
12190 case PPC::EXTSW_rec:
12193 case PPC::SRAWI_rec:
12194 case PPC::SRAW_rec:
12203 unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
12213 bool IsSignExtended =
12216 if (CmpOpcode == PPC::CMPW && !IsSignExtended) {
12218 BuildMI(*BB, MI, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueReg)
12219 .addReg(MI.getOperand(3).getReg());
12220 MI.getOperand(3).setReg(ValueReg);
12224 if (Subtarget.hasPartwordAtomics())
12232 bool is64bit = Subtarget.isPPC64();
12234 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
12245 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
12247 F->insert(It, loopMBB);
12249 F->insert(It, loop2MBB);
12250 F->insert(It, exitMBB);
12256 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
12301 if (ptrA != ZeroReg) {
12303 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
12311 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
12312 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
12315 .addImm(is8bit ? 28 : 27);
12316 if (!isLittleEndian)
12317 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
12319 .addImm(is8bit ? 24 : 16);
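// Standalone sketch of what the RLWINM/XORI pair above computes (a reading
// of the emitted code, not copied from it): the byte or halfword being
// operated on lives inside the aligned word that lwarx loads, and the
// shift register is its bit offset within that word -- 8 * (EA & 3) on
// little-endian, with the lane order flipped by XOR 24 (bytes) or 16
// (halfwords) on big-endian.
#include <cstdint>
static unsigned partwordBitShift(uint64_t EA, bool Is8Bit, bool IsLE) {
  unsigned Shift = (unsigned)(EA & (Is8Bit ? 3 : 2)) * 8;
  if (!IsLE)
    Shift ^= Is8Bit ? 24 : 16;
  return Shift;
}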
12321 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
12326 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
12336 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
12340 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
12345 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
12349 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
12352 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
12364 unsigned ValueReg = SReg;
12365 unsigned CmpReg = Incr2Reg;
12366 if (CmpOpcode == PPC::CMPW) {
12368 BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
12372 BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
12374 ValueReg = ValueSReg;
12406 .addImm(is8bit ? 24 : 16)
12427 Register DstReg = MI.getOperand(0).getReg();
12429 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
12430 Register mainDstReg = MRI.createVirtualRegister(RC);
12431 Register restoreDstReg = MRI.createVirtualRegister(RC);
12434 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
12435 "Invalid Pointer Size!");
12483 Register LabelReg = MRI.createVirtualRegister(PtrRC);
12484 Register BufReg = MI.getOperand(1).getReg();
12499 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
12501 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
12504 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
12527 TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
12548 TII->get(PPC::PHI), DstReg)
12552 MI.eraseFromParent();
12566 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
12567 "Invalid Pointer Size!");
12570 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
12573 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
12574 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
12588 Register BufReg = MI.getOperand(0).getReg();
12593 if (PVT == MVT::i64) {
12605 if (PVT == MVT::i64) {
12617 if (PVT == MVT::i64) {
12629 if (PVT == MVT::i64) {
12641 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
12651 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
12654 MI.eraseFromParent();
12670 "Unexpected stack alignment");
12674 unsigned StackProbeSize =
12677 StackProbeSize &= ~(StackAlign - 1);
12678 return StackProbeSize ? StackProbeSize : StackAlign;
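// Standalone sketch of the computation above: the requested probe size is
// rounded down to a multiple of the stack alignment (a power of two), and
// a value that rounds to zero degenerates to one probe per alignment unit.
static unsigned effectiveProbeSize(unsigned StackProbeSize,
                                   unsigned StackAlign) {
  StackProbeSize &= ~(StackAlign - 1); // StackAlign is 2^k
  return StackProbeSize ? StackProbeSize : StackAlign;
}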
12690 const bool isPPC64 = Subtarget.isPPC64();
12722 MF->insert(MBBIter, TestMBB);
12723 MF->insert(MBBIter, BlockMBB);
12724 MF->insert(MBBIter, TailMBB);
12729 Register DstReg = MI.getOperand(0).getReg();
12730 Register NegSizeReg = MI.getOperand(1).getReg();
12731 Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
12732 Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12733 Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12734 Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12740 if (!MRI.hasOneNonDBGUse(NegSizeReg))
12742 isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
12748 ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
12749 : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
12751 .addDef(ActualNegSizeReg)
12753 .add(MI.getOperand(2))
12754 .add(MI.getOperand(3));
12760 .addReg(ActualNegSizeReg);
12763 int64_t NegProbeSize = -(int64_t)ProbeSize;
12764 assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
12765 Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12766 if (!isInt<16>(NegProbeSize)) {
12767 Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12769 .addImm(NegProbeSize >> 16);
12773 .addImm(NegProbeSize & 0xFFFF);
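// Standalone sketch of the two immediates used above: a negative probe
// size outside the signed 16-bit range is materialized as LIS (load the
// high 16 bits shifted) followed by ORI (or in the low 16 bits). Because
// ORI is a plain OR into the zeroed low half, the split is simply the two
// halves of the 32-bit value, with an arithmetic shift keeping the sign of
// the high part.
#include <cstdint>
struct LisOriImms { int16_t High; uint16_t Low; };
static LisOriImms splitForLisOri(int32_t Value) {
  return {(int16_t)(Value >> 16), (uint16_t)(Value & 0xFFFF)};
}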
12780 Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12782 .addReg(ActualNegSizeReg)
12784 Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12788 Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12791 .addReg(ActualNegSizeReg);
12800 Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
12801 BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
12815 BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
12826 MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12828 TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
12829 MaxCallFrameSizeReg)
12830 .add(MI.getOperand(2))
12831 .add(MI.getOperand(3));
12832 BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
12834 .addReg(MaxCallFrameSizeReg);
12843 MI.eraseFromParent();
12845 ++NumDynamicAllocaProbed;
12850 switch (MI.getOpcode()) {
12851 case PPC::SELECT_CC_I4:
12852 case PPC::SELECT_CC_I8:
12853 case PPC::SELECT_CC_F4:
12854 case PPC::SELECT_CC_F8:
12855 case PPC::SELECT_CC_F16:
12856 case PPC::SELECT_CC_VRRC:
12857 case PPC::SELECT_CC_VSFRC:
12858 case PPC::SELECT_CC_VSSRC:
12859 case PPC::SELECT_CC_VSRC:
12860 case PPC::SELECT_CC_SPE4:
12861 case PPC::SELECT_CC_SPE:
12869 switch (MI.getOpcode()) {
12870 case PPC::SELECT_I4:
12871 case PPC::SELECT_I8:
12872 case PPC::SELECT_F4:
12873 case PPC::SELECT_F8:
12874 case PPC::SELECT_F16:
12875 case PPC::SELECT_SPE:
12876 case PPC::SELECT_SPE4:
12877 case PPC::SELECT_VRRC:
12878 case PPC::SELECT_VSFRC:
12879 case PPC::SELECT_VSSRC:
12880 case PPC::SELECT_VSRC:
12890 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
12891 MI.getOpcode() == TargetOpcode::PATCHPOINT) {
12893 MI.getOpcode() == TargetOpcode::PATCHPOINT &&
12906 if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
12907 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
12909 } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
12910 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
12924 if (Subtarget.hasISEL() &&
12925 (MI.getOpcode() == PPC::SELECT_CC_I4 ||
12926 MI.getOpcode() == PPC::SELECT_CC_I8 ||
12927 MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8)) {
12929 if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
12930 MI.getOpcode() == PPC::SELECT_CC_I8)
12931 Cond.push_back(MI.getOperand(4));
12934 Cond.push_back(MI.getOperand(1));
12937 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
12938 MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
12954 F->insert(It, copy0MBB);
12955 F->insert(It, sinkMBB);
12959 unsigned CallFrameSize = TII->getCallFrameSizeAt(MI);
12974 .addReg(MI.getOperand(1).getReg())
12977 unsigned SelectPred = MI.getOperand(4).getImm();
12980 .addReg(MI.getOperand(1).getReg())
12997 .addReg(MI.getOperand(3).getReg())
12999 .addReg(MI.getOperand(2).getReg())
13001 } else if (MI.getOpcode() == PPC::ReadTB) {
13017 F->insert(It, readMBB);
13018 F->insert(It, sinkMBB);
13039 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
13049 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
13051 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
13053 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
13055 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
13058 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
13060 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
13062 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
13064 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
13067 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
13069 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
13071 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
13073 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
13076 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
13078 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
13080 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
13082 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
13085 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
13087 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
13089 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
13091 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
13094 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
13096 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
13098 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
13100 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
13103 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
13105 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
13107 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
13109 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
13112 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
13114 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
13116 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
13118 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
13121 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
13123 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
13125 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
13127 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
13130 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
13132 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
13134 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
13136 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
13139 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
13141 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
13143 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
13145 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
13147 else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
13148 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
13149 (Subtarget.hasPartwordAtomics() &&
13150 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
13151 (Subtarget.hasPartwordAtomics() &&
13152 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
13153 bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
13155 auto LoadMnemonic = PPC::LDARX;
13156 auto StoreMnemonic = PPC::STDCX;
13157 switch (MI.getOpcode()) {
13160 case PPC::ATOMIC_CMP_SWAP_I8:
13161 LoadMnemonic = PPC::LBARX;
13162 StoreMnemonic = PPC::STBCX;
13163 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
13165 case PPC::ATOMIC_CMP_SWAP_I16:
13166 LoadMnemonic = PPC::LHARX;
13167 StoreMnemonic = PPC::STHCX;
13168 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
13170 case PPC::ATOMIC_CMP_SWAP_I32:
13171 LoadMnemonic = PPC::LWARX;
13172 StoreMnemonic = PPC::STWCX;
13174 case PPC::ATOMIC_CMP_SWAP_I64:
13175 LoadMnemonic = PPC::LDARX;
13176 StoreMnemonic = PPC::STDCX;
13184 Register oldval = MI.getOperand(3).getReg();
13185 Register newval = MI.getOperand(4).getReg();
13191 F->insert(It, loop1MBB);
13192 F->insert(It, loop2MBB);
13193 F->insert(It, exitMBB);
13214 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), CrReg)
13240 } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
13241 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
13245 bool is64bit = Subtarget.isPPC64();
13247 bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
13252 Register oldval = MI.getOperand(3).getReg();
13253 Register newval = MI.getOperand(4).getReg();
13259 F->insert(It, loop1MBB);
13260 F->insert(It, loop2MBB);
13261 F->insert(It, exitMBB);
13268 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
13287 Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
13319 if (ptrA != ZeroReg) {
13321 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
13330 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
13331 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
13334 .addImm(is8bit ? 28 : 27);
13335 if (!isLittleEndian)
13336 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
13338 .addImm(is8bit ? 24 : 16);
13340 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
13345 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
13350 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
13353 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
13360 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
13364 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
13367 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
13370 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
13375 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
13392 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
13416 } else if (MI.getOpcode() == PPC::FADDrtz) {
13441 auto MIB = BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest)
13449 } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
13450 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
13451 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
13452 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
13453 unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
13454 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
13457 bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
13458 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
13462 Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
13466 .addReg(MI.getOperand(1).getReg())
13469 MI.getOperand(0).getReg())
13470 .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
13471 } else if (MI.getOpcode() == PPC::TCHECK_RET) {
13477 MI.getOperand(0).getReg())
13479 } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
13481 unsigned Imm = MI.getOperand(1).getImm();
13484 MI.getOperand(0).getReg())
13486 } else if (MI.getOpcode() == PPC::SETRNDi) {
13488 Register OldFPSCRReg = MI.getOperand(0).getReg();
13491 if (MRI.use_empty(OldFPSCRReg))
13492 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), OldFPSCRReg);
13494 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
13505 unsigned Mode = MI.getOperand(1).getImm();
13506 BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
13510 BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
13513 } else if (MI.getOpcode() == PPC::SETRND) {
13521 auto copyRegFromG8RCOrF8RC = [&](unsigned DestReg, unsigned SrcReg) {
13522 if (Subtarget.hasDirectMove()) {
13523 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
13527 unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
13530 if (RC == &PPC::F8RCRegClass) {
13533 "Unsupported RegClass.");
13535 StoreOp = PPC::STFD;
13540 (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
13541 "Unsupported RegClass.");
13574 Register OldFPSCRReg = MI.getOperand(0).getReg();
13577 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
13591 copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
13599 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
13600 BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
13606 BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
13613 copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
13622 } else if (MI.getOpcode() == PPC::SETFLM) {
13626 Register OldFPSCRReg = MI.getOperand(0).getReg();
13627 if (MRI.use_empty(OldFPSCRReg))
13628 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::IMPLICIT_DEF), OldFPSCRReg);
13630 BuildMI(*BB, MI, Dl, TII->get(PPC::MFFS), OldFPSCRReg);
13633 Register NewFPSCRReg = MI.getOperand(1).getReg();
13639 } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
13640 MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
13642 } else if (MI.getOpcode() == PPC::SPLIT_QUADWORD) {
13649 .addUse(Src, 0, PPC::sub_gp8_x1);
13652 .addUse(Src, 0, PPC::sub_gp8_x0);
13653 } else if (MI.getOpcode() == PPC::LQX_PSEUDO ||
13654 MI.getOpcode() == PPC::STQX_PSEUDO) {
13660 F->getRegInfo().createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass);
13666 MI.getOpcode() == PPC::LQX_PSEUDO ? TII->get(PPC::LQ)
13667 : TII->get(PPC::STQ))
13675 MI.eraseFromParent();
13688 int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
13691 return RefinementSteps;
13697 EVT VT = Op.getValueType();
13700 ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX())))
13724 PPCTargetLowering::getSqrtResultForDenormInput(SDValue Op,
13727 EVT VT = Op.getValueType();
13728 if (VT != MVT::f64 &&
13729 ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX()))
13736 int Enabled, int &RefinementSteps,
13737 bool &UseOneConstNR,
13738 bool Reciprocal) const {
13740 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
13741 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
13742 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
13743 (VT == MVT::v2f64 && Subtarget.hasVSX())) {
13749 UseOneConstNR = !Subtarget.needsTwoConstNR();
13757 int &RefinementSteps) const {
13759 if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
13760 (VT == MVT::f64 && Subtarget.hasFRE()) ||
13761 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
13762 (VT == MVT::v2f64 && Subtarget.hasVSX())) {
13770 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
13808 unsigned Bytes, int Dist,
13818 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
13819 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
13822 if (FS != BFS || FS != (int)Bytes) return false;
13826 SDValue Base1 = Loc, Base2 = BaseLoc;
13827 int64_t Offset1 = 0, Offset2 = 0;
13830 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
13840 if (isGA1 && isGA2 && GV1 == GV2)
13841 return Offset1 == (Offset2 + Dist*Bytes);
static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base, unsigned Bytes,
                            int Dist, SelectionDAG &DAG) {
  if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
    EVT VT = LS->getMemoryVT();
    SDValue Loc = LS->getBasePtr();
    return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
  }

  // Intrinsic loads: map each intrinsic to the memory type it accesses.
  if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
    EVT VT;
    switch (N->getConstantOperandVal(1)) {
    default:
      return false;
    case Intrinsic::ppc_altivec_lvx:
    case Intrinsic::ppc_altivec_lvxl:
    case Intrinsic::ppc_vsx_lxvw4x:
    case Intrinsic::ppc_vsx_lxvw4x_be:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
    case Intrinsic::ppc_vsx_lxvd2x_be:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    }
    return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
  }

  // Intrinsic stores, handled symmetrically.
  if (N->getOpcode() == ISD::INTRINSIC_VOID) {
    EVT VT;
    switch (N->getConstantOperandVal(1)) {
    default:
      return false;
    case Intrinsic::ppc_altivec_stvx:
    case Intrinsic::ppc_altivec_stvxl:
    case Intrinsic::ppc_vsx_stxvw4x:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_vsx_stxvw4x_be:
      VT = MVT::v4i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x_be:
      VT = MVT::v2f64;
      break;
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    }
    return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
  }

  return false;
}
static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
  SDValue Chain = LD->getChain();
  EVT VT = LD->getMemoryVT();

  // First, search up the chain, branching to follow all token-factor
  // operands. If we find a consecutive load, we're done; otherwise record
  // all nodes just above the top-level loads and token factors.
  while (!Queue.empty()) {
    SDNode *ChainNext = Queue.pop_back_val();
    if (!Visited.insert(ChainNext).second)
      continue;

    if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
      if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
        return true;

      if (!Visited.count(ChainLD->getChain().getNode()))
        Queue.push_back(ChainLD->getChain().getNode());
    } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
      for (const SDUse &O : ChainNext->ops())
        if (!Visited.count(O.getNode()))
          Queue.push_back(O.getNode());
    } else
      LoadRoots.insert(ChainNext);
  }

  // Second, search back down the chain from the recorded roots, looking for
  // sibling memory operations ordered against the same roots.
  for (SDNode *I : LoadRoots) {
    Queue.push_back(I);

    while (!Queue.empty()) {
      SDNode *LoadRoot = Queue.pop_back_val();
      if (!Visited.insert(LoadRoot).second)
        continue;

      if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
        if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
          return true;

      for (SDNode *U : LoadRoot->uses())
        if (((isa<MemSDNode>(U) &&
              cast<MemSDNode>(U)->getChain().getNode() == LoadRoot) ||
             U->getOpcode() == ISD::TokenFactor) &&
            !Visited.count(U))
          Queue.push_back(U);
    }
  }

  return false;
}
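// Note the two-phase search above: walking *up* the chain finds loads this
// load is ordered after, while walking back *down* from the recorded roots
// finds sibling loads hanging off the same TokenFactor. The consecutive
// partner can sit in either direction, so both must be visited.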
  // generateEquivalentSub leaves the comparison result in the sign bit;
  // shift it down, then complement/swap as the condition code requires.
  auto Final = Shifted;

SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  // Operand widths drive this transform, so run it only after legalization.
  if (!DCI.isAfterLegalizeDAG())
    return SDValue();

  // Only worthwhile when every user zero-extends the i1 result anyway.
  for (const SDNode *U : N->uses())
    if (U->getOpcode() != ISD::ZERO_EXTEND)
      return SDValue();

  auto OpSize = N->getOperand(0).getValueSizeInBits();

  if (OpSize < Size) {
SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");

  // Only i1 results computed from i32/i64 sources are interesting here.
  if (N->getValueType(0) != MVT::i1)
    return SDValue();
  if (N->getOperand(0).getValueType() != MVT::i32 &&
      N->getOperand(0).getValueType() != MVT::i64)
    return SDValue();

  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  unsigned OpBits = N->getOperand(0).getValueSizeInBits();

  // When the operands are not known to be pre-extended booleans, a SETCC can
  // still be rewritten as an equivalent subtraction sequence.
  if (N->getOpcode() == ISD::SETCC)
    if (SDValue Sub = ConvertSETCCToSubtract(N, DCI))
      return Sub;

  // Otherwise, the value must be produced by a tree of logical operations.
  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR &&
      N->getOperand(0).getOpcode() != ISD::XOR &&
      N->getOperand(1).getOpcode() != ISD::AND &&
      N->getOperand(1).getOpcode() != ISD::OR &&
      N->getOperand(1).getOpcode() != ISD::XOR)
    return SDValue();

  for (unsigned i = 0; i < 2; ++i) {
    // Operands extended from i1, and constants, are acceptable leaves.
    if ((N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
        isa<ConstantSDNode>(N->getOperand(i)))
      continue;
  }

  // Visit the tree of binary operators breadth-first, collecting leaf inputs
  // and promotable interior nodes.
  while (!BinOps.empty()) {
    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
    }
  }

  // Every non-constant input must be rewritable to i1.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
  }

  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;
  }

  // Replacement below may RAUW nodes still on the worklist, so hold the
  // remaining promoted operations in handles that track those updates.
  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  while (!PromOpHandles.empty()) {
    PromOpHandles.pop_back();

    // If an operand has not itself been promoted yet, re-queue this node.
    if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
        PromOp.getOperand(0).getValueType() != MVT::i1) {
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    if (isa<ConstantSDNode>(RepValue))
      continue;

    // SELECT/SELECT_CC keep their boolean payload past a leading condition
    // operand; C names the first operand to rewrite.
    unsigned C;
    switch (PromOp.getOpcode()) {
    default:
      C = 0;
      break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != MVT::i1)) {
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    // Constants still need an explicit truncation to i1.
    for (unsigned i = 0; i < 2; ++i)
      if (isa<ConstantSDNode>(Ops[C+i]))
        Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
  }

  // The truncation is now redundant.
  return N->getOperand(0);
}
SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  // Only extensions to i32/i64, from i1 (when tracking CR bits) or from i32
  // on a 64-bit target, are worth rewriting.
  if (N->getValueType(0) != MVT::i32 &&
      N->getValueType(0) != MVT::i64)
    return SDValue();
  if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
        (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
    return SDValue();

  // The extension must be fed by a tree of logical operations.
  if (N->getOperand(0).getOpcode() != ISD::AND &&
      N->getOperand(0).getOpcode() != ISD::OR &&
      N->getOperand(0).getOpcode() != ISD::XOR)
    return SDValue();

  // Walk the operator tree breadth-first, collecting leaf inputs and
  // promotable interior nodes.
  while (!BinOps.empty()) {
    for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
    }
  }

  // SELECT and SELECT_CC users are special: the value may feed their
  // condition operand, which must stay in the original narrow type. Record
  // those uses so they can be patched with explicit truncates later.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;
    SelectTruncOp[0].insert(std::make_pair(User,
                                           User->getOperand(0).getValueType()));
    SelectTruncOp[0].insert(std::make_pair(User,
                                           User->getOperand(0).getValueType()));
    SelectTruncOp[1].insert(std::make_pair(User,
                                           User->getOperand(1).getValueType()));
  }

  for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
    SelectTruncOp[0].insert(std::make_pair(User,
                                           User->getOperand(0).getValueType()));
    SelectTruncOp[0].insert(std::make_pair(User,
                                           User->getOperand(0).getValueType()));
    SelectTruncOp[1].insert(std::make_pair(User,
                                           User->getOperand(1).getValueType()));
  }

  unsigned PromBits = N->getOperand(0).getValueSizeInBits();
  bool ReallyNeedsExt = false;

  // The extension is only free if every input already carries the required
  // high bits; otherwise a real extend has to remain at the end.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;
    unsigned OpBits =
        Inputs[i].getOperand(0).getValueSizeInBits();
    assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
    if ((N->getOpcode() == ISD::ZERO_EXTEND &&
         !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
                                APInt::getHighBitsSet(OpBits,
                                                      OpBits - PromBits))) ||
        (N->getOpcode() == ISD::SIGN_EXTEND &&
         DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
             (OpBits - (PromBits - 1)))) {
      ReallyNeedsExt = true;
      break;
    }
  }

  // Replace each input by its pre-truncation source.
  for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
    if (isa<ConstantSDNode>(Inputs[i]))
      continue;
    SDValue InSrc = Inputs[i].getOperand(0);
  }

  // As in the truncation combine, hold the remaining worklist in handles so
  // in-place RAUW updates do not invalidate it.
  std::list<HandleSDNode> PromOpHandles;
  for (auto &PromOp : PromOps)
    PromOpHandles.emplace_back(PromOp);

  while (!PromOpHandles.empty()) {
    PromOpHandles.pop_back();

    unsigned C;
    switch (PromOp.getOpcode()) {
    default:
      C = 0;
      break;
    }

    if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
         PromOp.getOperand(C).getValueType() != N->getValueType(0))) {
      // The operand is not yet ready; defer this node.
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    // A select whose condition operand is itself still waiting for its
    // truncation also defers.
    if ((SelectTruncOp[1].count(PromOp.getNode()) &&
         PromOp.getOperand(1).getValueType() != MVT::i1)) {
      PromOpHandles.emplace_front(PromOp);
      continue;
    }

    for (unsigned i = 0; i < 2; ++i) {
      if (!isa<ConstantSDNode>(Ops[C+i]))
        continue;
    }

    // Patch any recorded select/select_cc condition operands back to their
    // original (truncated) types.
    auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
    if (SI0 != SelectTruncOp[0].end())
      Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
    auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
    if (SI1 != SelectTruncOp[1].end())
      Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
  }

  // If the extension turned out to be free, drop it entirely.
  if (!ReallyNeedsExt)
    return N->getOperand(0);

  // Zero extension: mask off the (now garbage) high bits.
  if (N->getOpcode() == ISD::ZERO_EXTEND)
    return DAG.getNode(
        ISD::AND, dl, N->getValueType(0), N->getOperand(0),
        DAG.getConstant(APInt::getLowBitsSet(N->getValueSizeInBits(0),
                                             PromBits),
                        dl, N->getValueType(0)));

  assert(N->getOpcode() == ISD::SIGN_EXTEND && "Invalid extension type");
  // Sign extension: shift left, then arithmetic-shift right.
  SDValue ShiftCst =
      DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
  return DAG.getNode(
      ISD::SRA, dl, N->getValueType(0),
      DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0),
                  ShiftCst),
      ShiftCst);
}
SDValue PPCTargetLowering::combineSetCC(SDNode *N,
                                        DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::SETCC &&
         "Should be called with a SETCC node");

  EVT VT = N->getValueType(0);
  EVT OpVT = LHS.getValueType();

  return DAGCombineTruncBoolExt(N, DCI);
}

// Is this an extending load from an f32 to an f64?
static bool isFPExtLoad(SDValue Op) {
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
    return LD->getExtensionType() == ISD::EXTLOAD &&
           Op.getValueType() == MVT::f64;
  return false;
}
SDValue PPCTargetLowering::combineElementTruncationToVectorTruncation(
    SDNode *N, DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  SDValue FirstInput = N->getOperand(0);
  assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
         "The input operand must be an fp-to-int conversion.");

  // All inputs must be identical fp-to-int conversions; track whether they
  // all convert the very same value (a splat).
  bool IsSplat = true;

  EVT TargetVT = N->getValueType(0);
  for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
    SDValue NextOp = N->getOperand(i);
    if (NextConversion != FirstConversion)
      return SDValue();
    if (N->getOperand(i) != FirstInput)
      IsSplat = false;
  }

  // Collect the scalar FP sources and rebuild them as one FP vector, so the
  // conversion can be done as a single vector operation.
  for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
    SDValue In = N->getOperand(i).getOperand(0);
  }

  EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
  return DAG.getNode(Opcode, dl, TargetVT, BV);
}
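// For example (assumed pattern, matching the guard above): a v2i64
// (build_vector (fp_to_sint f64 %a), (fp_to_sint f64 %b)) can be rebuilt as
// (fp_to_sint (build_vector %a, %b : v2f64)), which selects to a single
// xvcvdpsxds instead of two scalar conversions plus register moves.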
14749 "Should be called with a BUILD_VECTOR node");
14754 if (!
N->getValueType(0).getVectorElementType().isByteSized())
14757 bool InputsAreConsecutiveLoads =
true;
14758 bool InputsAreReverseConsecutive =
true;
14759 unsigned ElemSize =
N->getValueType(0).getScalarType().getStoreSize();
14760 SDValue FirstInput =
N->getOperand(0);
14761 bool IsRoundOfExtLoad =
false;
14766 FirstLoad = cast<LoadSDNode>(FirstInput.
getOperand(0));
14771 N->getNumOperands() == 1)
14774 if (!IsRoundOfExtLoad)
14775 FirstLoad = cast<LoadSDNode>(FirstInput);
14779 for (
int i = 1, e =
N->getNumOperands(); i < e; ++i) {
14781 if (IsRoundOfExtLoad &&
N->getOperand(i).getOpcode() !=
ISD::FP_ROUND)
14784 SDValue NextInput = IsRoundOfExtLoad ?
N->getOperand(i).getOperand(0) :
14790 IsRoundOfExtLoad ?
N->getOperand(i-1).getOperand(0) :
N->getOperand(i-1);
14791 LoadSDNode *LD1 = cast<LoadSDNode>(PreviousInput);
14792 LoadSDNode *LD2 = cast<LoadSDNode>(NextInput);
14801 InputsAreConsecutiveLoads =
false;
14803 InputsAreReverseConsecutive =
false;
14806 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
14811 assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
14812 "The loads cannot be both consecutive and reverse consecutive.");
14816 if (InputsAreConsecutiveLoads) {
14817 assert(FirstLoad &&
"Input needs to be a LoadSDNode.");
14821 ReturnSDVal = WideLoad;
14822 }
else if (InputsAreReverseConsecutive) {
14824 assert(LastLoad &&
"Input needs to be a LoadSDNode.");
14829 for (
int i =
N->getNumOperands() - 1; i >= 0; i--)
14833 DAG.
getUNDEF(
N->getValueType(0)), Ops);
14837 for (
auto *LD : InputLoads)
14839 return ReturnSDVal;
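// In effect, N scalar loads feeding a build_vector collapse into one vector
// load (plus one lane-reversing shuffle when the elements arrive in
// descending address order); makeEquivalentMemoryOrdering keeps the original
// loads' chain users correct after the replacement.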
static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
                                      SDValue Input, uint64_t Elems,
                                      uint64_t CorrectElems) {
  // Build a shuffle that moves each extracted element into the lane the
  // vector-extend instruction expects; each nibble of Elems/CorrectElems
  // names one source lane.
  for (unsigned i = 0; i < N->getNumOperands(); i++) {
    ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
    ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
    CorrectElems = CorrectElems >> 8;
    Elems = Elems >> 8;
  }

  EVT VT = N->getValueType(0);

static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
  auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
    // All extracts must come from the same source vector...
    if (Input && Input != Extract.getOperand(0))
      return false;
    // ...and their lane indices are packed into Elems, one nibble each.
    Elems = Elems << 8;
    return true;
  };

  for (unsigned i = 0; i < N->getNumOperands(); i++) {
    if (!isSExtOfVecExtract(N->getOperand(i))) {
      return SDValue();
    }
  }

  // Index into the target-element table by the combined input/output widths.
  int TgtElemArrayIdx;
  int OutputSize = N->getValueType(0).getScalarSizeInBits();
  if (InputSize + OutputSize == 40)
    TgtElemArrayIdx = 0;
  else if (InputSize + OutputSize == 72)
    TgtElemArrayIdx = 1;
  else if (InputSize + OutputSize == 48)
    TgtElemArrayIdx = 2;
  else if (InputSize + OutputSize == 80)
    TgtElemArrayIdx = 3;
  else if (InputSize + OutputSize == 96)
    TgtElemArrayIdx = 4;
  else
    return SDValue();

  uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
  CorrectElems = DAG.getDataLayout().isLittleEndian()
                     ? CorrectElems & 0x0F0F0F0F0F0F0F0F
                     : CorrectElems & 0xF0F0F0F0F0F0F0F0;
  if (Elems != CorrectElems)
    return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
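// The 40/72/48/80/96 sums above are InputSize + OutputSize acting as a
// compact key: extracting i8 lanes and sign-extending to i32 gives
// 8 + 32 = 40, selecting the lane layout that vextsb2w expects. When the
// extracted lane order differs from that layout, addShuffleForVecExtend
// inserts one permute rather than giving up on the combine.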
static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG) {
  // Looking for a zero-extending load wrapped in a v1i128 build_vector.
  if (N->getValueType(0) != MVT::v1i128)
    return SDValue();

  SDValue Operand = N->getOperand(0);
  auto *LD = cast<LoadSDNode>(Operand);
  EVT MemoryType = LD->getMemoryVT();

  // The load must zero-extend from a power-of-two scalar memory type.
  bool ValidLDType = MemoryType == MVT::i8 || MemoryType == MVT::i16 ||
                     MemoryType == MVT::i32 || MemoryType == MVT::i64;
  if (!ValidLDType || LD->getExtensionType() != ISD::ZEXTLOAD)
    return SDValue();

  SDValue LoadOps[] = {
      LD->getChain(), LD->getBasePtr(),
      DAG.getIntPtrConstant(MemoryType.getScalarSizeInBits(), dl)};
  return DAG.getMemIntrinsicNode(PPCISD::LXVRZX, dl,
                                 DAG.getVTList(MVT::v1i128, MVT::Other),
                                 LoadOps, MemoryType, LD->getMemOperand());
}
SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
         "Should be called with a BUILD_VECTOR node");

  if (!Subtarget.hasVSX())
    return SDValue();

  // First, try folding element-wise fp-to-int conversions into one vector
  // conversion.
  SDValue FirstInput = N->getOperand(0);
  SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
  if (Reduced)
    return Reduced;

  // Next, merge sign-extended vector extracts into a vector extend.
  if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
    Reduced = combineBVOfVecSExt(N, DAG);
    if (Reduced)
      return Reduced;
  }

  // On ISA 3.1, v1i128 zero-extending loads map onto lxvrzx.
  if (Subtarget.isISA3_1()) {
    Reduced = combineBVZEXTLOAD(N, DAG);
    if (Reduced)
      return Reduced;
  }

  // Finally, a v2f64 built from two extends of adjacent v4f32 extracts can
  // be formed by a single conversion of (half of) the source vector.
  if (N->getValueType(0) != MVT::v2f64)
    return SDValue();

  if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
    return SDValue();

  SDValue Ext1 = FirstInput.getOperand(0);
  SDValue Ext2 = N->getOperand(1).getOperand(0);
  if (!Ext1Op || !Ext2Op)
    return SDValue();

  // Elements {0,1} select the low half, {2,3} the (shifted) high half.
  if (FirstElem == 0 && SecondElem == 1)
    SrcVec = Ext1.getOperand(0);
  else if (FirstElem == 2 && SecondElem == 3)
    SrcVec = BuildVSLDOI(Ext1.getOperand(0), Ext1.getOperand(0), 8,
                         MVT::v4f32, DAG, dl);
  else
    return SDValue();

  return DAG.getNode(NodeType, dl, MVT::v2f64, SrcVec);
}
SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  assert((N->getOpcode() == ISD::SINT_TO_FP ||
          N->getOpcode() == ISD::UINT_TO_FP) &&
         "Need an int -> FP conversion node here");

  SDValue Op(N, 0);

  // Only f32/f64 results from a simple integer source between i1 and i64
  // (exclusive/inclusive) qualify.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();
  if (!Op.getOperand(0).getValueType().isSimple())
    return SDValue();
  if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
      Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
    return SDValue();

  SDValue FirstOperand(Op.getOperand(0));
  bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
                     (FirstOperand.getValueType() == MVT::i8 ||
                      FirstOperand.getValueType() == MVT::i16);
  if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
    // On Power9, i8/i16 loads can feed the conversion directly via
    // lxsibzx/lxsihzx, with a VSX sign-extend in between for signed sources.
    bool DstDouble = Op.getValueType() == MVT::f64;
    unsigned ConvOp = Signed ? (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS)
                             : (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);

    LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
    if (Signed) {
      SDValue ExtOps[] = { Ld, WidthConst };
      SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
      return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
    }
    return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
  }

  // i32 sources are handled elsewhere.
  if (Op.getOperand(0).getValueType() == MVT::i32)
    return SDValue();
  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? PPCISD::FCFIDS
                       : PPCISD::FCFID;
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  // If the source is itself an fp-to-int of an f32/f64 value, round-trip the
  // value through the FP registers without ever leaving them.
  if (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ||
      Subtarget.hasFPCVT()) {
    SDValue Src = Op.getOperand(0).getOperand(0);
    if (Src.getValueType() == MVT::f32) {
      Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
      DCI.AddToWorklist(Src.getNode());
    } else if (Src.getValueType() != MVT::f64) {
      return SDValue();
    }

    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
      FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
                       DAG.getIntPtrConstant(0, dl));
      DCI.AddToWorklist(FP.getNode());
    }
    return FP;
  }
  return SDValue();
}
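// The payoff of the Power9 sub-word path: (float)(signed char)*p lowers to
// roughly lxsibzx + vextsb2d + fcfids (a sketch; the exact instructions
// depend on signedness and destination width) instead of an integer load,
// a GPR sign-extend, a GPR-to-VSR move, and a convert.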
static SDValue expandVSXLoadForLE(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI) {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX load");
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    Chain = LD->getChain();
    Base = LD->getBasePtr();
    MMO = LD->getMemOperand();
    break;
  }
  }

  MVT VecTy = N->getValueType(0).getSimpleVT();

  SDValue LoadOps[] = { Chain, Base };
  SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
                                         DAG.getVTList(MVT::v2f64, MVT::Other),
                                         LoadOps, MVT::v2f64, MMO);
  Chain = Load.getValue(1);

  // lxvd2x delivers the doublewords swapped; add the XXSWAPD that restores
  // element order (through a bitcast for non-v2f64 types).
  if (VecTy != MVT::v2f64) {

static SDValue expandVSXStoreForLE(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI) {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Unexpected opcode for little endian VSX store");
  case ISD::STORE: {
    StoreSDNode *ST = cast<StoreSDNode>(N);
    Chain = ST->getChain();
    Base = ST->getBasePtr();
    MMO = ST->getMemOperand();
    break;
  }
  }

  SDValue Src = N->getOperand(SrcOpnd);
  MVT VecTy = Src.getValueType().getSimpleVT();

  // Mirror image of the load: swap the doublewords, then store with stxvd2x.
  if (VecTy != MVT::v2f64) {
    Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
  }
  SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
                             DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
  return DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl, DAG.getVTList(MVT::Other),
                                 StoreOps, VecTy, MMO);
}
SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  unsigned Opcode = N->getOperand(1).getOpcode();
  bool Strict = N->getOperand(1)->isStrictFPOpcode();
  assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT ||
          Opcode == ISD::STRICT_FP_TO_SINT ||
          Opcode == ISD::STRICT_FP_TO_UINT) &&
         "Not a FP_TO_INT Instruction!");

  SDValue Val = N->getOperand(1).getOperand(Strict ? 1 : 0);
  EVT Op1VT = N->getOperand(1).getValueType();
  EVT ResVT = Val.getValueType();

  if (!Subtarget.hasVSX() || !Subtarget.hasFPCVT() || !isTypeLegal(ResVT))
    return SDValue();

  // Only word and doubleword integer widths store cheaply from a VSR;
  // Power9 adds halfword and byte.
  bool ValidTypeForStoreFltAsInt =
      (Op1VT == MVT::i32 || (Op1VT == MVT::i64 && Subtarget.isPPC64()) ||
       (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));

  // TODO: Lower conversion from f128 on all VSX targets.
  if (ResVT == MVT::ppcf128 || (ResVT == MVT::f128 && !Subtarget.hasP9Vector()))
    return SDValue();

  if ((Op1VT != MVT::i64 && !Subtarget.hasP8Vector()) ||
      cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
    return SDValue();

  SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
                    DAG.getIntPtrConstant(ByteSize, dl, false) };
  return DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
                                 DAG.getVTList(MVT::Other), Ops,
                                 cast<StoreSDNode>(N)->getMemoryVT(),
                                 cast<StoreSDNode>(N)->getMemOperand());
}
// A mask that strictly alternates between lanes of the two source vectors
// describes an interleaving (merge-style) shuffle.
static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
  bool PrevElemFromFirstVec = Mask[0] < NumElts;
  for (int i = 1, e = Mask.size(); i < e; i++) {
    if (PrevElemFromFirstVec && Mask[i] < NumElts)
      return false;
    if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
      return false;
    PrevElemFromFirstVec = !PrevElemFromFirstVec;
  }
  return true;
}

static bool isSplatBV(SDValue Op) {
  // Find the first non-undef input, then require every other input to match
  // it (or be undef).
  FirstOp = Op.getOperand(i);

  if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
    return false;

static SDValue isScalarToVec(SDValue Op) {
  // Peel bitcasts while looking for a SCALAR_TO_VECTOR underneath.
  Op = Op.getOperand(0);

static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
                                            int LHSMaxIdx, int RHSMinIdx,
                                            int RHSMaxIdx, int HalfVec,
                                            unsigned ValidLaneWidth,
                                            const PPCSubtarget &Subtarget) {
  // Shift each in-range mask entry by half a vector so it points at the lane
  // the permuted scalar_to_vector actually occupies.
  for (int i = 0, e = ShuffV.size(); i < e; i++) {
    int Idx = ShuffV[i];
    if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
      ShuffV[i] +=
          Subtarget.isLittleEndian() ? HalfVec : HalfVec - ValidLaneWidth;
  }
}

static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG,
                               const PPCSubtarget &Subtarget) {
  SDLoc dl(OrigSToV);
  assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
         "Expecting a SCALAR_TO_VECTOR here");
  assert(NumElts > 1 &&
         "Cannot produce a permuted scalar_to_vector for one element vector");
  // A direct move deposits the scalar in the middle lane of the register.
  unsigned ResultInElt = NumElts / 2;
  NewMask[ResultInElt] = Idx->getZExtValue();
SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
                                                SelectionDAG &DAG) const {
  int NumElts = LHS.getValueType().getVectorNumElements();

  // Without direct moves the scalar is already in a vector register, so
  // there is nothing to canonicalize.
  if (!Subtarget.hasDirectMove())
    return Res;

  Mask = cast<ShuffleVectorSDNode>(Res)->getMask();

  // If either side is a (possibly widened) scalar_to_vector, rewrite the
  // shuffle to read the scalar from the lane a direct move deposits it in.
  if (SToVLHS || SToVRHS) {
    // Both sides must use the same element size.
    if (SToVLHS && SToVRHS &&
        SToVLHS.getValueType().getScalarSizeInBits() !=
            SToVRHS.getValueType().getScalarSizeInBits())
      return Res;
    int NumEltsOut = ShuffV.size();

    // For lanes narrower than 64 bits, only part of the moved element is
    // meaningful; track how many output lanes one input lane covers.
    unsigned ValidLaneWidth =
        SToVLHS ? SToVLHS.getValueType().getScalarSizeInBits() /
                      LHS.getValueType().getScalarSizeInBits()
                : SToVRHS.getValueType().getScalarSizeInBits() /
                      RHS.getValueType().getScalarSizeInBits();

    // Initially assume that neither input is permuted; adjust below.
    int LHSMaxIdx = -1;
    int RHSMinIdx = -1;
    int RHSMaxIdx = -1;
    int HalfVec = LHS.getValueType().getVectorNumElements() / 2;

    // Compute the mask ranges owned by each permuted scalar_to_vector input.
    if (SToVLHS)
      LHSMaxIdx = NumEltsOut / NumEltsIn;
    if (SToVRHS) {
      RHSMinIdx = NumEltsOut;
      RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
    }

    fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
                                    HalfVec, ValidLaneWidth, Subtarget);

    if (!isa<ShuffleVectorSDNode>(Res))
      return Res;
    Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
  }

  // Canonicalize a merge of a splat with itself so the splat feeds the
  // operand slot the hardware merge expects.
  if (IsLittleEndian) {
    // On little endian the splat must be the second operand.
    if (Mask[0] < NumElts)
      for (int i = 1, e = Mask.size(); i < e; i += 2) {
        ShuffV[i] = (ShuffV[i - 1] >= 0 ? ShuffV[i - 1] : 0) + NumElts;
      }
    else
      for (int i = 0, e = Mask.size(); i < e; i += 2) {
        ShuffV[i] = (ShuffV[i + 1] >= 0 ? ShuffV[i + 1] : 0) + NumElts;
      }
  } else {
    // On big endian the splat must be the first operand.
    if (Mask[0] < NumElts)
      for (int i = 0, e = Mask.size(); i < e; i += 2) {
        ShuffV[i] = ShuffV[i + 1] >= 0 ? ShuffV[i + 1] - NumElts : 0;
      }
    else
      for (int i = 1, e = Mask.size(); i < e; i += 2) {
        ShuffV[i] = ShuffV[i - 1] >= 0 ? ShuffV[i - 1] - NumElts : 0;
      }
  }

  // Rebuild the splat operand so both shuffle inputs read the splat value.
  SDValue SplatVal =
      cast<BuildVectorSDNode>(TheSplat.getNode())->getSplatValue();
  if (IsLittleEndian)
    RHS = TheSplat;
  else
    LHS = TheSplat;
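// The even/odd rewriting above works because vmrg*-style merges interleave
// both inputs: when one input is a splat, every other output lane can be
// redirected at the splat operand without changing the result, letting
// instruction selection fold the whole pattern into a single merge.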
SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
                                                LSBaseSDNode *LSBase,
                                                DAGCombinerInfo &DCI) const {
  assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
         "Not a reverse memop pattern!");

  auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
    auto I = Mask.rbegin();
    auto E = Mask.rend();
    for (; I != E; ++I) {
    }
  };

  if (!Subtarget.hasP9Vector())
    return SDValue();
  if (!IsElementReverse(SVN))
    return SDValue();

static bool isStoreConditional(SDValue Intrin, unsigned &StoreWidth) {
  if (IntrinsicID == Intrinsic::ppc_stdcx)
    StoreWidth = 8;
  else if (IntrinsicID == Intrinsic::ppc_stwcx)
    StoreWidth = 4;
  else if (IntrinsicID == Intrinsic::ppc_sthcx)
    StoreWidth = 2;
  else if (IntrinsicID == Intrinsic::ppc_stbcx)
    StoreWidth = 1;
  else
    return false;
  return true;
}
SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::ADD:
    return combineADD(N, DCI);
  case ISD::AND: {
    // Only i64 ANDs with a constant mask whose value fits in 32 bits are
    // interesting here.
    SDValue Op2 = N->getOperand(1);
    if (!isa<ConstantSDNode>(Op2) || N->getValueType(0) != MVT::i64)
      break;
    if (!isUInt<32>(Imm))
      break;
    break;
  }
  case ISD::SHL:
    return combineSHL(N, DCI);
  case ISD::SRA:
    return combineSRA(N, DCI);
  case ISD::SRL:
    return combineSRL(N, DCI);
  case ISD::MUL:
    return combineMUL(N, DCI);
  case ISD::FMA:
  case PPCISD::FNMSUB:
    return combineFMALike(N, DCI);
  case PPCISD::SHL:
    if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRL:
    if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0)))
      if (C->isZero() || C->isAllOnes()) // 0 or -1 >>s V -> 0 or -1.
        return N->getOperand(0);
    break;
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    return DAGCombineExtBoolTrunc(N, DCI);
  case ISD::TRUNCATE:
    return combineTRUNCATE(N, DCI);
  case ISD::SETCC:
    if (SDValue CSCC = combineSetCC(N, DCI))
      return CSCC;
    return DAGCombineTruncBoolExt(N, DCI);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return combineFPToIntToFP(N, DCI);
  case ISD::VECTOR_SHUFFLE:
    if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
      LSBaseSDNode *LSBase = cast<LSBaseSDNode>(N->getOperand(0));
      return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
    }
    return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
  case ISD::STORE: {
    EVT Op1VT = N->getOperand(1).getValueType();
    unsigned Opcode = N->getOperand(1).getOpcode();

    if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT ||
        Opcode == ISD::STRICT_FP_TO_SINT || Opcode == ISD::STRICT_FP_TO_UINT) {
      SDValue Val = combineStoreFPToInt(N, DCI);
      if (Val)
        return Val;
    }

    if (Opcode == ISD::VECTOR_SHUFFLE) {
      ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
      SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
      if (Val)
        return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx (and stdbrx with LDBRX support).
    if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
        N->getOperand(1).getNode()->hasOneUse() &&
        (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {

      // STBRX can only handle simple types and it makes no sense to store
      // less than two bytes.
      EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
      if (mVT.isExtended() || mVT.getSizeInBits() < 16)
        break;

      SDValue BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32 bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);

      // If the BSWAP operand is wider than the stored memory width, the
      // byte-swapped store has to truncate to mVT.
      if (Op1VT.bitsGT(mVT)) {
        int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
        BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
                              DAG.getConstant(Shift, dl, MVT::i32));
        if (Op1VT == MVT::i64)
          BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
      }

      SDValue Ops[] = {
        N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
      };
      return DAG.getMemIntrinsicNode(PPCISD::STBRX, dl,
                                     DAG.getVTList(MVT::Other),
                                     Ops, cast<StoreSDNode>(N)->getMemoryVT(),
                                     cast<StoreSDNode>(N)->getMemOperand());
    }

    // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0> on PPC64,
    // so the constant can be rematerialized in a 64-bit register.
    if (isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
      EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
      cast<StoreSDNode>(N)->setTruncatingStore(true);
    }

    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    if (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
        StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32)
      return expandVSXStoreForLE(N, DCI);
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT VT = LD->getValueType(0);

    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    if (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
        LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32)
      return expandVSXLoadForLE(N, DCI);

    // An i64 load whose only uses extract two f32 halves (a truncate and a
    // 32-bit right shift, each feeding an f32 bitcast) is really two f32
    // loads in disguise; splitting it avoids GPR<->FPR traffic.
    auto ReplaceTwoFloatLoad = [&]() {
      if (VT != MVT::i64)
        return false;

      // The load must have exactly two users of its value result.
      if (!LD->hasNUsesOfValue(2, 0))
        return false;

      auto UI = LD->use_begin();
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *Trunc = *UI++;
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *RightShift = *UI;

      if (RightShift->getOpcode() != ISD::SRL ||
          !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
          RightShift->getConstantOperandVal(1) != 32 ||
          !RightShift->hasOneUse())
        return false;

      SDNode *Trunc2 = *RightShift->use_begin();
      if (Bitcast->getValueType(0) != MVT::f32)
        return false;

      SDValue BasePtr = LD->getBasePtr();
      if (LD->isIndexed()) {
        assert(LD->getAddressingMode() == ISD::PRE_INC &&
               "Non-pre-inc AM on PPC?");
      }

      SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
                                      LD->getPointerInfo(), LD->getAlign(),
                                      MMOFlags, LD->getAAInfo());
      SDValue FloatLoad2 = DAG.getLoad(
          MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
          LD->getPointerInfo().getWithOffset(4),
          commonAlignment(LD->getAlign(), 4), MMOFlags, LD->getAAInfo());

      if (LD->isIndexed()) {
        // Re-point users of the pre-inc result at the first half's address.
      }
      return true;
    };

    if (ReplaceTwoFloatLoad())
      return SDValue(N, 0);

    EVT MemVT = LD->getMemoryVT();

    // Pre-P8 Altivec cannot load vectors at unaligned addresses directly.
    if (LD->isUnindexed() && VT.isVector() &&
        ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
          !Subtarget.hasP8Vector() &&
          (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
           VT == MVT::v4f32))) &&
        LD->getAlign() < ABIAlignment) {
      // Expand a misaligned Altivec load as lvsl/lvsr + two lvx + vperm.
      SDValue Chain = LD->getChain();

      MVT PermCntlTy, PermTy, LDTy;
      Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                            : Intrinsic::ppc_altivec_lvsl;
      IntrLD = Intrinsic::ppc_altivec_lvx;
      IntrPerm = Intrinsic::ppc_altivec_vperm;
      PermCntlTy = MVT::v16i8;
      PermTy = MVT::v4i32;
      LDTy = MVT::v4i32;

      SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue BaseLoad =
          DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                  DAG.getVTList(PermTy, MVT::Other),
                                  BaseLoadOps, LDTy, BaseMMO);

      // The second load covers the tail. An increment of 15 (not 16) keeps
      // the access inside the original object when the address happens to be
      // aligned already.
      int IncValue = IncOffset;

      SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue ExtraLoad =
          DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                  DAG.getVTList(PermTy, MVT::Other),
                                  ExtraLoadOps, LDTy, ExtraMMO);

      // The permute-control vector orders the two halves; the operand order
      // flips on little-endian (lvsr) relative to big-endian (lvsl).
      if (isLittleEndian)
        Perm = BuildIntrinsicOp(IntrPerm,
                                ExtraLoad, BaseLoad, PermCntl, DAG, dl);
      else
        Perm = BuildIntrinsicOp(IntrPerm,
                                BaseLoad, ExtraLoad, PermCntl, DAG, dl);

      if (VT != PermTy)
        Perm = Subtarget.hasAltivec()
                   ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
                   : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm,
                                 DAG.getTargetConstant(1, dl, MVT::i64));
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IID = N->getConstantOperandVal(0);
    Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                                         : Intrinsic::ppc_altivec_lvsl);
    if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
      SDValue Add = N->getOperand(1);

      // If the added constant cannot change the low (alignment) bits of the
      // address, an existing lvsl/lvsr of the base pointer yields the same
      // permute-control vector; reuse it.
      if (DAG.MaskedValueIsZero(Add->getOperand(1),
                                APInt::getAllOnes(Bits)
                                    .zext(Add.getScalarValueSizeInBits()))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode *U : BasePtr->uses()) {
          if (U->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
              U->getConstantOperandVal(0) == IID) {
            return SDValue(U, 0);
          }
        }
      }

      // Likewise, two addresses differing by an aligned constant amount
      // share the same permute-control vector.
      if (isa<ConstantSDNode>(Add->getOperand(1))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode *U : BasePtr->uses()) {
          if (U->getOpcode() == ISD::ADD &&
              isa<ConstantSDNode>(U->getOperand(1)) &&
              (Add->getConstantOperandVal(1) - U->getConstantOperandVal(1)) %
                      (1ULL << Bits) ==
                  0) {
            for (SDNode *V : U->uses())
              if (V->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
                  V->getConstantOperandVal(0) == IID)
                return SDValue(V, 0);
          }
        }
      }
    }

    // vmaxs* of a value and its negation computes the absolute value.
    if (IID == Intrinsic::ppc_altivec_vmaxsw ||
        IID == Intrinsic::ppc_altivec_vmaxsh ||
        IID == Intrinsic::ppc_altivec_vmaxsb) {
      SDValue V1 = N->getOperand(1);
      SDValue V2 = N->getOperand(2);
      if (V2.getOpcode() == ISD::SUB &&
          V2.getOperand(1) == V1) {
        return DAG.getNode(ISD::ABS, dl, N->getValueType(0), V1);
      }
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN:
    switch (N->getConstantOperandVal(1)) {
    default:
      break;
    case Intrinsic::ppc_altivec_vsum4sbs:
    case Intrinsic::ppc_altivec_vsum4shs:
    case Intrinsic::ppc_altivec_vsum4ubs: {
      // These intrinsics are only chained because they may set the SAT bit.
      // If the accumulator operand is a zero splat, the SAT bit can never be
      // set, so their chain may be replaced by the input chain.
      if (BuildVectorSDNode *BVN =
              dyn_cast<BuildVectorSDNode>(N->getOperand(3))) {
        APInt APSplatBits, APSplatUndef;
        unsigned SplatBitSize;
        bool HasAnyUndefs;
        bool BVNIsConstantSplat = BVN->isConstantSplat(
            APSplatBits, APSplatUndef, SplatBitSize, HasAnyUndefs, 0,
            !Subtarget.isLittleEndian());
        if (BVNIsConstantSplat && APSplatBits == 0)
          DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), N->getOperand(0));
      }
      return SDValue();
    }
    case Intrinsic::ppc_vsx_lxvw4x:
    case Intrinsic::ppc_vsx_lxvd2x:
      // For little endian, VSX loads require generating lxvd2x/xxswapd.
      return expandVSXLoadForLE(N, DCI);
    }
    break;
  case ISD::INTRINSIC_VOID:
    switch (N->getConstantOperandVal(1)) {
    default:
      break;
    case Intrinsic::ppc_vsx_stxvw4x:
    case Intrinsic::ppc_vsx_stxvd2x:
      // For little endian, VSX stores require generating xxswapd/stxvd2x.
      return expandVSXStoreForLE(N, DCI);
    }
    break;
  case ISD::BSWAP: {
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx (and ldbrx when available).
    bool Is64BitBswapOn64BitTgt =
        Subtarget.isPPC64() && N->getValueType(0) == MVT::i64;
    bool IsSingleUseNormalLd = ISD::isNormalLoad(N->getOperand(0).getNode()) &&
                               N->getOperand(0).hasOneUse();
    if (IsSingleUseNormalLd &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
         (Subtarget.hasLDBRX() && Is64BitBswapOn64BitTgt))) {
      SDValue BSLoad = DAG.getMemIntrinsicNode(
          PPCISD::LBRX, dl,
          DAG.getVTList(N->getValueType(0) == MVT::i64 ?
                        MVT::i64 : MVT::i32, MVT::Other),
          Ops, LD->getMemoryVT(), LD->getMemOperand());

      // If this is an i16 load, insert the truncate.
      SDValue ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
    }

    // A 64-bit bswap of a single-use normal load without ldbrx support is
    // split into two word-sized loads done with lwbrx.
    if (!Is64BitBswapOn64BitTgt || !IsSingleUseNormalLd)
      return SDValue();
    LoadSDNode *LD = cast<LoadSDNode>(N->getOperand(0));

    // Can't split volatile or atomic loads.
    if (!LD->isSimple())
      return SDValue();
    SDValue BasePtr = LD->getBasePtr();
    SDValue Lo = DAG.getLoad(MVT::i32, dl, LD->getChain(), BasePtr,
                             LD->getPointerInfo(), LD->getAlign());
    MachineMemOperand *NewMMO = DAG.getMachineFunction().getMachineMemOperand(
        LD->getMemOperand(), 4, 4);
    // Combine the two halves and preserve the ordering of both word loads.
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                             Hi.getOperand(0).getValue(1),
                             Lo.getOperand(0).getValue(1));
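    // When ldbrx is unavailable, the split above turns one 64-bit
    // byte-swapped load into two lwbrx word loads that are reassembled with
    // shift/or; the TokenFactor of the two load chains preserves the ordering
    // guarantees the single wide load provided.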
  case PPCISD::VCMP:
    // If a VCMP_rec node already exists with exactly the same operands as
    // this VCMP node, use its result instead of this VCMP's result.
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMP_rec's that match.
      SDNode *VCMPrecNode = nullptr;

      SDNode *LHSN = N->getOperand(0).getNode();
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if (UI->getOpcode() == PPCISD::VCMP_rec &&
            UI->getOperand(1) == N->getOperand(1) &&
            UI->getOperand(2) == N->getOperand(2) &&
            UI->getOperand(0) == N->getOperand(0)) {
          VCMPrecNode = *UI;
          break;
        }

      // Look at the (necessarily single) use of the flag value: if it has a
      // chain, this transformation is more involved, so give up.
      SDNode *FlagUser = nullptr;
      for (SDNode::use_iterator UI = VCMPrecNode->use_begin();
           FlagUser == nullptr; ++UI) {
        assert(UI != VCMPrecNode->use_end() && "Didn't find user!");
      }

      // If the user is a MFOCRF instruction, we know this is safe.
      if (FlagUser->getOpcode() == PPCISD::MFOCRF)
        return SDValue(VCMPrecNode, 0);
    }
    break;
  case ISD::BR_CC: {
    // Branches on the result of a store-conditional or a dot-form vector
    // compare can read the CR field those operations already define.
    auto RHSAPInt = RHS->getAsAPIntVal();
    if (!RHSAPInt.isIntN(64))
      break;

    unsigned Val = RHSAPInt.getZExtValue();
    auto isImpossibleCompare = [&]() {
      // A comparison of a 0/1 value against anything else is statically
      // known: never true (drop the branch) or always true (branch
      // unconditionally).
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ) // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }
      return SDValue();
    };

    // Combine branches on the result of a store-conditional.
    unsigned StoreWidth = 0;
    if (isStoreConditional(LHS, StoreWidth)) {
      if (SDValue Impossible = isImpossibleCompare())
        return Impossible;

      // Emit the store-conditional so it defines CR0, then branch on CR0
      // directly.
      auto *MemNode = cast<MemSDNode>(LHS);
      SDValue ConstSt = DAG.getMemIntrinsicNode(
          PPCISD::STORE_COND, dl,
          DAG.getVTList(MVT::i32, MVT::Other, MVT::Glue), Ops,
          MemNode->getMemoryVT(), MemNode->getMemOperand());

      SDValue InChain;
      if (N->getOperand(0) == LHS.getValue(1))
        InChain = LHS.getOperand(0);

      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, InChain,
                         DAG.getConstant(PredBranch, dl, MVT::i32),
                         DAG.getRegister(PPC::CR0, MVT::i32), N->getOperand(4),
                         ConstSt.getValue(2));
    }

    if (getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
      assert(isDot && "Can't compare against a vector result!");

      if (SDValue Impossible = isImpossibleCompare())
        return Impossible;

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };

      // Unpack the result based on how the target uses it.
      unsigned CompOpc;
      switch (LHS.getConstantOperandVal(1)) {
      default:
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, dl, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  case ISD::BUILD_VECTOR:
    return DAGCombineBuildVector(N, DCI);
  }

  return SDValue();
}
SDValue PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                         SelectionDAG &DAG,
                                         SmallVectorImpl<SDNode *> &Created) const {
  EVT VT = N->getValueType(0);
  if (VT == MVT::i64 && !Subtarget.isPPC64())
    return SDValue();
  if ((VT != MVT::i32 && VT != MVT::i64) ||
      !(Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()))
    return SDValue();

  // srawi/sradi plus addze implement signed division by a power of two.
  unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countr_zero();
void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default:
    break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      Known.Zero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (Op.getConstantOperandVal(0)) {
    default:
      break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpequd_p:
    case Intrinsic::ppc_altivec_vcmpequq_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtsd_p:
    case Intrinsic::ppc_altivec_vcmpgtsq_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
    case Intrinsic::ppc_altivec_vcmpgtud_p:
    case Intrinsic::ppc_altivec_vcmpgtuq_p:
      // The predicate ("dot") comparison intrinsics produce a 0/1 result.
      Known.Zero = ~1U;
      break;
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    switch (Op.getConstantOperandVal(1)) {
    default:
      break;
    case Intrinsic::ppc_load2r:
      // Top bits are cleared for load2r (same as for lhbrx).
      Known.Zero = 0xFFFF0000;
      break;
    }
    break;
  }
  }
}
Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
    // Actual alignment of the loop will depend on the hotness check and other
    // logic in alignBlocks; here we only pick innermost nested loops.
    if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())

    // Measure the code size of the loop body.
    uint64_t LoopSize = 0;
    for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
      for (const MachineInstr &J : **I)
        LoopSize += TII->getInstSizeInBytes(J);

    // Loops of 17..32 bytes fit a single 32-byte-aligned fetch window.
    if (LoopSize > 16 && LoopSize <= 32)
      return Align(32);
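// Example of the payoff: a hot 24-byte loop aligned to 32 bytes is fetched
// in one instruction-cache-aligned group per iteration, whereas at 8-byte
// alignment it can straddle two fetch groups. The depth/sub-loop check above
// keeps the extra padding cost confined to innermost nested loops.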
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    }
  } else if (Constraint == "wc") {
    return C_RegisterClass;
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "ws" ||
             Constraint == "wi" || Constraint == "ww") {
    return C_RegisterClass; // VSX registers.
  }
  return TargetLowering::getConstraintType(Constraint);
}

TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                                  const char *constraint) const {
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match.
  if (!CallOperandVal)
    return CW_Default;

  else if ((StringRef(constraint) == "wa" ||

  switch (*constraint) {
std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 constraint letters.
    switch (Constraint[0]) {
    case 'b': // R1-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r': // R0-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    case 'd':
    case 'f':
      if (Subtarget.hasSPE()) {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::GPRCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::SPERCRegClass);
      } else {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::F4RCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::F8RCRegClass);
      }
      break;
    case 'v':
      if (Subtarget.hasAltivec() && VT.isVector())
        return std::make_pair(0U, &PPC::VRRCRegClass);
      else if (Subtarget.hasVSX())
        // Scalars in AltiVec registers only make sense with VSX.
        return std::make_pair(0U, &PPC::VFRCRegClass);
      break;
    case 'y': // CR fields.
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  } else if (Constraint == "wc" && Subtarget.useCRBits()) {
    // An individual CR bit.
    return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if ((Constraint == "wa" || Constraint == "wd" ||
              Constraint == "wf" || Constraint == "wi") &&
             Subtarget.hasVSX()) {
    // A VSX register for either a scalar (FP) or vector.
    if (VT.isVector())
      return std::make_pair(0U, &PPC::VSRCRegClass);
    if (VT == MVT::f32 && Subtarget.hasP8Vector())
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    return std::make_pair(0U, &PPC::VSFRCRegClass);
  } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
    if (VT == MVT::f32 && Subtarget.hasP8Vector())
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    return std::make_pair(0U, &PPC::VSFRCRegClass);
  } else if (Constraint == "lr") {
    if (VT == MVT::i64)
      return std::make_pair(0U, &PPC::LR8RCRegClass);
    return std::make_pair(0U, &PPC::LRRCRegClass);
  }
  // Handle explicit register names ("{...}") that the base class would
  // resolve incorrectly.
  if (Constraint[0] == '{' && Constraint[Constraint.size() - 1] == '}') {
    // VSX registers: the base class knows them as VSL0..31, but users write
    // vs0..vs63.
    if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
      int VSNum = atoi(Constraint.data() + 3);
      assert(VSNum >= 0 && VSNum <= 63 &&
             "Attempted to access a vsr out of range");
      if (VSNum < 32)
        return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
      return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
    }

    // Float registers need explicit handling as well.
    if (Constraint.size() > 3 && Constraint[1] == 'f') {
      int RegNum = atoi(Constraint.data() + 2);
      if (RegNum > 31 || RegNum < 0)
        report_fatal_error("Invalid floating-point register number");
      if (VT == MVT::f32 || VT == MVT::i32)
        return Subtarget.hasSPE()
                   ? std::make_pair(PPC::R0 + RegNum, &PPC::GPRCRegClass)
                   : std::make_pair(PPC::F0 + RegNum, &PPC::F4RCRegClass);
      if (VT == MVT::f64 || VT == MVT::i64)
        return Subtarget.hasSPE()
                   ? std::make_pair(PPC::S0 + RegNum, &PPC::SPERCRegClass)
                   : std::make_pair(PPC::F0 + RegNum, &PPC::F8RCRegClass);
    }
  }

  std::pair<unsigned, const TargetRegisterClass *> R =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // r[0-9]+ names refer, on PPC64, to the corresponding 64-bit registers
  // (X[0-9]+). If a 64-bit value was requested but a 32-bit GPR was selected,
  // upgrade to the matching 64-bit super-register.
  if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
      PPC::GPRCRegClass.contains(R.first))
    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                                                   PPC::sub_32,
                                                   &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);

  // GCC accepts 'cc' as an alias for 'cr0'; do the same.
  if (!R.second && StringRef("{cc}").equals_insensitive(Constraint)) {
    R.first = PPC::CR0;
    R.second = &PPC::CRRCRegClass;
  }

  // FIXME: This warning should ideally be emitted in the front end.
  if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI()) {
    if (((R.first >= PPC::V20 && R.first <= PPC::V31) ||
         (R.first >= PPC::VF20 && R.first <= PPC::VF31)) &&
        (R.second == &PPC::VSRCRegClass || R.second == &PPC::VSFRCRegClass))
      errs() << "warning: vector registers 20 to 32 are reserved in the "
                "default AIX AltiVec ABI and cannot be used\n";
  }
void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     StringRef Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only single-letter constraints are handled here.
  if (Constraint.size() > 1)
    return;

  char Letter = Constraint[0];
  switch (Letter) {
    EVT TCVT = MVT::i64;
    case 'I': // "I": a signed 16-bit constant.
      if (isInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'J': // "J": only the high-order 16 bits are nonzero.
      if (isShiftedUInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'L': // "L": a signed 16-bit constant shifted left 16 bits.
      if (isShiftedInt<16, 16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'K': // "K": only the low-order 16 bits are nonzero.
      if (isUInt<16>(Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
    case 'P': // "P": the negation is a signed 16-bit constant.
      if (isInt<16>(-Value))
        Result = DAG.getTargetConstant(Value, dl, TCVT);
      break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
}

void PPCTargetLowering::CollectTargetIntrinsicOperands(
    const CallInst &I, SmallVectorImpl<SDValue> &Ops, SelectionDAG &DAG) const {
  if (I.getNumOperands() <= 1)
    return;
  if (!isa<ConstantSDNode>(Ops[1].getNode()))
    return;
  auto IntrinsicID = Ops[1].getNode()->getAsZExtVal();
  if (IntrinsicID != Intrinsic::ppc_tdw && IntrinsicID != Intrinsic::ppc_tw &&
      IntrinsicID != Intrinsic::ppc_trapd && IntrinsicID != Intrinsic::ppc_trap)
    return;

  // Trap intrinsics carry their !annotation metadata along as an operand.
  if (MDNode *MDN = I.getMetadata(LLVMContext::MD_annotation))
    Ops.push_back(DAG.getMDNode(MDN));
}
  // PPC supports only r, r+r, and r+imm addressing; reject any scaled form
  // other than the 2*r that can be rewritten as r+r.
  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
      return false;
    break;
  case 2:
    if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
      return false;
    // Allow 2*r as r+r.
    break;
  default:
    return false;
  }

SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  unsigned Depth = Op.getConstantOperandVal(0);

  bool isPPC64 = Subtarget.isPPC64();
                                 isPPC64 ? MVT::i64 : MVT::i32);

  SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);

SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
                                          SelectionDAG &DAG) const {
  unsigned Depth = Op.getConstantOperandVal(0);

  bool isPPC64 = PtrVT == MVT::i64;

  // Naked functions never have a frame pointer, so use r1; otherwise the
  // choice is deferred and a virtual FP register is used here.
  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
    FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
  else
    FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;

  bool isPPC64 = Subtarget.isPPC64();

  if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                           const CallInst &I,
                                           MachineFunction &MF,
                                           unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::ppc_atomicrmw_xchg_i128:
  case Intrinsic::ppc_atomicrmw_add_i128:
  case Intrinsic::ppc_atomicrmw_sub_i128:
  case Intrinsic::ppc_atomicrmw_nand_i128:
  case Intrinsic::ppc_atomicrmw_and_i128:
  case Intrinsic::ppc_atomicrmw_or_i128:
  case Intrinsic::ppc_atomicrmw_xor_i128:
  case Intrinsic::ppc_cmpxchg_i128:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i128;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(16);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  case Intrinsic::ppc_atomic_load_i128:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i128;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(16);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
    return true;
  case Intrinsic::ppc_atomic_store_i128:
    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = MVT::i128;
    Info.ptrVal = I.getArgOperand(2);
    Info.offset = 0;
    Info.align = Align(16);
    Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
    return true;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::ppc_altivec_lvebx:
  case Intrinsic::ppc_altivec_lvehx:
  case Intrinsic::ppc_altivec_lvewx:
  case Intrinsic::ppc_vsx_lxvd2x:
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x_be:
  case Intrinsic::ppc_vsx_lxvw4x_be:
  case Intrinsic::ppc_vsx_lxvl:
  case Intrinsic::ppc_vsx_lxvll: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_lvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_lvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_lvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_lxvd2x:
    case Intrinsic::ppc_vsx_lxvd2x_be:
      VT = MVT::v2f64;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    // lvx-style loads access the 16-byte-aligned block containing the
    // pointer, so the conservative range starts VT-1 bytes before it.
    Info.offset = -VT.getStoreSize() + 1;
    Info.size = 2 * VT.getStoreSize() - 1;
    Info.flags = MachineMemOperand::MOLoad;
    return true;
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
  case Intrinsic::ppc_altivec_stvebx:
  case Intrinsic::ppc_altivec_stvehx:
  case Intrinsic::ppc_altivec_stvewx:
  case Intrinsic::ppc_vsx_stxvd2x:
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x_be:
  case Intrinsic::ppc_vsx_stxvw4x_be:
  case Intrinsic::ppc_vsx_stxvl:
  case Intrinsic::ppc_vsx_stxvll: {
    EVT VT;
    switch (Intrinsic) {
    case Intrinsic::ppc_altivec_stvebx:
      VT = MVT::i8;
      break;
    case Intrinsic::ppc_altivec_stvehx:
      VT = MVT::i16;
      break;
    case Intrinsic::ppc_altivec_stvewx:
      VT = MVT::i32;
      break;
    case Intrinsic::ppc_vsx_stxvd2x:
    case Intrinsic::ppc_vsx_stxvd2x_be:
      VT = MVT::v2f64;
      break;
    default:
      VT = MVT::v4i32;
      break;
    }

    Info.opc = ISD::INTRINSIC_VOID;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(1);
    Info.offset = -VT.getStoreSize() + 1;
    Info.size = 2 * VT.getStoreSize() - 1;
    Info.flags = MachineMemOperand::MOStore;
    return true;
  }
  case Intrinsic::ppc_stdcx:
  case Intrinsic::ppc_stwcx:
  case Intrinsic::ppc_sthcx:
  case Intrinsic::ppc_stbcx: {
    EVT VT;
    auto Alignment = Align(8);
    switch (Intrinsic) {
    case Intrinsic::ppc_stdcx:
      VT = MVT::i64;
      break;
    case Intrinsic::ppc_stwcx:
      VT = MVT::i32;
      Alignment = Align(4);
      break;
    case Intrinsic::ppc_sthcx:
      VT = MVT::i16;
      Alignment = Align(2);
      break;
    case Intrinsic::ppc_stbcx:
      VT = MVT::i8;
      Alignment = Align(1);
      break;
    }
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = VT;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Alignment;
    Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
    return true;
  }
  default:
    break;
  }

  return false;
}
EVT PPCTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  // Prefer AltiVec/VSX vector types for large memory operations; unaligned
  // VSX accesses are only fast from Power8 onward.
  if (Subtarget.hasAltivec() && Op.size() >= 16) {
    if (Op.isMemset() && Subtarget.hasVSX()) {
      uint64_t TailSize = Op.size() % 16;
      // For memset lowering, EXTRACT_VECTOR_ELT tries to return a constant
      // element if the vector element type is not byte-sized.
      if (TailSize > 2 && TailSize <= 4) {
        return MVT::i32;
      }
    }
    if (Op.isAligned(Align(16)) || Subtarget.hasP8Vector())
      return MVT::v4i32;
  }
  return MVT::i32;
}

bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  return !(BitSize == 0 || BitSize > 64);
}

bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  // Free truncation: i64 -> i32 (the 32-bit value is a subregister).
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  return NumBits1 == 64 && NumBits2 == 32;
}

bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Sub-word (and, on PPC64, word) loads are zero-extending by default.
  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }
  return TargetLowering::isZExtFree(Val, VT2);
}

bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
  assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
         "invalid fpext types");
  // Extending to float128 is not free.
  if (DestVT == MVT::f128)
    return false;
  return true;
}

bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}

bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<16>(Imm) || isUInt<16>(Imm);
}
bool PPCTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned, Align, MachineMemOperand::Flags, unsigned *Fast) const {
  // Scalar FP access must be aligned unless the subtarget says otherwise.
  if (VT.isFloatingPoint() && !VT.isVector() &&
      !Subtarget.allowsUnalignedFPAccess())
    return false;

  if (Subtarget.hasVSX()) {
    if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
        VT != MVT::v4f32 && VT != MVT::v4i32)
      return false;
  }

  if (VT == MVT::ppcf128)
    return false;

  if (Fast)
    *Fast = 1;
  return true;
}

bool PPCTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                               SDValue C) const {
  if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
    if (!ConstNode->getAPIntValue().isSignedIntN(64))
      return false;
    // The decomposition generates two or more instructions, so don't apply
    // it when the plain multiply already selects to at most two.
    int64_t Imm = ConstNode->getSExtValue();
    unsigned Shift = llvm::countr_zero<uint64_t>(Imm);
    Imm >>= Shift;
    if (isInt<16>(Imm))
      return false;
  }
  return false;
}

  return Subtarget.hasP9Vector();
bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
  if (!I->hasOneUse())
    return true;

  Instruction *User = I->user_back();
  assert(User && "A single use instruction with no uses.");

  switch (I->getOpcode()) {
  case Instruction::FMul: {
    // Keep a multiply next to its fadd/fsub user so ISel can form an FMA.
    if (User->getOpcode() != Instruction::FSub &&
        User->getOpcode() != Instruction::FAdd)
      return true;
    break;
  }
  case Instruction::Load: {
    // Don't break a "store (load float*)" pattern; a later InstCombine pass
    // turns it into "store (load int32)".
    if (User->getOpcode() != Instruction::Store)
      return true;
    break;
  }
  }
  return true;
}

const MCPhysReg *
PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
  // LR is callee-save but must be treated as clobbered by any call site;
  // X12, LR8 and CTR8 are available as scratch registers across calls.
  static const MCPhysReg ScratchRegs[] = {
    PPC::X12, PPC::LR8, PPC::CTR8, 0
  };

  return ScratchRegs;
}
Register PPCTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
}

Register PPCTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
}

bool PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  if (VT == MVT::v2i64)
    return Subtarget.hasDirectMove(); // No stack round-trip with direct moves.

  if (Subtarget.hasVSX())
    return true;

  return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
}
SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                                                bool LegalOps, bool OptForSize,
                                                NegatibleCost &Cost,
                                                unsigned Depth) const {
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();

  // (fneg (fnmsub a, b, c)) => (fnmsub (fneg a), b, (fneg c)); this is only
  // sound when signed zeros do not matter.
  if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
    // Negate whichever of the first two operands is cheaper to negate.
    SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
                                         N0Cost, Depth + 1);
    SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
                                         N1Cost, Depth + 1);

    if (NegN0 && N0Cost <= N1Cost) {
      Cost = std::min(N0Cost, N2Cost);
      return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
    } else if (NegN1) {
      Cost = std::min(N1Cost, N2Cost);
      return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
    }
  }
bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                     bool ForCodeSize) const {
  if (!VT.isSimple() || !Subtarget.hasVSX())
    return false;

  // With prefixed instructions, any f32/f64 immediate can be materialized
  // directly (xxspltidp and friends).
  if (Subtarget.hasPrefixInstrs()) {
    return true;
  }

  // Otherwise, small integral values are cheap to splat.
  bool IsExact;
  APSInt IntResult(16, false);
  if (IsExact && IntResult <= 15 && IntResult >= -16)
    return true;
  return Imm.isZero();

  return Imm.isPosZero();

static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
                                  SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  unsigned TargetOpcode;

  // PPC shift instructions already interpret the amount modulo the type
  // width, so (shift x, (and y, OpSizeInBits-1)) can drop the mask.
  if (Mask->getZExtValue() == OpSizeInBits - 1)
SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
  // Only i64 shifts of a zero-extended i32 by a constant benefit here:
  // they fold to EXTSWSLI on ISA 3.0.
  if (!Subtarget.isISA3_0() || !Subtarget.isPPC64() ||
      N->getValueType(0) != MVT::i64)
    return SDValue();

  // extswsli takes an i32 shift amount even though the shift may be i64.
  if (ShiftBy.getValueType() == MVT::i64)
    ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);

SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
  if (SDValue Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;
  return SDValue();
}

SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (SDValue Value = stripModuloOnShift(*this, N, DCI.DAG))
    return Value;
  return SDValue();
}
static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
                                 const PPCSubtarget &Subtarget) {
  // Look for (zext (setcc x, C, cc)) feeding an add; such a pattern can use
  // the carry bit from a subtraction together with addze/subfe.
  auto isZextOfCompareWithConstant = [](SDValue Op) {
    if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
        Op.getValueType() != MVT::i64)
      return false;

    SDValue Cmp = Op.getOperand(0);
    if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
        Cmp.getOperand(0).getValueType() != MVT::i64)
      return false;

    if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
      int64_t NegConstant = 0 - Constant->getSExtValue();
      // The subtraction-based form needs the negated constant to fit a
      // signed 16-bit immediate.
      return isInt<16>(NegConstant);
    }

    return false;
  };

  bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
  bool RHSHasPattern = isZextOfCompareWithConstant(RHS);

  // Canonicalize the pattern onto the RHS; bail if neither side matches.
  if (LHSHasPattern && !RHSHasPattern)
    std::swap(LHS, RHS);
  else if (!LHSHasPattern && !RHSHasPattern)
    return SDValue();

  SDValue Z = Cmp.getOperand(0);
  auto *Constant = cast<ConstantSDNode>(Cmp.getOperand(1));
  int64_t NegConstant = 0 - Constant->getSExtValue();

  switch (cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
  default:
    break;
  case ISD::SETNE: {
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    break;
  }
  case ISD::SETEQ: {
    SDValue AddOrZ = NegConstant != 0 ? Add : Z;
    break;
  }
  }
  return SDValue();
}
static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
                                          const PPCSubtarget &Subtarget) {
  // Fold (add (pc-relative global), const) into the MAT_PCREL_ADDR node as
  // long as the combined offset still fits the 34-bit paddi displacement.
  if (!GSDN || !ConstNode)
    return SDValue();
  if (!isInt<34>(NewOffset))
    return SDValue();

SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
                                           DAGCombinerInfo &DCI) const {
  // With CR-bit tracking, a truncate of a boolean expression can often be
  // folded away entirely.
  if (Subtarget.useCRBits()) {
    if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
      return CRTruncValue;
  }

  // i128 -> i64 truncation: extract the relevant doubleword lane directly.
  if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
    return SDValue();

  int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
  EltToExtract = EltToExtract ? 0 : 1;

  return DCI.DAG.getNode(
      PPCISD::EXTRACT_VSX_ELT, dl, MVT::i64, Op0,
      DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
}
SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
  if (!ConstOpOrElement)
    return SDValue();

  // Decide whether replacing the multiply by a shift plus add/sub actually
  // wins on this core and type.
  auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
    // On most cores a shift+add/sub beats the multiply's latency; the
    // negated add-one form only pays off for vectors.
    return IsAddOne && IsNeg ? VT.isVector() : true;
  };

  EVT VT = N->getValueType(0);

  if ((MulAmtAbs - 1).isPowerOf2()) {
    // (mul x, 2^N + 1) => (add (shl x, N), x)
    // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
    if (!IsProfitable(IsNeg, true, VT))
      return SDValue();
  } else if ((MulAmtAbs + 1).isPowerOf2()) {
    // (mul x, 2^N - 1) => (sub (shl x, N), x)
    // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
    if (!IsProfitable(IsNeg, false, VT))
      return SDValue();
  }
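  // Worked example: mul %x, 5 has MulAmtAbs - 1 == 4 == 2^2, so it becomes
  // (add (shl %x, 2), %x); mul %x, -7 has MulAmtAbs + 1 == 8 == 2^3, giving
  // (sub %x, (shl %x, 3)) == -7x. Both trade one multiply for a shift and an
  // add/sub, the profitable direction per IsProfitable above.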
SDValue PPCTargetLowering::combineFMALike(SDNode *N,
                                          DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  unsigned Opc = N->getOpcode();
  bool LegalOps = !DCI.isBeforeLegalizeOps();

  // FNMSUB is only a free negation when signed zeros are irrelevant.
  if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
    return SDValue();

bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  // Variadic callees (and unknown callees) cannot be tail-called on PPC.
  if (!Callee || Callee->isVarArg())
    return false;
bool PPCTargetLowering::
isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
  // Only sink the 'and' mask to its cmp-against-zero use when the mask fits
  // a 16-bit immediate (low or high half), since those fold into the
  // record-form andi./andis. instructions.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
    if (CI->getBitWidth() > 64)
      return false;
    int64_t ConstVal = CI->getZExtValue();
    return isUInt<16>(ConstVal) ||
           (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
  }
  return false;
}
PPC::AddrMode PPCTargetLowering::getAddrModeForFlags(unsigned Flags) const {
  // Pick the first addressing mode whose required flag set is fully present
  // in the computed flags; X-Form is the fallback.
  if ((Flags & FlagSet) == FlagSet)
    return PPC::AM_DForm;
  if ((Flags & FlagSet) == FlagSet)
    return PPC::AM_DSForm;
  if ((Flags & FlagSet) == FlagSet)
    return PPC::AM_DQForm;
  if ((Flags & FlagSet) == FlagSet)
    return PPC::AM_PrefixDForm;
  return PPC::AM_XForm;
}

static void setAlignFlagsForFI(SDValue N, unsigned &FlagSet,
                               SelectionDAG &DAG) {
  // An under-aligned frame object cannot use the multiple-of-4 (DS-form) or
  // multiple-of-16 (DQ-form) displacement encodings...
  if ((FrameIndexAlign % 4) != 0)
    FlagSet &= ~PPC::MOF_RPlusSImm16Mult4;
  if ((FrameIndexAlign % 16) != 0)
    FlagSet &= ~PPC::MOF_RPlusSImm16Mult16;
  // ...while an adequately aligned plain frame index turns them on.
  if ((FrameIndexAlign % 4) == 0)
    FlagSet |= PPC::MOF_RPlusSImm16Mult4;
  if ((FrameIndexAlign % 16) == 0)
    FlagSet |= PPC::MOF_RPlusSImm16Mult16;
}

static void computeFlagsForAddressComputation(SDValue N, unsigned &FlagSet,
                                              SelectionDAG &DAG) {
  auto SetAlignFlagsForImm = [&](uint64_t Imm) {
    if ((Imm & 0x3) == 0)
      FlagSet |= PPC::MOF_RPlusSImm16Mult4;
    if ((Imm & 0xf) == 0)
      FlagSet |= PPC::MOF_RPlusSImm16Mult16;
  };

  const APInt &ConstImm = CN->getAPIntValue();

  const APInt &ConstImm = CN->getAPIntValue();
  else if (RHS.getOpcode() == PPCISD::Lo && !RHS.getConstantOperandVal(1))
    FlagSet |= PPC::MOF_RPlusLo; // Add is an @lo.

static bool isPCRelNode(SDValue N) {
  return (N.getOpcode() == PPCISD::MAT_PCREL_ADDR ||
          isValidPCRelNode<ConstantPoolSDNode>(N) ||
          isValidPCRelNode<GlobalAddressSDNode>(N) ||
          isValidPCRelNode<JumpTableSDNode>(N) ||
          isValidPCRelNode<BlockAddressSDNode>(N));
}
unsigned PPCTargetLowering::computeMOFlags(const SDNode *Parent, SDValue N,
                                           SelectionDAG &DAG) const {
  unsigned FlagSet = PPC::MOF_None;

  // Record the subtarget generation: it decides which addressing modes even
  // exist for this memory operation.
  if (!Subtarget.hasP9Vector())
    FlagSet |= PPC::MOF_SubtargetBeforeP9;
  else {
    FlagSet |= PPC::MOF_SubtargetP9;
    if (Subtarget.hasPrefixInstrs())
      FlagSet |= PPC::MOF_SubtargetP10;
  }
  if (Subtarget.hasSPE())
    FlagSet |= PPC::MOF_SubtargetSPE;

  // Paired vector intrinsics carry their pointer in a different operand slot.
  unsigned ParentOp = Parent->getOpcode();
  if ((ID == Intrinsic::ppc_vsx_lxvp) || (ID == Intrinsic::ppc_vsx_stxvp)) {
    SDValue IntrinOp = (ID == Intrinsic::ppc_vsx_lxvp)
                           ? Parent->getOperand(2)
                           : Parent->getOperand(3);
  }

  // Pre-increment accesses are not handled by this selection path.
  if (const LSBaseSDNode *LSB = dyn_cast<LSBaseSDNode>(Parent))
    if (LSB->isIndexed())
      return PPC::MOF_None;

  // Only memory nodes are processed here.
  const MemSDNode *MN = dyn_cast<MemSDNode>(Parent);
  assert(MN && "Parent should be a MemSDNode!");

  // Classify the memory type: the flags feed straight into the addressing
  // mode choice.
  assert(Size <= 128 &&
         "Not expecting scalar integers larger than 16 bytes!");
  if (Size < 32)
    FlagSet |= PPC::MOF_SubWordInt;
  else if (Size == 32)
    FlagSet |= PPC::MOF_WordInt;
  else
    FlagSet |= PPC::MOF_DoubleWordInt;

  else if (Size == 256) {
    assert(Subtarget.pairedVectorMemops() &&
           "256-bit vectors are only available when paired vector memops is "
           "enabled!");
    FlagSet |= PPC::MOF_Vector256;
  }
  else if (MemVT == MVT::f128 || MemVT.isVector())
    FlagSet |= PPC::MOF_Vector;

  // Extending loads record their extension kind in the flags.
  if (const LoadSDNode *LN = dyn_cast<LoadSDNode>(Parent)) {
    FlagSet &= ~PPC::MOF_NoExt;
  }

  // Constants that paddi cannot materialize in one instruction force X-Form.
  bool IsNonP1034BitConst =
      isIntS34Immediate(N, Imm);
  if (IsNonP1034BitConst)
    FlagSet |= PPC::MOF_NotAddNorCst;

  return FlagSet;
}

PPC::AddrMode PPCTargetLowering::SelectForceXFormMode(SDValue N, SDValue &Disp,
                                                      SDValue &Base,
                                                      SelectionDAG &DAG) const {
  PPC::AddrMode Mode = PPC::AM_XForm;
  int16_t ForceXFormImm = 0;
  if (!isIntS16Immediate(N.getOperand(1), ForceXFormImm)) {
    Disp = N.getOperand(0);
    Base = N.getOperand(1);
    return Mode;
  }

  // If the address is an add, use the implicit add in the X-Form address
  // calculation unless the add's operands are shared elsewhere.
  if (N.getOpcode() == ISD::ADD &&
      (!isIntS16Immediate(N.getOperand(1), ForceXFormImm) ||
       !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Disp = N.getOperand(0);
    Base = N.getOperand(1);
  }
bool PPCTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
  // If we are splitting a small scalar integer into f64 parts, widen it and
  // move it into the FP register directly.
  if (PartVT == MVT::f64 &&
      (ValVT == MVT::i32 || ValVT == MVT::i16 || ValVT == MVT::i8)) {
  }
  return false;
}

SDValue PPCTargetLowering::lowerToLibCall(const char *LibCallName, SDValue Op,
                                          SelectionDAG &DAG) const {
  EVT RetVT = Op.getValueType();

  // Build the argument list by mirroring the operand types of the node.
  for (const SDValue &N : Op->op_values()) {
    EVT ArgVT = N.getValueType();
    Entry.IsZExt = !Entry.IsSExt;
    Args.push_back(Entry);
  }

  // Tail-call only when the call's return type matches the function's
  // (or the function returns void).
  const bool isTailCall =
      (RetTy == F.getReturnType() || F.getReturnType()->isVoidTy());

  CLI.setDebugLoc(dl)
      .setTailCall(isTailCall)

SDValue PPCTargetLowering::lowerLibCallBasedOnType(
    const char *LibCallFloatName, const char *LibCallDoubleName, SDValue Op,
    SelectionDAG &DAG) const {
  if (Op.getValueType() == MVT::f32)
    return lowerToLibCall(LibCallFloatName, Op, DAG);

  if (Op.getValueType() == MVT::f64)
    return lowerToLibCall(LibCallDoubleName, Op, DAG);

  return SDValue();
}
bool PPCTargetLowering::isLowringToMASSFiniteSafe(SDValue Op) const {
  // (Identifier spelling preserved from the source.) The finite MASS entry
  // points additionally require proof that NaNs, infinities and signed
  // zeros cannot occur.
  return isLowringToMASSSafe(Op) && Flags.hasNoSignedZeros() &&
         Op->getFlags().hasNoNaNs() && Op->getFlags().hasNoInfs();
}

bool PPCTargetLowering::isLowringToMASSSafe(SDValue Op) const {
  return Op.getNode()->getFlags().hasApproximateFuncs();
}

bool PPCTargetLowering::isScalarMASSConversionEnabled() const {
  return getTargetMachine().Options.PPCGenScalarMASSEntries;
}

SDValue PPCTargetLowering::lowerLibCallBase(const char *LibCallDoubleName,
                                            const char *LibCallFloatName,
                                            const char *LibCallDoubleNameFinite,
                                            const char *LibCallFloatNameFinite,
                                            SDValue Op,
                                            SelectionDAG &DAG) const {
  if (!isScalarMASSConversionEnabled() || !isLowringToMASSSafe(Op))
    return SDValue();

  if (!isLowringToMASSFiniteSafe(Op))
    return lowerLibCallBasedOnType(LibCallFloatName, LibCallDoubleName, Op,
                                   DAG);

  return lowerLibCallBasedOnType(LibCallFloatNameFinite,
                                 LibCallDoubleNameFinite, Op, DAG);
}

SDValue PPCTargetLowering::lowerPow(SDValue Op, SelectionDAG &DAG) const {
  return lowerLibCallBase("__xl_pow", "__xl_powf", "__xl_pow_finite",
                          "__xl_powf_finite", Op, DAG);
}

SDValue PPCTargetLowering::lowerSin(SDValue Op, SelectionDAG &DAG) const {
  return lowerLibCallBase("__xl_sin", "__xl_sinf", "__xl_sin_finite",
                          "__xl_sinf_finite", Op, DAG);
}

SDValue PPCTargetLowering::lowerCos(SDValue Op, SelectionDAG &DAG) const {
  return lowerLibCallBase("__xl_cos", "__xl_cosf", "__xl_cos_finite",
                          "__xl_cosf_finite", Op, DAG);
}

SDValue PPCTargetLowering::lowerLog(SDValue Op, SelectionDAG &DAG) const {
  return lowerLibCallBase("__xl_log", "__xl_logf", "__xl_log_finite",
                          "__xl_logf_finite", Op, DAG);
}

SDValue PPCTargetLowering::lowerLog10(SDValue Op, SelectionDAG &DAG) const {
  return lowerLibCallBase("__xl_log10", "__xl_log10f", "__xl_log10_finite",
                          "__xl_log10f_finite", Op, DAG);
}

SDValue PPCTargetLowering::lowerExp(SDValue Op, SelectionDAG &DAG) const {
  return lowerLibCallBase("__xl_exp", "__xl_expf", "__xl_exp_finite",
                          "__xl_expf_finite", Op, DAG);
}
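// Naming convention at work here: with scalar MASS conversion enabled (via
// the PPCGenScalarMASSEntries target option), a call such as pow(x, y)
// compiled with the approximate-funcs fast-math flag becomes __xl_pow, and
// additionally proving no NaNs/infs/signed zeros upgrades it to the cheaper
// __xl_pow_finite MASS entry point.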
PPC::AddrMode PPCTargetLowering::SelectOptimalAddrMode(const SDNode *Parent,
                                                       SDValue N, SDValue &Disp,
                                                       SDValue &Base,
                                                       SelectionDAG &DAG,
                                                       MaybeAlign Align) const {
  if (!isa<FrameIndexSDNode>(N))
    Disp = N;

  // Compute the address flags once; they determine the addressing mode.
  unsigned Flags = computeMOFlags(Parent, N, DAG);

  assert(Subtarget.isUsingPCRelativeCalls() &&
         "Must be using PC-Relative calls when a valid PC-Relative node is "
         "present!");

  // D-form addressing: displacement plus base register.
  Disp = N.getOperand(1).getOperand(0);
  Base = N.getOperand(0);

  // A bare constant address can be rematerialized as LIS plus a
  // displacement when it fits in 32 bits.
  auto *CN = cast<ConstantSDNode>(N);
  EVT CNType = CN->getValueType(0);
  uint64_t CNImm = CN->getZExtValue();
  if ((CNType == MVT::i32 || isInt<32>(CNImm)) &&
      isIntS16Immediate(CN, Imm)) {
    int32_t Addr = (int32_t)CNImm;
    uint32_t LIS = CNType == MVT::i32 ? PPC::LIS : PPC::LIS8;
  }

  unsigned Opcode = N.getOpcode();

  Base = N.getOperand(0);

  Base = FI ? N : N.getOperand(1);

                                           bool IsVarArg) const {

bool PPCTargetLowering::shouldInlineQuadwordAtomics() const {
  // Only 64-bit targets with the quadword-atomics feature can use
  // lqarx/stqcx. directly.
  return Subtarget.isPPC64() && Subtarget.hasQuadwordAtomics();
}

static Intrinsic::ID
getIntrinsicForAtomicRMWBinOp128(AtomicRMWInst::BinOp BinOp) {
  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Xchg:
    return Intrinsic::ppc_atomicrmw_xchg_i128;
  case AtomicRMWInst::Add:
    return Intrinsic::ppc_atomicrmw_add_i128;
  case AtomicRMWInst::Sub:
    return Intrinsic::ppc_atomicrmw_sub_i128;
  case AtomicRMWInst::And:
    return Intrinsic::ppc_atomicrmw_and_i128;
  case AtomicRMWInst::Or:
    return Intrinsic::ppc_atomicrmw_or_i128;
  case AtomicRMWInst::Xor:
    return Intrinsic::ppc_atomicrmw_xor_i128;
  case AtomicRMWInst::Nand:
    return Intrinsic::ppc_atomicrmw_nand_i128;
  }
}

Value *PPCTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
  // The i128 RMW intrinsics take and return the value as two i64 halves;
  // reassemble them into a single i128.
  Value *LoHi = Builder.CreateCall(RMW, {AlignedAddr, IncrLo, IncrHi});
  return Builder.CreateOr(
      Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
}

Value *PPCTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
  Value *LoHi =
      Builder.CreateCall(IntCmpXchg, {AlignedAddr, CmpLo, CmpHi, NewLo, NewHi});
  return Builder.CreateOr(
      Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
}
static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT, SelectionDAG &DAG, SDValue ArgValue, MVT LocVT, const SDLoc &dl)
static void computeFlagsForAddressComputation(SDValue N, unsigned &FlagSet, SelectionDAG &DAG)
Given a node, compute flags that are used for address computation when selecting load and store instr...
cl::opt< bool > ANDIGlueBug
static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart)
static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags, unsigned PtrByteSize, unsigned LinkageSize, unsigned ParamAreaSize, unsigned &ArgOffset, unsigned &AvailableFPRs, unsigned &AvailableVRs)
CalculateStackSlotUsed - Return whether this argument will use its stack slot (instead of being passe...
static cl::opt< unsigned > PPCAIXTLSModelOptUseIEForLDLimit("ppc-aix-shared-lib-tls-model-opt-limit", cl::init(1), cl::Hidden, cl::desc("Set inclusive limit count of TLS local-dynamic access(es) in a " "function to use initial-exec"))
static unsigned getPPCStrictOpcode(unsigned Opc)
static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee, SDValue &Glue, SDValue &Chain, SDValue CallSeqStart, const CallBase *CB, const SDLoc &dl, bool hasNest, const PPCSubtarget &Subtarget)
static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width)
static bool isFunctionGlobalAddress(const GlobalValue *CalleeGV)
static bool isSplatBV(SDValue Op)
static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG)
static cl::opt< bool > DisableILPPref("disable-ppc-ilp-pref", cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden)
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int)
Check that the mask is shuffling N byte elements.
static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG)
Reduce the number of loads when building a vector.
static bool isValidPCRelNode(SDValue N)
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
This file describes how to lower LLVM code to machine code.
This defines the Use class.
bool isFixed(unsigned ValNo) const
opStatus convert(const fltSemantics &ToSemantics, roundingMode RM, bool *losesInfo)
APInt bitcastToAPInt() const
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
void clearBit(unsigned BitPosition)
Set a given bit to 0.
bool isNegatedPowerOf2() const
Check if this APInt's negated value is a power of two greater than zero.
APInt zext(unsigned width) const
Zero extend to a new width.
uint64_t getZExtValue() const
Get zero extended value.
void setBit(unsigned BitPosition)
Set the bit at position "bitPosition" to 1.
APInt abs() const
Get the absolute value.
bool isNegative() const
Determine sign of this APInt.
bool isSignedIntN(unsigned N) const
Check if this APInt has an N-bits signed integer value.
bool getBoolValue() const
Convert APInt to a boolean value.
double bitsToDouble() const
Converts APInt bits to a double.
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
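A minimal sketch of the APInt mask constructors and bit mutators listed above (the helper is hypothetical); each intermediate value is noted in a comment.

#include "llvm/ADT/APInt.h"
#include <cassert>
using namespace llvm;

// Build a 32-bit mask from the low/high constructors, adjust single bits,
// then zero-extend the result to 64 bits.
static APInt buildDemoMask() {
  APInt Lo = APInt::getLowBitsSet(32, 8);   // 0x000000FF
  APInt Hi = APInt::getHighBitsSet(32, 4);  // 0xF0000000
  APInt Mask = Lo | Hi;                     // 0xF00000FF
  Mask.clearBit(0);                         // 0xF00000FE
  Mask.setBit(16);                          // 0xF00100FE
  assert(Mask.getZExtValue() == 0xF00100FE && !Mask.isPowerOf2());
  return Mask.zext(64);                     // new high bits are zero
}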
An arbitrary precision integer that knows its signedness.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
An instruction that atomically checks whether a specified value is in a memory location,...
Value * getNewValOperand()
an instruction that atomically reads a memory location, combines it with another value,...
BinOp
This enumeration lists the possible modifications atomicrmw can make.
@ UIncWrap
Increment one up to a maximum value.
@ UDecWrap
Decrement one until a minimum value or zero.
BinOp getOperation() const
This is an SDNode representing atomic operations.
StringRef getValueAsString() const
Return the attribute's value as a string.
LLVM Basic Block Representation.
InstListType::const_iterator const_iterator
const Function * getParent() const
Return the enclosing method, or null if none.
int64_t getOffset() const
const BlockAddress * getBlockAddress() const
The address of a basic block.
static BranchProbability getOne()
static BranchProbability getZero()
A "pseudo-class" with methods for operating on BUILD_VECTORs.
bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef, unsigned &SplatBitSize, bool &HasAnyUndefs, unsigned MinSplatBits=0, bool isBigEndian=false) const
Check if this is a constant splat, and if so, find the smallest element size that splats the vector.
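isConstantSplat is usually wrapped in a small predicate like the hedged sketch below (wrapper name hypothetical), which insists on a full splat of at least 8-bit elements with no undef lanes.

#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

static bool getSplatImm(SDValue Op, bool IsBigEndian, APInt &SplatVal) {
  auto *BV = dyn_cast<BuildVectorSDNode>(Op.getNode());
  if (!BV)
    return false;
  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  return BV->isConstantSplat(SplatVal, SplatUndef, SplatBitSize, HasAnyUndefs,
                             /*MinSplatBits=*/8, IsBigEndian) &&
         !HasAnyUndefs;
}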
CCState - This class holds information needed while lowering arguments and return values.
MachineFunction & getMachineFunction() const
unsigned getFirstUnallocated(ArrayRef< MCPhysReg > Regs) const
getFirstUnallocated - Return the index of the first unallocated register in the set,...
MCRegister AllocateReg(MCPhysReg Reg)
AllocateReg - Attempt to allocate one register.
int64_t AllocateStack(unsigned Size, Align Alignment)
AllocateStack - Allocate a chunk of stack space with the specified size and alignment.
uint64_t getStackSize() const
Returns the size of the currently allocated portion of the stack.
void addLoc(const CCValAssign &V)
CCValAssign - Represent assignment of one arg/retval to a location.
Register getLocReg() const
LocInfo getLocInfo() const
static CCValAssign getMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP, bool IsCustom=false)
static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT, unsigned RegNo, MVT LocVT, LocInfo HTP)
int64_t getLocMemOffset() const
unsigned getValNo() const
static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT, int64_t Offset, MVT LocVT, LocInfo HTP)
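The CCState/CCValAssign entries above combine into custom calling-convention hooks such as CC_AIX; a hedged sketch of that shape follows. The register list and slot size are illustrative, and note that a CCAssignFn returns true only when it fails to handle the value.

#include "llvm/CodeGen/CallingConvLower.h"
#include "MCTargetDesc/PPCMCTargetDesc.h" // in-tree path, for PPC::R3..R6
using namespace llvm;

// Take the next free GPR, else fall back to a 4-byte stack slot.
static bool CC_Sketch(unsigned ValNo, MVT ValVT, MVT LocVT,
                      CCValAssign::LocInfo LocInfo,
                      ISD::ArgFlagsTy ArgFlags, CCState &State) {
  static const MCPhysReg GPRs[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6};
  if (MCRegister Reg = State.AllocateReg(GPRs)) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false; // handled in a register
  }
  int64_t Offset = State.AllocateStack(4, Align(4));
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false;   // handled in memory
}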
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
bool isStrictFP() const
Determine if the call requires strict floating point semantics.
CallingConv::ID getCallingConv() const
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
Value * getCalledOperand() const
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
unsigned arg_size() const
Function * getCaller()
Helper to get the caller (the parent function).
This class represents a function call, abstracting a target machine's calling convention.
ConstantFP - Floating Point Values [float, double].
This is the shared class of boolean and integer constants.
uint64_t getZExtValue() const
const APInt & getAPIntValue() const
int64_t getSExtValue() const
This is an important base class in LLVM.
This class represents an Operation in the Expression.
uint64_t getNumOperands() const
A parsed version of the target data layout string in and methods for querying it.
bool isLittleEndian() const
Layout endianness...
unsigned getLargestLegalIntTypeSizeInBits() const
Returns the size of largest legal integer type size, or 0 if none are set.
IntegerType * getIntPtrType(LLVMContext &C, unsigned AddressSpace=0) const
Returns an integer type with size at least as big as that of a pointer in the given address space.
Align getABITypeAlign(Type *Ty) const
Returns the minimum ABI-required alignment for the specified type.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
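A short sketch of the DataLayout queries above, assuming an illustrative PPC64-like layout string rather than the target's exact one.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// Query ABI alignment and allocation size for i64 under a big-endian,
// 64-bit-integer layout.
static void demoDataLayout(LLVMContext &Ctx) {
  DataLayout DL("E-m:e-i64:64-n32:64");
  Type *I64 = Type::getInt64Ty(Ctx);
  Align A = DL.getABITypeAlign(I64);       // 8-byte ABI alignment
  TypeSize Sz = DL.getTypeAllocSize(I64);  // 8 bytes, padding included
  (void)A;
  (void)Sz;
}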
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
This is a fast-path instruction selection class that generates poor code and doesn't support illegal ...
FunctionLoweringInfo - This contains information that is global to a function that is used when lower...
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
uint64_t getFnAttributeAsParsedInteger(StringRef Kind, uint64_t Default=0) const
For a string attribute Kind, parse attribute as an integer.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
AttributeList getAttributes() const
Return the attribute list for this Function.
BasicBlockListType::const_iterator const_iterator
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Type * getReturnType() const
Returns the type of the ret val.
bool isVarArg() const
isVarArg - Return true if this function takes a variable number of arguments.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
int64_t getOffset() const
unsigned getTargetFlags() const
const GlobalValue * getGlobal() const
const GlobalObject * getAliaseeObject() const
bool isThreadLocal() const
If the value is "Thread Local", its value isn't shared by the threads.
void setThreadLocalMode(ThreadLocalMode Val)
bool hasHiddenVisibility() const
StringRef getSection() const
Module * getParent()
Get the module that this global value is contained inside of...
bool isStrongDefinitionForLinker() const
Returns true if this global's definition will be the one chosen by the linker.
Type * getValueType() const
bool hasProtectedVisibility() const
Common base class shared among various IRBuilders.
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Value * CreateLShr(Value *LHS, Value *RHS, const Twine &Name="", bool isExact=false)
BasicBlock * GetInsertBlock() const
Value * CreateShl(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Value * CreateZExt(Value *V, Type *DestTy, const Twine &Name="", bool IsNonNeg=false)
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Value * CreateOr(Value *LHS, Value *RHS, const Twine &Name="")
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
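The IRBuilder entry points above are what the masked-atomic helpers lean on; below is a hedged sketch (helper name hypothetical) packing two i32 halves into an i64 with CreateZExt, CreateShl, and CreateOr.

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

static Value *packHalves(IRBuilderBase &B, Value *LoI32, Value *HiI32) {
  Type *I64 = B.getInt64Ty();
  Value *Lo = B.CreateZExt(LoI32, I64, "lo64");
  Value *Hi = B.CreateZExt(HiI32, I64, "hi64");
  Hi = B.CreateShl(Hi, B.getInt64(32), "hi.shift"); // high half to bits 32..63
  return B.CreateOr(Hi, Lo, "packed");
}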
const BasicBlock * getParent() const
bool hasAtomicLoad() const LLVM_READONLY
Return true if this atomic instruction loads from memory.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
This is an important class for using LLVM in a threaded context.
Base class for LoadSDNode and StoreSDNode.
An instruction for reading from memory.
This class is used to represent ISD::LOAD nodes.
const SDValue & getBasePtr() const
ISD::LoadExtType getExtensionType() const
Return whether this is a plain node, or one of the varieties of value-extending loads.
TypeSize getValue() const
Context object for machine code objects.
Base class for the full range of assembler expressions which are needed for parsing.
Wrapper class representing physical registers. Should be passed by value.
MCSymbolXCOFF * getQualNameSymbol() const
static const MCSymbolRefExpr * create(const MCSymbol *Symbol, MCContext &Ctx)
@ INVALID_SIMPLE_VALUE_TYPE
unsigned getVectorNumElements() const
bool isVector() const
Return true if this is a vector value type.
bool isInteger() const
Return true if this is an integer or a vector integer type.
static auto integer_valuetypes()
TypeSize getSizeInBits() const
Returns the size of the specified MVT in bits.
static auto fixedlen_vector_valuetypes()
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isScalarInteger() const
Return true if this is an integer, not including vectors.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
static MVT getIntegerVT(unsigned BitWidth)
static auto fp_valuetypes()
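A small sketch of the MVT predicates above (the wrapper is hypothetical): recognizing a 128-bit integer vector such as v4i32 or v2i64.

#include "llvm/CodeGen/ValueTypes.h" // pulls in MVT (MachineValueType.h)
using namespace llvm;

static bool isWideIntVector(MVT VT) {
  return VT.isVector() && VT.isInteger() &&
         VT.getFixedSizeInBits() == 128 && VT.getVectorNumElements() >= 2;
}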
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and updates PHI operands in the successor bloc...
void setCallFrameSize(unsigned N)
Set the call frame size on entry to this basic block.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
int CreateFixedObject(uint64_t Size, int64_t SPOffset, bool IsImmutable, bool isAliased=false)
Create a new object at a fixed location on the stack.
int CreateStackObject(uint64_t Size, Align Alignment, bool isSpillSlot, const AllocaInst *Alloca=nullptr, uint8_t ID=0)
Create a new statically sized stack object, returning a nonnegative identifier to represent it.
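A sketch of the MachineFrameInfo calls above, creating a spill slot and a fixed incoming-argument object; the sizes and offsets are illustrative.

#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
using namespace llvm;

static void demoFrameObjects(MachineFunction &MF) {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  // A 16-byte, 16-aligned spill slot at a location the allocator picks.
  int Spill = MFI.CreateStackObject(16, Align(16), /*isSpillSlot=*/true);
  // An 8-byte object pinned at SP+48, e.g. a stack-passed argument.
  int Arg = MFI.CreateFixedObject(8, /*SPOffset=*/48, /*IsImmutable=*/true);
  (void)Spill;
  (void)Arg;
}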
void setFrameAddressIsTaken(bool T)
void setHasTailCall(bool V=true)
void setReturnAddressIsTaken(bool s)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
bool hasVAStart() const
Returns true if the function calls the llvm.va_start intrinsic.
int64_t getObjectOffset(int ObjectIdx) const
Return the assigned stack offset of the specified object from the incoming stack pointer.
MCSymbol * getPICBaseSymbol() const
getPICBaseSymbol - Return a function-local symbol to represent the PIC base.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineModuleInfo & getMMI() const
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
Register addLiveIn(MCRegister PReg, const TargetRegisterClass *RC)
addLiveIn - Add the specified physical register as a live-in value and create a corresponding virtual...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
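The MachineInstrBuilder chain above is typically used as in this hedged sketch (helper name hypothetical; the opcode and register class are passed in rather than assumed).

#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
using namespace llvm;

// Emit a reg-imm add-style instruction defining a fresh virtual register.
static Register emitAddImm(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I, const DebugLoc &DL,
                           const TargetInstrInfo &TII,
                           const TargetRegisterClass *RC, Register Src,
                           unsigned AddImmOpc, int64_t Imm) {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  Register Dst = MRI.createVirtualRegister(RC);
  BuildMI(MBB, I, DL, TII.get(AddImmOpc), Dst).addReg(Src).addImm(Imm);
  return Dst;
}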
Representation of each machine instruction.
@ EK_LabelDifference32
EK_LabelDifference32 - Each entry is the address of the block minus the address of the jump table.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
Flags
Flags values. These may be or'd together.
@ MOVolatile
The memory access is volatile.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
Flags getFlags() const
Return the raw flags of the source value,.
const MCContext & getContext() const
MachineOperand class - Representation of each machine instruction operand.
static MachineOperand CreateImm(int64_t Val)
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
Register getLiveInVirtReg(MCRegister PReg) const
getLiveInVirtReg - If PReg is a live-in physical register, return the corresponding live-in virtual r...
An SDNode that represents everything that will be needed to construct a MachineInstr.
This SDNode is used for target intrinsics that touch memory and need an associated MachineMemOperand.
This is an abstract virtual class for memory operations.
AAMDNodes getAAInfo() const
Returns the AA info that describes the dereference.
MachineMemOperand * getMemOperand() const
Return a MachineMemOperand object describing the memory reference performed by operation.
const SDValue & getBasePtr() const
const MachinePointerInfo & getPointerInfo() const
const SDValue & getChain() const
EVT getMemoryVT() const
Return the type of the in-memory value.
A Module instance is used to store all the information related to an LLVM module.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
uint64_t getReturnSaveOffset() const
getReturnSaveOffset - Return the previous frame offset to save the return address.
uint64_t getFramePointerSaveOffset() const
getFramePointerSaveOffset - Return the previous frame offset to save the frame pointer.
unsigned getLinkageSize() const
getLinkageSize - Return the size of the PowerPC ABI linkage area.
uint64_t getTOCSaveOffset() const
getTOCSaveOffset - Return the previous frame offset to save the TOC register (64-bit SVR4 ABI only).
PPCFunctionInfo - This class is derived from MachineFunction private PowerPC target-specific informat...
void setVarArgsNumFPR(unsigned Num)
void setReturnAddrSaveIndex(int idx)
bool isAIXFuncUseTLSIEForLD() const
int getReturnAddrSaveIndex() const
unsigned getVarArgsNumFPR() const
void setAIXFuncUseTLSIEForLD()
int getFramePointerSaveIndex() const
void setVarArgsNumGPR(unsigned Num)
void appendParameterType(ParamType Type)
int getVarArgsFrameIndex() const
void setLRStoreRequired()
bool isAIXFuncTLSModelOptInitDone() const
void setTailCallSPDelta(int size)
void setAIXFuncTLSModelOptInitDone()
bool isLRStoreRequired() const
void setMinReservedArea(unsigned size)
unsigned getVarArgsNumGPR() const
unsigned getMinReservedArea() const
void setVarArgsStackOffset(int Offset)
void setVarArgsFrameIndex(int Index)
void addLiveInAttr(Register VReg, ISD::ArgFlagsTy Flags)
This function associates attributes with each live-in virtual register.
int getVarArgsStackOffset() const
void setFramePointerSaveIndex(int Idx)
static bool hasPCRelFlag(unsigned TF)
bool is32BitELFABI() const
unsigned descriptorTOCAnchorOffset() const
bool useSoftFloat() const
const PPCFrameLowering * getFrameLowering() const override
bool needsSwapsForVSXMemOps() const
bool isPPC64() const
isPPC64 - Return true if we are generating code for 64-bit pointer mode.
bool isUsingPCRelativeCalls() const
bool usesFunctionDescriptors() const
True if the ABI is descriptor based.
MCRegister getEnvironmentPointerRegister() const
const PPCInstrInfo * getInstrInfo() const override
unsigned getCPUDirective() const
getCPUDirective - Returns the -m directive specified for the cpu.
POPCNTDKind hasPOPCNTD() const
bool isLittleEndian() const
bool isTargetLinux() const
MCRegister getTOCPointerRegister() const
MCRegister getStackPointerRegister() const
bool is64BitELFABI() const
const PPCTargetMachine & getTargetMachine() const
bool isPredictableSelectIsExpensive() const
bool enableMachineScheduler() const override
Scheduling customization.
const PPCRegisterInfo * getRegisterInfo() const override
bool isGVIndirectSymbol(const GlobalValue *GV) const
True if the GV will be accessed via an indirect symbol.
unsigned descriptorEnvironmentPointerOffset() const
MachineBasicBlock * emitEHSjLjLongJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
CCAssignFn * ccAssignFnForCall(CallingConv::ID CC, bool Return, bool IsVarArg) const
bool isTruncateFree(Type *Ty1, Type *Ty2) const override
isTruncateFree - Return true if it's free to truncate a value of type Ty1 to type Ty2.
Value * emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const override
Perform a masked atomicrmw using a target-specific intrinsic.
MachineBasicBlock * EmitInstrWithCustomInserter(MachineInstr &MI, MachineBasicBlock *MBB) const override
This method should be implemented by targets that mark instructions with the 'usesCustomInserter' fla...
bool isFPExtFree(EVT DestVT, EVT SrcVT) const override
Return true if an fpext operation is free (for instance, because single-precision floating-point numb...
PPC::AddrMode SelectForceXFormMode(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG) const
SelectForceXFormMode - Given the specified address, force it to be represented as an indexed [r+r] op...
Instruction * emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
bool hasInlineStackProbe(const MachineFunction &MF) const override
MachineBasicBlock * emitEHSjLjSetJmp(MachineInstr &MI, MachineBasicBlock *MBB) const
const char * getTargetNodeName(unsigned Opcode) const override
getTargetNodeName() - This method returns the name of a target specific DAG node.
bool supportsTailCallFor(const CallBase *CB) const
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override
Return true if folding a constant offset with the given GlobalAddress is legal.
MachineBasicBlock * emitProbedAlloca(MachineInstr &MI, MachineBasicBlock *MBB) const
bool isZExtFree(SDValue Val, EVT VT2) const override
Return true if zero-extending the specific node Val to type VT2 is free (either because it's implicit...
MachineBasicBlock * EmitPartwordAtomicBinary(MachineInstr &MI, MachineBasicBlock *MBB, bool is8bit, unsigned Opcode, unsigned CmpOpcode=0, unsigned CmpPred=0) const
SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const override
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, MaybeAlign EncodingAlignment) const
SelectAddressRegImm - Returns true if the address N can be represented by a base register plus a sign...
bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, MachineFunction &MF, unsigned Intrinsic) const override
Given an intrinsic, checks if on the target the intrinsic will need to map to a MemIntrinsicNode (tou...
SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const
bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts, unsigned NumParts, MVT PartVT, std::optional< CallingConv::ID > CC) const override
Target-specific splitting of values into parts that fit a register storing a legal type.
void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const override
LowerAsmOperandForConstraint - Lower the specified operand into the Ops vector.
void ReplaceNodeResults(SDNode *N, SmallVectorImpl< SDValue > &Results, SelectionDAG &DAG) const override
ReplaceNodeResults - Replace the results of node with an illegal result type with new values built ou...
TargetLowering::AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG, MaybeAlign EncodingAlignment=std::nullopt) const
SelectAddressRegReg - Given the specified address, check to see if it can be more efficiently repre...
MachineBasicBlock * EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *MBB, unsigned AtomicSize, unsigned BinOpcode, unsigned CmpOpcode=0, unsigned CmpPred=0) const
SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, SmallVectorImpl< SDNode * > &Created) const override
Targets may override this function to provide custom SDIV lowering for power-of-2 denominators.
void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known, const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth=0) const override
Determine which of the bits specified in Mask are known to be either zero or one and return them in t...
bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SelectAddressRegRegOnly - Given the specified address, force it to be represented as an indexed [r+...
bool useSoftFloat() const override
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const override
Returns relocation base for the given PIC jumptable.
void insertSSPDeclarations(Module &M) const override
Inserts necessary declarations for SSP (stack protection) purpose.
Value * emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const override
Perform a masked cmpxchg using a target-specific intrinsic.
ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const override
Examine constraint string and operand type and determine a weight value.
uint64_t getByValTypeAlignment(Type *Ty, const DataLayout &DL) const override
getByValTypeAlignment - Return the desired alignment for ByVal aggregate function arguments in the ca...
bool enableAggressiveFMAFusion(EVT VT) const override
Return true if target always benefits from combining into FMA for a given value type.
Register getRegisterByName(const char *RegName, LLT VT, const MachineFunction &MF) const override
Return the register ID of the name passed in.
bool decomposeMulByConstant(LLVMContext &Context, EVT VT, SDValue C) const override
Return true if it is profitable to transform an integer multiplication-by-constant into simpler opera...
unsigned getJumpTableEncoding() const override
Return the entry encoding for a jump table in the current function.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I=nullptr) const override
isLegalAddressingMode - Return true if the addressing mode represented by AM is legal for this target...
bool preferIncOfAddToSubOfNot(EVT VT) const override
These two forms are equivalent: sub y, (xor x, -1) and add (add x, 1), y. The variant with two add's is IR...
bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override
Returns true if it is beneficial to convert a load of a constant to just the constant itself.
const MCPhysReg * getScratchRegisters(CallingConv::ID CC) const override
Returns a 0 terminated array of registers that can be safely used as scratch registers.
bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset, ISD::MemIndexedMode &AM, SelectionDAG &DAG) const override
getPreIndexedAddressParts - returns true by value, base pointer and offset pointer and addressing mod...
bool isProfitableToHoist(Instruction *I) const override
isProfitableToHoist - Check if it is profitable to hoist instruction I to its dominator block.
bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override
Returns true if the target can instruction select the specified FP immediate natively.
ConstraintType getConstraintType(StringRef Constraint) const override
getConstraintType - Given a constraint, return the type of constraint it is for this target.
const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const override
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
bool shallExtractConstSplatVectorElementToStore(Type *VectorTy, unsigned ElemSizeInBits, unsigned &Index) const override
Return true if the target shall perform extract vector element and store given that the vector is kno...
EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes) const override
It returns EVT::Other if the type should be determined using generic target-independent logic.
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override
This method will be invoked for all target nodes and for any target-independent nodes that the target...
SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const
void CollectTargetIntrinsicOperands(const CallInst &I, SmallVectorImpl< SDValue > &Ops, SelectionDAG &DAG) const override
bool useLoadStackGuardNode() const override
Override to support customized stack guard loading.
unsigned getStackProbeSize(const MachineFunction &MF) const
PPCTargetLowering(const PPCTargetMachine &TM, const PPCSubtarget &STI)
TargetLowering::AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF, EVT VT) const override
isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster than a pair of fmul and fadd i...
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace, Align Alignment=Align(1), MachineMemOperand::Flags Flags=MachineMemOperand::MONone, unsigned *Fast=nullptr) const override
Is unaligned memory access allowed for the given type, and is it fast relative to software emulation.
bool shouldExpandBuildVectorWithShuffles(EVT VT, unsigned DefinedValues) const override
bool SelectAddressRegImm34(SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG) const
Similar to the 16-bit case but for instructions that take a 34-bit displacement field (prefixed loads...
std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const override
Given a physical register constraint (e.g.
Register getExceptionSelectorRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception typeid on entry to a la...
bool isJumpTableRelative() const override
Register getExceptionPointerRegister(const Constant *PersonalityFn) const override
If a physical register, this returns the register that receives the exception address on entry to an ...
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override
LowerOperation - Provide custom lowering hooks for some operations.
PPC::AddrMode SelectOptimalAddrMode(const SDNode *Parent, SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG, MaybeAlign Align) const
SelectOptimalAddrMode - Based on a node N and its Parent (a MemSDNode), compute the address flags of...
Value * getSDagStackGuard(const Module &M) const override
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
bool SelectAddressPCRel(SDValue N, SDValue &Base) const
SelectAddressPCRel - Represent the specified address as pc relative to be represented as [pc+imm].
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context, EVT VT) const override
getSetCCResultType - Return the ISD::SETCC ValueType
bool SelectAddressEVXRegReg(SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG) const
SelectAddressEVXRegReg - Given the specified address, check to see if it can be more efficiently re...
bool isLegalICmpImmediate(int64_t Imm) const override
isLegalICmpImmediate - Return true if the specified immediate is a legal icmp immediate,...
bool isAccessedAsGotIndirect(SDValue N) const
Align getPrefLoopAlignment(MachineLoop *ML) const override
Return the preferred loop alignment.
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const override
createFastISel - This method returns a target-specific FastISel object, or null if the target does no...
bool shouldInlineQuadwordAtomics() const
Instruction * emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst, AtomicOrdering Ord) const override
Inserts in the IR a target-specific intrinsic specifying a fence.
bool isLegalAddImmediate(int64_t Imm) const override
isLegalAddImmediate - Return true if the specified immediate is a legal add immediate,...
Common code between 32-bit and 64-bit PowerPC targets.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Wrapper class for IR location info (IR ordering and DebugLoc) to be passed into SDNode creation funct...
This class provides iterator support for SDUse operands that use a specific SDNode.
Represents one node in the SelectionDAG.
ArrayRef< SDUse > ops() const
void dump() const
Dump this node, for debugging.
unsigned getOpcode() const
Return the SelectionDAG opcode value for this node.
bool hasOneUse() const
Return true if there is exactly one use of this node.
iterator_range< use_iterator > uses()
SDNodeFlags getFlags() const
uint64_t getAsZExtVal() const
Helper method returns the zero-extended integer value of a ConstantSDNode.
unsigned getNumValues() const
Return the number of values defined/returned by this operator.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
use_iterator use_begin() const
Provide iteration support to walk over all uses of an SDNode.
EVT getValueType(unsigned ResNo) const
Return the type of a specified result.
bool hasNUsesOfValue(unsigned NUses, unsigned Value) const
Return true if there are exactly NUses uses of the indicated value.
op_iterator op_end() const
op_iterator op_begin() const
static use_iterator use_end()
Represents a use of a SDNode.
Unlike LLVM values, Selection DAG nodes may return multiple values as the result of a computation.
SDNode * getNode() const
get the SDNode which holds the desired result
bool hasOneUse() const
Return true if there is exactly one node using value ResNo of Node.
SDValue getValue(unsigned R) const
EVT getValueType() const
Return the ValueType of the referenced return value.
TypeSize getValueSizeInBits() const
Returns the size of the value in bits.
const SDValue & getOperand(unsigned i) const
uint64_t getConstantOperandVal(unsigned i) const
MVT getSimpleValueType() const
Return the simple ValueType of the referenced return value.
unsigned getOpcode() const
unsigned getNumOperands() const
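A hedged sketch built from the SDValue/SDNode accessors above (the helper name is hypothetical; LLVM ships a similar utility): step through single-use BITCAST nodes before matching a pattern.

#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

static SDValue skipOneUseBitcasts(SDValue V) {
  while (V.getOpcode() == ISD::BITCAST && V.getNode()->hasOneUse())
    V = V.getOperand(0);
  return V;
}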
static SectionKind getMetadata()
This is used to represent a portion of an LLVM function in a low-level Data Dependence DAG representa...
SDValue getExtLoad(ISD::LoadExtType ExtType, const SDLoc &dl, EVT VT, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, EVT MemVT, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getTargetGlobalAddress(const GlobalValue *GV, const SDLoc &DL, EVT VT, int64_t offset=0, unsigned TargetFlags=0)
SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS, SDValue RHS)
Helper function to make it easier to build Select's if you just have operands and don't want to check...
SDValue getStackArgumentTokenFactor(SDValue Chain)
Compute a TokenFactor to force all the incoming stack arguments to be loaded from the stack.
const TargetSubtargetInfo & getSubtarget() const
SDValue getMergeValues(ArrayRef< SDValue > Ops, const SDLoc &dl)
Create a MERGE_VALUES node from the given operands.
SDVTList getVTList(EVT VT)
Return an SDVTList that represents the list of values specified.
MachineSDNode * getMachineNode(unsigned Opcode, const SDLoc &dl, EVT VT)
These are used for target selectors to create a new node with specified return type(s),...
SDValue makeEquivalentMemoryOrdering(SDValue OldChain, SDValue NewMemOpChain)
If an existing load has uses of its chain, create a token factor node with that chain and the new mem...
SDValue getSetCC(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS, ISD::CondCode Cond, SDValue Chain=SDValue(), bool IsSignaling=false)
Helper function to make it easier to build SetCC's if you just have an ISD::CondCode instead of an SD...
SDValue getConstantFP(double Val, const SDLoc &DL, EVT VT, bool isTarget=false)
Create a ConstantFPSDNode wrapping a constant value.
SDValue getLoad(EVT VT, const SDLoc &dl, SDValue Chain, SDValue Ptr, MachinePointerInfo PtrInfo, MaybeAlign Alignment=MaybeAlign(), MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr)
Loads are not normal binary operators: their result type is not determined by their operands,...
Align getEVTAlign(EVT MemoryVT) const
Compute the default alignment value for the given type.
void addNoMergeSiteInfo(const SDNode *Node, bool NoMerge)
Set NoMergeSiteInfo to be associated with Node if NoMerge is true.
SDValue getNOT(const SDLoc &DL, SDValue Val, EVT VT)
Create a bitwise NOT operation as (XOR Val, -1).
const TargetLowering & getTargetLoweringInfo() const
static constexpr unsigned MaxRecursionDepth
SDValue getTargetJumpTable(int JTI, EVT VT, unsigned TargetFlags=0)
SDValue getUNDEF(EVT VT)
Return an UNDEF node. UNDEF does not have a useful SDLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2, SDValue InGlue, const SDLoc &DL)
Return a new CALLSEQ_END node, which always must have a glue result (to ensure it's not CSE'd).
SDValue getBuildVector(EVT VT, const SDLoc &DL, ArrayRef< SDValue > Ops)
Return an ISD::BUILD_VECTOR node.
SDValue getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst, SDValue Src, SDValue Size, Align Alignment, bool isVol, bool AlwaysInline, bool isTailCall, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo, const AAMDNodes &AAInfo=AAMDNodes(), AAResults *AA=nullptr)
bool isSplatValue(SDValue V, const APInt &DemandedElts, APInt &UndefElts, unsigned Depth=0) const
Test whether V has a splatted value for all the demanded elements.
SDValue getBitcast(EVT VT, SDValue V)
Return a bitcast using the SDLoc of the value operand, and casting to the provided type.
const DataLayout & getDataLayout() const
SDValue getTargetFrameIndex(int FI, EVT VT)
SDValue getTokenFactor(const SDLoc &DL, SmallVectorImpl< SDValue > &Vals)
Creates a new TokenFactor containing Vals.
bool areNonVolatileConsecutiveLoads(LoadSDNode *LD, LoadSDNode *Base, unsigned Bytes, int Dist) const
Return true if loads are next to each other and can be merged.
SDValue getConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isTarget=false, bool isOpaque=false)
Create a ConstantSDNode wrapping a constant value.
SDValue getTruncStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, EVT SVT, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
SDValue getMDNode(const MDNode *MD)
Return an MDNodeSDNode which holds an MDNode.
void ReplaceAllUsesWith(SDValue From, SDValue To)
Modify anything using 'From' to use 'To' instead.
SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV)
Returns an ISD::VECTOR_SHUFFLE node semantically equivalent to the shuffle node in input but with swa...
SDValue getStore(SDValue Chain, const SDLoc &dl, SDValue Val, SDValue Ptr, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags MMOFlags=MachineMemOperand::MONone, const AAMDNodes &AAInfo=AAMDNodes())
Helper function to build ISD::STORE nodes.
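A sketch combining getLoad, getNode, and getStore from the list above: load an i32, increment it, and store it back on the load's output chain so the two accesses stay ordered.

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue loadAddStore(SelectionDAG &DAG, const SDLoc &dl, SDValue Chain,
                            SDValue Ptr, MachinePointerInfo PtrInfo) {
  SDValue Ld = DAG.getLoad(MVT::i32, dl, Chain, Ptr, PtrInfo);
  SDValue Inc = DAG.getNode(ISD::ADD, dl, MVT::i32, Ld,
                            DAG.getConstant(1, dl, MVT::i32));
  // Ld.getValue(1) is the load's output chain.
  return DAG.getStore(Ld.getValue(1), dl, Inc, Ptr, PtrInfo);
}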
SDValue getCALLSEQ_START(SDValue Chain, uint64_t InSize, uint64_t OutSize, const SDLoc &DL)
Return a new CALLSEQ_START node, that starts new call frame, in which InSize bytes are set up inside ...
SDValue getRegister(unsigned Reg, EVT VT)
SDValue getSExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either sign-extending or trunca...
SDValue getBoolExtOrTrunc(SDValue Op, const SDLoc &SL, EVT VT, EVT OpVT)
Convert Op, which must be of integer type, to the integer type VT, by using an extension appropriate ...
SDValue getExternalSymbol(const char *Sym, EVT VT)
const TargetMachine & getTarget() const
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either any-extending or truncat...
SDValue getCopyToReg(SDValue Chain, const SDLoc &dl, unsigned Reg, SDValue N)
SDValue getSelectCC(const SDLoc &DL, SDValue LHS, SDValue RHS, SDValue True, SDValue False, ISD::CondCode Cond)
Helper function to make it easier to build SelectCC's if you just have an ISD::CondCode instead of an...
SDValue getIntPtrConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
SDValue getValueType(EVT)
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, ArrayRef< SDUse > Ops)
Gets or creates the specified node.
SDValue getTargetConstant(uint64_t Val, const SDLoc &DL, EVT VT, bool isOpaque=false)
unsigned ComputeNumSignBits(SDValue Op, unsigned Depth=0) const
Return the number of times the sign bit of the register is replicated into the other bits.
SDValue getBoolConstant(bool V, const SDLoc &DL, EVT VT, EVT OpVT)
Create a true or false constant of type VT using the target's BooleanContent for type OpVT.
SDValue getTargetBlockAddress(const BlockAddress *BA, EVT VT, int64_t Offset=0, unsigned TargetFlags=0)
bool isBaseWithConstantOffset(SDValue Op) const
Return true if the specified operand is an ISD::ADD with a ConstantSDNode on the right-hand side,...
SDValue getVectorIdxConstant(uint64_t Val, const SDLoc &DL, bool isTarget=false)
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To)
Replace any uses of From with To, leaving uses of other values produced by From.getNode() alone.
MachineFunction & getMachineFunction() const
SDValue getCopyFromReg(SDValue Chain, const SDLoc &dl, unsigned Reg, EVT VT)
SDValue getSplatBuildVector(EVT VT, const SDLoc &DL, SDValue Op)
Return a splat ISD::BUILD_VECTOR node, consisting of Op splatted to all elements.
SDValue getFrameIndex(int FI, EVT VT, bool isTarget=false)
KnownBits computeKnownBits(SDValue Op, unsigned Depth=0) const
Determine which bits of Op are known to be either zero or one and return them in Known.
SDValue getRegisterMask(const uint32_t *RegMask)
SDValue getZExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT)
Convert Op, which must be of integer type, to the integer type VT, by either zero-extending or trunca...
SDValue getCondCode(ISD::CondCode Cond)
bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth=0) const
Return true if 'Op & Mask' is known to be zero.
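A hedged sketch of the known-bits queries above (helper and constants are illustrative): drop a redundant 16-bit mask when the upper bits are already known zero.

#include "llvm/ADT/APInt.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue skipRedundantMask16(SelectionDAG &DAG, SDValue Op) {
  APInt Upper = APInt::getHighBitsSet(32, 16);
  if (Op.getValueType() == MVT::i32 && DAG.MaskedValueIsZero(Op, Upper))
    return Op;          // AND with 0xFFFF would be a no-op
  return SDValue();     // caller keeps the original mask
}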
SDValue getObjectPtrOffset(const SDLoc &SL, SDValue Ptr, TypeSize Offset)
Create an add instruction with appropriate flags when used for addressing some offset of an object.
LLVMContext * getContext() const
SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef< SDValue > Ops, EVT MemVT, MachinePointerInfo PtrInfo, Align Alignment, MachineMemOperand::Flags Flags=MachineMemOperand::MOLoad|MachineMemOperand::MOStore, LocationSize Size=0, const AAMDNodes &AAInfo=AAMDNodes())
Creates a MemIntrinsicNode that may produce a result and takes a list of operands.
SDValue getTargetExternalSymbol(const char *Sym, EVT VT, unsigned TargetFlags=0)
SDValue getMCSymbol(MCSymbol *Sym, EVT VT)
SDValue CreateStackTemporary(TypeSize Bytes, Align Alignment)
Create a stack temporary based on the size in bytes and the alignment.
SDNode * UpdateNodeOperands(SDNode *N, SDValue Op)
Mutate the specified node in-place to have the specified operands.
SDValue getTargetConstantPool(const Constant *C, EVT VT, MaybeAlign Align=std::nullopt, int Offset=0, unsigned TargetFlags=0)
SDValue getEntryNode() const
Return the token chain corresponding to the entry of the function.
std::pair< SDValue, SDValue > SplitScalar(const SDValue &N, const SDLoc &DL, const EVT &LoVT, const EVT &HiVT)
Split the scalar node with EXTRACT_ELEMENT using the provided VTs and return the low/high part.
SDValue getVectorShuffle(EVT VT, const SDLoc &dl, SDValue N1, SDValue N2, ArrayRef< int > Mask)
Return an ISD::VECTOR_SHUFFLE node.
This SDNode is used to implement the code generator support for the llvm IR shufflevector instruction...
int getMaskElt(unsigned Idx) const
ArrayRef< int > getMask() const
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
This class is used to represent ISD::STORE nodes.
const SDValue & getBasePtr() const
const SDValue & getValue() const
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
constexpr const char * data() const
data - Get a pointer to the start of the string (which may not be null terminated).
A switch()-like statement whose cases are string literals.
StringSwitch & Case(StringLiteral S, T Value)
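A short StringSwitch sketch; the names and values form an illustrative table, not the backend's real one.

#include "llvm/ADT/StringSwitch.h"
using namespace llvm;

// Map a CPU directive name to a small numeric kind, defaulting to 0.
static unsigned cpuKind(StringRef Name) {
  return StringSwitch<unsigned>(Name)
      .Case("pwr8", 8)
      .Case("pwr9", 9)
      .Case("pwr10", 10)
      .Default(0);
}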
Class to represent struct types.
Information about stack frame layout on the target.
unsigned getStackAlignment() const
getStackAlignment - This method returns the number of bytes to which the stack pointer must be aligne...
TargetInstrInfo - Interface to description of machine instruction set.
Provides information about what library functions are available for the current target.
void setBooleanVectorContents(BooleanContent Ty)
Specify how the target extends the result of a vector boolean value from a vector of i1 to a wider ty...
void setOperationAction(unsigned Op, MVT VT, LegalizeAction Action)
Indicate that the specified operation does not work with the specified type and indicate what to do a...
virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const
Returns true if arguments should be sign-extended in lib calls.
bool PredictableSelectIsExpensive
Tells the code generator that select is more expensive than a branch if the branch is usually predict...
EVT getValueType(const DataLayout &DL, Type *Ty, bool AllowUnknown=false) const
Return the EVT corresponding to this LLVM type.
virtual bool shouldExpandBuildVectorWithShuffles(EVT, unsigned DefinedValues) const
unsigned MaxStoresPerMemcpyOptSize
Likewise for functions with the OptSize attribute.
MachineBasicBlock * emitPatchPoint(MachineInstr &MI, MachineBasicBlock *MBB) const
Replace/modify any TargetFrameIndex operands with a target-dependent sequence of memory operands that ...
virtual const TargetRegisterClass * getRegClassFor(MVT VT, bool isDivergent=false) const
Return the register class that should be used for the specified value type.
void setMinStackArgumentAlignment(Align Alignment)
Set the minimum stack alignment of an argument.
virtual MVT getVectorIdxTy(const DataLayout &DL) const
Returns the type to be used for the index operand of: ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT...
const TargetMachine & getTargetMachine() const
unsigned MaxLoadsPerMemcmp
Specify maximum number of load instructions per memcmp call.
virtual bool isZExtFree(Type *FromTy, Type *ToTy) const
Return true if any actual instruction that defines a value of type FromTy implicitly zero-extends the...
virtual Value * getSDagStackGuard(const Module &M) const
Return the variable that's previously inserted by insertSSPDeclarations, if any, otherwise return nul...
void setIndexedLoadAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed load does or does not work with the specified type and indicate w...
void setPrefLoopAlignment(Align Alignment)
Set the target's preferred loop alignment.
void setMaxAtomicSizeInBitsSupported(unsigned SizeInBits)
Set the maximum atomic operation size supported by the backend.
Sched::Preference getSchedulingPreference() const
Return target scheduling preference.
void setMinFunctionAlignment(Align Alignment)
Set the target's minimum function alignment.
bool isOperationCustom(unsigned Op, EVT VT) const
Return true if the operation uses custom lowering, regardless of whether the type is legal or not.
unsigned MaxStoresPerMemsetOptSize
Likewise for functions with the OptSize attribute.
bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const
When splitting a value of the specified type into parts, does the Lo or Hi part come first?...
void setBooleanContents(BooleanContent Ty)
Specify how the target extends the result of integer and floating point boolean values from i1 to a w...
unsigned MaxStoresPerMemmove
Specify maximum number of store instructions per memmove call.
virtual Align getPrefLoopAlignment(MachineLoop *ML=nullptr) const
Return the preferred loop alignment.
void computeRegisterProperties(const TargetRegisterInfo *TRI)
Once all of the register classes are added, this allows us to compute derived properties we expose.
EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL, bool LegalTypes=true) const
Returns the type for the shift amount of a shift opcode.
unsigned MaxStoresPerMemmoveOptSize
Likewise for functions with the OptSize attribute.
void addRegisterClass(MVT VT, const TargetRegisterClass *RC)
Add the specified register class as an available regclass for the specified value type.
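Typical ordering, sketched with hypothetical register class names; computeRegisterProperties (listed above) must run once, after the last addRegisterClass call:

addRegisterClass(MVT::i32, &MyTarget::GPRRegClass);  // hypothetical classes
addRegisterClass(MVT::f64, &MyTarget::FPRRegClass);
// Derive legal types, register costs, etc. from the classes just added.
computeRegisterProperties(Subtarget.getRegisterInfo());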
bool isTypeLegal(EVT VT) const
Return true if the target has native support for the specified value type.
void setIndexedStoreAction(ArrayRef< unsigned > IdxModes, MVT VT, LegalizeAction Action)
Indicate that the specified indexed store does or does not work with the specified type and indicate ...
virtual bool isJumpTableRelative() const
virtual MVT getPointerTy(const DataLayout &DL, uint32_t AS=0) const
Return the pointer type for the given address space, defaults to the pointer type from the data layou...
void setLibcallName(RTLIB::Libcall Call, const char *Name)
Rename the default libcall routine name for the specified libcall.
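One representative call, sketched; RTLIB::ADD_F128 is a real enumerator, and the routine name mirrors the compiler-rt symbol PPC-style targets use for IEEE-128 addition:

// IEEE-128 addition goes to compiler-rt's __addkf3 instead of the
// default __addtf3 libcall name.
setLibcallName(RTLIB::ADD_F128, "__addkf3");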
void setPrefFunctionAlignment(Align Alignment)
Set the target's preferred function alignment.
bool isOperationLegal(unsigned Op, EVT VT) const
Return true if the specified operation is legal on this target.
virtual AtomicExpansionKind shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const
Returns how the given atomic cmpxchg should be expanded by the IR-level AtomicExpand pass.
unsigned MaxStoresPerMemset
Specify maximum number of store instructions per memset call.
void setMinimumJumpTableEntries(unsigned Val)
Indicate the minimum number of blocks to generate jump tables.
void setTruncStoreAction(MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified truncating store does not work with the specified type and indicate what ...
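A representative call, assuming a target with no combined round-and-store instruction:

// No single-instruction f64 -> f32 truncating store here; legalize it
// into an FP_ROUND followed by a normal f32 store.
setTruncStoreAction(MVT::f64, MVT::f32, Expand);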
@ ZeroOrOneBooleanContent
@ ZeroOrNegativeOneBooleanContent
bool isOperationLegalOrCustom(unsigned Op, EVT VT, bool LegalOnly=false) const
Return true if the specified operation is legal on this target or can be made legal with custom lower...
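DAG combines commonly use this as a guard before creating target nodes. A minimal sketch; TLI, DAG, DL, VT, and the operands A, B, C are assumed to come from the surrounding combine:

// Only form an FMA if the legalizer will accept one for this type.
if (!TLI.isOperationLegalOrCustom(ISD::FMA, VT))
  return SDValue();
return DAG.getNode(ISD::FMA, DL, VT, A, B, C);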
unsigned MaxLoadsPerMemcmpOptSize
Likewise for functions with the OptSize attribute.
void setStackPointerRegisterToSaveRestore(Register R)
If set to a physical register, this specifies the register that llvm.savestack/llvm....
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT)
If Opc/OrigVT is specified as being promoted, the promotion code defaults to trying a larger integer/...
AtomicExpansionKind
Enum that specifies what an atomic load/AtomicRMWInst is expanded to, if at all.
void setCondCodeAction(ArrayRef< ISD::CondCode > CCs, MVT VT, LegalizeAction Action)
Indicate that the specified condition code is or isn't supported on the target and indicate what to d...
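Sketch, assuming a hypothetical target whose FP compare cannot encode the unordered predicates directly:

// SETUGT/SETUGE on f32 get rewritten in terms of supported predicates.
setCondCodeAction({ISD::SETUGT, ISD::SETUGE}, MVT::f32, Expand);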
void setTargetDAGCombine(ArrayRef< ISD::NodeType > NTs)
Targets should invoke this method for each target independent node that they want to provide a custom...
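Usage sketch; each listed opcode is later dispatched to the target's PerformDAGCombine override when the DAGCombiner visits such a node:

// Ask DAGCombiner to call back into this target for these opcodes.
setTargetDAGCombine({ISD::ADD, ISD::SHL, ISD::STORE});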
virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const
Returns how the IR-level AtomicExpand pass should expand the given AtomicRMW, if at all.
void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT, LegalizeAction Action)
Indicate that the specified load with extension does not work with the specified type and indicate wh...
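A common pattern, sketched: many targets cannot load an i1 with extension directly, so every extending-load kind from i1 memory is promoted:

for (MVT VT : MVT::integer_valuetypes()) {
  setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
  setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
}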
unsigned GatherAllAliasesMaxDepth
Depth that GatherAllAliases should continue looking for chain dependencies when trying to find a more...
NegatibleCost
Enum that specifies when a float negation is beneficial.
std::vector< ArgListEntry > ArgListTy
void setHasMultipleConditionRegisters(bool hasManyRegs=true)
Tells the code generator that the target has multiple (allocatable) condition registers that can be u...
unsigned MaxStoresPerMemcpy
Specify maximum number of store instructions per memcpy call.
void setSchedulingPreference(Sched::Preference Pref)
Specify the target scheduling preference.
virtual void insertSSPDeclarations(Module &M) const
Inserts necessary declarations for SSP (stack protection) purpose.
void setJumpIsExpensive(bool isExpensive=true)
Tells the code generator not to expand logic operations on comparison predicates into separate sequen...
virtual MCSymbol * getFunctionEntryPointSymbol(const GlobalValue *Func, const TargetMachine &TM) const
If supported, return the function entry point symbol.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
virtual const MCExpr * getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, MCContext &Ctx) const
This returns the relocation base for the given PIC jumptable, the same as getPICJumpTableRelocBase,...
SDValue lowerCmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) const
virtual bool useLoadStackGuardNode() const
If this function returns true, SelectionDAGBuilder emits a LOAD_STACK_GUARD node when it is lowering ...
void softenSetCCOperands(SelectionDAG &DAG, EVT VT, SDValue &NewLHS, SDValue &NewRHS, ISD::CondCode &CCCode, const SDLoc &DL, const SDValue OldLHS, const SDValue OldRHS) const
Soften the operands of a comparison.
std::pair< SDValue, SDValue > makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT, ArrayRef< SDValue > Ops, MakeLibCallOptions CallOptions, const SDLoc &dl, SDValue Chain=SDValue()) const
Returns a pair of (return value, chain).
SDValue getCheaperNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, unsigned Depth=0) const
This is the helper function to return the newly negated expression only when the cost is cheaper.
virtual ConstraintType getConstraintType(StringRef Constraint) const
Given a constraint, return the type of constraint it is for this target.
virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA, SelectionDAG &DAG) const
Lower TLS global address SDNode for target independent emulated TLS model.
std::pair< SDValue, SDValue > LowerCallTo(CallLoweringInfo &CLI) const
This function lowers an abstract call to a function into an actual call.
bool isPositionIndependent() const
virtual SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG, bool LegalOps, bool OptForSize, NegatibleCost &Cost, unsigned Depth=0) const
Return the newly negated expression if the cost is not expensive and set the cost in Cost to indicate...
virtual ConstraintWeight getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const
Examine constraint string and operand type and determine a weight value.
virtual SDValue getSqrtInputTest(SDValue Operand, SelectionDAG &DAG, const DenormalMode &Mode) const
Return a target-dependent comparison result if the input operand is suitable for use with a square ro...
virtual SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG) const
Returns relocation base for the given PIC jumptable.
virtual std::pair< unsigned, const TargetRegisterClass * > getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const
Given a physical register constraint (e.g. {edx}), return the register number and the register class for the register.
bool verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const
bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node, SDValue &Chain) const
Check whether a given call node is in tail position within its function.
virtual SDValue getSqrtResultForDenormInput(SDValue Operand, SelectionDAG &DAG) const
Return a target-dependent result if the input operand is not suitable for use with a square root esti...
virtual void LowerAsmOperandForConstraint(SDValue Op, StringRef Constraint, std::vector< SDValue > &Ops, SelectionDAG &DAG) const
Lower the specified operand into the Ops vector.
virtual bool isGAPlusOffset(SDNode *N, const GlobalValue *&GA, int64_t &Offset) const
Returns true (and the GlobalValue and the offset) if the node is a GlobalAddress + offset.
virtual unsigned getJumpTableEncoding() const
Return the entry encoding for a jump table in the current function.
Primary interface to the complete machine description for the target machine.
TLSModel::Model getTLSModel(const GlobalValue *GV) const
Returns the TLS model which should be used for the given global variable.
bool useEmulatedTLS() const
Returns true if this target uses emulated TLS.
Reloc::Model getRelocationModel() const
Returns the code generation relocation model.
bool shouldAssumeDSOLocal(const GlobalValue *GV) const
CodeModel::Model getCodeModel() const
Returns the code model.
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
unsigned NoInfsFPMath
NoInfsFPMath - This flag is enabled when the -enable-no-infs-fp-math flag is specified on the command...
unsigned PPCGenScalarMASSEntries
Enables scalar MASS conversions.
unsigned NoNaNsFPMath
NoNaNsFPMath - This flag is enabled when the -enable-no-nans-fp-math flag is specified on the command...
unsigned GuaranteedTailCallOpt
GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is specified on the commandline.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
static constexpr TypeSize getFixed(ScalarTy ExactSize)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
bool isEmptyTy() const
Return true if this type is empty, that is, it has no elements or all of its elements are empty.
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
@ FP128TyID
128-bit floating point type (112-bit significand)
static Type * getVoidTy(LLVMContext &C)
bool isSized(SmallPtrSetImpl< Type * > *Visited=nullptr) const
Return true if it makes sense to take the size of this type.
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
bool isFunctionTy() const
True if this is an instance of FunctionType.
static IntegerType * getInt64Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
TypeID getTypeID() const
Return the type id for the type.
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
A Use represents the edge between a Value definition and its users.
Value * getOperand(unsigned i) const
unsigned getNumOperands() const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
bool hasOneUse() const
Return true if there is exactly one use of this value.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ Cold
Attempts to make code in the caller as efficient as possible under the assumption that the call is no...
@ Fast
Attempts to make calls as fast as possible (e.g. by passing things in registers).
@ C
The default llvm calling convention, compatible with C.
bool isNON_EXTLoad(const SDNode *N)
Returns true if the specified node is a non-extending load.
NodeType
ISD::NodeType enum - This enum defines the target-independent operators for a SelectionDAG.
@ SETCC
SetCC operator - This evaluates to a true value iff the condition is true.
@ MERGE_VALUES
MERGE_VALUES - This node takes multiple discrete operands and returns them all as its individual resu...
@ STACKRESTORE
STACKRESTORE has two operands, an input chain and a pointer to restore to; it returns an output chain.
@ STACKSAVE
STACKSAVE - STACKSAVE has one operand, an input chain.
@ STRICT_FSETCC
STRICT_FSETCC/STRICT_FSETCCS - Constrained versions of SETCC, used for floating-point operands only.
@ DELETED_NODE
DELETED_NODE - This is an illegal value that is used to catch errors.
@ EH_SJLJ_LONGJMP
OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer) This corresponds to the eh.sjlj.longjmp intrinsic.
@ SMUL_LOHI
SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing a signed/unsigned value of type i[2...
@ BSWAP
Byte Swap and Counting operators.
@ VAEND
VAEND, VASTART - VAEND and VASTART have three operands: an input chain, pointer, and a SRCVALUE.
@ ATOMIC_STORE
OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val) This corresponds to "store atomic" instruction.
@ ADDC
Carry-setting nodes for multiple precision addition and subtraction.
@ ADD
Simple integer binary arithmetic operators.
@ LOAD
LOAD and STORE have token chains as their first operand, then the same operands as an LLVM load/store...
@ ANY_EXTEND
ANY_EXTEND - Used for integer types. The high bits are undefined.
@ FMA
FMA - Perform a * b + c with no intermediate rounding step.
@ INTRINSIC_VOID
OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...) This node represents a target intrin...
@ SINT_TO_FP
[SU]INT_TO_FP - These operators convert integers (whose interpreted sign depends on the first letter)...
@ CONCAT_VECTORS
CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of vector type with the same length ...
@ FADD
Simple binary floating point operators.
@ ABS
ABS - Determine the unsigned absolute value of a signed integer value of the same bitwidth.
@ SDIVREM
SDIVREM/UDIVREM - Divide two integers and produce both a quotient and remainder result.
@ FP16_TO_FP
FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions and truncation for half-preci...
@ BITCAST
BITCAST - This operator converts between integer, vector and FP values, as if the value was stored to...
@ BUILD_PAIR
BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
@ INIT_TRAMPOLINE
INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
@ FLDEXP
FLDEXP - ldexp, inspired by libm (op0 * 2**op1).
@ STRICT_FSQRT
Constrained versions of libm-equivalent floating point intrinsics.
@ SIGN_EXTEND
Conversion operators.
@ SCALAR_TO_VECTOR
SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a scalar value into element 0 of the...
@ BR
Control flow instructions. These all have token chains.
@ PREFETCH
PREFETCH - This corresponds to a prefetch intrinsic.
@ FSINCOS
FSINCOS - Compute both fsin and fcos as a single operation.
@ FNEG
Perform various unary floating-point operations inspired by libm.
@ BR_CC
BR_CC - Conditional branch.
@ BR_JT
BR_JT - Jumptable branch.
@ IS_FPCLASS
Performs a check of floating point class property, defined by IEEE-754.
@ SSUBSAT
RESULT = [US]SUBSAT(LHS, RHS) - Perform saturation subtraction on 2 integers with the same bit width ...
@ SELECT
Select(COND, TRUEVAL, FALSEVAL).
@ ATOMIC_LOAD
Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr) This corresponds to "load atomic" instruction.
@ EXTRACT_ELEMENT
EXTRACT_ELEMENT - This is used to get the lower or upper (determined by a Constant,...
@ VACOPY
VACOPY - VACOPY has 5 operands: an input chain, a destination pointer, a source pointer,...
@ TargetGlobalAddress
TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or anything else with this node...
@ GET_ROUNDING
Returns current rounding mode: -1 Undefined 0 Round to 0 1 Round to nearest, ties to even 2 Round to ...
@ MULHU
MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing an unsigned/signed value of...
@ SHL
Shift and rotation operations.
@ VECTOR_SHUFFLE
VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as VEC1/VEC2.
@ EXTRACT_SUBVECTOR
EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR.
@ FMINNUM_IEEE
FMINNUM_IEEE/FMAXNUM_IEEE - Perform floating-point minimumNumber or maximumNumber on two values,...
@ EXTRACT_VECTOR_ELT
EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR identified by the (potentially...
@ ZERO_EXTEND
ZERO_EXTEND - Used for integer types, zeroing the new bits.
@ SELECT_CC
Select with condition operator - This selects between a true value and a false value (ops #2 and #3) ...
@ ATOMIC_CMP_SWAP
Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap) For double-word atomic operations: ValLo,...
@ FMINNUM
FMINNUM/FMAXNUM - Perform floating-point minimum or maximum on two values.
@ DYNAMIC_STACKALLOC
DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned to a specified boundary.
@ SIGN_EXTEND_INREG
SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to sign extend a small value in ...
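A worked sketch of the SHL/SRA pair it stands for, written as plain C++ for an i8-in-i32 case (the cast through uint32_t keeps the left shift well defined):

// Sign-extend the low 8 bits of X in place, i.e. SIGN_EXTEND_INREG(X, i8):
int32_t X = 0xF0;                                // -16 when viewed as i8
int32_t R = (int32_t)((uint32_t)X << 24) >> 24;  // R == -16 (0xFFFFFFF0)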
@ SMIN
[US]{MIN/MAX} - Binary minimum or maximum of signed or unsigned integers.
@ FP_EXTEND
X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ VSELECT
Select with a vector condition (op #0) and two vector operands (ops #1 and #2), returning a vector re...
@ STRICT_SINT_TO_FP
STRICT_[US]INT_TO_FP - Convert a signed or unsigned integer to a floating point value.
@ INLINEASM_BR
INLINEASM_BR - Branching version of inline asm. Used by asm-goto.
@ EH_DWARF_CFA
EH_DWARF_CFA - This node represents the pointer to the DWARF Canonical Frame Address (CFA),...
@ FRAMEADDR
FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and llvm.returnaddress on the DAG.
@ STRICT_FP_ROUND
X = STRICT_FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision ...
@ STRICT_FP_TO_SINT
STRICT_FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ FP_TO_SINT
FP_TO_[US]INT - Convert a floating point value to a signed or unsigned integer.
@ READCYCLECOUNTER
READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
@ STRICT_FP_EXTEND
X = STRICT_FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
@ AND
Bitwise operators - logical and, logical or, logical xor.
@ TRAP
TRAP - Trapping instruction.
@ INTRINSIC_WO_CHAIN
RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...) This node represents a target intrinsic fun...
@ ADDE
Carry-using nodes for multiple precision addition and subtraction.
@ STRICT_FADD
Constrained versions of the binary floating point operators.
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
@ TokenFactor
TokenFactor - This node takes multiple tokens as input and produces a single token result.
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
@ INLINEASM
INLINEASM - Represents an inline asm block.
@ EH_SJLJ_SETJMP
RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer) This corresponds to the eh.sjlj....
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
@ VAARG
VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE, and the alignment.
@ BRCOND
BRCOND - Conditional branch.
@ SHL_PARTS
SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded integer shift operations.
@ AssertSext
AssertSext, AssertZext - These nodes record if a register contains a value that has already been zero...
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
@ CALLSEQ_START
CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of a call sequence,...
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - get offset from native SP to the address of the most recent dynamic alloca.
@ ADJUST_TRAMPOLINE
ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
@ INTRINSIC_W_CHAIN
RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...) This node represents a target in...
@ BUILD_VECTOR
BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a fixed-width vector with the specified,...
bool isNormalStore(const SDNode *N)
Returns true if the specified node is a non-truncating and unindexed store.
bool isZEXTLoad(const SDNode *N)
Returns true if the specified node is a ZEXTLOAD.
bool isUNINDEXEDLoad(const SDNode *N)
Returns true if the specified node is an unindexed load.
bool isEXTLoad(const SDNode *N)
Returns true if the specified node is an EXTLOAD.
bool isBuildVectorAllZeros(const SDNode *N)
Return true if the specified node is a BUILD_VECTOR where all of the elements are 0 or undef.
bool isSignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs a signed comparison when used with integer o...
MemIndexedMode
MemIndexedMode enum - This enum defines the load / store indexed addressing modes.
bool isSEXTLoad(const SDNode *N)
Returns true if the specified node is a SEXTLOAD.
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
LoadExtType
LoadExtType enum - This enum defines the three variants of LOADEXT (load with extension).
bool isUnsignedIntSetCC(CondCode Code)
Return true if this is a setcc instruction that performs an unsigned comparison when used with intege...
bool isNormalLoad(const SDNode *N)
Returns true if the specified node is a non-extending and unindexed load.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
@ Bitcast
Perform the operation on a different, but equivalently sized type.
@ MO_TLSLDM_FLAG
MO_TLSLDM_FLAG - on AIX the ML relocation type is only valid for a reference to a TOC symbol from the...
@ MO_PIC_LO_FLAG
MO_PIC_LO_FLAG = MO_PIC_FLAG | MO_LO.
@ MO_TPREL_PCREL_FLAG
MO_TPREL_PCREL_FLAG = MO_PCREL_FLAG | MO_TPREL_FLAG.
@ MO_GOT_TPREL_PCREL_FLAG
MO_GOT_TPREL_PCREL_FLAG - A combination of flags, if these bits are set they should produce the reloc...
@ MO_GOT_PCREL_FLAG
MO_GOT_PCREL_FLAG = MO_PCREL_FLAG | MO_GOT_FLAG.
@ MO_TLSGDM_FLAG
MO_TLSGDM_FLAG - If this bit is set the symbol reference is relative to the region handle of TLS Gene...
@ MO_PCREL_FLAG
MO_PCREL_FLAG - If this bit is set, the symbol reference is relative to the current instruction addre...
@ MO_TLSLD_FLAG
MO_TLSLD_FLAG - If this bit is set the symbol reference is relative to TLS Local Dynamic model.
@ MO_TLS_PCREL_FLAG
MO_TLS_PCREL_FLAG = MO_PCREL_FLAG | MO_TLS.
@ MO_PLT
On PPC, the 12 bits are not enough for all target operand flags.
@ MO_TLS
Symbol for VK_PPC_TLS fixup attached to an ADD instruction.
@ MO_TPREL_FLAG
MO_TPREL_FLAG - If this bit is set, the symbol reference is relative to the thread pointer and the sy...
@ MO_LO
MO_LO, MO_HA - lo16(symbol) and ha16(symbol)
@ MO_GOT_TLSLD_PCREL_FLAG
MO_GOT_TLSLD_PCREL_FLAG - A combination of flags, if these bits are set they should produce the reloc...
@ MO_PIC_HA_FLAG
MO_PIC_HA_FLAG = MO_PIC_FLAG | MO_HA.
@ MO_TLSGD_FLAG
MO_TLSGD_FLAG - If this bit is set the symbol reference is relative to TLS General Dynamic model for ...
@ MO_GOT_TLSGD_PCREL_FLAG
MO_GOT_TLSGD_PCREL_FLAG - A combination of flags, if these bits are set they should produce the reloc...
@ MO_PIC_FLAG
MO_PIC_FLAG - If this bit is set, the symbol reference is relative to the function's picbase,...
@ SEXT_LD_SPLAT
VSRC, CHAIN = SEXT_LD_SPLAT, CHAIN, Ptr - a splatting memory load that sign-extends.
@ FCTIDUZ
Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for unsigned integers with round ...
@ ADDI_TLSGD_L_ADDR
G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSGD_L and GET_TLS_ADDR unti...
@ FSQRT
Square root instruction.
@ STRICT_FCFID
Constrained integer-to-floating-point conversion instructions.
@ DYNALLOC
The following two target-specific nodes are used for calls through function pointers in the 64-bit SV...
@ COND_BRANCH
CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This corresponds to the COND_BRANCH pseudo ...
@ TLSLD_AIX
[GP|G8]RC = TLSLD_AIX, TOC_ENTRY(module handle) Op that requires a single input of the module handle ...
@ CALL_RM
The variants that implicitly define rounding mode for calls with strictfp semantics.
@ STORE_VEC_BE
CHAIN = STORE_VEC_BE CHAIN, VSRC, Ptr - Occurs only for little endian.
@ BDNZ
CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based loops.
@ MTVSRZ
Direct move from a GPR to a VSX register (zero)
@ SRL
These nodes represent PPC shifts.
@ VECINSERT
VECINSERT - The PPC vector insert instruction.
@ LXSIZX
GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an integer smaller than 64 bits into ...
@ FNMSUB
FNMSUB - Negated multiply-subtract instruction.
@ RFEBB
CHAIN = RFEBB CHAIN, State - Return from event-based branch.
@ FCTIDZ
FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64 operand, producing an f64 value...
@ SC
CHAIN = SC CHAIN, Imm128 - System call.
@ GET_TLS_ADDR
x3 = GET_TLS_ADDR x3, Symbol - For the general-dynamic TLS model, produces a call to __tls_get_addr(s...
@ XXSPLTI32DX
XXSPLTI32DX - The PPC XXSPLTI32DX instruction.
@ ANDI_rec_1_EQ_BIT
i1 = ANDI_rec_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the eq or gt bit of CR0 after ex...
@ FRE
Reciprocal estimate instructions (unary FP ops).
@ ADDIS_GOT_TPREL_HA
G8RC = ADDIS_GOT_TPREL_HA x2, Symbol - Used by the initial-exec TLS model, produces an ADDIS8 instruc...
@ CLRBHRB
CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
@ STORE_COND
CHAIN,Glue = STORE_COND CHAIN, GPR, Ptr The store conditional instruction ST[BHWD]ARX that produces a...
@ SINT_VEC_TO_FP
Extract a subvector from a signed integer vector and convert it to FP.
@ EXTRACT_SPE
Extract SPE register component, second argument is high or low.
@ XXSWAPD
VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little endian.
@ ADDI_TLSLD_L_ADDR
G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that combines ADDI_TLSLD_L and GET_TLSLD_ADDR un...
@ ATOMIC_CMP_SWAP_8
ATOMIC_CMP_SWAP - the exact same as the target-independent nodes except they ensure that the compare ...
@ ST_VSR_SCAL_INT
Store scalar integers from VSR.
@ VCMP
RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP* instructions.
@ BCTRL
CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a BCTRL instruction.
@ BUILD_SPE64
BUILD_SPE64 and EXTRACT_SPE are analogous to BUILD_PAIR and EXTRACT_ELEMENT but take f64 arguments in...
@ LFIWZX
GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point load which zero-extends from a 32-bit inte...
@ RET_GLUE
Return with a glue operand, matched by 'blr'.
@ SCALAR_TO_VECTOR_PERMUTED
PowerPC instructions that have SCALAR_TO_VECTOR semantics tend to place the value into the least sign...
@ EXTRACT_VSX_REG
EXTRACT_VSX_REG = Extract one of the underlying vsx registers of an accumulator or pair register.
@ STXSIX
STXSIX - The STXSI[bh]X instruction.
@ MAT_PCREL_ADDR
MAT_PCREL_ADDR = Materialize a PC Relative address.
@ MFOCRF
R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
@ XXSPLT
XXSPLT - The PPC VSX splat instructions.
@ TOC_ENTRY
GPRC = TOC_ENTRY GA, TOC Loads the entry for GA from the TOC, where the TOC base is given by the last...
@ XXPERMDI
XXPERMDI - The PPC XXPERMDI instruction.
@ ADDIS_DTPREL_HA
G8RC = ADDIS_DTPREL_HA x3, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction t...
@ ADD_TLS
G8RC = ADD_TLS G8RReg, Symbol - Can be used by the initial-exec and local-exec TLS models,...
@ MTVSRA
Direct move from a GPR to a VSX register (algebraic)
@ VADD_SPLAT
VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded during instruction selection to optimi...
@ PPC32_GOT
GPRC = address of GLOBAL_OFFSET_TABLE.
@ ADDI_DTPREL_L
G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction ...
@ BCTRL_LOAD_TOC
CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl instruction and the TOC reload r...
@ PPC32_PICGOT
GPRC = address of GLOBAL_OFFSET_TABLE.
@ FCFID
FCFID - The FCFID instruction, taking an f64 operand and producing an f64 value containing the FP re...
@ CR6SET
ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls
@ LBRX
GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a byte-swapping load instruction.
@ GET_TLS_MOD_AIX
x3 = GET_TLS_MOD_AIX _$TLSML - For the AIX local-dynamic TLS model, produces a call to ....
@ LD_VSX_LH
VSRC, CHAIN = LD_VSX_LH CHAIN, Ptr - This is a floating-point load of a v2f32 value into the lower ha...
@ PROBED_ALLOCA
To avoid stack clash, allocation is performed by block and each block is probed.
@ XXMFACC
XXMFACC = This corresponds to the xxmfacc instruction.
@ ADDIS_TLSGD_HA
G8RC = ADDIS_TLSGD_HA x2, Symbol - For the general-dynamic TLS model, produces an ADDIS8 instruction ...
@ ACC_BUILD
ACC_BUILD = Build an accumulator register from 4 VSX registers.
@ GlobalBaseReg
The result of the mflr at function entry, used for PIC code.
@ LXVD2X
VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
@ XSMAXC
XSMAXC[DQ]P, XSMINC[DQ]P - C-type min/max instructions.
@ CALL
CALL - A direct function call.
@ MTCTR
CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a MTCTR instruction.
@ TC_RETURN
TC_RETURN - A tail call return.
@ STFIWX
STFIWX - The STFIWX instruction.
@ LD_SPLAT
VSRC, CHAIN = LD_SPLAT, CHAIN, Ptr - a splatting memory load instruction such as LXVDSX,...
@ VCMP_rec
RESVEC, OUTFLAG = VCMP_rec(LHS, RHS, OPC) - Represents one of the altivec VCMP*_rec instructions.
@ MFFS
F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
@ PADDI_DTPREL
G8RC = PADDI_DTPREL x3, Symbol - For the pc-rel based local-dynamic TLS model, produces a PADDI8 inst...
@ BUILD_FP128
Direct move of 2 consecutive GPRs to a VSX register.
@ VEXTS
VEXTS, ByteWidth - takes an input in VSFRC and produces an output in VSFRC that is sign-extended from...
@ TLS_LOCAL_EXEC_MAT_ADDR
TLS_LOCAL_EXEC_MAT_ADDR = Materialize an address for TLS global address when using local exec access ...
@ VPERM
VPERM - The PPC VPERM Instruction.
@ ADDIS_TLSLD_HA
G8RC = ADDIS_TLSLD_HA x2, Symbol - For the local-dynamic TLS model, produces an ADDIS8 instruction th...
@ XXSPLTI_SP_TO_DP
XXSPLTI_SP_TO_DP - The PPC VSX splat instructions for immediates for converting immediate single prec...
@ GET_TLSLD_ADDR
x3 = GET_TLSLD_ADDR x3, Symbol - For the local-dynamic TLS model, produces a call to __tls_get_addr(s...
@ ADDI_TLSGD_L
x3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS model, produces an ADDI8 instruction t...
@ DYNAREAOFFSET
This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to compute an offset from native ...
@ PAIR_BUILD
PAIR_BUILD = Build a vector pair register from 2 VSX registers.
@ STRICT_FADDRTZ
Constrained floating point add in round-to-zero mode.
@ FTSQRT
Test instruction for software square root.
@ FP_EXTEND_HALF
FP_EXTEND_HALF(VECTOR, IDX) - Custom extend upper (IDX=0) half or lower (IDX=1) half of v4f32 to v2f6...
@ CMPB
The CMPB instruction (takes two operands of i32 or i64).
@ VECSHL
VECSHL - The PPC vector shift left instruction.
@ ADDI_TLSLD_L
x3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS model, produces an ADDI8 instruction tha...
@ FADDRTZ
F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding towards zero.
@ ZEXT_LD_SPLAT
VSRC, CHAIN = ZEXT_LD_SPLAT, CHAIN, Ptr - a splatting memory load that zero-extends.
@ SRA_ADDZE
The combination of sra[wd]i and addze used to implement signed integer division by a power of 2.
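A worked example of why the pair computes truncating division; plain C arithmetic stands in for srawi (which also sets the carry bit when the source is negative and nonzero bits are shifted out) and addze:

int32_t X = -7;
int32_t T = X >> 2;  // srawi 2: T == -2; carry set, since X < 0 and the
                     // shifted-out bits (binary 01) were nonzero
int32_t Q = T + 1;   // addze adds the carry: Q == -1 == -7 / 4 in C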
@ EXTSWSLI
EXTSWSLI = The PPC extswsli instruction, which does an extend-sign word and shift left immediate.
@ STXVD2X
CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
@ TLSGD_AIX
GPRC = TLSGD_AIX, TOC_ENTRY, TOC_ENTRY G8RC = TLSGD_AIX, TOC_ENTRY, TOC_ENTRY Op that combines two re...
@ UINT_VEC_TO_FP
Extract a subvector from an unsigned integer vector and convert it to FP.
@ GET_TPOINTER
x3 = GET_TPOINTER - Used for the local- and initial-exec TLS model on 32-bit AIX, produces a call to ...
@ LXVRZX
LXVRZX - Load VSX Vector Rightmost and Zero Extend This node represents v1i128 BUILD_VECTOR of a zero...
@ MFBHRBE
GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch history rolling buffer entry.
@ FCFIDU
Newer FCFID[US] integer-to-floating-point conversion instructions for unsigned integers and single-pr...
@ FSEL
FSEL - Traditional three-operand fsel node.
@ SWAP_NO_CHAIN
An SDNode for swaps that are not associated with any loads/stores and thereby have no chain.
@ LOAD_VEC_BE
VSRC, CHAIN = LOAD_VEC_BE CHAIN, Ptr - Occurs only for little endian.
@ LFIWAX
GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point load which sign-extends from a 32-bit inte...
@ STBRX
CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a byte-swapping store instruction.
@ LD_GOT_TPREL_L
G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec TLS model, produces a LD instruction ...
@ MFVSR
Direct move from a VSX register to a GPR.
@ TLS_DYNAMIC_MAT_PCREL_ADDR
TLS_DYNAMIC_MAT_PCREL_ADDR = Materialize a PC Relative address for TLS global address when using dyna...
@ Hi
Hi/Lo - These represent the high and low 16-bit parts of a global address respectively.
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG)
get_VSPLTI_elt - If this is a build_vector of constants which can be formed by using a vspltis[bhw] i...
bool isXXBRDShuffleMask(ShuffleVectorSDNode *N)
isXXBRDShuffleMask - Return true if this is a shuffle mask suitable for a XXBRD instruction.
FastISel * createFastISel(FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo)
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for a VMRGH* instruction with the ...
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a VPKUDUM instruction.
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for a VMRGEW or VMRGOW instructi...
bool isXXBRQShuffleMask(ShuffleVectorSDNode *N)
isXXBRQShuffleMask - Return true if this is a shuffle mask suitable for a XXBRQ instruction.
bool isXXBRWShuffleMask(ShuffleVectorSDNode *N)
isXXBRWShuffleMask - Return true if this is a shuffle mask suitable for a XXBRW instruction.
bool isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXPERMDIShuffleMask - Return true if this is a shuffle mask suitable for a XXPERMDI instruction.
bool isXXBRHShuffleMask(ShuffleVectorSDNode *N)
isXXBRHShuffleMask - Return true if this is a shuffle mask suitable for a XXBRH instruction.
unsigned getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize, SelectionDAG &DAG)
getSplatIdxForPPCMnemonics - Return the splat index as a value that is appropriate for PPC mnemonics ...
bool isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, bool &Swap, bool IsLE)
isXXSLDWIShuffleMask - Return true if this is a shuffle mask suitable for a XXSLDWI instruction.
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift amount, otherwise return -1.
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, unsigned ShuffleKind, SelectionDAG &DAG)
isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for a VMRGL* instruction with the ...
bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts, unsigned &InsertAtByte, bool &Swap, bool IsLE)
isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by the XXINSERTW instruction intr...
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize)
isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand specifies a splat of a singl...
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a VPKUWUM instruction.
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind, SelectionDAG &DAG)
isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a VPKUHUM instruction.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
@ Define
Register definition.
Reg
All possible values of the reg field in the ModR/M byte.
@ XTY_ER
External reference.
initializer< Ty > init(const Ty &Val)
const_iterator end(StringRef path)
Get end iterator over path.
This is an optimization pass for GlobalISel generic memory operations.
static bool isIndirectCall(const MachineInstr &MI)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
bool checkConvertToNonDenormSingle(APFloat &ArgAPFloat)
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isNullConstant(SDValue V)
Returns true if V is a constant integer zero.
SDValue peekThroughBitcasts(SDValue V)
Return the non-bitcasted source operand of V if it exists.
bool CC_PPC32_SVR4_ByVal(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool isAligned(Align Lhs, uint64_t SizeInBytes)
Checks that SizeInBytes is a multiple of the alignment.
bool isIntS16Immediate(SDNode *N, int16_t &Imm)
isIntS16Immediate - This method tests to see if the node is either a 32-bit or 64-bit immediate,...
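Typical use during address selection, sketched; N is assumed to be an ADD node being matched into a D-form base+displacement addressing mode:

int16_t Imm;
if (isIntS16Immediate(N.getOperand(1).getNode(), Imm)) {
  // The offset fits the signed 16-bit displacement field.
}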
bool CC_PPC32_SVR4_VarArg(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
static bool isRunOfOnes64(uint64_t Val, unsigned &MB, unsigned &ME)
int countr_zero(T Val)
Count the number of 0s from the least significant bit to the most significant bit, stopping at the first 1.
unsigned M1(unsigned Val)
bool isReleaseOrStronger(AtomicOrdering AO)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool RetCC_PPC_Cold(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
bool convertToNonDenormSingle(APInt &ArgAPInt)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool CC_PPC32_SVR4(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
bool CC_PPC64_ELF(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
bool RetCC_PPC(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
format_object< Ts... > format(const char *Fmt, const Ts &... Vals)
These are helper functions used to produce formatted output.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Mod
The access may modify the value stored in memory.
bool isIntS34Immediate(SDNode *N, int64_t &Imm)
isIntS34Immediate - This method tests whether the value of the given node can be accurately represented as a sign ...
bool CCAssignFn(unsigned ValNo, MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags, CCState &State)
CCAssignFn - This function assigns a location for Val, updating State to reflect the change.
@ Mul
Product of integers.
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
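Worked examples:

uint64_t Padded = alignTo(13, Align(8));  // == 16
uint64_t Same   = alignTo(16, Align(8));  // == 16, already aligned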
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
DWARFExpression::Operation Op
unsigned M0(unsigned Val)
ConstantSDNode * isConstOrConstSplat(SDValue N, bool AllowUndefs=false, bool AllowTruncation=false)
Returns the SDNode if it is a constant splat BuildVector or constant int.
bool isAcquireOrStronger(AtomicOrdering AO)
constexpr int32_t SignExtend32(uint32_t X)
Sign-extend the number in the bottom B bits of X to a 32-bit integer.
constexpr unsigned BitWidth
Align commonAlignment(Align A, uint64_t Offset)
Returns the alignment that satisfies both alignments.
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
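Worked examples of both template forms; the template parameter B gives the source bit width:

int32_t S32 = SignExtend32<8>(0xF0);     // == -16 (0xFFFFFFF0)
int64_t S64 = SignExtend64<16>(0x8000);  // == -32768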
static bool isRunOfOnes(unsigned Val, unsigned &MB, unsigned &ME)
Returns true iff Val consists of one contiguous run of 1s with any number of 0s on either side.
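Illustration (note this helper is file-local to PPCISelLowering.cpp); MB and ME come back as PPC-style bit indices, counting from the most significant bit:

unsigned MB, ME;
if (isRunOfOnes(0x0FF00000u, MB, ME)) {
  // One contiguous run of eight 1s: expect MB == 4, ME == 11.
}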
T bit_floor(T Value)
Returns the largest integral power of two no greater than Value if Value is nonzero.
bool isAllOnesConstant(SDValue V)
Returns true if V is an integer constant with all bits set.
static const unsigned PerfectShuffleTable[6561+1]
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This is used by foldLoadsRecursive() to capture a Root Load node which is of type or(load,...
static const fltSemantics & IEEEsingle() LLVM_READNONE
static constexpr roundingMode rmNearestTiesToEven
static const fltSemantics & PPCDoubleDouble() LLVM_READNONE
static constexpr roundingMode rmTowardZero
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Represent subnormal handling kind for floating point instruction inputs and outputs.
EVT changeVectorElementTypeToInteger() const
Return a vector with the same number of elements as this vector, but with the element type converted ...
TypeSize getStoreSize() const
Return the number of bytes overwritten by a store of the specified value type.
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
bool bitsGT(EVT VT) const
Return true if this has more bits than VT.
bool isFloatingPoint() const
Return true if this is a FP or a vector FP type.
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
uint64_t getScalarSizeInBits() const
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
uint64_t getFixedSizeInBits() const
Return the size of the specified fixed width value type in bits.
bool isVector() const
Return true if this is a vector value type.
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
EVT getVectorElementType() const
Given a vector type, return the type of each element.
bool isExtended() const
Test if the given EVT is extended (as opposed to being simple).
bool isScalarInteger() const
Return true if this is an integer, but not a vector.
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
bool isInteger() const
Return true if this is an integer or a vector integer type.
unsigned getByValSize() const
void setByValSize(unsigned S)
Align getNonZeroByValAlign() const
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
bool isConstant() const
Returns true if we know the value of all bits.
void resetAll()
Resets the known state of all bits.
const APInt & getConstant() const
Returns the value when all bits have a known value.
This class contains a discriminated union of information about pointers in memory operands,...
static MachinePointerInfo getStack(MachineFunction &MF, int64_t Offset, uint8_t ID=0)
Stack pointer relative access.
MachinePointerInfo getWithOffset(int64_t O) const
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Structure that collects some common arguments that get passed around between the functions for call l...
const CallingConv::ID CallConv
These are IR-level optimization flags that may be propagated to SDNodes.
This represents a list of ValueType's that has been intern'd by a SelectionDAG.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...
This contains information for each constraint that we are lowering.
This structure contains all information that is necessary for lowering calls.
CallLoweringInfo & setIsPostTypeLegalization(bool Value=true)
CallLoweringInfo & setLibCallee(CallingConv::ID CC, Type *ResultType, SDValue Target, ArgListTy &&ArgsList)
SmallVector< ISD::InputArg, 32 > Ins
CallLoweringInfo & setZExtResult(bool Value=true)
CallLoweringInfo & setDebugLoc(const SDLoc &dl)
CallLoweringInfo & setSExtResult(bool Value=true)
SmallVector< ISD::OutputArg, 32 > Outs
SmallVector< SDValue, 32 > OutVals
bool isBeforeLegalizeOps() const
bool isAfterLegalizeDAG() const
void AddToWorklist(SDNode *N)
bool isBeforeLegalize() const
SDValue CombineTo(SDNode *N, ArrayRef< SDValue > To, bool AddTo=true)