Remove getDataLayout() from TargetLowering

Summary: This change is part of a series of commits dedicated to having a
single DataLayout during compilation, by always using the one owned by the
module.

Reviewers: echristo

Subscribers: yaron.keren, rafael, llvm-commits, jholewinski

Differential Revision: http://reviews.llvm.org/D11042

From: Mehdi Amini <mehdi.amini@apple.com>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@241779 91177308-0d34-0410-b5e6-96231b3b80d8
16 changed file(s) with 120 addition(s) and 113 deletion(s).
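Every hunk below applies the same mechanical migration: callers stop going through the removed TargetLowering::getDataLayout() accessor and instead reach the module-owned DataLayout through an object already in scope (a SelectionDAG, a MachineFunction, or an Instruction's parent Module), switching from pointer to reference syntax along the way. A minimal sketch of the pattern, assuming LLVM 3.7-era headers; the helper functions are illustrative and not part of the patch:

    #include "llvm/CodeGen/MachineFunction.h"
    #include "llvm/CodeGen/SelectionDAG.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Instruction.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    // Before this patch: const DataLayout *DL = TLI->getDataLayout();
    // After it, each consumer asks a context object it already holds, and
    // the result is a reference, never a cached pointer.
    static uint64_t allocSize(const SelectionDAG &DAG, Type *Ty) {
      return DAG.getDataLayout().getTypeAllocSize(Ty);             // SelectionDAG path
    }
    static uint64_t allocSize(const MachineFunction &MF, Type *Ty) {
      return MF.getDataLayout().getTypeAllocSize(Ty);              // MachineFunction path
    }
    static uint64_t allocSize(const Instruction &I, Type *Ty) {
      return I.getModule()->getDataLayout().getTypeAllocSize(Ty);  // IR-level path
    }

All three accessors appear in the hunks below; routing everything through them is what gives the compilation a single DataLayout, the one owned by the Module.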
160160
161161 public:
162162 const TargetMachine &getTargetMachine() const { return TM; }
163 const DataLayout *getDataLayout() const { return TM.getDataLayout(); }
164163
165164 virtual bool useSoftFloat() const { return false; }
166165
45454545 // Emit a library call.
45464546 TargetLowering::ArgListTy Args;
45474547 TargetLowering::ArgListEntry Entry;
4548 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
4548 Entry.Ty = getDataLayout().getIntPtrType(*getContext());
45494549 Entry.Node = Dst; Args.push_back(Entry);
45504550 Entry.Node = Src; Args.push_back(Entry);
45514551 Entry.Node = Size; Args.push_back(Entry);
64336433 bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
64346434 const CallInst &I,
64356435 unsigned Intrinsic) const {
6436 auto &DL = I.getModule()->getDataLayout();
64366437 switch (Intrinsic) {
64376438 case Intrinsic::aarch64_neon_ld2:
64386439 case Intrinsic::aarch64_neon_ld3:
64486449 case Intrinsic::aarch64_neon_ld4r: {
64496450 Info.opc = ISD::INTRINSIC_W_CHAIN;
64506451 // Conservatively set memVT to the entire set of vectors loaded.
6451 uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
6452 uint64_t NumElts = DL.getTypeAllocSize(I.getType()) / 8;
64526453 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
64536454 Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1);
64546455 Info.offset = 0;
64746475 Type *ArgTy = I.getArgOperand(ArgI)->getType();
64756476 if (!ArgTy->isVectorTy())
64766477 break;
6477 NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
6478 NumElts += DL.getTypeAllocSize(ArgTy) / 8;
64786479 }
64796480 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
64806481 Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1);
64926493 Info.memVT = MVT::getVT(PtrTy->getElementType());
64936494 Info.ptrVal = I.getArgOperand(0);
64946495 Info.offset = 0;
6495 Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType());
6496 Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
64966497 Info.vol = true;
64976498 Info.readMem = true;
64986499 Info.writeMem = false;
65056506 Info.memVT = MVT::getVT(PtrTy->getElementType());
65066507 Info.ptrVal = I.getArgOperand(1);
65076508 Info.offset = 0;
6508 Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType());
6509 Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
65096510 Info.vol = true;
65106511 Info.readMem = false;
65116512 Info.writeMem = true;
66426643 break;
66436644 case Instruction::GetElementPtr: {
66446645 gep_type_iterator GTI = gep_type_begin(Instr);
6646 auto &DL = Ext->getModule()->getDataLayout();
66456647 std::advance(GTI, U.getOperandNo());
66466648 Type *IdxTy = *GTI;
66476649 // This extension will end up with a shift because of the scaling factor.
66496651 // Get the shift amount based on the scaling factor:
66506652 // log2(sizeof(IdxTy)) - log2(8).
66516653 uint64_t ShiftAmt =
6652 countTrailingZeros(getDataLayout()->getTypeStoreSizeInBits(IdxTy)) - 3;
6654 countTrailingZeros(DL.getTypeStoreSizeInBits(IdxTy)) - 3;
66536655 // Is the constant foldable in the shift of the addressing mode?
66546656 // I.e., shift amount is between 1 and 4 inclusive.
66556657 if (ShiftAmt == 0 || ShiftAmt > 4)
67136715 assert(Shuffles.size() == Indices.size() &&
67146716 "Unmatched number of shufflevectors and indices");
67156717
6716 const DataLayout *DL = getDataLayout();
6718 const DataLayout &DL = LI->getModule()->getDataLayout();
67176719
67186720 VectorType *VecTy = Shuffles[0]->getType();
6719 unsigned VecSize = DL->getTypeAllocSizeInBits(VecTy);
6721 unsigned VecSize = DL.getTypeAllocSizeInBits(VecTy);
67206722
67216723 // Skip illegal vector types.
67226724 if (VecSize != 64 && VecSize != 128)
67266728 // load integer vectors first and then convert to pointer vectors.
67276729 Type *EltTy = VecTy->getVectorElementType();
67286730 if (EltTy->isPointerTy())
6729 VecTy = VectorType::get(DL->getIntPtrType(EltTy),
6730 VecTy->getVectorNumElements());
6731 VecTy =
6732 VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements());
67316733
67326734 Type *PtrTy = VecTy->getPointerTo(LI->getPointerAddressSpace());
67336735 Type *Tys[2] = {VecTy, PtrTy};
68016803 Type *EltTy = VecTy->getVectorElementType();
68026804 VectorType *SubVecTy = VectorType::get(EltTy, NumSubElts);
68036805
6804 const DataLayout *DL = getDataLayout();
6805 unsigned SubVecSize = DL->getTypeAllocSizeInBits(SubVecTy);
6806 const DataLayout &DL = SI->getModule()->getDataLayout();
6807 unsigned SubVecSize = DL.getTypeAllocSizeInBits(SubVecTy);
68066808
68076809 // Skip illegal vector types.
68086810 if (SubVecSize != 64 && SubVecSize != 128)
68156817 // StN intrinsics don't support pointer vectors as arguments. Convert pointer
68166818 // vectors to integer vectors.
68176819 if (EltTy->isPointerTy()) {
6818 Type *IntTy = DL->getIntPtrType(EltTy);
6820 Type *IntTy = DL.getIntPtrType(EltTy);
68196821 unsigned NumOpElts =
68206822 dyn_cast<VectorType>(Op0->getType())->getVectorNumElements();
68216823
415415 if (Factor <= TLI->getMaxSupportedInterleaveFactor()) {
416416 unsigned NumElts = VecTy->getVectorNumElements();
417417 Type *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
418 unsigned SubVecSize = TLI->getDataLayout()->getTypeAllocSize(SubVecTy);
418 unsigned SubVecSize = DL.getTypeAllocSize(SubVecTy);
419419
420420 // ldN/stN only support legal vector types of size 64 or 128 in bits.
421421 if (NumElts % Factor == 0 && (SubVecSize == 64 || SubVecSize == 128))
544544 }
545545
546546 bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
547 const DataLayout *DL = getDataLayout();
548 unsigned SrcSize = DL->getTypeSizeInBits(Src->getScalarType());
549 unsigned DestSize = DL->getTypeSizeInBits(Dest->getScalarType());
547 unsigned SrcSize = Src->getScalarSizeInBits();
548 unsigned DestSize = Dest->getScalarSizeInBits();
550549
551550 return SrcSize == 32 && DestSize == 64;
552551 }
696695 const SDValue &InitPtr,
697696 SDValue Chain,
698697 SelectionDAG &DAG) const {
699 const DataLayout *TD = getDataLayout();
698 const DataLayout &TD = DAG.getDataLayout();
700699 SDLoc DL(InitPtr);
701700 Type *InitTy = Init->getType();
702701
704703 EVT VT = EVT::getEVT(InitTy);
705704 PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
706705 return DAG.getStore(Chain, DL, DAG.getConstant(*CI, DL, VT), InitPtr,
707 MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
708 TD->getPrefTypeAlignment(InitTy));
706 MachinePointerInfo(UndefValue::get(PtrTy)), false,
707 false, TD.getPrefTypeAlignment(InitTy));
709708 }
710709
711710 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
712711 EVT VT = EVT::getEVT(CFP->getType());
713712 PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
714713 return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, DL, VT), InitPtr,
715 MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
716 TD->getPrefTypeAlignment(CFP->getType()));
714 MachinePointerInfo(UndefValue::get(PtrTy)), false,
715 false, TD.getPrefTypeAlignment(CFP->getType()));
717716 }
718717
719718 if (StructType *ST = dyn_cast<StructType>(InitTy)) {
720 const StructLayout *SL = TD->getStructLayout(ST);
719 const StructLayout *SL = TD.getStructLayout(ST);
721720
722721 EVT PtrVT = InitPtr.getValueType();
723722 SmallVector<SDValue, 8> Chains;
744743 else
745744 llvm_unreachable("Unexpected type");
746745
747 unsigned EltSize = TD->getTypeAllocSize(SeqTy->getElementType());
746 unsigned EltSize = TD.getTypeAllocSize(SeqTy->getElementType());
748747 SmallVector<SDValue, 8> Chains;
749748 for (unsigned i = 0; i < NumElements; ++i) {
750749 SDValue Offset = DAG.getConstant(i * EltSize, DL, PtrVT);
761760 EVT VT = EVT::getEVT(InitTy);
762761 PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
763762 return DAG.getStore(Chain, DL, DAG.getUNDEF(VT), InitPtr,
764 MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
765 TD->getPrefTypeAlignment(InitTy));
763 MachinePointerInfo(UndefValue::get(PtrTy)), false,
764 false, TD.getPrefTypeAlignment(InitTy));
766765 }
767766
768767 Init->dump();
416416 SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
417417 SDLoc SL, SDValue Chain,
418418 unsigned Offset, bool Signed) const {
419 const DataLayout *DL = getDataLayout();
419 const DataLayout &DL = DAG.getDataLayout();
420420 MachineFunction &MF = DAG.getMachineFunction();
421421 const SIRegisterInfo *TRI =
422422 static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
425425 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
426426
427427 MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
428 MVT PtrVT = getPointerTy(DAG.getDataLayout(), AMDGPUAS::CONSTANT_ADDRESS);
428 MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
429429 PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
430430 SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
431431 MRI.getLiveInVirtReg(InputPtrReg), PtrVT);
434434 SDValue PtrOffset = DAG.getUNDEF(PtrVT);
435435 MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
436436
437 unsigned Align = DL->getABITypeAlignment(Ty);
437 unsigned Align = DL.getABITypeAlignment(Ty);
438438
439439 if (VT != MemVT && VT.isFloatingPoint()) {
440440 // Do an integer load and convert.
14131413 unsigned AS = Load->getAddressSpace();
14141414 unsigned Align = Load->getAlignment();
14151415 Type *Ty = LoadVT.getTypeForEVT(*DAG.getContext());
1416 unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty);
1416 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
14171417
14181418 // Don't try to replace the load if we have to expand it due to alignment
14191419 // problems. Otherwise we will end up scalarizing the load, and trying to
68446844 const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
68456845
68466846 // MachineConstantPool wants an explicit alignment.
6847 unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty);
6847 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
68486848 if (Align == 0)
6849 Align = getDataLayout()->getTypeAllocSize(C->getType());
6849 Align = MF->getDataLayout().getTypeAllocSize(C->getType());
68506850 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
68516851
68526852 unsigned VReg1 = MRI->createVirtualRegister(TRC);
69346934 const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
69356935
69366936 // MachineConstantPool wants an explicit alignment.
6937 unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty);
6937 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
69386938 if (Align == 0)
6939 Align = getDataLayout()->getTypeAllocSize(C->getType());
6939 Align = MF->getDataLayout().getTypeAllocSize(C->getType());
69406940 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
69416941
69426942 unsigned VReg1 = MRI->createVirtualRegister(TRC);
73127312 const Constant *C = ConstantInt::get(Int32Ty, LoopSize);
73137313
73147314 // MachineConstantPool wants an explicit alignment.
7315 unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty);
7315 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
73167316 if (Align == 0)
7317 Align = getDataLayout()->getTypeAllocSize(C->getType());
7317 Align = MF->getDataLayout().getTypeAllocSize(C->getType());
73187318 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
73197319
73207320 if (IsThumb1)
1108211082 case Intrinsic::arm_neon_vld4lane: {
1108311083 Info.opc = ISD::INTRINSIC_W_CHAIN;
1108411084 // Conservatively set memVT to the entire set of vectors loaded.
11085 uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
11085 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
11086 uint64_t NumElts = DL.getTypeAllocSize(I.getType()) / 8;
1108611087 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
1108711088 Info.ptrVal = I.getArgOperand(0);
1108811089 Info.offset = 0;
1110211103 case Intrinsic::arm_neon_vst4lane: {
1110311104 Info.opc = ISD::INTRINSIC_VOID;
1110411105 // Conservatively set memVT to the entire set of vectors stored.
11106 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
1110511107 unsigned NumElts = 0;
1110611108 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
1110711109 Type *ArgTy = I.getArgOperand(ArgI)->getType();
1110811110 if (!ArgTy->isVectorTy())
1110911111 break;
11110 NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
11112 NumElts += DL.getTypeAllocSize(ArgTy) / 8;
1111111113 }
1111211114 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
1111311115 Info.ptrVal = I.getArgOperand(0);
1112111123 }
1112211124 case Intrinsic::arm_ldaex:
1112311125 case Intrinsic::arm_ldrex: {
11126 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
1112411127 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
1112511128 Info.opc = ISD::INTRINSIC_W_CHAIN;
1112611129 Info.memVT = MVT::getVT(PtrTy->getElementType());
1112711130 Info.ptrVal = I.getArgOperand(0);
1112811131 Info.offset = 0;
11129 Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType());
11132 Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
1113011133 Info.vol = true;
1113111134 Info.readMem = true;
1113211135 Info.writeMem = false;
1113411137 }
1113511138 case Intrinsic::arm_stlex:
1113611139 case Intrinsic::arm_strex: {
11140 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
1113711141 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
1113811142 Info.opc = ISD::INTRINSIC_W_CHAIN;
1113911143 Info.memVT = MVT::getVT(PtrTy->getElementType());
1114011144 Info.ptrVal = I.getArgOperand(1);
1114111145 Info.offset = 0;
11142 Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType());
11146 Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
1114311147 Info.vol = true;
1114411148 Info.readMem = false;
1114511149 Info.writeMem = true;
1142611430 VectorType *VecTy = Shuffles[0]->getType();
1142711431 Type *EltTy = VecTy->getVectorElementType();
1142811432
11429 const DataLayout *DL = getDataLayout();
11430 unsigned VecSize = DL->getTypeAllocSizeInBits(VecTy);
11431 bool EltIs64Bits = DL->getTypeAllocSizeInBits(EltTy) == 64;
11433 const DataLayout &DL = LI->getModule()->getDataLayout();
11434 unsigned VecSize = DL.getTypeAllocSizeInBits(VecTy);
11435 bool EltIs64Bits = DL.getTypeAllocSizeInBits(EltTy) == 64;
1143211436
1143311437 // Skip illegal vector types and vector types of i64/f64 element (vldN doesn't
1143411438 // support i64/f64 element).
1143811442 // A pointer vector can not be the return type of the ldN intrinsics. Need to
1143911443 // load integer vectors first and then convert to pointer vectors.
1144011444 if (EltTy->isPointerTy())
11441 VecTy = VectorType::get(DL->getIntPtrType(EltTy),
11442 VecTy->getVectorNumElements());
11445 VecTy =
11446 VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements());
1144311447
1144411448 static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
1144511449 Intrinsic::arm_neon_vld3,
1151611520 Type *EltTy = VecTy->getVectorElementType();
1151711521 VectorType *SubVecTy = VectorType::get(EltTy, NumSubElts);
1151811522
11519 const DataLayout *DL = getDataLayout();
11520 unsigned SubVecSize = DL->getTypeAllocSizeInBits(SubVecTy);
11521 bool EltIs64Bits = DL->getTypeAllocSizeInBits(EltTy) == 64;
11523 const DataLayout &DL = SI->getModule()->getDataLayout();
11524 unsigned SubVecSize = DL.getTypeAllocSizeInBits(SubVecTy);
11525 bool EltIs64Bits = DL.getTypeAllocSizeInBits(EltTy) == 64;
1152211526
1152311527 // Skip illegal sub vector types and vector types of i64/f64 element (vstN
1152411528 // doesn't support i64/f64 element).
1153211536 // StN intrinsics don't support pointer vectors as arguments. Convert pointer
1153311537 // vectors to integer vectors.
1153411538 if (EltTy->isPointerTy()) {
11535 Type *IntTy = DL->getIntPtrType(EltTy);
11539 Type *IntTy = DL.getIntPtrType(EltTy);
1153611540
1153711541 // Convert to the corresponding integer vector.
1153811542 Type *IntVecTy =
8282
8383 TargetLowering::ArgListTy Args;
8484 TargetLowering::ArgListEntry Entry;
85 Entry.Ty = TLI->getDataLayout()->getIntPtrType(*DAG.getContext());
85 Entry.Ty = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
8686 Entry.Node = Dst;
8787 Args.push_back(Entry);
8888 if (AEABILibcall == AEABI_MEMCLR) {
15821582 auto &TD = DAG.getDataLayout();
15831583 EVT PTy = getPointerTy(TD);
15841584 unsigned EntrySize =
1585 DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(*getDataLayout());
1585 DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);
15861586
15871587 Index = DAG.getNode(ISD::MUL, DL, PTy, Index,
15881588 DAG.getConstant(EntrySize, DL, PTy));
18751875 }
18761876
18771877 // Increment the pointer, VAList, to the next vaarg.
1878 unsigned ArgSizeInBytes = getDataLayout()->getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
1878 auto &TD = DAG.getDataLayout();
1879 unsigned ArgSizeInBytes =
1880 TD.getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
18791881 SDValue Tmp3 = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
18801882 DAG.getConstant(RoundUpToAlignment(ArgSizeInBytes,
18811883 ArgSlotSizeInBytes),
889889 return DAG.getNode(NVPTXISD::Wrapper, dl, PtrVT, Op);
890890 }
891891
892 std::string
893 NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
894 const SmallVectorImpl &Outs,
895 unsigned retAlignment,
896 const ImmutableCallSite *CS) const {
897 auto PtrVT = getPointerTy(*getDataLayout());
892 std::string NVPTXTargetLowering::getPrototype(
893 const DataLayout &DL, Type *retTy, const ArgListTy &Args,
894 const SmallVectorImpl<ISD::OutputArg> &Outs, unsigned retAlignment,
895 const ImmutableCallSite *CS) const {
896 auto PtrVT = getPointerTy(DL);
897
898898 bool isABI = (STI.getSmVersion() >= 20);
899899 assert(isABI && "Non-ABI compilation is not supported");
900900 if (!isABI)
924924 O << ".param .b" << PtrVT.getSizeInBits() << " _";
925925 } else if ((retTy->getTypeID() == Type::StructTyID) ||
926926 isa<VectorType>(retTy)) {
927 O << ".param .align "
928 << retAlignment
929 << " .b8 _["
930 << getDataLayout()->getTypeAllocSize(retTy) << "]";
927 auto &DL = CS->getCalledFunction()->getParent()->getDataLayout();
928 O << ".param .align " << retAlignment << " .b8 _["
929 << DL.getTypeAllocSize(retTy) << "]";
931930 } else {
932931 llvm_unreachable("Unknown return type");
933932 }
946945 first = false;
947946
948947 if (!Outs[OIdx].Flags.isByVal()) {
949 const DataLayout *TD = getDataLayout();
950948 if (Ty->isAggregateType() || Ty->isVectorTy()) {
951949 unsigned align = 0;
952950 const CallInst *CallI = cast<CallInst>(CS->getInstruction());
953951 // +1 because index 0 is reserved for return type alignment
954952 if (!llvm::getAlign(*CallI, i + 1, align))
955 align = TD->getABITypeAlignment(Ty);
956 unsigned sz = TD->getTypeAllocSize(Ty);
953 align = DL.getABITypeAlignment(Ty);
954 unsigned sz = DL.getTypeAllocSize(Ty);
957955 O << ".param .align " << align << " .b8 ";
958956 O << "_";
959957 O << "[" << sz << "]";
960958 // update the index for Outs
961959 SmallVector<EVT, 16> vtparts;
962 ComputeValueVTs(*this, *TD, Ty, vtparts);
960 ComputeValueVTs(*this, DL, Ty, vtparts);
963961 if (unsigned len = vtparts.size())
964962 OIdx += len - 1;
965963 continue;
966964 }
967965 // i8 types in IR will be i16 types in SDAG
968 assert(
969 (getValueType(*TD, Ty) == Outs[OIdx].VT ||
970 (getValueType(*TD, Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
971 "type mismatch between callee prototype and arguments");
966 assert((getValueType(DL, Ty) == Outs[OIdx].VT ||
967 (getValueType(DL, Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
968 "type mismatch between callee prototype and arguments");
972969 // scalar type
973970 unsigned sz = 0;
974971 if (isa<IntegerType>(Ty)) {
988985 Type *ETy = PTy->getElementType();
989986
990987 unsigned align = Outs[OIdx].Flags.getByValAlign();
991 unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
988 unsigned sz = DL.getTypeAllocSize(ETy);
992989 O << ".param .align " << align << " .b8 ";
993990 O << "_";
994991 O << "[" << sz << "]";
1002999 const ImmutableCallSite *CS,
10031000 Type *Ty,
10041001 unsigned Idx) const {
1005 const DataLayout *TD = getDataLayout();
10061002 unsigned Align = 0;
10071003 const Value *DirectCallee = CS->getCalledFunction();
10081004
10431039
10441040 // Call is indirect or alignment information is not available, fall back to
10451041 // the ABI type alignment
1046 return TD->getABITypeAlignment(Ty);
1042 auto &DL = CS->getCaller()->getParent()->getDataLayout();
1043 return DL.getABITypeAlignment(Ty);
10471044 }
10481045
10491046 SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
10961093 // aggregate
10971094 SmallVector<EVT, 16> vtparts;
10981095 SmallVector<uint64_t, 16> Offsets;
1099 ComputePTXValueVTs(*this, DL, Ty, vtparts, &Offsets, 0);
1096 ComputePTXValueVTs(*this, DAG.getDataLayout(), Ty, vtparts, &Offsets,
1097 0);
11001098
11011099 unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
11021100 // declare .param .align .b8 .param[];
13211319 SmallVector<uint64_t, 16> Offsets;
13221320 const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
13231321 assert(PTy && "Type of a byval parameter should be pointer");
1324 ComputePTXValueVTs(*this, DL, PTy->getElementType(), vtparts, &Offsets, 0);
1322 ComputePTXValueVTs(*this, DAG.getDataLayout(), PTy->getElementType(),
1323 vtparts, &Offsets, 0);
13251324
13261325 // declare .param .align .b8 .param[];
13271326 unsigned sz = Outs[OIdx].Flags.getByValSize();
14151414 // The prototype is embedded in a string and put as the operand for a
14161415 // CallPrototype SDNode which will print out to the value of the string.
14171416 SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
1418 std::string Proto = getPrototype(retTy, Args, Outs, retAlignment, CS);
1417 std::string Proto =
1418 getPrototype(DAG.getDataLayout(), retTy, Args, Outs, retAlignment, CS);
14191419 const char *ProtoStr =
14201420 nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
14211421 SDValue ProtoOps[] = {
15961596 } else {
15971597 SmallVector<EVT, 16> VTs;
15981598 SmallVector<uint64_t, 16> Offsets;
1599 ComputePTXValueVTs(*this, DL, retTy, VTs, &Offsets, 0);
1599 ComputePTXValueVTs(*this, DAG.getDataLayout(), retTy, VTs, &Offsets, 0);
16001600 assert(VTs.size() == Ins.size() && "Bad value decomposition");
16011601 unsigned RetAlign = getArgumentAlignment(Callee, CS, retTy, 0);
16021602 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
19191919 }
19201920
19211921 MemSDNode *MemSD = cast<MemSDNode>(N);
1922 const DataLayout *TD = getDataLayout();
1922 const DataLayout &TD = DAG.getDataLayout();
19231923
19241924 unsigned Align = MemSD->getAlignment();
19251925 unsigned PrefAlign =
1926 TD->getPrefTypeAlignment(ValVT.getTypeForEVT(*DAG.getContext()));
1926 TD.getPrefTypeAlignment(ValVT.getTypeForEVT(*DAG.getContext()));
19271927 if (Align < PrefAlign) {
19281928 // This store is not sufficiently aligned, so bail out and let this vector
19291929 // store be scalarized. Note that we may still be able to emit smaller
20632063 const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
20642064 SmallVectorImpl<SDValue> &InVals) const {
20652065 MachineFunction &MF = DAG.getMachineFunction();
2066 const DataLayout &DL = MF.getDataLayout();
2067 auto PtrVT = getPointerTy(DL);
2066 const DataLayout &DL = DAG.getDataLayout();
2067 auto PtrVT = getPointerTy(DAG.getDataLayout());
20682068
20692069 const Function *F = MF.getFunction();
20702070 const AttributeSet &PAL = F->getAttributes();
21182118 if (Ty->isAggregateType()) {
21192119 SmallVector<EVT, 16> vtparts;
21202120
2121 ComputePTXValueVTs(*this, DL, Ty, vtparts);
2121 ComputePTXValueVTs(*this, DAG.getDataLayout(), Ty, vtparts);
21222122 assert(vtparts.size() > 0 && "empty aggregate type not expected");
21232123 for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
21242124 ++parti) {
21562156 // NOTE: Here, we lose the ability to issue vector loads for vectors
21572157 // that are a part of a struct. This should be investigated in the
21582158 // future.
2159 ComputePTXValueVTs(*this, DL, Ty, vtparts, &offsets, 0);
2159 ComputePTXValueVTs(*this, DAG.getDataLayout(), Ty, vtparts, &offsets,
2160 0);
21602161 assert(vtparts.size() > 0 && "empty aggregate type not expected");
21612162 bool aggregateIsPacked = false;
21622163 if (StructType *STy = llvm::dyn_cast<StructType>(Ty))
42514252
42524253 /// ReplaceVectorLoad - Convert vector loads into multi-output scalar loads.
42534254 static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
4254 const DataLayout *TD,
42554255 SmallVectorImpl<SDValue> &Results) {
42564256 EVT ResVT = N->getValueType(0);
42574257 SDLoc DL(N);
42834283 LoadSDNode *LD = cast<LoadSDNode>(N);
42834283
42844284 unsigned Align = LD->getAlignment();
4285 auto &TD = DAG.getDataLayout();
42854286 unsigned PrefAlign =
4286 TD->getPrefTypeAlignment(ResVT.getTypeForEVT(*DAG.getContext()));
4287 TD.getPrefTypeAlignment(ResVT.getTypeForEVT(*DAG.getContext()));
42874288 if (Align < PrefAlign) {
42884289 // This load is not sufficiently aligned, so bail out and let this vector
42894290 // load be scalarized. Note that we may still be able to emit smaller
44954496 default:
44964497 report_fatal_error("Unhandled custom legalization");
44974498 case ISD::LOAD:
4498 ReplaceLoadVector(N, DAG, getDataLayout(), Results);
4499 ReplaceLoadVector(N, DAG, Results);
44994500 return;
45004501 case ISD::INTRINSIC_W_CHAIN:
45014502 ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
481481 SDValue LowerCall(CallLoweringInfo &CLI,
482482 SmallVectorImpl<SDValue> &InVals) const override;
483483
484 std::string getPrototype(Type *, const ArgListTy &,
484 std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &,
485485 const SmallVectorImpl<ISD::OutputArg> &,
486486 unsigned retAlignment,
487487 const ImmutableCallSite *CS) const;
11021102 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
11031103 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
11041104 SelectionDAG &DAG) {
1105 bool IsLE = DAG.getTarget().getDataLayout()->isLittleEndian();
1105 bool IsLE = DAG.getDataLayout().isLittleEndian();
11061106 if (ShuffleKind == 0) {
11071107 if (IsLE)
11081108 return false;
11331133 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
11341134 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
11351135 SelectionDAG &DAG) {
1136 bool IsLE = DAG.getTarget().getDataLayout()->isLittleEndian();
1136 bool IsLE = DAG.getDataLayout().isLittleEndian();
11371137 if (ShuffleKind == 0) {
11381138 if (IsLE)
11391139 return false;
11751175 if (!Subtarget.hasP8Vector())
11761176 return false;
11771177
1178 bool IsLE = DAG.getTarget().getDataLayout()->isLittleEndian();
1178 bool IsLE = DAG.getDataLayout().isLittleEndian();
11791179 if (ShuffleKind == 0) {
11801180 if (IsLE)
11811181 return false;
12381238 /// the input operands are swapped (see PPCInstrAltivec.td).
12391239 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
12401240 unsigned ShuffleKind, SelectionDAG &DAG) {
1241 if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
1241 if (DAG.getDataLayout().isLittleEndian()) {
12421242 if (ShuffleKind == 1) // unary
12431243 return isVMerge(N, UnitSize, 0, 0);
12441244 else if (ShuffleKind == 2) // swapped
12631263 /// the input operands are swapped (see PPCInstrAltivec.td).
12641264 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
12651265 unsigned ShuffleKind, SelectionDAG &DAG) {
1266 if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
1266 if (DAG.getDataLayout().isLittleEndian()) {
12671267 if (ShuffleKind == 1) // unary
12681268 return isVMerge(N, UnitSize, 8, 8);
12691269 else if (ShuffleKind == 2) // swapped
13531353 */
13541354 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
13551355 unsigned ShuffleKind, SelectionDAG &DAG) {
1356 if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
1356 if (DAG.getDataLayout().isLittleEndian()) {
13571357 unsigned indexOffset = CheckEven ? 4 : 0;
13581358 if (ShuffleKind == 1) // Unary
13591359 return isVMerge(N, indexOffset, 0);
14001400 if (ShiftAmt < i) return -1;
14011401
14021402 ShiftAmt -= i;
1403 bool isLE = DAG.getTarget().getDataLayout()->isLittleEndian();
1403 bool isLE = DAG.getDataLayout().isLittleEndian();
14041404
14051405 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
14061406 // Check the rest of the elements to see if they are consecutive.
14571457 SelectionDAG &DAG) {
14581458 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
14591459 assert(isSplatShuffleMask(SVOp, EltSize));
1460 if (DAG.getTarget().getDataLayout()->isLittleEndian())
1460 if (DAG.getDataLayout().isLittleEndian())
14611461 return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
14621462 else
14631463 return SVOp->getMaskElt(0) / EltSize;
24022402
24032403 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
24042404 bool isPPC64 = (PtrVT == MVT::i64);
2405 Type *IntPtrTy =
2406 DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType(
2407 *DAG.getContext());
2405 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
24082406
24092407 TargetLowering::ArgListTy Args;
24102408 TargetLowering::ArgListEntry Entry;
1014910147
1015010148 EVT MemVT = LD->getMemoryVT();
1015110149 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
10152 unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty);
10150 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
1015310151 Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
10154 unsigned ScalarABIAlignment = getDataLayout()->getABITypeAlignment(STy);
10152 unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
1015510153 if (LD->isUnindexed() && VT.isVector() &&
1015610154 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
1015710155 // P8 and later hardware should just use LOAD.
994994
995995 PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
996996 Type *ElementTy = Ty->getElementType();
997 return getDataLayout()->getTypeAllocSize(ElementTy);
997 return DAG.getDataLayout().getTypeAllocSize(ElementTy);
998998 }
999999
10001000
1498314983
1498414984 EVT ArgVT = Op.getNode()->getValueType(0);
1498514985 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
14986 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
14986 uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
1498714987 uint8_t ArgMode;
1498814988
1498914989 // Decide which area this value should be read from.
2171421714 // alignment is valid.
2171521715 unsigned Align = LN0->getAlignment();
2171621716 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
21717 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
21717 unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
2171821718 EltVT.getTypeForEVT(*DAG.getContext()));
2171921719
2172021720 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
280280 if (!ObjType->isSized())
281281 return false;
282282
283 unsigned ObjSize = XTL.getDataLayout()->getTypeAllocSize(ObjType);
283 auto &DL = GV->getParent()->getDataLayout();
284 unsigned ObjSize = DL.getTypeAllocSize(ObjType);
284285 return ObjSize < CodeModelLargeSize && ObjSize != 0;
285286 }
286287
434435 LD->getAlignment()))
435436 return SDValue();
436437
437 unsigned ABIAlignment = getDataLayout()->
438 getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
438 auto &TD = DAG.getDataLayout();
439 unsigned ABIAlignment = TD.getABITypeAlignment(
440 LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
439441 // Leave aligned load alone.
440442 if (LD->getAlignment() >= ABIAlignment)
441443 return SDValue();
485487 }
486488
487489 // Lower to a call to __misaligned_load(BasePtr).
488 Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
490 Type *IntPtrTy = TD.getIntPtrType(*DAG.getContext());
489491 TargetLowering::ArgListTy Args;
490492 TargetLowering::ArgListEntry Entry;
491493
516518 ST->getAlignment())) {
517519 return SDValue();
518520 }
519 unsigned ABIAlignment = getDataLayout()->
520 getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
521 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
522 ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
521523 // Leave aligned store alone.
522524 if (ST->getAlignment() >= ABIAlignment) {
523525 return SDValue();
545547 }
546548
547549 // Lower to a call to __misaligned_store(BasePtr, Value).
548 Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
550 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
549551 TargetLowering::ArgListTy Args;
550552 TargetLowering::ArgListEntry Entry;
551553
18281830 if (StoreBits % 8) {
18291831 break;
18301832 }
1831 unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(
1833 unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
18321834 ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
18331835 unsigned Alignment = ST->getAlignment();
18341836 if (Alignment >= ABIAlignment) {
3535 const TargetLowering &TLI = *DAG.getSubtarget().getTargetLowering();
3636 TargetLowering::ArgListTy Args;
3737 TargetLowering::ArgListEntry Entry;
38 Entry.Ty = TLI.getDataLayout()->getIntPtrType(*DAG.getContext());
38 Entry.Ty = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3939 Entry.Node = Dst; Args.push_back(Entry);
4040 Entry.Node = Src; Args.push_back(Entry);
4141 Entry.Node = Size; Args.push_back(Entry);