Commit 6f69123 (llvm.org GIT mirror llvm), committed by Tom Stellard 4 years ago.

Merging r229413:
------------------------------------------------------------------------
r229413 | atrick | 2015-02-16 13:10:47 -0500 (Mon, 16 Feb 2015) | 16 lines

AArch64: Safely handle the incoming sret call argument.

This adds a safe interface to the machine-independent InputArg struct for
accessing the index of the original (IR-level) argument. When a non-native
return type is lowered, we generate the hidden machine-level sret argument
on-the-fly. Before this fix, we were representing this argument as
OrigArgIndex == 0, which is an outright lie. In particular this crashed in
the AArch64 backend where we actually try to access the type of the
original argument. Now we use a sentinel value for machine arguments that
have no original argument index. AArch64, ARM, Mips, and PPC now check for
this case before accessing the original argument.

Fixes <rdar://19792160> Null pointer assertion in AArch64TargetLowering
------------------------------------------------------------------------

git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_36@236856 91177308-0d34-0410-b5e6-96231b3b80d8
10 changed files with 70 additions and 30 deletions.
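The mechanism the patch introduces can be summarized with the short sketch below. This is illustrative only, not code from the commit: the lowering loop is abbreviated, and the iterator names CurOrigArg and CurArgIdx are borrowed from the backend hunks that follow.

// Sketch: the hidden sret argument is now built with the new sentinel
// instead of pretending its original (IR-level) argument index is 0.
ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
                     ISD::InputArg::NoArgIndex, 0);

// Backends guard any access to the IR-level argument, since an implicit
// machine-level argument has no original argument to inspect.
if (Ins[i].isOrigArg()) {
  std::advance(CurOrigArg, Ins[i].getOrigArgIndex() - CurArgIdx);
  CurArgIdx = Ins[i].getOrigArgIndex();
}
// Calling getOrigArgIndex() on an implicit argument now trips the
// "Implicit machine-level argument" assertion rather than silently
// returning a bogus index of 0.

The per-backend hunks below apply exactly this guard in AArch64, ARM, Mips, and PPC, and switch the AMDGPU code over to the asserting accessor.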
@@ -133,6 +133,8 @@

     /// Index original Function's argument.
     unsigned OrigArgIndex;
+    /// Sentinel value for implicit machine-level input arguments.
+    static const unsigned NoArgIndex = UINT_MAX;

     /// Offset in bytes of current input value relative to the beginning of
     /// original argument. E.g. if argument was splitted into four 32 bit
@@ -145,6 +147,15 @@
       : Flags(flags), Used(used), OrigArgIndex(origIdx), PartOffset(partOffs) {
       VT = vt.getSimpleVT();
       ArgVT = argvt;
+    }
+
+    bool isOrigArg() const {
+      return OrigArgIndex != NoArgIndex;
+    }
+
+    unsigned getOrigArgIndex() const {
+      assert(OrigArgIndex != NoArgIndex && "Implicit machine-level argument");
+      return OrigArgIndex;
     }
   };

@@ -7637,7 +7637,8 @@
     ISD::ArgFlagsTy Flags;
     Flags.setSRet();
     MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
-    ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true, 0, 0);
+    ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
+                         ISD::InputArg::NoArgIndex, 0);
     Ins.push_back(RetArg);
   }

@@ -2030,18 +2030,19 @@
   unsigned CurArgIdx = 0;
   for (unsigned i = 0; i != NumArgs; ++i) {
     MVT ValVT = Ins[i].VT;
-    std::advance(CurOrigArg, Ins[i].OrigArgIndex - CurArgIdx);
-    CurArgIdx = Ins[i].OrigArgIndex;
-
-    // Get type of the original argument.
-    EVT ActualVT = getValueType(CurOrigArg->getType(), /*AllowUnknown*/ true);
-    MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other;
-    // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
-    if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
-      ValVT = MVT::i8;
-    else if (ActualMVT == MVT::i16)
-      ValVT = MVT::i16;
-
+    if (Ins[i].isOrigArg()) {
+      std::advance(CurOrigArg, Ins[i].getOrigArgIndex() - CurArgIdx);
+      CurArgIdx = Ins[i].getOrigArgIndex();
+
+      // Get type of the original argument.
+      EVT ActualVT = getValueType(CurOrigArg->getType(), /*AllowUnknown*/ true);
+      MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other;
+      // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
+      if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
+        ValVT = MVT::i8;
+      else if (ActualMVT == MVT::i16)
+        ValVT = MVT::i16;
+    }
     CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, /*IsVarArg=*/false);
     bool Res =
         AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo);
@@ -3091,8 +3091,11 @@

   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     CCValAssign &VA = ArgLocs[i];
-    std::advance(CurOrigArg, Ins[VA.getValNo()].OrigArgIndex - CurArgIdx);
-    CurArgIdx = Ins[VA.getValNo()].OrigArgIndex;
+    if (Ins[VA.getValNo()].isOrigArg()) {
+      std::advance(CurOrigArg,
+                   Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
+      CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
+    }
     // Arguments stored in registers.
     if (VA.isRegLoc()) {
       EVT RegVT = VA.getLocVT();
@@ -3172,7 +3175,7 @@
       assert(VA.isMemLoc());
       assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");

-      int index = ArgLocs[i].getValNo();
+      int index = VA.getValNo();

       // Some Ins[] entries become multiple ArgLoc[] entries.
       // Process them only once.
@@ -3185,6 +3188,8 @@
       // Since they could be overwritten by lowering of arguments in case of
       // a tail call.
       if (Flags.isByVal()) {
+        assert(Ins[index].isOrigArg() &&
+               "Byval arguments cannot be implicit");
         unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();

         ByValStoreOffset = RoundUpToAlignment(ByValStoreOffset, Flags.getByValAlign());
@@ -131,8 +131,8 @@
       continue;
     }

-    assert(Ins[i].OrigArgIndex < MF.getFunction()->arg_size());
-    std::advance(FuncArg, Ins[i].OrigArgIndex);
+    assert(Ins[i].getOrigArgIndex() < MF.getFunction()->arg_size());
+    std::advance(FuncArg, Ins[i].getOrigArgIndex());

     OriginalArgWasF128.push_back(
         originalTypeIsF128(FuncArg->getType(), nullptr));
@@ -2932,13 +2932,16 @@

   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
     CCValAssign &VA = ArgLocs[i];
-    std::advance(FuncArg, Ins[i].OrigArgIndex - CurArgIdx);
-    CurArgIdx = Ins[i].OrigArgIndex;
+    if (Ins[i].isOrigArg()) {
+      std::advance(FuncArg, Ins[i].getOrigArgIndex() - CurArgIdx);
+      CurArgIdx = Ins[i].getOrigArgIndex();
+    }
     EVT ValVT = VA.getValVT();
     ISD::ArgFlagsTy Flags = Ins[i].Flags;
     bool IsRegLoc = VA.isRegLoc();

     if (Flags.isByVal()) {
+      assert(Ins[i].isOrigArg() && "Byval arguments cannot be implicit");
       unsigned FirstByValReg, LastByValReg;
       unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
       CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
@@ -2687,9 +2687,10 @@
     unsigned ObjSize = ObjectVT.getStoreSize();
     unsigned ArgSize = ObjSize;
     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
-    std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
-    CurArgIdx = Ins[ArgNo].OrigArgIndex;
-
+    if (Ins[ArgNo].isOrigArg()) {
+      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
+      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
+    }
     /* Respect alignment of argument on the stack. */
     unsigned Align =
       CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
@@ -2703,6 +2704,8 @@
     // FIXME the codegen can be much improved in some cases.
     // We do not have to keep everything in memory.
     if (Flags.isByVal()) {
+      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
+
       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
       ObjSize = Flags.getByValSize();
       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
@@ -3063,9 +3066,10 @@
     unsigned ObjSize = ObjectVT.getSizeInBits()/8;
     unsigned ArgSize = ObjSize;
     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
-    std::advance(FuncArg, Ins[ArgNo].OrigArgIndex - CurArgIdx);
-    CurArgIdx = Ins[ArgNo].OrigArgIndex;
-
+    if (Ins[ArgNo].isOrigArg()) {
+      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
+      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
+    }
     unsigned CurArgOffset = ArgOffset;

     // Varargs or 64 bit Altivec parameters are padded to a 16 byte boundary.
@@ -3086,6 +3090,8 @@
     // FIXME the codegen can be much improved in some cases.
     // We do not have to keep everything in memory.
     if (Flags.isByVal()) {
+      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
+
      // ObjSize is the true size, ArgSize rounded up to multiple of registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
@@ -1697,7 +1697,7 @@
     // XXX - I think PartOffset should give you this, but it seems to give the
     // size of the register which isn't useful.

-    unsigned ValBase = ArgLocs[In.OrigArgIndex].getLocMemOffset();
+    unsigned ValBase = ArgLocs[In.getOrigArgIndex()].getLocMemOffset();
     unsigned PartOffset = VA.getLocMemOffset();
     unsigned Offset = 36 + VA.getLocMemOffset();

@@ -449,7 +449,7 @@
       // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
       // three or five element vertex only needs three or five registers,
       // NOT four or eigth.
-      Type *ParamType = FType->getParamType(Arg.OrigArgIndex);
+      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
       unsigned NumElements = ParamType->getVectorNumElements();

       for (unsigned j = 0; j != NumElements; ++j) {
@@ -532,7 +532,7 @@
                                    Offset, Ins[i].Flags.isSExt());

       const PointerType *ParamTy =
-        dyn_cast<PointerType>(FType->getParamType(Ins[i].OrigArgIndex));
+        dyn_cast<PointerType>(FType->getParamType(Ins[i].getOrigArgIndex()));
       if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
           ParamTy && ParamTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
         // On SI local pointers are just offsets into LDS, so they are always
@@ -567,7 +567,7 @@
     if (Arg.VT.isVector()) {

       // Build a vector from the registers
-      Type *ParamType = FType->getParamType(Arg.OrigArgIndex);
+      Type *ParamType = FType->getParamType(Arg.getOrigArgIndex());
       unsigned NumElements = ParamType->getVectorNumElements();

       SmallVector Regs;
@@ -0,0 +1,13 @@
+; RUN: llc %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s
+;
+; Handle implicit sret arguments that are generated on-the-fly during lowering.
+; Null pointer assertion in AArch64TargetLowering
+
+; CHECK-LABEL: big_retval
+; ... str or stp for the first 1024 bits
+; CHECK: strb wzr, [x8, #128]
+; CHECK: ret
+define i1032 @big_retval() {
+entry:
+  ret i1032 0
+}
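For reference, the RUN line in this new test expands to roughly the following command, where implicit-sret.ll stands in for the test file's path (the file name is not shown in this view, so the name here is illustrative):

  llc implicit-sret.ll -o - -mtriple=arm64-apple-ios7.0 | FileCheck implicit-sret.ll

Before the fix, lowering the i1032 return value produced a hidden sret argument that claimed original-argument index 0, and the AArch64 backend crashed trying to look up the type of an IR argument that does not exist; the CHECK lines above verify that llc now succeeds and stores the wide zero return value through the sret pointer.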