llvm.org GIT mirror: llvm @ b313223

Masked Load/Store - Changed the order of parameters in intrinsics. No functional changes. The documentation is coming.

Author: Elena Demikhovsky
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@224829 91177308-0d34-0410-b5e6-96231b3b80d8

11 changed files with 101 additions and 74 deletions.
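For orientation: the new operand orders are @llvm.masked.load(Ptr, Alignment, Mask, PassThru) and @llvm.masked.store(Val, Ptr, Alignment, Mask), and the pointer operand is now typed as a pointer to the loaded/stored vector rather than i8*. Below is a minimal sketch, not part of the commit, that drives this revision's ArrayRef-based IRBuilder helpers (changed further down) with the new order; the demo function, module name, and <8 x i32> shapes are illustrative only.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("masked_order_demo", Ctx);

  // The pointer operand is now typed: <8 x i32>* instead of i8*.
  VectorType *VecTy = VectorType::get(Type::getInt32Ty(Ctx), 8);
  PointerType *VecPtrTy = PointerType::getUnqual(VecTy);

  Type *Params[] = {VecPtrTy, VecTy};
  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Ctx), Params, /*isVarArg=*/false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "demo", &M);
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> Builder(BB);

  auto AI = F->arg_begin();
  Value *Ptr = &*AI;
  ++AI;
  Value *Val = &*AI;

  // All-lanes-enabled mask, just to have something well formed.
  Value *Mask =
      Constant::getAllOnesValue(VectorType::get(Type::getInt1Ty(Ctx), 8));

  // Load: (Ptr, Alignment, Mask, PassThru) -- the pass-through value moved
  // to the end of the operand list.
  Value *LoadOps[] = {Ptr, Builder.getInt32(4), Mask, UndefValue::get(VecTy)};
  Builder.CreateMaskedLoad(LoadOps);

  // Store: (Val, Ptr, Alignment, Mask) -- value first, then the typed pointer.
  Value *StoreOps[] = {Val, Ptr, Builder.getInt32(4), Mask};
  Builder.CreateMaskedStore(StoreOps);

  Builder.CreateRetVoid();
  M.dump();
  return 0;
}

Later releases replaced these ArrayRef overloads with typed CreateMaskedLoad/CreateMaskedStore signatures, so the sketch is specific to this revision.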
Index: include/llvm/IR/Intrinsics.h
@@ -76,7 +76,7 @@
       Void, VarArg, MMX, Metadata, Half, Float, Double,
       Integer, Vector, Pointer, Struct,
       Argument, ExtendArgument, TruncArgument, HalfVecArgument,
-      SameVecWidthArgument
+      SameVecWidthArgument, PtrToArgument
     } Kind;
 
     union {
@@ -97,13 +97,13 @@
     unsigned getArgumentNumber() const {
       assert(Kind == Argument || Kind == ExtendArgument ||
              Kind == TruncArgument || Kind == HalfVecArgument ||
-             Kind == SameVecWidthArgument);
+             Kind == SameVecWidthArgument || Kind == PtrToArgument);
       return Argument_Info >> 2;
     }
     ArgKind getArgumentKind() const {
       assert(Kind == Argument || Kind == ExtendArgument ||
              Kind == TruncArgument || Kind == HalfVecArgument ||
-             Kind == SameVecWidthArgument);
+             Kind == SameVecWidthArgument || Kind == PtrToArgument);
       return (ArgKind)(Argument_Info & 3);
     }
 
Index: include/llvm/IR/Intrinsics.td
@@ -115,6 +115,7 @@
   : LLVMMatchType<num> {
   ValueType ElTy = elty.VT;
 }
+class LLVMPointerTo<int num> : LLVMMatchType<num>;
 
 // Match the type of another intrinsic parameter that is expected to be a
 // vector type, but change the element count to be half as many
@@ -566,14 +567,14 @@
 
 //===-------------------------- Masked Intrinsics -------------------------===//
 //
-def int_masked_store : Intrinsic<[], [llvm_ptr_ty, llvm_anyvector_ty,
+def int_masked_store : Intrinsic<[], [llvm_anyvector_ty, LLVMPointerTo<0>,
                                       llvm_i32_ty,
                                       LLVMVectorSameWidth<0, llvm_i1_ty>],
                                  [IntrReadWriteArgMem]>;
 
 def int_masked_load  : Intrinsic<[llvm_anyvector_ty],
-                                 [llvm_ptr_ty, LLVMMatchType<0>, llvm_i32_ty,
-                                  LLVMVectorSameWidth<0, llvm_i1_ty>],
+                                 [LLVMPointerTo<0>, llvm_i32_ty,
+                                  LLVMVectorSameWidth<0, llvm_i1_ty>, LLVMMatchType<0>],
                                  [IntrReadArgMem]>;
 //===----------------------------------------------------------------------===//
 // Target-specific intrinsics
Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -3643,9 +3643,10 @@
 void SelectionDAGBuilder::visitMaskedStore(const CallInst &I) {
   SDLoc sdl = getCurSDLoc();
 
-  Value *PtrOperand = I.getArgOperand(0);
+  // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
+  Value *PtrOperand = I.getArgOperand(1);
   SDValue Ptr = getValue(PtrOperand);
-  SDValue Src0 = getValue(I.getArgOperand(1));
+  SDValue Src0 = getValue(I.getArgOperand(0));
   SDValue Mask = getValue(I.getArgOperand(3));
   EVT VT = Src0.getValueType();
   unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
@@ -3668,14 +3669,15 @@
 void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I) {
   SDLoc sdl = getCurSDLoc();
 
+  // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
   Value *PtrOperand = I.getArgOperand(0);
   SDValue Ptr = getValue(PtrOperand);
-  SDValue Src0 = getValue(I.getArgOperand(1));
-  SDValue Mask = getValue(I.getArgOperand(3));
+  SDValue Src0 = getValue(I.getArgOperand(3));
+  SDValue Mask = getValue(I.getArgOperand(2));
 
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   EVT VT = TLI.getValueType(I.getType());
-  unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
+  unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
   if (!Alignment)
     Alignment = DAG.getEVTAlignment(VT);
 
Index: lib/IR/Function.cpp
@@ -536,7 +536,8 @@
   IIT_V1 = 27,
   IIT_VARARG = 28,
   IIT_HALF_VEC_ARG = 29,
-  IIT_SAME_VEC_WIDTH_ARG = 30
+  IIT_SAME_VEC_WIDTH_ARG = 30,
+  IIT_PTR_TO_ARG = 31
 };
 
 
@@ -647,6 +648,12 @@
   case IIT_SAME_VEC_WIDTH_ARG: {
     unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
     OutputTable.push_back(IITDescriptor::get(IITDescriptor::SameVecWidthArgument,
+                                             ArgInfo));
+    return;
+  }
+  case IIT_PTR_TO_ARG: {
+    unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
+    OutputTable.push_back(IITDescriptor::get(IITDescriptor::PtrToArgument,
                                              ArgInfo));
     return;
   }
@@ -757,13 +764,18 @@
   case IITDescriptor::HalfVecArgument:
     return VectorType::getHalfElementsVectorType(cast<VectorType>(
                                                  Tys[D.getArgumentNumber()]));
-  case IITDescriptor::SameVecWidthArgument:
+  case IITDescriptor::SameVecWidthArgument: {
     Type *EltTy = DecodeFixedType(Infos, Tys, Context);
     Type *Ty = Tys[D.getArgumentNumber()];
    if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
       return VectorType::get(EltTy, VTy->getNumElements());
     }
     llvm_unreachable("unhandled");
+  }
+  case IITDescriptor::PtrToArgument: {
+    Type *Ty = Tys[D.getArgumentNumber()];
+    return PointerType::getUnqual(Ty);
+  }
   }
   llvm_unreachable("unhandled");
 }
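Given the decoding above, a PtrToArgument descriptor materializes as a pointer to the already-resolved overloaded type. A small sketch, not part of the commit, of what Intrinsic::getDeclaration should now produce for an <8 x i32> overload (module name and type shapes are illustrative):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("iit_demo", Ctx);

  // The single overloaded type; the pointer parameter is derived from it
  // by DecodeFixedType via PointerType::getUnqual.
  Type *VecTy = VectorType::get(Type::getInt32Ty(Ctx), 8);
  Function *F = Intrinsic::getDeclaration(&M, Intrinsic::masked_load, VecTy);

  // Expected: <8 x i32> (<8 x i32>*, i32, <8 x i1>, <8 x i32>)
  F->getFunctionType()->dump();
  return 0;
}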
Index: lib/IR/IRBuilder.cpp
@@ -186,7 +186,7 @@
 /// Ops - an array of operands.
 CallInst *IRBuilderBase::CreateMaskedLoad(ArrayRef<Value *> Ops) {
   // The only overloaded type is the type of the pass-through value here.
-  Type *DataTy = Ops[1]->getType();
+  Type *DataTy = Ops[3]->getType();
   return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops, DataTy);
 }
 
@@ -194,7 +194,7 @@
 /// Ops - an array of operands.
 CallInst *IRBuilderBase::CreateMaskedStore(ArrayRef<Value *> Ops) {
   // DataTy - the type of the data to be stored - is the only overloaded type.
-  Type *DataTy = Ops[1]->getType();
+  Type *DataTy = Ops[0]->getType();
   return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, DataTy);
 }
 
Index: lib/IR/Verifier.cpp
@@ -2440,6 +2440,13 @@
       return VerifyIntrinsicType(ThisArgType->getVectorElementType(),
                                  Infos, ArgTys);
     }
+    case IITDescriptor::PtrToArgument: {
+      if (D.getArgumentNumber() >= ArgTys.size())
+        return true;
+      Type *ReferenceType = ArgTys[D.getArgumentNumber()];
+      PointerType *ThisArgType = dyn_cast<PointerType>(Ty);
+      return (!ThisArgType || ThisArgType->getElementType() != ReferenceType);
+    }
   }
   llvm_unreachable("unhandled");
 }
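Note the polarity: the verifier case returns true (flags a mismatch) unless the checked parameter is a pointer whose pointee equals the referenced overloaded type. Restated as a standalone predicate under this revision's typed-pointer API; ptrMatchesOverload is a hypothetical helper, not part of the tree:

#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// Returns true when Param is well formed for IITDescriptor::PtrToArgument,
// i.e. a pointer whose element type equals the resolved overloaded type.
static bool ptrMatchesOverload(Type *Param, Type *Overload) {
  PointerType *PT = dyn_cast<PointerType>(Param);
  return PT && PT->getElementType() == Overload;
}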
Index: lib/Target/X86/X86TargetTransformInfo.cpp
@@ -1158,11 +1158,11 @@
   return X86TTI::getIntImmCost(Imm, Ty);
 }
 
-bool X86TTI::isLegalMaskedLoad(Type *DataType, int Consecutive) const {
-  int ScalarWidth = DataType->getScalarSizeInBits();
+bool X86TTI::isLegalMaskedLoad(Type *DataTy, int Consecutive) const {
+  int DataWidth = DataTy->getPrimitiveSizeInBits();
 
   // Todo: AVX512 allows gather/scatter, works with strided and random as well
-  if ((ScalarWidth < 32) || (Consecutive == 0))
+  if ((DataWidth < 32) || (Consecutive == 0))
     return false;
   if (ST->hasAVX512() || ST->hasAVX2())
     return true;
Index: lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -1879,15 +1879,10 @@
 
     Instruction *NewSI;
     if (Legal->isMaskRequired(SI)) {
-      Type *I8PtrTy =
-        Builder.getInt8PtrTy(PartPtr->getType()->getPointerAddressSpace());
-
-      Value *I8Ptr = Builder.CreateBitCast(PartPtr, I8PtrTy);
-
       VectorParts Cond = createBlockInMask(SI->getParent());
       SmallVector<Value *, 8> Ops;
-      Ops.push_back(I8Ptr);
       Ops.push_back(StoredVal[Part]);
+      Ops.push_back(VecPtr);
       Ops.push_back(Builder.getInt32(Alignment));
       Ops.push_back(Cond[Part]);
       NewSI = Builder.CreateMaskedStore(Ops);
@@ -1914,23 +1909,18 @@
     }
 
     Instruction* NewLI;
+    Value *VecPtr = Builder.CreateBitCast(PartPtr,
+                                          DataTy->getPointerTo(AddressSpace));
     if (Legal->isMaskRequired(LI)) {
-      Type *I8PtrTy =
-        Builder.getInt8PtrTy(PartPtr->getType()->getPointerAddressSpace());
-
-      Value *I8Ptr = Builder.CreateBitCast(PartPtr, I8PtrTy);
-
       VectorParts SrcMask = createBlockInMask(LI->getParent());
       SmallVector<Value *, 8> Ops;
-      Ops.push_back(I8Ptr);
-      Ops.push_back(UndefValue::get(DataTy));
+      Ops.push_back(VecPtr);
       Ops.push_back(Builder.getInt32(Alignment));
       Ops.push_back(SrcMask[Part]);
+      Ops.push_back(UndefValue::get(DataTy));
       NewLI = Builder.CreateMaskedLoad(Ops);
     }
     else {
-      Value *VecPtr = Builder.CreateBitCast(PartPtr,
-                                            DataTy->getPointerTo(AddressSpace));
       NewLI = Builder.CreateAlignedLoad(VecPtr, Alignment, "wide.load");
     }
     propagateMetadata(NewLI, LI);
Index: test/CodeGen/X86/masked_memop.ll
@@ -8,9 +8,9 @@
 ; AVX2: vpmaskmovd (%rdi)
 ; AVX2-NOT: blend
 
-define <16 x i32> @test1(<16 x i32> %trigger, i8* %addr) {
+define <16 x i32> @test1(<16 x i32> %trigger, <16 x i32>* %addr) {
   %mask = icmp eq <16 x i32> %trigger, zeroinitializer
-  %res = call <16 x i32> @llvm.masked.load.v16i32(i8* %addr, <16 x i32>undef, i32 4, <16 x i1>%mask)
+  %res = call <16 x i32> @llvm.masked.load.v16i32(<16 x i32>* %addr, i32 4, <16 x i1>%mask, <16 x i32>undef)
   ret <16 x i32> %res
 }
 
@@ -21,9 +21,9 @@
 ; AVX2: vpmaskmovd {{.*}}(%rdi)
 ; AVX2: vpmaskmovd {{.*}}(%rdi)
 ; AVX2-NOT: blend
-define <16 x i32> @test2(<16 x i32> %trigger, i8* %addr) {
+define <16 x i32> @test2(<16 x i32> %trigger, <16 x i32>* %addr) {
   %mask = icmp eq <16 x i32> %trigger, zeroinitializer
-  %res = call <16 x i32> @llvm.masked.load.v16i32(i8* %addr, <16 x i32>zeroinitializer, i32 4, <16 x i1>%mask)
+  %res = call <16 x i32> @llvm.masked.load.v16i32(<16 x i32>* %addr, i32 4, <16 x i1>%mask, <16 x i32>zeroinitializer)
   ret <16 x i32> %res
 }
 
@@ -30,9 +30,9 @@
 ; AVX512-LABEL: test3
 ; AVX512: vmovdqu32 %zmm1, (%rdi) {%k1}
 
-define void @test3(<16 x i32> %trigger, i8* %addr, <16 x i32> %val) {
+define void @test3(<16 x i32> %trigger, <16 x i32>* %addr, <16 x i32> %val) {
   %mask = icmp eq <16 x i32> %trigger, zeroinitializer
-  call void @llvm.masked.store.v16i32(i8* %addr, <16 x i32>%val, i32 4, <16 x i1>%mask)
+  call void @llvm.masked.store.v16i32(<16 x i32>%val, <16 x i32>* %addr, i32 4, <16 x i1>%mask)
   ret void
 }
 
@@ -43,9 +43,9 @@
 ; AVX2: vmaskmovps {{.*}}(%rdi)
 ; AVX2: vmaskmovps {{.*}}(%rdi)
 ; AVX2: blend
-define <16 x float> @test4(<16 x i32> %trigger, i8* %addr, <16 x float> %dst) {
+define <16 x float> @test4(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %dst) {
   %mask = icmp eq <16 x i32> %trigger, zeroinitializer
-  %res = call <16 x float> @llvm.masked.load.v16f32(i8* %addr, <16 x float>%dst, i32 4, <16 x i1>%mask)
+  %res = call <16 x float> @llvm.masked.load.v16f32(<16 x float>* %addr, i32 4, <16 x i1>%mask, <16 x float> %dst)
   ret <16 x float> %res
 }
 
@@ -57,9 +57,9 @@
 ; AVX2: vblendvpd
 ; AVX2: vmaskmovpd
 ; AVX2: vblendvpd
-define <8 x double> @test5(<8 x i32> %trigger, i8* %addr, <8 x double> %dst) {
+define <8 x double> @test5(<8 x i32> %trigger, <8 x double>* %addr, <8 x double> %dst) {
   %mask = icmp eq <8 x i32> %trigger, zeroinitializer
-  %res = call <8 x double> @llvm.masked.load.v8f64(i8* %addr, <8 x double>%dst, i32 4, <8 x i1>%mask)
+  %res = call <8 x double> @llvm.masked.load.v8f64(<8 x double>* %addr, i32 4, <8 x i1>%mask, <8 x double>%dst)
   ret <8 x double> %res
 }
 
@@ -66,9 +66,9 @@
 ; AVX2-LABEL: test6
 ; AVX2: vmaskmovpd
 ; AVX2: vblendvpd
-define <2 x double> @test6(<2 x i64> %trigger, i8* %addr, <2 x double> %dst) {
+define <2 x double> @test6(<2 x i64> %trigger, <2 x double>* %addr, <2 x double> %dst) {
   %mask = icmp eq <2 x i64> %trigger, zeroinitializer
-  %res = call <2 x double> @llvm.masked.load.v2f64(i8* %addr, <2 x double>%dst, i32 4, <2 x i1>%mask)
+  %res = call <2 x double> @llvm.masked.load.v2f64(<2 x double>* %addr, i32 4, <2 x i1>%mask, <2 x double>%dst)
   ret <2 x double> %res
 }
 
@@ -75,9 +75,9 @@
 ; AVX2-LABEL: test7
 ; AVX2: vmaskmovps {{.*}}(%rdi)
 ; AVX2: blend
-define <4 x float> @test7(<4 x i32> %trigger, i8* %addr, <4 x float> %dst) {
+define <4 x float> @test7(<4 x i32> %trigger, <4 x float>* %addr, <4 x float> %dst) {
   %mask = icmp eq <4 x i32> %trigger, zeroinitializer
-  %res = call <4 x float> @llvm.masked.load.v4f32(i8* %addr, <4 x float>%dst, i32 4, <4 x i1>%mask)
+  %res = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %addr, i32 4, <4 x i1>%mask, <4 x float>%dst)
   ret <4 x float> %res
 }
 
@@ -84,9 +84,9 @@
 ; AVX2-LABEL: test8
 ; AVX2: vpmaskmovd {{.*}}(%rdi)
 ; AVX2: blend
-define <4 x i32> @test8(<4 x i32> %trigger, i8* %addr, <4 x i32> %dst) {
+define <4 x i32> @test8(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %dst) {
   %mask = icmp eq <4 x i32> %trigger, zeroinitializer
-  %res = call <4 x i32> @llvm.masked.load.v4i32(i8* %addr, <4 x i32>%dst, i32 4, <4 x i1>%mask)
+  %res = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %addr, i32 4, <4 x i1>%mask, <4 x i32>%dst)
   ret <4 x i32> %res
 }
 
@@ -93,8 +93,8 @@
 ; AVX2-LABEL: test9
 ; AVX2: vpmaskmovd %xmm
-define void @test9(<4 x i32> %trigger, i8* %addr, <4 x i32> %val) {
+define void @test9(<4 x i32> %trigger, <4 x i32>* %addr, <4 x i32> %val) {
   %mask = icmp eq <4 x i32> %trigger, zeroinitializer
-  call void @llvm.masked.store.v4i32(i8* %addr, <4 x i32>%val, i32 4, <4 x i1>%mask)
+  call void @llvm.masked.store.v4i32(<4 x i32>%val, <4 x i32>* %addr, i32 4, <4 x i1>%mask)
   ret void
 }
 
@@ -101,9 +101,9 @@
 ; AVX2-LABEL: test10
 ; AVX2: vmaskmovpd (%rdi), %ymm
 ; AVX2: blend
-define <4 x double> @test10(<4 x i32> %trigger, i8* %addr, <4 x double> %dst) {
+define <4 x double> @test10(<4 x i32> %trigger, <4 x double>* %addr, <4 x double> %dst) {
   %mask = icmp eq <4 x i32> %trigger, zeroinitializer
-  %res = call <4 x double> @llvm.masked.load.v4f64(i8* %addr, <4 x double>%dst, i32 4, <4 x i1>%mask)
+  %res = call <4 x double> @llvm.masked.load.v4f64(<4 x double>* %addr, i32 4, <4 x i1>%mask, <4 x double>%dst)
   ret <4 x double> %res
 }
 
@@ -110,9 +110,9 @@
 ; AVX2-LABEL: test11
 ; AVX2: vmaskmovps
 ; AVX2: vblendvps
-define <8 x float> @test11(<8 x i32> %trigger, i8* %addr, <8 x float> %dst) {
+define <8 x float> @test11(<8 x i32> %trigger, <8 x float>* %addr, <8 x float> %dst) {
   %mask = icmp eq <8 x i32> %trigger, zeroinitializer
-  %res = call <8 x float> @llvm.masked.load.v8f32(i8* %addr, <8 x float>%dst, i32 4, <8 x i1>%mask)
+  %res = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %addr, i32 4, <8 x i1>%mask, <8 x float>%dst)
   ret <8 x float> %res
 }
 
@@ -119,8 +119,8 @@
 ; AVX2-LABEL: test12
 ; AVX2: vpmaskmovd %ymm
-define void @test12(<8 x i32> %trigger, i8* %addr, <8 x i32> %val) {
+define void @test12(<8 x i32> %trigger, <8 x i32>* %addr, <8 x i32> %val) {
   %mask = icmp eq <8 x i32> %trigger, zeroinitializer
-  call void @llvm.masked.store.v8i32(i8* %addr, <8 x i32>%val, i32 4, <8 x i1>%mask)
+  call void @llvm.masked.store.v8i32(<8 x i32>%val, <8 x i32>* %addr, i32 4, <8 x i1>%mask)
   ret void
 }
 
@@ -127,16 +127,26 @@
-declare <16 x i32> @llvm.masked.load.v16i32(i8*, <16 x i32>, i32, <16 x i1>)
-declare <4 x i32> @llvm.masked.load.v4i32(i8*, <4 x i32>, i32, <4 x i1>)
-declare void @llvm.masked.store.v16i32(i8*, <16 x i32>, i32, <16 x i1>)
-declare void @llvm.masked.store.v8i32(i8*, <8 x i32>, i32, <8 x i1>)
-declare void @llvm.masked.store.v4i32(i8*, <4 x i32>, i32, <4 x i1>)
-declare <16 x float> @llvm.masked.load.v16f32(i8*, <16 x float>, i32, <16 x i1>)
-declare <8 x float> @llvm.masked.load.v8f32(i8*, <8 x float>, i32, <8 x i1>)
-declare <4 x float> @llvm.masked.load.v4f32(i8*, <4 x float>, i32, <4 x i1>)
-declare void @llvm.masked.store.v16f32(i8*, <16 x float>, i32, <16 x i1>)
-declare <8 x double> @llvm.masked.load.v8f64(i8*, <8 x double>, i32, <8 x i1>)
-declare <4 x double> @llvm.masked.load.v4f64(i8*, <4 x double>, i32, <4 x i1>)
-declare <2 x double> @llvm.masked.load.v2f64(i8*, <2 x double>, i32, <2 x i1>)
-declare void @llvm.masked.store.v8f64(i8*, <8 x double>, i32, <8 x i1>)
-declare void @llvm.masked.store.v2f64(i8*, <2 x double>, i32, <2 x i1>)
-declare void @llvm.masked.store.v2i64(i8*, <2 x i64>, i32, <2 x i1>)
+; AVX512-LABEL: test13
+; AVX512: vmovups %zmm1, (%rdi) {%k1}
 
+define void @test13(<16 x i32> %trigger, <16 x float>* %addr, <16 x float> %val) {
+  %mask = icmp eq <16 x i32> %trigger, zeroinitializer
+  call void @llvm.masked.store.v16f32(<16 x float>%val, <16 x float>* %addr, i32 4, <16 x i1>%mask)
+  ret void
+}
+
+declare <16 x i32> @llvm.masked.load.v16i32(<16 x i32>*, i32, <16 x i1>, <16 x i32>)
+declare <4 x i32> @llvm.masked.load.v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
+declare void @llvm.masked.store.v16i32(<16 x i32>, <16 x i32>*, i32, <16 x i1>)
+declare void @llvm.masked.store.v8i32(<8 x i32>, <8 x i32>*, i32, <8 x i1>)
+declare void @llvm.masked.store.v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
+declare void @llvm.masked.store.v16f32(<16 x float>, <16 x float>*, i32, <16 x i1>)
+declare void @llvm.masked.store.v16f32p(<16 x float>*, <16 x float>**, i32, <16 x i1>)
+declare <16 x float> @llvm.masked.load.v16f32(<16 x float>*, i32, <16 x i1>, <16 x float>)
+declare <8 x float> @llvm.masked.load.v8f32(<8 x float>*, i32, <8 x i1>, <8 x float>)
+declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
+declare <8 x double> @llvm.masked.load.v8f64(<8 x double>*, i32, <8 x i1>, <8 x double>)
+declare <4 x double> @llvm.masked.load.v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>)
+declare <2 x double> @llvm.masked.load.v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)
+declare void @llvm.masked.store.v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>)
+declare void @llvm.masked.store.v2f64(<2 x double>, <2 x double>*, i32, <2 x i1>)
+declare void @llvm.masked.store.v2i64(<2 x i64>, <2 x i64>*, i32, <2 x i1>)
+
Index: utils/TableGen/CodeGenTarget.cpp
@@ -530,7 +530,8 @@
       // overloaded, all the types can be specified directly.
       assert(((!TyEl->isSubClassOf("LLVMExtendedType") &&
                !TyEl->isSubClassOf("LLVMTruncatedType") &&
-               !TyEl->isSubClassOf("LLVMVectorSameWidth")) ||
+               !TyEl->isSubClassOf("LLVMVectorSameWidth") &&
+               !TyEl->isSubClassOf("LLVMPointerToElt")) ||
               VT == MVT::iAny || VT == MVT::vAny) &&
              "Expected iAny or vAny type");
     } else
Index: utils/TableGen/IntrinsicEmitter.cpp
@@ -257,7 +257,8 @@
   IIT_V1 = 27,
   IIT_VARARG = 28,
   IIT_HALF_VEC_ARG = 29,
-  IIT_SAME_VEC_WIDTH_ARG = 30
+  IIT_SAME_VEC_WIDTH_ARG = 30,
+  IIT_PTR_TO_ARG = 31
 };
 
 
@@ -311,6 +312,9 @@
     MVT::SimpleValueType VT = getValueType(R->getValueAsDef("ElTy"));
     EncodeFixedValueType(VT, Sig);
     return;
+  }
+  else if (R->isSubClassOf("LLVMPointerTo")) {
+    Sig.push_back(IIT_PTR_TO_ARG);
   }
   else
     Sig.push_back(IIT_ARG);