llvm.org GIT mirror llvm, commit cbc2c76
Merging r313366:
------------------------------------------------------------------------
r313366 | ctopper | 2017-09-15 10:09:03 -0700 (Fri, 15 Sep 2017) | 9 lines

[X86] Don't create i64 constants on 32-bit targets when lowering v64i1 constant build vectors

When handling a v64i1 build vector of constants on 32-bit targets we were
creating an illegal i64 constant that we then bitcasted back to v64i1. We
need to instead create two 32-bit constants, bitcast them to v32i1, and
concat the result. We should also take care to handle the halves being
all zeros/ones after the split.

This patch splits the build vector and then recursively lowers the two
pieces. This allows us to handle the all-ones and all-zeros cases with
minimal effort. Ideally we'd just do the split and concat, and let
lowering get called again on the new nodes, but getNode has special
handling for CONCAT_VECTORS that reassembles the pieces back into a
single BUILD_VECTOR. Hopefully the two temporary BUILD_VECTORs we had to
create to do this, which don't get returned, don't cause any issues.

Fixes PR34605.

Differential Revision: https://reviews.llvm.org/D37858
------------------------------------------------------------------------

git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_50@315198 91177308-0d34-0410-b5e6-96231b3b80d8

Author: Craig Topper
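As a minimal standalone sketch of the split the commit describes (not part of the patch), assuming element i of the v64i1 build vector maps to bit i of the packed constant mask; splitMask is a hypothetical helper introduced only for illustration:

    // Hypothetical helper (illustration only): split a 64-bit constant mask
    // into the two 32-bit immediates a 32-bit target can materialize
    // directly, one per v32i1 half.
    #include <cstdint>
    #include <utility>

    static std::pair<uint32_t, uint32_t> splitMask(uint64_t Mask64) {
      uint32_t Lower = static_cast<uint32_t>(Mask64);       // elements 0..31
      uint32_t Upper = static_cast<uint32_t>(Mask64 >> 32); // elements 32..63
      return {Lower, Upper};
    }

For a mask with only element 0 set, this yields Lower = 1 and Upper = 0, which appears to line up with the movl $1 / kxord pair in the CHECK lines of the test below.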
2 changed files with 75 additions and 0 deletions.
lib/Target/X86/X86ISelLowering.cpp (LowerBUILD_VECTORvXi1):

     return DAG.getTargetConstant(1, dl, VT);
 
   if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
+    if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
+      // Split the pieces.
+      SDValue Lower =
+          DAG.getBuildVector(MVT::v32i1, dl, Op.getNode()->ops().slice(0, 32));
+      SDValue Upper =
+          DAG.getBuildVector(MVT::v32i1, dl, Op.getNode()->ops().slice(32, 32));
+      // We have to manually lower both halves so getNode doesn't try to
+      // reassemble the build_vector.
+      Lower = LowerBUILD_VECTORvXi1(Lower, DAG);
+      Upper = LowerBUILD_VECTORvXi1(Upper, DAG);
+      return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lower, Upper);
+    }
     SDValue Imm = ConvertI1VectorToInteger(Op, DAG);
     if (Imm.getValueSizeInBits() == VT.getSizeInBits())
       return DAG.getBitcast(VT, Imm);
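The recursive calls above bottom out in the existing all-zeros/all-ones checks at the top of LowerBUILD_VECTORvXi1 (the all-ones return is the first context line of this hunk). Conceptually, and only as a sketch of code that is not shown in this diff:

    // Sketch of the early-outs each recursively lowered v32i1 half hits when
    // it is a constant of all zeros or all ones; the exact surrounding code
    // is outside this hunk.
    if (ISD::isBuildVectorAllZeros(Op.getNode()))
      return DAG.getTargetConstant(0, dl, VT); // whole half is zero
    if (ISD::isBuildVectorAllOnes(Op.getNode()))
      return DAG.getTargetConstant(1, dl, VT); // whole half is all ones

This is why splitting and re-lowering handles the all-zeros and all-ones halves with minimal effort, as the commit message notes.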
test/CodeGen/X86/pr34605.ll (new file):

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mattr=avx512bw,avx512vl,avx512dq | FileCheck %s

define void @pr34605(i8* nocapture %s, i32 %p) {
; CHECK-LABEL: pr34605:
; CHECK:       # BB#0: # %entry
; CHECK-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT:    vpbroadcastd {{[0-9]+}}(%esp), %zmm0
; CHECK-NEXT:    vpcmpeqd {{\.LCPI.*}}, %zmm0, %k0
; CHECK-NEXT:    vpcmpeqd {{\.LCPI.*}}, %zmm0, %k1
; CHECK-NEXT:    kunpckwd %k0, %k1, %k0
; CHECK-NEXT:    vpcmpeqd {{\.LCPI.*}}, %zmm0, %k1
; CHECK-NEXT:    vpcmpeqd {{\.LCPI.*}}, %zmm0, %k2
; CHECK-NEXT:    kunpckwd %k1, %k2, %k1
; CHECK-NEXT:    kunpckdq %k0, %k1, %k0
; CHECK-NEXT:    kxord %k0, %k0, %k1
; CHECK-NEXT:    movl $1, %ecx
; CHECK-NEXT:    kmovd %ecx, %k2
; CHECK-NEXT:    kunpckdq %k2, %k1, %k1
; CHECK-NEXT:    kandq %k1, %k0, %k1
; CHECK-NEXT:    vmovdqu8 {{\.LCPI.*}}, %zmm0 {%k1} {z}
; CHECK-NEXT:    vmovdqu8 %zmm0, (%eax)
; CHECK-NEXT:    vpxord %zmm0, %zmm0, %zmm0
; CHECK-NEXT:    vmovdqu32 %zmm0, 64(%eax)
; CHECK-NEXT:    vmovdqu32 %zmm0, 128(%eax)
; CHECK-NEXT:    vmovdqu32 %zmm0, 192(%eax)
; CHECK-NEXT:    vmovdqu32 %zmm0, 256(%eax)
; CHECK-NEXT:    vmovdqu32 %zmm0, 320(%eax)
; CHECK-NEXT:    vmovdqu32 %zmm0, 384(%eax)
; CHECK-NEXT:    vmovdqu32 %zmm0, 448(%eax)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retl
entry:
  %broadcast.splatinsert = insertelement <64 x i32> undef, i32 %p, i32 0
  %broadcast.splat = shufflevector <64 x i32> %broadcast.splatinsert, <64 x i32> undef, <64 x i32> zeroinitializer
  %0 = icmp eq <64 x i32> %broadcast.splat,
  %1 = and <64 x i1> %0,
  %2 = zext <64 x i1> %1 to <64 x i8>
  %3 = bitcast i8* %s to <64 x i8>*
  store <64 x i8> %2, <64 x i8>* %3, align 1
  %4 = getelementptr inbounds i8, i8* %s, i32 64
  %5 = bitcast i8* %4 to <64 x i8>*
  store <64 x i8> zeroinitializer, <64 x i8>* %5, align 1
  %6 = getelementptr inbounds i8, i8* %s, i32 128
  %7 = bitcast i8* %6 to <64 x i8>*
  store <64 x i8> zeroinitializer, <64 x i8>* %7, align 1
  %8 = getelementptr inbounds i8, i8* %s, i32 192
  %9 = bitcast i8* %8 to <64 x i8>*
  store <64 x i8> zeroinitializer, <64 x i8>* %9, align 1
  %10 = getelementptr inbounds i8, i8* %s, i32 256
  %11 = bitcast i8* %10 to <64 x i8>*
  store <64 x i8> zeroinitializer, <64 x i8>* %11, align 1
  %12 = getelementptr inbounds i8, i8* %s, i32 320
  %13 = bitcast i8* %12 to <64 x i8>*
  store <64 x i8> zeroinitializer, <64 x i8>* %13, align 1
  %14 = getelementptr inbounds i8, i8* %s, i32 384
  %15 = bitcast i8* %14 to <64 x i8>*
  store <64 x i8> zeroinitializer, <64 x i8>* %15, align 1
  %16 = getelementptr inbounds i8, i8* %s, i32 448
  %17 = bitcast i8* %16 to <64 x i8>*
  store <64 x i8> zeroinitializer, <64 x i8>* %17, align 1
  ret void
}