llvm.org GIT mirror: llvm, commit 606660f
Author: Yi Jiang

Always set alignment of vectorized LD/ST in SLP-Vectorizer. <rdar://problem/16812145>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@207983 91177308-0d34-0410-b5e6-96231b3b80d8
2 changed files with 31 additions and 0 deletions.
                                        VecTy->getPointerTo(AS));
  unsigned Alignment = LI->getAlignment();
  LI = Builder.CreateLoad(VecPtr);
+ if (!Alignment)
+   Alignment = DL->getABITypeAlignment(LI->getPointerOperand()->getType());
  LI->setAlignment(Alignment);
  E->VectorizedValue = LI;
  return propagateMetadata(LI, E->Scalars);

...

  Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
                                        VecTy->getPointerTo(AS));
  StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
+ if (!Alignment)
+   Alignment = DL->getABITypeAlignment(SI->getPointerOperand()->getType());
  S->setAlignment(Alignment);
  E->VectorizedValue = S;
  return propagateMetadata(S, E->Scalars);
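
Both hunks apply the same fix: when the scalar load or store carries no explicit alignment (getAlignment() returns 0), the vectorized instruction now gets the ABI alignment that DataLayout reports for the pointer operand's type, instead of being left with alignment 0. Below is a minimal standalone sketch of that fallback for the load side, assuming the LoadInst/DataLayout API of this era (getAlignment/getABITypeAlignment; later LLVM replaced these with Align-based variants); the helper name inferLoadAlignment is made up for illustration and is not part of the patch.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

// Hypothetical helper (not from the patch) showing the fallback used above:
// prefer the alignment recorded on the scalar load; otherwise use the ABI
// alignment DataLayout derives for the pointer operand's type.
static unsigned inferLoadAlignment(llvm::LoadInst *LI, const llvm::DataLayout *DL) {
  unsigned Alignment = LI->getAlignment(); // 0 means "no alignment was specified"
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(LI->getPointerOperand()->getType());
  return Alignment;
}

The store path is identical except that it reads SI->getAlignment() and SI->getPointerOperand(). The new regression test added by the commit follows.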
; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

; Simple 3-pair chain with loads and stores
; CHECK: test1
define void @test1(double* %a, double* %b, double* %c) {
entry:
  %agg.tmp.i.i.sroa.0 = alloca [3 x double], align 16
; CHECK: %[[V0:[0-9]+]] = load <2 x double>* %[[V2:[0-9]+]], align 8
  %i0 = load double* %a
  %i1 = load double* %b
  %mul = fmul double %i0, %i1
  %store1 = getelementptr inbounds [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 1
  %store2 = getelementptr inbounds [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 2
  %arrayidx3 = getelementptr inbounds double* %a, i64 1
  %i3 = load double* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double* %b, i64 1
  %i4 = load double* %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
; CHECK: store <2 x double> %[[V1:[0-9]+]], <2 x double>* %[[V2:[0-9]+]], align 8
  store double %mul, double* %store1
  store double %mul5, double* %store2, align 16
; CHECK: ret
  ret void
}
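
The "align 8" in the CHECK lines follows from the datalayout in the test: the loads of %a and %b and the store through %store1 carry no explicit alignment, so the vectorizer falls back to the ABI alignment, and both the f64:64:64 and p:64:64:64 entries specify an 8-byte ABI alignment. Below is a small sketch of how one could confirm those numbers, assuming the DataLayout API of this era (getABITypeAlignment) and a reduced datalayout string that keeps only the relevant entries (the reduction is an assumption, not the test's full string).

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>

int main() {
  llvm::LLVMContext Ctx;
  // Reduced datalayout keeping the pointer and f64 entries from the test above.
  // Alignments in the string are in bits; the queries below return bytes.
  llvm::DataLayout DL("e-p:64:64:64-f64:64:64");
  llvm::Type *DblTy = llvm::Type::getDoubleTy(Ctx);
  assert(DL.getABITypeAlignment(DblTy) == 8);                            // f64:64:64
  assert(DL.getABITypeAlignment(llvm::PointerType::get(DblTy, 0)) == 8); // p:64:64:64
  return 0;
}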