llvm.org GIT mirror llvm / 524e53d
Merging r294203: ------------------------------------------------------------------------ r294203 | john.brawn | 2017-02-06 10:07:20 -0800 (Mon, 06 Feb 2017) | 9 lines [AArch64] Fix incorrect MachinePointerInfo in splitStoreSplat When splitting up one store into several in splitStoreSplat we have to make sure we get the MachinePointerInfo right, otherwise alias analysis thinks they all store to the same location. This can then cause invalid scheduling later on. Differential Revision: https://reviews.llvm.org/D29446 ------------------------------------------------------------------------ git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_40@294242 91177308-0d34-0410-b5e6-96231b3b80d8 Hans Wennborg 2 years ago
3 changed file(s) with 134 addition(s) and 2 deletion(s). Raw diff Collapse all Expand all
89338933 // instructions (stp).
89348934 SDLoc DL(&St);
89358935 SDValue BasePtr = St.getBasePtr();
8936 const MachinePointerInfo &PtrInfo = St.getPointerInfo();
89368937 SDValue NewST1 =
8937 DAG.getStore(St.getChain(), DL, SplatVal, BasePtr, St.getPointerInfo(),
8938 DAG.getStore(St.getChain(), DL, SplatVal, BasePtr, PtrInfo,
89388939 OrigAlignment, St.getMemOperand()->getFlags());
89398940
89408941 unsigned Offset = EltOffset;
89438944 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
89448945 DAG.getConstant(Offset, DL, MVT::i64));
89458946 NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr,
8946 St.getPointerInfo(), Alignment,
8947 PtrInfo.getWithOffset(Offset), Alignment,
89478948 St.getMemOperand()->getFlags());
89488949 Offset += EltOffset;
89498950 }
0 ; RUN: llc -mtriple=aarch64 -mcpu=cortex-a53 < %s | FileCheck %s
1
2 ; Tests to check that zero stores which are generated as STP xzr, xzr aren't
3 ; scheduled incorrectly due to incorrect alias information
4
5 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
6 %struct.tree_common = type { i8*, i8*, i32 }
7
8 ; Original test case which exhibited the bug
9 define void @test1(%struct.tree_common* %t, i32 %code, i8* %type) {
10 ; CHECK-LABEL: test1:
11 ; CHECK: stp xzr, xzr, [x0, #8]
12 ; CHECK: stp xzr, x2, [x0]
13 ; CHECK: str w1, [x0, #16]
; Original reproducer: memset zeroes the whole 24-byte struct, then field 2
; (%code, an i32 at offset 16 per the CHECK) and field 1 (%type, an i8* at
; offset 8) are overwritten. With wrong MachinePointerInfo on the split
; zero-stores, alias analysis thought they all hit the same address and the
; scheduler could reorder them past the overwriting stores.
14 entry:
15 %0 = bitcast %struct.tree_common* %t to i8*
16 tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 24, i32 8, i1 false)
17 %code1 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 2
18 store i32 %code, i32* %code1, align 8
19 %type2 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 1
20 store i8* %type, i8** %type2, align 8
21 ret void
22 }
23
24 ; Store to each struct element instead of using memset
25 define void @test2(%struct.tree_common* %t, i32 %code, i8* %type) {
26 ; CHECK-LABEL: test2:
27 ; CHECK: stp xzr, xzr, [x0]
28 ; CHECK: str wzr, [x0, #16]
29 ; CHECK: str w1, [x0, #16]
30 ; CHECK: str x2, [x0, #8]
; Same pattern as test1 but each struct field is zeroed with an explicit
; store instead of memset. The zeroing str wzr to [x0, #16] must still be
; emitted before the overwriting str w1 to the same address, and x2 (%type)
; must land after the zeroing stp — i.e. the ordering dependencies survive.
31 entry:
32 %0 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 0
33 %1 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 1
34 %2 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 2
35 store i8* zeroinitializer, i8** %0, align 8
36 store i8* zeroinitializer, i8** %1, align 8
37 store i32 zeroinitializer, i32* %2, align 8
38 store i32 %code, i32* %2, align 8
39 store i8* %type, i8** %1, align 8
40 ret void
41 }
42
43 ; Vector store instead of memset
44 define void @test3(%struct.tree_common* %t, i32 %code, i8* %type) {
45 ; CHECK-LABEL: test3:
46 ; CHECK: stp xzr, xzr, [x0, #8]
47 ; CHECK: stp xzr, x2, [x0]
48 ; CHECK: str w1, [x0, #16]
; Variant of test1 using a <3 x i64> zeroinitializer store (split into
; scalar zero stores by the backend) instead of memset; the expected
; assembly is identical to test1's.
49 entry:
50 %0 = bitcast %struct.tree_common* %t to <3 x i64>*
51 store <3 x i64> zeroinitializer, <3 x i64>* %0, align 8
52 %code1 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 2
53 store i32 %code, i32* %code1, align 8
54 %type2 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 1
55 store i8* %type, i8** %type2, align 8
56 ret void
57 }
58
59 ; Vector store, then store to vector elements
60 define void @test4(<3 x i64>* %p, i64 %x, i64 %y) {
61 ; CHECK-LABEL: test4:
62 ; CHECK: stp xzr, xzr, [x0, #8]
63 ; CHECK: stp xzr, x2, [x0]
64 ; CHECK: str x1, [x0, #16]
; Vector zero store followed by scalar stores to individual i64 elements
; (offset 16 = %x, offset 8 = %y); checks the split zero-stores keep
; distinct addresses so the element stores are ordered correctly.
65 entry:
66 store <3 x i64> zeroinitializer, <3 x i64>* %p, align 8
67 %0 = bitcast <3 x i64>* %p to i64*
68 %1 = getelementptr inbounds i64, i64* %0, i64 2
69 store i64 %x, i64* %1, align 8
70 %2 = getelementptr inbounds i64, i64* %0, i64 1
71 store i64 %y, i64* %2, align 8
72 ret void
73 }
0 ; REQUIRES: asserts
1 ; RUN: llc < %s -mtriple=aarch64 -mcpu=cyclone -mattr=+use-aa -enable-misched -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
2
3 ; Tests to check that the scheduler dependencies derived from alias analysis are
4 ; correct when we have stores that have been split up so that they can later be
5 ; merged into STP.
6
7 ; CHECK: ********** MI Scheduling **********
8 ; CHECK: test_splat:BB#0 entry
9 ; CHECK: SU({{[0-9]+}}): STRWui %vreg{{[0-9]+}}, %vreg{{[0-9]+}}, 3; mem:ST4[%3+8]
10 ; CHECK: Successors:
11 ; CHECK-NEXT: ord [[SU1:SU\([0-9]+\)]]
12 ; CHECK: SU({{[0-9]+}}): STRWui %vreg{{[0-9]+}}, %vreg{{[0-9]+}}, 2; mem:ST4[%3+4]
13 ; CHECK: Successors:
14 ; CHECK-NEXT: ord [[SU2:SU\([0-9]+\)]]
15 ; CHECK: [[SU1]]: STRWui %vreg{{[0-9]+}}, %vreg{{[0-9]+}}, 3; mem:ST4[%2]
16 ; CHECK: [[SU2]]: STRWui %vreg{{[0-9]+}}, %vreg{{[0-9]+}}, 2; mem:ST4[%1]
; Splat of a loaded value stored as <4 x i32> at %p+1, then two of those
; lanes (%p+3 and %p+2) overwritten with %x and %y. The CHECK lines above
; (mem:ST4[%3+8], ST4[%3+4] etc.) verify the split stores carry distinct
; MachinePointerInfo offsets, giving the scheduler ordering ("ord") edges
; to the later stores that alias them.
17 define void @test_splat(i32 %x, i32 %y, i32* %p) {
18 entry:
19 %val = load i32, i32* %p, align 4
20 %0 = getelementptr inbounds i32, i32* %p, i64 1
21 %1 = getelementptr inbounds i32, i32* %p, i64 2
22 %2 = getelementptr inbounds i32, i32* %p, i64 3
23 %vec0 = insertelement <4 x i32> undef, i32 %val, i32 0
24 %vec1 = insertelement <4 x i32> %vec0, i32 %val, i32 1
25 %vec2 = insertelement <4 x i32> %vec1, i32 %val, i32 2
26 %vec3 = insertelement <4 x i32> %vec2, i32 %val, i32 3
27 %3 = bitcast i32* %0 to <4 x i32>*
28 store <4 x i32> %vec3, <4 x i32>* %3, align 4
29 store i32 %x, i32* %2, align 4
30 store i32 %y, i32* %1, align 4
31 ret void
32 }
33
34 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
35 %struct.tree_common = type { i8*, i8*, i32 }
36
37 ; CHECK: ********** MI Scheduling **********
38 ; CHECK: test_zero:BB#0 entry
39 ; CHECK: SU({{[0-9]+}}): STRXui %XZR, %vreg{{[0-9]+}}, 2; mem:ST8[%0+16]
40 ; CHECK: Successors:
41 ; CHECK-NEXT: ord [[SU3:SU\([0-9]+\)]]
42 ; CHECK: SU({{[0-9]+}}): STRXui %XZR, %vreg{{[0-9]+}}, 1; mem:ST8[%0+8]
43 ; CHECK: Successors:
44 ; CHECK-NEXT: ord [[SU4:SU\([0-9]+\)]]
45 ; CHECK: [[SU3]]: STRWui %vreg{{[0-9]+}}, %vreg{{[0-9]+}}, 4; mem:ST4[%code1]
46 ; CHECK: [[SU4]]: STRXui %vreg{{[0-9]+}}, %vreg{{[0-9]+}}, 1; mem:ST8[%type2]
; Same IR as test1 (memset-to-zero then overwrite fields 2 and 1), but run
; under -debug-only=misched: the CHECK lines above verify the split zero
; stores (mem:ST8[%0+16], ST8[%0+8]) each have an ordering ("ord") successor
; edge to the scalar store that overwrites the same offset.
47 define void @test_zero(%struct.tree_common* %t, i32 %code, i8* %type) {
48 entry:
49 %0 = bitcast %struct.tree_common* %t to i8*
50 tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 24, i32 8, i1 false)
51 %code1 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 2
52 store i32 %code, i32* %code1, align 8
53 %type2 = getelementptr inbounds %struct.tree_common, %struct.tree_common* %t, i64 0, i32 1
54 store i8* %type, i8** %type2, align 8
55 ret void
56 }