llvm.org GIT mirror, llvm, commit 891346a

[AArch64] Disable LDP/STP for quads

Disable LDP/STP for quads on Exynos M1, as they are not as efficient as pairs of regular LDR/STR.

Patch by Abderrazek Zaafrani <a.zaafrani@samsung.com>.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@266223 91177308-0d34-0410-b5e6-96231b3b80d8

Committed by Evandro Menezes, 3 years ago.
3 changed file(s) with 93 addition(s) and 0 deletion(s).
@@ ... @@
   // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
   if (isLdStPairSuppressed(MI))
     return false;
+
+  // Do not pair quad ld/st for Exynos.
+  if (Subtarget.isExynosM1()) {
+    switch (MI->getOpcode()) {
+    default:
+      break;
+
+    case AArch64::LDURQi:
+    case AArch64::STURQi:
+    case AArch64::LDRQui:
+    case AArch64::STRQui:
+      return false;
+    }
+  }
 
   return true;
 }
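The hunk above reads like the AArch64 pairing-candidate predicate (the surrounding file and function names are not shown in this capture): LDP/STP load or store two registers in a single instruction, and this hook returns false for memory ops that should be left as individual LDR/STR. The standalone sketch below only models the effect of the added check, with stand-in types for the opcode enum and the subtarget query; it is not the LLVM implementation.

#include <cstdio>

// Stand-in opcode tags; the real values are AArch64::LDRQui, AArch64::STURQi, etc.
enum class Opcode { LDRWui, STRWui, LDURQi, STURQi, LDRQui, STRQui };

// Stand-in for the subtarget query used in the patch (Subtarget.isExynosM1()).
struct Subtarget { bool IsExynosM1; };

// Models the predicate after this patch: 128-bit (Q-register) loads and stores
// are never pairing candidates on Exynos M1. The other checks the real hook
// performs (volatile refs, pair-suppression hints, ...) are omitted here.
bool isCandidateToPair(Opcode Op, const Subtarget &ST) {
  if (ST.IsExynosM1) {
    switch (Op) {
    case Opcode::LDURQi:
    case Opcode::STURQi:
    case Opcode::LDRQui:
    case Opcode::STRQui:
      return false;
    default:
      break;
    }
  }
  return true;
}

int main() {
  Subtarget ExynosM1{true}, Other{false};
  // Quad loads stay unpaired on Exynos M1 but remain candidates elsewhere;
  // 32-bit loads are unaffected on both.
  std::printf("LDRQui on Exynos M1: %d\n", isCandidateToPair(Opcode::LDRQui, ExynosM1));
  std::printf("LDRQui elsewhere:    %d\n", isCandidateToPair(Opcode::LDRQui, Other));
  std::printf("LDRWui on Exynos M1: %d\n", isCandidateToPair(Opcode::LDRWui, ExynosM1));
  return 0;
}

In the compiler itself the corresponding effect shows up in the tests below: with -mcpu=exynos-m1 the scheduler no longer clusters the LDRQui loads, and no LDP/STP is formed for quad accesses.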
@@ ... @@
 ; REQUIRES: asserts
 ; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=cortex-a57 -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-linux-gnu -mcpu=exynos-m1 -verify-misched -debug-only=misched -o - 2>&1 > /dev/null | FileCheck --check-prefix=EXYNOS %s
 
 ; Test ldr clustering.
 ; CHECK: ********** MI Scheduling **********
 ; CHECK-LABEL: ldr_int:BB#0
 ; CHECK: Cluster loads SU(1) - SU(2)
 ; CHECK: SU(1): %vreg{{[0-9]+}} = LDRWui
 ; CHECK: SU(2): %vreg{{[0-9]+}} = LDRWui
+; EXYNOS: ********** MI Scheduling **********
+; EXYNOS-LABEL: ldr_int:BB#0
+; EXYNOS: Cluster loads SU(1) - SU(2)
+; EXYNOS: SU(1): %vreg{{[0-9]+}} = LDRWui
+; EXYNOS: SU(2): %vreg{{[0-9]+}} = LDRWui
 define i32 @ldr_int(i32* %a) nounwind {
   %p1 = getelementptr inbounds i32, i32* %a, i32 1
   %tmp1 = load i32, i32* %p1, align 2
@@ ... @@
 ; CHECK: Cluster loads SU(1) - SU(2)
 ; CHECK: SU(1): %vreg{{[0-9]+}} = LDRSWui
 ; CHECK: SU(2): %vreg{{[0-9]+}} = LDRSWui
+; EXYNOS: ********** MI Scheduling **********
+; EXYNOS-LABEL: ldp_sext_int:BB#0
+; EXYNOS: Cluster loads SU(1) - SU(2)
+; EXYNOS: SU(1): %vreg{{[0-9]+}} = LDRSWui
+; EXYNOS: SU(2): %vreg{{[0-9]+}} = LDRSWui
 define i64 @ldp_sext_int(i32* %p) nounwind {
   %tmp = load i32, i32* %p, align 4
   %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
@@ ... @@
 ; CHECK: Cluster loads SU(2) - SU(1)
 ; CHECK: SU(1): %vreg{{[0-9]+}} = LDURWi
 ; CHECK: SU(2): %vreg{{[0-9]+}} = LDURWi
+; EXYNOS: ********** MI Scheduling **********
+; EXYNOS-LABEL: ldur_int:BB#0
+; EXYNOS: Cluster loads SU(2) - SU(1)
+; EXYNOS: SU(1): %vreg{{[0-9]+}} = LDURWi
+; EXYNOS: SU(2): %vreg{{[0-9]+}} = LDURWi
 define i32 @ldur_int(i32* %a) nounwind {
   %p1 = getelementptr inbounds i32, i32* %a, i32 -1
   %tmp1 = load i32, i32* %p1, align 2
@@ ... @@
 ; CHECK: Cluster loads SU(3) - SU(4)
 ; CHECK: SU(3): %vreg{{[0-9]+}} = LDRSWui
 ; CHECK: SU(4): %vreg{{[0-9]+}}:sub_32 = LDRWui
+; EXYNOS: ********** MI Scheduling **********
+; EXYNOS-LABEL: ldp_half_sext_zext_int:BB#0
+; EXYNOS: Cluster loads SU(3) - SU(4)
+; EXYNOS: SU(3): %vreg{{[0-9]+}} = LDRSWui
+; EXYNOS: SU(4): %vreg{{[0-9]+}}:sub_32 = LDRWui
 define i64 @ldp_half_sext_zext_int(i64* %q, i32* %p) nounwind {
   %tmp0 = load i64, i64* %q, align 4
   %tmp = load i32, i32* %p, align 4
@@ ... @@
 ; CHECK: Cluster loads SU(3) - SU(4)
 ; CHECK: SU(3): %vreg{{[0-9]+}}:sub_32 = LDRWui
 ; CHECK: SU(4): %vreg{{[0-9]+}} = LDRSWui
+; EXYNOS: ********** MI Scheduling **********
+; EXYNOS-LABEL: ldp_half_zext_sext_int:BB#0
+; EXYNOS: Cluster loads SU(3) - SU(4)
+; EXYNOS: SU(3): %vreg{{[0-9]+}}:sub_32 = LDRWui
+; EXYNOS: SU(4): %vreg{{[0-9]+}} = LDRSWui
 define i64 @ldp_half_zext_sext_int(i64* %q, i32* %p) nounwind {
   %tmp0 = load i64, i64* %q, align 4
   %tmp = load i32, i32* %p, align 4
@@ ... @@
 ; CHECK-NOT: Cluster loads
 ; CHECK: SU(1): %vreg{{[0-9]+}} = LDRWui
 ; CHECK: SU(2): %vreg{{[0-9]+}} = LDRWui
+; EXYNOS: ********** MI Scheduling **********
+; EXYNOS-LABEL: ldr_int_volatile:BB#0
+; EXYNOS-NOT: Cluster loads
+; EXYNOS: SU(1): %vreg{{[0-9]+}} = LDRWui
+; EXYNOS: SU(2): %vreg{{[0-9]+}} = LDRWui
 define i32 @ldr_int_volatile(i32* %a) nounwind {
   %p1 = getelementptr inbounds i32, i32* %a, i32 1
   %tmp1 = load volatile i32, i32* %p1, align 2
@@ ... @@
   %tmp3 = add i32 %tmp1, %tmp2
   ret i32 %tmp3
 }
+
+; Test ldq clustering (no clustering for Exynos).
+; CHECK: ********** MI Scheduling **********
+; CHECK-LABEL: ldq_cluster:BB#0
+; CHECK: Cluster loads SU(1) - SU(3)
+; CHECK: SU(1): %vreg{{[0-9]+}} = LDRQui
+; CHECK: SU(3): %vreg{{[0-9]+}} = LDRQui
+; EXYNOS: ********** MI Scheduling **********
+; EXYNOS-LABEL: ldq_cluster:BB#0
+; EXYNOS-NOT: Cluster loads
+define <2 x i64> @ldq_cluster(i64* %p) {
+  %a1 = bitcast i64* %p to <2 x i64>*
+  %tmp1 = load <2 x i64>, <2 x i64>* %a1, align 8
+  %add.ptr2 = getelementptr inbounds i64, i64* %p, i64 2
+  %a2 = bitcast i64* %add.ptr2 to <2 x i64>*
+  %tmp2 = add nsw <2 x i64> %tmp1, %tmp1
+  %tmp3 = load <2 x i64>, <2 x i64>* %a2, align 8
+  %res = mul nsw <2 x i64> %tmp2, %tmp3
+  ret <2 x i64> %res
+}
@@ ... @@
+; RUN: llc < %s -march=aarch64 -mcpu=exynos-m1 -verify-machineinstrs -asm-verbose=false | FileCheck %s
+
+; CHECK-LABEL: test_exynos_nopair_st
+; CHECK: str
+; CHECK: stur
+; CHECK-NOT: stp
+define void @test_exynos_nopair_st(double* %ptr, <2 x double> %v1, <2 x double> %v2) {
+  %tmp1 = bitcast double* %ptr to <2 x double>*
+  store <2 x double> %v2, <2 x double>* %tmp1, align 16
+  %add.ptr = getelementptr inbounds double, double* %ptr, i64 -2
+  %tmp = bitcast double* %add.ptr to <2 x double>*
+  store <2 x double> %v1, <2 x double>* %tmp, align 16
+  ret void
+}
+
+; CHECK-LABEL: test_exynos_nopair_ld
+; CHECK: ldr
+; CHECK: ldr
+; CHECK-NOT: ldp
+define <2 x i64> @test_exynos_nopair_ld(i64* %p) {
+  %a1 = bitcast i64* %p to <2 x i64>*
+  %tmp1 = load <2 x i64>, <2 x i64>* %a1, align 8
+  %add.ptr2 = getelementptr inbounds i64, i64* %p, i64 2
+  %a2 = bitcast i64* %add.ptr2 to <2 x i64>*
+  %tmp2 = load <2 x i64>, <2 x i64>* %a2, align 8
+  %add = add nsw <2 x i64> %tmp1, %tmp2
+  ret <2 x i64> %add
+}