llvm.org GIT mirror llvm / 0e28b65
[SLP] add tests for bitcasted vector pointer load; NFC I'm not sure if this falls within the scope of SLP, but we could create vector loads for some of these patterns. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@365055 91177308-0d34-0410-b5e6-96231b3b80d8 Sanjay Patel 2 months ago
1 changed file(s) with 102 addition(s) and 0 deletion(s). Raw diff Collapse all Expand all
0 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
1 ; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX1
2 ; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX2
3
; Load a scalar through a bitcast of a vector pointer. The loaded type (float)
; matches the element type of <4 x float>, and the pointer is dereferenceable(16)
; with align 16, so a <4 x float> load + extract may be possible here.
; The CHECK lines record that the scalar load is currently left untouched.
4 define float @matching_scalar(<4 x float>* dereferenceable(16) %p) {
5 ; CHECK-LABEL: @matching_scalar(
6 ; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to float*
7 ; CHECK-NEXT: [[R:%.*]] = load float, float* [[BC]], align 16
8 ; CHECK-NEXT: ret float [[R]]
9 ;
10 %bc = bitcast <4 x float>* %p to float*
11 %r = load float, float* %bc, align 16
12 ret float %r
13 }
14
; Same pattern as @matching_scalar, but the loaded scalar type (i32) differs
; from the vector element type (float) while having the same bit width.
; Vectorizing would require a differently-typed or bitcasted vector load.
; The CHECK lines record that the scalar load is currently left untouched.
15 define i32 @nonmatching_scalar(<4 x float>* dereferenceable(16) %p) {
16 ; CHECK-LABEL: @nonmatching_scalar(
17 ; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to i32*
18 ; CHECK-NEXT: [[R:%.*]] = load i32, i32* [[BC]], align 16
19 ; CHECK-NEXT: ret i32 [[R]]
20 ;
21 %bc = bitcast <4 x float>* %p to i32*
22 %r = load i32, i32* %bc, align 16
23 ret i32 %r
24 }
25
; The loaded scalar (i64) is wider than the 32-bit elements of <4 x float>,
; so one loaded value spans two vector elements. Still within the
; dereferenceable(16) region and aligned, but a type-mismatched case.
; The CHECK lines record that the scalar load is currently left untouched.
26 define i64 @larger_scalar(<4 x float>* dereferenceable(16) %p) {
27 ; CHECK-LABEL: @larger_scalar(
28 ; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to i64*
29 ; CHECK-NEXT: [[R:%.*]] = load i64, i64* [[BC]], align 16
30 ; CHECK-NEXT: ret i64 [[R]]
31 ;
32 %bc = bitcast <4 x float>* %p to i64*
33 %r = load i64, i64* %bc, align 16
34 ret i64 %r
35 }
36
; The loaded scalar (i8) is narrower than the 32-bit elements of <4 x float>;
; a vector load would over-read relative to the requested scalar, though the
; full 16 bytes are dereferenceable and the access is align 16.
; The CHECK lines record that the scalar load is currently left untouched.
38 define i8 @smaller_scalar(<4 x float>* dereferenceable(16) %p) {
39 ; CHECK-LABEL: @smaller_scalar(
40 ; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to i8*
41 ; CHECK-NEXT: [[R:%.*]] = load i8, i8* [[BC]], align 16
42 ; CHECK-NEXT: ret i8 [[R]]
43 ;
44 %bc = bitcast <4 x float>* %p to i8*
45 %r = load i8, i8* %bc, align 16
46 ret i8 %r
47 }
47
; Variant of @smaller_scalar with a 256-bit source vector (<8 x float>,
; dereferenceable(32), align 32) — relevant because the RUN lines test both
; AVX and AVX2, which differ in 256-bit vector support.
; The CHECK lines record that the scalar load is currently left untouched.
48 define i8 @smaller_scalar_256bit_vec(<8 x float>* dereferenceable(32) %p) {
49 ; CHECK-LABEL: @smaller_scalar_256bit_vec(
50 ; CHECK-NEXT: [[BC:%.*]] = bitcast <8 x float>* [[P:%.*]] to i8*
51 ; CHECK-NEXT: [[R:%.*]] = load i8, i8* [[BC]], align 32
52 ; CHECK-NEXT: ret i8 [[R]]
53 ;
54 %bc = bitcast <8 x float>* %p to i8*
55 %r = load i8, i8* %bc, align 32
56 ret i8 %r
57 }
58
; Variant of @smaller_scalar where the load is only align 4, i.e. less than
; the natural 16-byte alignment of <4 x float>. Tests that alignment below
; the vector size is considered before forming a vector load.
; The CHECK lines record that the scalar load is currently left untouched.
59 define i8 @smaller_scalar_less_aligned(<4 x float>* dereferenceable(16) %p) {
60 ; CHECK-LABEL: @smaller_scalar_less_aligned(
61 ; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to i8*
62 ; CHECK-NEXT: [[R:%.*]] = load i8, i8* [[BC]], align 4
63 ; CHECK-NEXT: ret i8 [[R]]
64 ;
65 %bc = bitcast <4 x float>* %p to i8*
66 %r = load i8, i8* %bc, align 4
67 ret i8 %r
68 }
69
; Negative variant of @matching_scalar: only dereferenceable(15) bytes are
; guaranteed, one short of the 16 a <4 x float> load would read, so widening
; to a vector load would not be safe here.
; The CHECK lines record that the scalar load is left untouched.
70 define float @matching_scalar_small_deref(<4 x float>* dereferenceable(15) %p) {
71 ; CHECK-LABEL: @matching_scalar_small_deref(
72 ; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to float*
73 ; CHECK-NEXT: [[R:%.*]] = load float, float* [[BC]], align 16
74 ; CHECK-NEXT: ret float [[R]]
75 ;
76 %bc = bitcast <4 x float>* %p to float*
77 %r = load float, float* %bc, align 16
78 ret float %r
79 }
80
; Negative variant of @matching_scalar: the load is volatile, so it must not
; be widened or otherwise transformed into a vector load.
; The CHECK lines record that the volatile scalar load is left untouched.
81 define float @matching_scalar_volatile(<4 x float>* dereferenceable(16) %p) {
82 ; CHECK-LABEL: @matching_scalar_volatile(
83 ; CHECK-NEXT: [[BC:%.*]] = bitcast <4 x float>* [[P:%.*]] to float*
84 ; CHECK-NEXT: [[R:%.*]] = load volatile float, float* [[BC]], align 16
85 ; CHECK-NEXT: ret float [[R]]
86 ;
87 %bc = bitcast <4 x float>* %p to float*
88 %r = load volatile float, float* %bc, align 16
89 ret float %r
90 }
91
; Negative variant: the source pointer is a scalar double*, not a vector
; pointer, so the bitcast-of-vector-pointer pattern does not apply.
; The CHECK lines record that the scalar load is left untouched.
92 define float @nonvector(double* dereferenceable(16) %p) {
93 ; CHECK-LABEL: @nonvector(
94 ; CHECK-NEXT: [[BC:%.*]] = bitcast double* [[P:%.*]] to float*
95 ; CHECK-NEXT: [[R:%.*]] = load float, float* [[BC]], align 16
96 ; CHECK-NEXT: ret float [[R]]
97 ;
98 %bc = bitcast double* %p to float*
99 %r = load float, float* %bc, align 16
100 ret float %r
101 }