llvm.org GIT mirror: llvm @ dddc90d

[x86] add another test for load splitting with extracted stores (PR42305); NFC

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@363732 91177308-0d34-0410-b5e6-96231b3b80d8

Author: Sanjay Patel
1 file changed, 69 insertions(+), 1 deletion(-)
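The new test covers the pattern named in the title: a 32-byte, align-1 vector load whose two 16-byte halves are extracted with shufflevector and stored to independently indexed addresses. The CHECK lines show the load being emitted as two 16-byte movups/vmovups rather than one 32-byte load followed by an extract. A rough C equivalent of @load_split_more (an illustrative sketch only, not part of the commit; the memcpy calls stand in for the align-1 vector load and stores in the IR):

#include <string.h>

void load_split_more(float *src, int *idx, float *dst) {
  float tmp[8];
  memcpy(tmp, src, sizeof tmp);      /* unaligned <8 x float> load (align 1) */
  memcpy(dst + idx[0], tmp, 16);     /* store low 4 floats;  (v)movups %xmm0 */
  memcpy(dst + idx[1], tmp + 4, 16); /* store high 4 floats; (v)movups %xmm1 */
}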
@@ -128,7 +128,7 @@
 
 ; PR42305 - https://bugs.llvm.org/show_bug.cgi?id=42305
 
-define void @load_split(<8 x float>* %ld, <4 x float>* %st1, <4 x float>* %st2) {
+define void @load_split(<8 x float>* %ld, <4 x float>* %st1, <4 x float>* %st2) nounwind {
 ; X86-SSE-LABEL: load_split:
 ; X86-SSE: # %bb.0:
 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -173,3 +173,71 @@
   store <4 x float> %t128, <4 x float>* %st2, align 1
   ret void
 }
+
+define void @load_split_more(float* %src, i32* %idx, float* %dst) nounwind {
+; X86-SSE-LABEL: load_split_more:
+; X86-SSE: # %bb.0:
+; X86-SSE-NEXT: pushl %esi
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-SSE-NEXT: movl (%edx), %esi
+; X86-SSE-NEXT: movups (%ecx), %xmm0
+; X86-SSE-NEXT: movups 16(%ecx), %xmm1
+; X86-SSE-NEXT: movups %xmm0, (%eax,%esi,4)
+; X86-SSE-NEXT: movl 4(%edx), %ecx
+; X86-SSE-NEXT: movups %xmm1, (%eax,%ecx,4)
+; X86-SSE-NEXT: popl %esi
+; X86-SSE-NEXT: retl
+;
+; X86-AVX-LABEL: load_split_more:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %esi
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-AVX-NEXT: movl (%edx), %esi
+; X86-AVX-NEXT: vmovups (%ecx), %xmm0
+; X86-AVX-NEXT: vmovups 16(%ecx), %xmm1
+; X86-AVX-NEXT: vmovups %xmm0, (%eax,%esi,4)
+; X86-AVX-NEXT: movl 4(%edx), %ecx
+; X86-AVX-NEXT: vmovups %xmm1, (%eax,%ecx,4)
+; X86-AVX-NEXT: popl %esi
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: load_split_more:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movslq (%rsi), %rax
+; X64-SSE-NEXT: movups (%rdi), %xmm0
+; X64-SSE-NEXT: movups 16(%rdi), %xmm1
+; X64-SSE-NEXT: movups %xmm0, (%rdx,%rax,4)
+; X64-SSE-NEXT: movslq 4(%rsi), %rax
+; X64-SSE-NEXT: movups %xmm1, (%rdx,%rax,4)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: load_split_more:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movslq (%rsi), %rax
+; X64-AVX-NEXT: vmovups (%rdi), %xmm0
+; X64-AVX-NEXT: vmovups 16(%rdi), %xmm1
+; X64-AVX-NEXT: vmovups %xmm0, (%rdx,%rax,4)
+; X64-AVX-NEXT: movslq 4(%rsi), %rax
+; X64-AVX-NEXT: vmovups %xmm1, (%rdx,%rax,4)
+; X64-AVX-NEXT: retq
+  %v.i = bitcast float* %src to <8 x float>*
+  %tmp = load <8 x float>, <8 x float>* %v.i, align 1
+  %tmp1 = load i32, i32* %idx, align 4
+  %idx.ext = sext i32 %tmp1 to i64
+  %add.ptr1 = getelementptr inbounds float, float* %dst, i64 %idx.ext
+  %extract = shufflevector <8 x float> %tmp, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %v.i11 = bitcast float* %add.ptr1 to <4 x float>*
+  store <4 x float> %extract, <4 x float>* %v.i11, align 1
+  %arrayidx2 = getelementptr inbounds i32, i32* %idx, i64 1
+  %tmp2 = load i32, i32* %arrayidx2, align 4
+  %idx.ext3 = sext i32 %tmp2 to i64
+  %add.ptr4 = getelementptr inbounds float, float* %dst, i64 %idx.ext3
+  %extract5 = shufflevector <8 x float> %tmp, <8 x float> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %v.i10 = bitcast float* %add.ptr4 to <4 x float>*
+  store <4 x float> %extract5, <4 x float>* %v.i10, align 1
+  ret void
+}