llvm.org GIT mirror: llvm / ddbfbcf
test/CodeGen/X86: FileCheck-ize and add explicit -mtriple=x86_64-linux. They are useless to Win64 target.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@127732 91177308-0d34-0410-b5e6-96231b3b80d8

Author: NAKAMURA Takumi
12 changed files with 62 additions and 24 deletions.
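
Every file in this change follows the same pattern: RUN lines that piped llc output through grep, count, or wc are replaced by a single FileCheck invocation, and -march=x86-64 is replaced by an explicit -mtriple=x86_64-linux so the checked assembly no longer depends on the host's default target (the old grep expectations do not hold when the default triple is Win64). Taking the first test below as the worked example, the conversion looks like this:

; Before: count matches, triple taken from the host
; RUN: llc < %s -march=x86-64 | grep paddw | count 2
; RUN: llc < %s -march=x86-64 | not grep mov
;
; After: explicit Linux triple; the interleaved directives require exactly
; two paddw instructions and forbid any mov before, between, or after them
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
; CHECK-NOT: mov
; CHECK: paddw
; CHECK-NOT: mov
; CHECK: paddw
; CHECK-NOT: paddw
; CHECK-NOT: mov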

-; RUN: llc < %s -march=x86-64 | grep paddw | count 2
-; RUN: llc < %s -march=x86-64 | not grep mov
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT: mov
+; CHECK: paddw
+; CHECK-NOT: mov
+; CHECK: paddw
+; CHECK-NOT: paddw
+; CHECK-NOT: mov

 ; The 2-addr pass should ensure that identical code is produced for these functions
 ; no extra copy should be generated.
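
The function bodies are not part of the hunk above. As a hypothetical illustration of what that comment describes (assuming <8 x i16> adds that lower to paddw, and made-up function names), the kind of pair it is talking about could look like this: the second add has its operands swapped, and the two-address pass is expected to commute it so that neither function needs an extra register copy.

; Hypothetical sketch only; the real test body is omitted from the diff hunk above.
define <8 x i16> @add_ab(<8 x i16> %a, <8 x i16> %b) nounwind {
  %t = add <8 x i16> %a, %b     ; operands already in the order the two-address form wants
  ret <8 x i16> %t
}

define <8 x i16> @add_ba(<8 x i16> %a, <8 x i16> %b) nounwind {
  %t = add <8 x i16> %b, %a     ; should be commuted by the 2-addr pass rather than copied
  ret <8 x i16> %t
}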

-; RUN: llc < %s -march=x86 | not grep lea
-; RUN: llc < %s -march=x86-64 | not grep lea
+; RUN: llc < %s -march=x86 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT: lea

 @B = external global [1000 x i8], align 32
 @A = external global [1000 x i8], align 32

-; RUN: llc < %s -march=x86-64 | grep min | count 1
-; RUN: llc < %s -march=x86-64 | grep max | count 1
-; RUN: llc < %s -march=x86-64 | grep mov | count 2
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT: {{(min|max|mov)}}
+; CHECK: mov
+; CHECK-NOT: {{(min|max|mov)}}
+; CHECK: min
+; CHECK-NOT: {{(min|max|mov)}}
+; CHECK: mov
+; CHECK-NOT: {{(min|max|mov)}}
+; CHECK: max
+; CHECK-NOT: {{(min|max|mov)}}

 declare float @bar()


-; RUN: llc < %s -march=x86-64 | not grep mov
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT: mov

 define <4 x float> @foo(<4 x float>* %p, <4 x float> %x) nounwind {
   %t = load <4 x float>* %p

-; RUN: llc < %s -march=x86-64 | grep movap | count 2
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT: movapd
+; CHECK: movaps
+; CHECK-NOT: movaps
+; CHECK: movapd
+; CHECK-NOT: movap

 define void @foo(<4 x float>* %p, <4 x float> %x) nounwind {
   store <4 x float> %x, <4 x float>* %p

-; RUN: llc < %s -march=x86-64 | grep movaps | count 1
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK: movaps
+; CHECK-NOT: movaps

 define void @bar(<2 x i64>* %p, <2 x i64> %x) nounwind {
   store <2 x i64> %x, <2 x i64>* %p

-; RUN: llc -march=x86-64 < %s | FileCheck %s
+; RUN: llc -mtriple=x86_64-linux < %s | FileCheck %s

 ; Commute the comparison to avoid a move.
 ; PR7500.
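
The body of this test is not shown in the hunk. Purely as a hypothetical sketch of the shape involved (made-up function and value names, not the code attached to PR7500), a commutable floating-point comparison feeding a select is the sort of pattern where llc may emit the compare with either operand order, and the test checks that the order chosen avoids an extra register move.

; Hypothetical sketch only, not the actual PR7500 test case.
define double @pick(double %a, double %b) nounwind {
  %c = fcmp ogt double %a, %b             ; could equally be emitted as the commuted compare
  %r = select i1 %c, double %a, double %b
  ret double %r
}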

-; RUN: llc < %s -march=x86-64 -mattr=+64bit,+sse3 -print-failed-fuse-candidates |& \
-; RUN: grep fail | count 1
+; RUN: llc < %s -mtriple=x86_64-linux -mattr=+64bit,+sse3 -print-failed-fuse-candidates |& FileCheck %s
+; CHECK: fail
+; CHECK-NOT: fail

 declare float @test_f(float %f)
 declare double @test_d(double %f)

-; RUN: llc < %s -march=x86-64 | grep {testb \[%\]al, \[%\]al}
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK: testb %al, %al

 %struct.__va_list_tag = type { i32, i32, i8*, i8* }


-; RUN: llc < %s -march=x86 -relocation-model=static | not grep lea
-; RUN: llc < %s -march=x86-64 | not grep lea
+; RUN: llc < %s -march=x86 -relocation-model=static | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT: lea

 ; P should be sunk into the loop and folded into the address mode. There
 ; shouldn't be any lea instructions inside the loop.
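
The loop itself lies outside this hunk. A minimal hypothetical sketch of the pattern those two comments describe (made-up function and value names) is a loop that stores through pointer-plus-induction-variable addresses, where the address arithmetic should be folded into the store's addressing mode instead of being materialized with lea inside the loop.

; Hypothetical sketch only; the real test body is omitted from the hunk above.
define void @sink_fold(i8* %P, i32 %n) nounwind {
entry:
  br label %loop

loop:
  %i = phi i32 [ 0, %entry ], [ %i.next, %loop ]
  %addr = getelementptr i8* %P, i32 %i   ; should fold into the store's addressing mode
  store i8 0, i8* %addr
  %i.next = add i32 %i, 1
  %done = icmp eq i32 %i.next, %n
  br i1 %done, label %exit, label %loop

exit:
  ret void
}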

-; RUN: llc < %s -march=x86 | not grep lea
-; RUN: llc < %s -march=x86-64 | not grep lea
+; RUN: llc < %s -march=x86 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s
+; CHECK-NOT: lea

 @B = external global [1000 x float], align 32
 @A = external global [1000 x float], align 32

-; RUN: llc < %s -march=x86-64 -tailcallopt | grep TAILCALL
+; RUN: llc < %s -mtriple=x86_64-linux -tailcallopt | FileCheck %s
+
+; FIXME: Win64 does not support byval.
+
+; Expect the entry point.
+; CHECK: tailcaller:
+
 ; Expect 2 rep;movs because of tail call byval lowering.
-; RUN: llc < %s -march=x86-64 -tailcallopt | grep rep | wc -l | grep 2
+; CHECK: rep;
+; CHECK: rep;
+
 ; A sequence of copyto/copyfrom virtual registers is used to deal with byval
 ; lowering appearing after moving arguments to registers. The following two
 ; checks verify that the register allocator changes those sequences to direct
 ; moves to argument register where it can (for registers that are not used in
 ; byval lowering - not rsi, not rdi, not rcx).
 ; Expect argument 4 to be moved directly to register edx.
-; RUN: llc < %s -march=x86-64 -tailcallopt | grep movl | grep {7} | grep edx
+; CHECK: movl $7, %edx
+
 ; Expect argument 6 to be moved directly to register r8.
-; RUN: llc < %s -march=x86-64 -tailcallopt | grep movl | grep {17} | grep r8
+; CHECK: movl $17, %r8d
+
+; Expect not call but jmp to @tailcallee.
+; CHECK: jmp tailcallee
+
+; Expect the trailer.
+; CHECK: .size tailcaller

 %struct.s = type { i64, i64, i64, i64, i64, i64, i64, i64,
   i64, i64, i64, i64, i64, i64, i64, i64,
[...]
   %tmp4 = tail call fastcc i64 @tailcallee(%struct.s* %a byval, i64 %tmp3, i64 %b, i64 7, i64 13, i64 17)
   ret i64 %tmp4
 }
-
-
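
For reference on why it is arguments 4 and 6 that can be moved straight into their registers: the x86-64 integer argument registers are rdi, rsi, rdx, rcx, r8, r9 in that order, and the rep;movs used for the byval copy itself needs rdi, rsi, and rcx, which is what the "not rsi, not rdi, not rcx" comment above refers to. A sketch of the correspondence the checks rely on, assuming the byval aggregate is passed in memory and does not occupy an integer register (the callee declaration itself is not shown in this hunk):

; Illustrative mapping only, reconstructed from the call site shown above.
;   i64 %tmp3 -> %rdi  (also rep;movs destination, so not moved directly)
;   i64 %b    -> %rsi  (also rep;movs source, so not moved directly)
;   i64 7     -> %rdx  (free, hence "CHECK: movl $7, %edx")
;   i64 13    -> %rcx  (also rep;movs count, so not moved directly)
;   i64 17    -> %r8   (free, hence "CHECK: movl $17, %r8d")
declare fastcc i64 @tailcallee(%struct.s* byval, i64, i64, i64, i64, i64)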