llvm.org GIT mirror: llvm / 25ff19d
[SLP] Fix PR35047: Fix default cost model for cast op in X86.

Summary: The cost calculation for the default case on the X86 target does not always take the correct path because the 4th argument is missing from the `BaseT::getCastInstrCost()` call. Added this missing parameter.

Reviewers: hfinkel, mkuper, RKSimon, spatel
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D39687

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317576 91177308-0d34-0410-b5e6-96231b3b80d8
Alexey Bataev
3 changed files with 30 additions and 39 deletions.
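The root cause is easy to reproduce in isolation. Below is a minimal sketch of the defaulted-parameter pitfall; the class and parameter names are invented for illustration, and only the shape mirrors BasicTTIImplBase::getCastInstrCost(Opcode, Dst, Src, I = nullptr):

// toy_cost_model.cpp -- minimal sketch, not the actual LLVM sources.
#include <iostream>

struct Instruction {}; // stand-in for llvm::Instruction

struct BaseCostModel {
  // The trailing context pointer is defaulted, so omitting it type-checks.
  int getCastInstrCost(unsigned /*Opcode*/, int DstBits, int SrcBits,
                       const Instruction *I = nullptr) const {
    // With context, the model could prove the cast free (e.g. an extension
    // folded into the load that feeds it); without it, be conservative.
    return I ? 0 : DstBits / SrcBits;
  }
};

int main() {
  BaseCostModel Base;
  Instruction Sext;
  // Pre-patch call shape: compiles fine, but silently drops the context.
  std::cout << Base.getCastInstrCost(0, 64, 16) << '\n';        // prints 4
  // Post-patch call shape: forwards the instruction.
  std::cout << Base.getCastInstrCost(0, 64, 16, &Sext) << '\n'; // prints 0
}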
     return Entry->Cost;
   }

-  return BaseT::getCastInstrCost(Opcode, Dst, Src);
+  return BaseT::getCastInstrCost(Opcode, Dst, Src, I);
 }

 int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
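For reference, the declarations involved look roughly like this at this revision (paraphrased, not verbatim, from the BasicTTIImpl.h and X86TargetTransformInfo.h headers; consult those for the exact form). Both the generic hook and the X86 override default the trailing `I` parameter to nullptr, which is why the three-argument call compiled cleanly while losing the IR context:

// Paraphrased declarations -- not verbatim LLVM code.
namespace llvm {
class Type;
class Instruction;

// Generic, target-independent cost model (CRTP base).
template <typename T> class BasicTTIImplBase {
public:
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                       const Instruction *I = nullptr);
};

// X86 override: consults per-subtarget cost tables first and falls back
// to BaseT::getCastInstrCost() -- the call fixed above -- when no table
// entry matches.
class X86TTIImpl {
public:
  int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                       const Instruction *I = nullptr);
};
} // namespace llvm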
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 -basicaa -slp-vectorizer -dce -S | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
-; RUN: opt < %s -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -basicaa -slp-vectorizer -dce -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+; RUN: opt < %s -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 -basicaa -slp-vectorizer -dce -S | FileCheck %s
+; RUN: opt < %s -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -basicaa -slp-vectorizer -dce -S | FileCheck %s

 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 define i32 @test_sext_4i8_to_4i32(i32* noalias nocapture %A, i8* noalias nocapture %B) {
 ; CHECK-LABEL: @test_sext_4i8_to_4i32(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* %B to <4 x i8>*
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[B:%.*]] to <4 x i8>*
 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i8>, <4 x i8>* [[TMP0]], align 1
 ; CHECK-NEXT: [[TMP2:%.*]] = sext <4 x i8> [[TMP1]] to <4 x i32>
-; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* %A to <4 x i32>*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[A:%.*]] to <4 x i32>*
 ; CHECK-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* [[TMP3]], align 4
 ; CHECK-NEXT: ret i32 undef
 ;
 define i32 @test_zext_4i16_to_4i32(i32* noalias nocapture %A, i16* noalias nocapture %B) {
 ; CHECK-LABEL: @test_zext_4i16_to_4i32(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* %B to <4 x i16>*
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[B:%.*]] to <4 x i16>*
 ; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 1
 ; CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32>
-; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* %A to <4 x i32>*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[A:%.*]] to <4 x i32>*
 ; CHECK-NEXT: store <4 x i32> [[TMP2]], <4 x i32>* [[TMP3]], align 4
 ; CHECK-NEXT: ret i32 undef
 ;
 }

 define i64 @test_sext_4i16_to_4i64(i64* noalias nocapture %A, i16* noalias nocapture %B) {
-; SSE-LABEL: @test_sext_4i16_to_4i64(
-; SSE-NEXT: entry:
-; SSE-NEXT: [[TMP0:%.*]] = bitcast i16* %B to <2 x i16>*
-; SSE-NEXT: [[TMP1:%.*]] = load <2 x i16>, <2 x i16>* [[TMP0]], align 1
-; SSE-NEXT: [[TMP2:%.*]] = sext <2 x i16> [[TMP1]] to <2 x i64>
-; SSE-NEXT: [[TMP3:%.*]] = bitcast i64* %A to <2 x i64>*
-; SSE-NEXT: store <2 x i64> [[TMP2]], <2 x i64>* [[TMP3]], align 4
-; SSE-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* %B, i64 2
-; SSE-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i64, i64* %A, i64 2
-; SSE-NEXT: [[TMP4:%.*]] = bitcast i16* [[ARRAYIDX5]] to <2 x i16>*
-; SSE-NEXT: [[TMP5:%.*]] = load <2 x i16>, <2 x i16>* [[TMP4]], align 1
-; SSE-NEXT: [[TMP6:%.*]] = sext <2 x i16> [[TMP5]] to <2 x i64>
-; SSE-NEXT: [[TMP7:%.*]] = bitcast i64* [[ARRAYIDX7]] to <2 x i64>*
-; SSE-NEXT: store <2 x i64> [[TMP6]], <2 x i64>* [[TMP7]], align 4
-; SSE-NEXT: ret i64 undef
-;
-; AVX-LABEL: @test_sext_4i16_to_4i64(
-; AVX-NEXT: entry:
-; AVX-NEXT: [[TMP0:%.*]] = bitcast i16* %B to <4 x i16>*
-; AVX-NEXT: [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 1
-; AVX-NEXT: [[TMP2:%.*]] = sext <4 x i16> [[TMP1]] to <4 x i64>
-; AVX-NEXT: [[TMP3:%.*]] = bitcast i64* %A to <4 x i64>*
-; AVX-NEXT: store <4 x i64> [[TMP2]], <4 x i64>* [[TMP3]], align 4
-; AVX-NEXT: ret i64 undef
+; CHECK-LABEL: @test_sext_4i16_to_4i64(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast i16* [[B:%.*]] to <2 x i16>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i16>, <2 x i16>* [[TMP0]], align 1
+; CHECK-NEXT: [[TMP2:%.*]] = sext <2 x i16> [[TMP1]] to <2 x i64>
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast i64* [[A:%.*]] to <2 x i64>*
+; CHECK-NEXT: store <2 x i64> [[TMP2]], <2 x i64>* [[TMP3]], align 4
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[B]], i64 2
+; CHECK-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i64, i64* [[A]], i64 2
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast i16* [[ARRAYIDX5]] to <2 x i16>*
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i16>, <2 x i16>* [[TMP4]], align 1
+; CHECK-NEXT: [[TMP6:%.*]] = sext <2 x i16> [[TMP5]] to <2 x i64>
+; CHECK-NEXT: [[TMP7:%.*]] = bitcast i64* [[ARRAYIDX7]] to <2 x i64>*
+; CHECK-NEXT: store <2 x i64> [[TMP6]], <2 x i64>* [[TMP7]], align 4
+; CHECK-NEXT: ret i64 undef
 ;
 entry:
   %0 = load i16, i16* %B, align 1
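The regenerated checks above reflect the corrected costs: with `I` forwarded, the generic implementation can reason about the specific cast instruction, for example treating an extension as free when the target folds it into the load that feeds it, so the SSE and AVX runs now agree on the 2-wide form and the per-prefix check blocks collapse. Below is a self-contained sketch of that kind of context check; it is a simplified stand-in, not the real BasicTTIImplBase logic, which consults the target through hooks such as TargetLoweringBase::isExtFree:

#include <iostream>

// Simplified stand-in types; the real code works on llvm::Instruction.
enum class Op { Load, SExt, ZExt };
struct Inst {
  Op Opcode;
  const Inst *Src = nullptr; // first operand, if any
};

// Context-free callers (I == nullptr) can only get the conservative
// answer; callers that forward the instruction can learn the cast is
// free because it folds into the load feeding it (movsx/movzx from
// memory on x86).
int castCost(const Inst *I) {
  if (I && I->Src && I->Src->Opcode == Op::Load)
    return 0;
  return 1;
}

int main() {
  Inst Ld{Op::Load};
  Inst Ext{Op::SExt, &Ld};
  std::cout << castCost(&Ext) << '\n';    // 0: folds into the load
  std::cout << castCost(nullptr) << '\n'; // 1: no context, conservative
}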
 ; CHECK-NEXT: [[SHL3:%.*]] = shl nuw nsw i32 [[CONV2]], 8
 ; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL3]], [[CONV]]
 ; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, i8* [[DATA]], i64 2
+; CHECK-NEXT: [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX4]], align 1
+; CHECK-NEXT: [[CONV5:%.*]] = zext i8 [[TMP2]] to i32
+; CHECK-NEXT: [[SHL6:%.*]] = shl nuw nsw i32 [[CONV5]], 16
+; CHECK-NEXT: [[OR7:%.*]] = or i32 [[OR]], [[SHL6]]
 ; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, i8* [[DATA]], i64 3
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[ARRAYIDX4]] to <2 x i8>*
-; CHECK-NEXT: [[TMP3:%.*]] = load <2 x i8>, <2 x i8>* [[TMP2]], align 1
-; CHECK-NEXT: [[TMP4:%.*]] = zext <2 x i8> [[TMP3]] to <2 x i32>
-; CHECK-NEXT: [[TMP5:%.*]] = shl nuw <2 x i32> [[TMP4]],
-; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x i32> [[TMP5]], i32 0
-; CHECK-NEXT: [[OR7:%.*]] = or i32 [[OR]], [[TMP6]]
-; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i32> [[TMP5]], i32 1
-; CHECK-NEXT: [[OR11:%.*]] = or i32 [[OR7]], [[TMP7]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, i8* [[ARRAYIDX8]], align 1
+; CHECK-NEXT: [[CONV9:%.*]] = zext i8 [[TMP3]] to i32
+; CHECK-NEXT: [[SHL10:%.*]] = shl nuw i32 [[CONV9]], 24
+; CHECK-NEXT: [[OR11:%.*]] = or i32 [[OR7]], [[SHL10]]
 ; CHECK-NEXT: ret i32 [[OR11]]
 ;
 entry: