llvm.org GIT mirror — llvm / 7cc8c72
[InstSimplify][NFC] Tests for skipping 'div-by-0' checks before @llvm.umul.with.overflow

These may remain after @llvm.umul.with.overflow was canonicalized from the code that was originally doing the check via division.

Author: Roman Lebedev
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@366751 91177308-0d34-0410-b5e6-96231b3b80d8
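For context, here is a minimal sketch (illustrative only, not part of this diff) of the division-based overflow check that gets canonicalized into @llvm.umul.with.overflow. The function name and the i4 width are made up to match the tests; note the udiv has to be branch-guarded, since udiv by zero is immediate UB in straight-line IR.

define i1 @mul_overflows_via_div(i4 %size, i4 %nmemb) {
entry:
  %cmp = icmp ne i4 %size, 0            ; the div-by-0 guard
  br i1 %cmp, label %guarded, label %end
guarded:
  %mul = mul i4 %size, %nmemb
  %div = udiv i4 %mul, %size            ; safe here: %size is known non-zero
  %ov = icmp ne i4 %div, %nmemb         ; overflow iff the division does not round-trip
  br label %end
end:
  %res = phi i1 [ %ov, %guarded ], [ false, %entry ]
  ret i1 %res
}

After canonicalization the multiply-and-divide pair becomes @llvm.umul.with.overflow, but the %size != 0 guard can survive as an 'and' with the overflow bit — exactly the pattern the tests below pin down. Since 0 * x never overflows, the overflow bit already implies %size != 0, so that 'and' is redundant; these NFC tests record the current (unsimplified) output before that fold is taught to InstSimplify.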
2 changed files with 188 additions and 0 deletions.

=== First changed file: tests for @llvm.smul.with.overflow ===
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -instsimplify -S | FileCheck %s

declare { i4, i1 } @llvm.smul.with.overflow.i4(i4, i4) #1

define i1 @t0_smul(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @t0_smul(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %and = and i1 %smul.ov, %cmp
  ret i1 %and
}

define i1 @t1_commutative(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @t1_commutative(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[CMP]], [[SMUL_OV]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %and = and i1 %cmp, %smul.ov ; swapped
  ret i1 %and
}

define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {
; CHECK-LABEL: @n2_wrong_size(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE1:%.*]], 0
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE0:%.*]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size1, 0 ; not %size0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size0, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %and = and i1 %smul.ov, %cmp
  ret i1 %and
}

define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n3_wrong_pred(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp eq i4 %size, 0 ; not 'ne'
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %and = and i1 %smul.ov, %cmp
  ret i1 %and
}

define i1 @n4_not_and(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n4_not_and(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = or i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %and = or i1 %smul.ov, %cmp ; not 'and'
  ret i1 %and
}

define i1 @n5_not_zero(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n5_not_zero(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 1
; CHECK-NEXT:    [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 1 ; should be '0'
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %and = and i1 %smul.ov, %cmp
  ret i1 %and
}
=== Second changed file: tests for @llvm.umul.with.overflow ===

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -instsimplify -S | FileCheck %s

declare { i4, i1 } @llvm.umul.with.overflow.i4(i4, i4) #1

define i1 @t0_umul(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @t0_umul(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT:    [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[UMUL_OV]], [[CMP]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 0
  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
  %umul.ov = extractvalue { i4, i1 } %umul, 1
  %and = and i1 %umul.ov, %cmp
  ret i1 %and
}

define i1 @t1_commutative(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @t1_commutative(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT:    [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[CMP]], [[UMUL_OV]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 0
  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
  %umul.ov = extractvalue { i4, i1 } %umul, 1
  %and = and i1 %cmp, %umul.ov ; swapped
  ret i1 %and
}

define i1 @n2_wrong_size(i4 %size0, i4 %size1, i4 %nmemb) {
; CHECK-LABEL: @n2_wrong_size(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE1:%.*]], 0
; CHECK-NEXT:    [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE0:%.*]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[UMUL_OV]], [[CMP]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size1, 0 ; not %size0
  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size0, i4 %nmemb)
  %umul.ov = extractvalue { i4, i1 } %umul, 1
  %and = and i1 %umul.ov, %cmp
  ret i1 %and
}

define i1 @n3_wrong_pred(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n3_wrong_pred(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
; CHECK-NEXT:    [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[UMUL_OV]], [[CMP]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp eq i4 %size, 0 ; not 'ne'
  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
  %umul.ov = extractvalue { i4, i1 } %umul, 1
  %and = and i1 %umul.ov, %cmp
  ret i1 %and
}

define i1 @n4_not_and(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n4_not_and(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT:    [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = or i1 [[UMUL_OV]], [[CMP]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 0
  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
  %umul.ov = extractvalue { i4, i1 } %umul, 1
  %and = or i1 %umul.ov, %cmp ; not 'and'
  ret i1 %and
}

define i1 @n5_not_zero(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @n5_not_zero(
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 1
; CHECK-NEXT:    [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT:    [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
; CHECK-NEXT:    [[AND:%.*]] = and i1 [[UMUL_OV]], [[CMP]]
; CHECK-NEXT:    ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 1 ; should be '0'
  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
  %umul.ov = extractvalue { i4, i1 } %umul, 1
  %and = and i1 %umul.ov, %cmp
  ret i1 %and
}