llvm.org GIT mirror llvm / 7795456
[InstCombine] foldICmpWithLowBitMaskedVal(): handle non-canonical ((-1 << y) >> y) mask

Summary:
The last low-bit-mask-producing pattern I can think of.

https://rise4fun.com/Alive/UGzE <- non-canonical
But we cannot canonicalize it because of extra uses.
https://bugs.llvm.org/show_bug.cgi?id=38123

Reviewers: spatel, craig.topper, RKSimon

Reviewed By: spatel

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D52148

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@342548 91177308-0d34-0410-b5e6-96231b3b80d8

Author: Roman Lebedev
3 changed files with 43 additions and 55 deletions.
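To make the change concrete, here is a minimal before/after sketch of the new fold in LLVM IR, assembled from the updated test expectations in this diff; the function names @src and @tgt are illustrative, not taken verbatim from the tests.

declare void @use8(i8)

; Before: the mask is built as ((-1 << %y) >> %y). Because %t0 has an extra
; use, instcombine cannot first rewrite the mask into a single (-1 >> %y),
; so the icmp fold has to recognize this shape directly.
define i1 @src(i8 %x, i8 %y) {
  %t0 = shl i8 -1, %y
  call void @use8(i8 %t0)
  %t1 = lshr i8 %t0, %y
  %t2 = and i8 %t1, %x
  %ret = icmp eq i8 %t2, %x
  ret i1 %ret
}

; After: (%x & mask) == %x is folded to mask u>= %x; the now-dead 'and' is
; cleaned up, while the mask computation stays for its other use.
define i1 @tgt(i8 %x, i8 %y) {
  %t0 = shl i8 -1, %y
  call void @use8(i8 %t0)
  %t1 = lshr i8 %t0, %y
  %ret = icmp uge i8 %t1, %x
  ret i1 %ret
}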
@@ -2885,6 +2885,7 @@
 /// icmp SrcPred (x & Mask), x to icmp DstPred x, Mask
 /// Where Mask is some pattern that produces all-ones in low bits:
 /// (-1 >> y)
+/// ((-1 << y) >> y) <- non-canonical, has extra uses
 /// ~(-1 << y)
 /// ((1 << y) + (-1)) <- non-canonical, has extra uses
 /// The Mask can be a constant, too.
@@ -2893,11 +2894,12 @@
 static Value *foldICmpWithLowBitMaskedVal(ICmpInst &I,
                                           InstCombiner::BuilderTy &Builder) {
   ICmpInst::Predicate SrcPred;
-  Value *X, *M;
-  auto m_VariableMask =
-      m_CombineOr(m_CombineOr(m_Not(m_Shl(m_AllOnes(), m_Value())),
-                              m_Add(m_Shl(m_One(), m_Value()), m_AllOnes())),
-                  m_LShr(m_AllOnes(), m_Value()));
+  Value *X, *M, *Y;
+  auto m_VariableMask = m_CombineOr(
+      m_CombineOr(m_Not(m_Shl(m_AllOnes(), m_Value())),
+                  m_Add(m_Shl(m_One(), m_Value()), m_AllOnes())),
+      m_CombineOr(m_LShr(m_AllOnes(), m_Value()),
+                  m_LShr(m_Shl(m_AllOnes(), m_Value(Y)), m_Deferred(Y))));
   auto m_Mask = m_CombineOr(m_VariableMask, m_LowBitMask());
   if (!match(&I, m_c_ICmp(SrcPred,
                           m_c_And(m_CombineAnd(m_Mask, m_Value(M)), m_Value(X)),
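A note on the new matcher arm above: m_Value(Y) captures the shl amount and m_Deferred(Y) requires the lshr amount to be that same value, since shifting left and right by different amounts does not generally produce a mask of low ones. An illustrative negative case (a hypothetical test, not part of this diff) where the fold must not fire:

declare void @use8(i8)

; %y and %z may differ, e.g. %y = 4, %z = 2 gives ((-1 << 4) >> 2) = 0x3C,
; which is not a low-bit mask, so (%x & %t1) == %x must not become a u>=.
define i1 @negative_mismatched_shift_amounts(i8 %x, i8 %y, i8 %z) {
  %t0 = shl i8 -1, %y
  call void @use8(i8 %t0)
  %t1 = lshr i8 %t0, %z
  %t2 = and i8 %t1, %x
  %ret = icmp eq i8 %t2, %x
  ret i1 %ret
}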
@@ -22,9 +22,8 @@
 ; CHECK-NEXT: [[T0:%.*]] = shl i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[T2]], [[X]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp uge i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -43,9 +42,8 @@
 ; CHECK-NEXT: [[T0:%.*]] = shl <2 x i8> <i8 -1, i8 -1>, [[Y:%.*]]
 ; CHECK-NEXT: call void @use2i8(<2 x i8> [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr <2 x i8> [[T0]], [[Y]]
-; CHECK-NEXT: [[T2:%.*]] = and <2 x i8> [[T1]], [[X:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp eq <2 x i8> [[T2]], [[X]]
-; CHECK-NEXT: ret <2 x i1> [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp uge <2 x i8> [[T1]], [[X:%.*]]
+; CHECK-NEXT: ret <2 x i1> [[TMP1]]
 ;
   %t0 = shl <2 x i8> <i8 -1, i8 -1>, %y
   call void @use2i8(<2 x i8> %t0)
@@ -60,9 +58,8 @@
 ; CHECK-NEXT: [[T0:%.*]] = shl <3 x i8> <i8 -1, i8 undef, i8 -1>, [[Y:%.*]]
 ; CHECK-NEXT: call void @use3i8(<3 x i8> [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i8> [[T0]], [[Y]]
-; CHECK-NEXT: [[T2:%.*]] = and <3 x i8> [[T1]], [[X:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp eq <3 x i8> [[T2]], [[X]]
-; CHECK-NEXT: ret <3 x i1> [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp uge <3 x i8> [[T1]], [[X:%.*]]
+; CHECK-NEXT: ret <3 x i1> [[TMP1]]
 ;
   %t0 = shl <3 x i8> <i8 -1, i8 undef, i8 -1>, %y
   call void @use3i8(<3 x i8> %t0)
@@ -84,9 +81,8 @@
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[X]], [[T1]]
-; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[T2]], [[X]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ule i8 [[X]], [[T1]]
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -103,9 +99,8 @@
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X]]
-; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[X]], [[T2]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ule i8 [[X]], [[T1]]
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -122,9 +117,8 @@
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[X]], [[T1]]
-; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[X]], [[T2]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ule i8 [[X]], [[T1]]
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -145,9 +139,8 @@
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[T2]], [[X]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp uge i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0) ; needed anyway
@@ -165,8 +158,8 @@
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[T2]], [[X]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp uge i8 [[T1]], [[X]]
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0) ; needed anyway
@@ -185,8 +178,8 @@
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[T2]], [[X]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp uge i8 [[T1]], [[X]]
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -22,9 +22,8 @@
 ; CHECK-NEXT: [[T0:%.*]] = shl i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[T2]], [[X]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -43,9 +42,8 @@
 ; CHECK-NEXT: [[T0:%.*]] = shl <2 x i8> <i8 -1, i8 -1>, [[Y:%.*]]
 ; CHECK-NEXT: call void @use2i8(<2 x i8> [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr <2 x i8> [[T0]], [[Y]]
-; CHECK-NEXT: [[T2:%.*]] = and <2 x i8> [[T1]], [[X:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ne <2 x i8> [[T2]], [[X]]
-; CHECK-NEXT: ret <2 x i1> [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <2 x i8> [[T1]], [[X:%.*]]
+; CHECK-NEXT: ret <2 x i1> [[TMP1]]
 ;
   %t0 = shl <2 x i8> <i8 -1, i8 -1>, %y
   call void @use2i8(<2 x i8> %t0)
@@ -60,9 +58,8 @@
 ; CHECK-NEXT: [[T0:%.*]] = shl <3 x i8> <i8 -1, i8 undef, i8 -1>, [[Y:%.*]]
 ; CHECK-NEXT: call void @use3i8(<3 x i8> [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i8> [[T0]], [[Y]]
-; CHECK-NEXT: [[T2:%.*]] = and <3 x i8> [[T1]], [[X:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ne <3 x i8> [[T2]], [[X]]
-; CHECK-NEXT: ret <3 x i1> [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <3 x i8> [[T1]], [[X:%.*]]
+; CHECK-NEXT: ret <3 x i1> [[TMP1]]
 ;
   %t0 = shl <3 x i8> <i8 -1, i8 undef, i8 -1>, %y
   call void @use3i8(<3 x i8> %t0)
@@ -84,9 +81,8 @@
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[X]], [[T1]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[T2]], [[X]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i8 [[X]], [[T1]]
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -103,9 +99,8 @@
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[X]], [[T2]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i8 [[X]], [[T1]]
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -122,9 +117,8 @@
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[X]], [[T1]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[X]], [[T2]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i8 [[X]], [[T1]]
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)
@@ -145,9 +139,8 @@
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[T2]], [[X]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0) ; needed anyway
@@ -165,8 +158,8 @@
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 [[T0]], [[Y]]
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[T2]], [[X]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0) ; needed anyway
@@ -185,8 +178,8 @@
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[T2]], [[X]]
-; CHECK-NEXT: ret i1 [[RET]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT: ret i1 [[TMP1]]
 ;
   %t0 = shl i8 -1, %y
   call void @use8(i8 %t0)