[SelectionDAG] Add SIGN_EXTEND_VECTOR_INREG support to computeKnownBits.

Differential Revision: https://reviews.llvm.org/D56168

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@350179 91177308-0d34-0410-b5e6-96231b3b80d8

Author: Craig Topper
2 changed files with 13 additions and 11 deletions.
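For context, ISD::SIGN_EXTEND_VECTOR_INREG sign-extends the low lanes of its input vector into the wider lanes of a result vector with fewer elements (e.g. v8i16 -> v4i32), which x86 lowers to pmovsxwd in the test below. The following is a minimal standalone C++ sketch of that semantic, not LLVM code; the array names are illustrative only. The two hunks after it are the computeKnownBits change and the corresponding SSE41 codegen test update.

#include <cstdint>
#include <cstdio>

int main() {
  // Sketch of SIGN_EXTEND_VECTOR_INREG for v8i16 -> v4i32: only the low
  // four input lanes are used, and each is sign-extended to 32 bits.
  int16_t In[8] = {-1, 2, -3, 4, 5, 6, 7, 8};
  int32_t Out[4];
  for (int i = 0; i < 4; ++i)
    Out[i] = In[i]; // implicit sign extension from i16 to i32
  for (int i = 0; i < 4; ++i)
    std::printf("%d ", Out[i]); // prints: -1 2 -3 4
  return 0;
}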
@@ -2826,7 +2826,15 @@
     Known.Zero.setBitsFrom(InVT.getScalarSizeInBits());
     break;
   }
-  // TODO ISD::SIGN_EXTEND_VECTOR_INREG
+  case ISD::SIGN_EXTEND_VECTOR_INREG: {
+    EVT InVT = Op.getOperand(0).getValueType();
+    APInt InDemandedElts = DemandedElts.zextOrSelf(InVT.getVectorNumElements());
+    Known = computeKnownBits(Op.getOperand(0), InDemandedElts, Depth + 1);
+    // If the sign bit is known to be zero or one, then sext will extend
+    // it to the top bits, else it will just zext.
+    Known = Known.sext(BitWidth);
+    break;
+  }
   case ISD::SIGN_EXTEND: {
     Known = computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
     // If the sign bit is known to be zero or one, then sext will extend
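The new case leans on the rule spelled out in the comment above: after computing the known bits of the narrower source elements, Known.sext(BitWidth) lets a known sign bit propagate into all of the newly created high bits, and leaves them unknown otherwise. Below is a standalone sketch of that rule using plain 64-bit masks rather than LLVM's KnownBits/APInt types; the struct and function names are made up for illustration.

#include <cassert>
#include <cstdint>

// Known-bits state for a value of width Bits: a bit set in Zero means
// "known to be 0", a bit set in One means "known to be 1".
struct KnownBitsSketch {
  uint64_t Zero, One;
  unsigned Bits;
};

// Sign-extend the known-bits state from In.Bits to NewBits. This mirrors the
// idea behind KnownBits::sext, not the LLVM implementation itself.
KnownBitsSketch sextKnown(KnownBitsSketch In, unsigned NewBits) {
  assert(In.Bits >= 1 && In.Bits < 64 && NewBits >= In.Bits && NewBits <= 64);
  uint64_t SignBit = 1ull << (In.Bits - 1);
  uint64_t LowMask = (1ull << In.Bits) - 1;
  uint64_t AllMask = NewBits == 64 ? ~0ull : (1ull << NewBits) - 1;
  uint64_t HighBits = AllMask & ~LowMask; // bits created by the extension
  KnownBitsSketch Out{In.Zero, In.One, NewBits};
  if (In.Zero & SignBit)
    Out.Zero |= HighBits; // sign bit known 0 -> high bits known 0
  else if (In.One & SignBit)
    Out.One |= HighBits;  // sign bit known 1 -> high bits known 1
  return Out;             // sign bit unknown -> high bits stay unknown
}

In the patch itself, the only extra twist is the demanded-elements handling: the result has fewer lanes than the operand, so DemandedElts is widened with zextOrSelf to the operand's lane count before recursing, and only those low lanes contribute to the result's known bits.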
@@ -282,17 +282,11 @@
 ; SSE41-LABEL: combine_vec_shl_ext_shl1:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm0
-; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; SSE41-NEXT:    pmovsxwd %xmm1, %xmm1
 ; SSE41-NEXT:    pmovsxwd %xmm0, %xmm0
-; SSE41-NEXT:    movdqa %xmm0, %xmm2
-; SSE41-NEXT:    pslld $30, %xmm2
-; SSE41-NEXT:    pslld $31, %xmm0
-; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
-; SSE41-NEXT:    movdqa %xmm1, %xmm2
-; SSE41-NEXT:    pslld $28, %xmm2
-; SSE41-NEXT:    pslld $29, %xmm1
-; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
+; SSE41-NEXT:    pslld $30, %xmm0
+; SSE41-NEXT:    pxor %xmm1, %xmm1
+; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3],xmm0[4,5,6,7]
+; SSE41-NEXT:    pxor %xmm1, %xmm1
 ; SSE41-NEXT:    retq
 ;
 ; AVX-LABEL: combine_vec_shl_ext_shl1: