llvm.org GIT mirror llvm / f5e38ea
[DAGCombine] Add TODOs for some combines that should support non-uniform vectors We tend to only test for scalar/scalar consts when really we could support non-uniform vectors using ISD::matchUnaryPredicate/matchBinaryPredicate etc. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@363924 91177308-0d34-0410-b5e6-96231b3b80d8 Simon Pilgrim a month ago
1 changed file(s) with 15 addition(s) and 0 deletion(s). Raw diff Collapse all Expand all
22012201
22022202 // We need a constant operand for the add/sub, and the other operand is a
22032203 // logical shift right: add (srl), C or sub C, (srl).
2204 // TODO - support non-uniform vector amounts.
22042205 bool IsAdd = N->getOpcode() == ISD::ADD;
22052206 SDValue ConstantOp = IsAdd ? N->getOperand(1) : N->getOperand(0);
22062207 SDValue ShiftOp = IsAdd ? N->getOperand(0) : N->getOperand(1);
44914492 }
44924493
44934494 // Turn compare of constants whose difference is 1 bit into add+and+setcc.
4495 // TODO - support non-uniform vector amounts.
44944496 if ((IsAnd && CC1 == ISD::SETNE) || (!IsAnd && CC1 == ISD::SETEQ)) {
44954497 // Match a shared variable operand and 2 non-opaque constant operands.
44964498 ConstantSDNode *C0 = isConstOrConstSplat(LR);
70807082 }
70817083
70827084 // fold (rot x, c) -> (rot x, c % BitSize)
7085 // TODO - support non-uniform vector amounts.
70837086 if (ConstantSDNode *Cst = isConstOrConstSplat(N1)) {
70847087 if (Cst->getAPIntValue().uge(Bitsize)) {
70857088 uint64_t RotAmt = Cst->getAPIntValue().urem(Bitsize);
71567159 ConstantSDNode *N1C = isConstOrConstSplat(N1);
71577160
71587161 // fold (shl c1, c2) -> c1 << c2
7162 // TODO - support non-uniform vector shift amounts.
71597163 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
71607164 if (N0C && N1C && !N1C->isOpaque())
71617165 return DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT, N0C, N1C);
71757179 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, NewOp1);
71767180 }
71777181
7182 // TODO - support non-uniform vector shift amounts.
71787183 if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
71797184 return SDValue(N, 0);
71807185
72537258 // fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C))
72547259 // Only fold this if the inner zext has no other uses to avoid increasing
72557260 // the total number of instructions.
7261 // TODO - support non-uniform vector shift amounts.
72567262 if (N1C && N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() &&
72577263 N0.getOperand(0).getOpcode() == ISD::SRL) {
72587264 SDValue N0Op0 = N0.getOperand(0);
72767282
72777283 // fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2
72787284 // fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C2-C1)) if C1 > C2
7285 // TODO - support non-uniform vector shift amounts.
72797286 if (N1C && (N0.getOpcode() == ISD::SRL || N0.getOpcode() == ISD::SRA) &&
72807287 N0->getFlags().hasExact()) {
72817288 if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
73867393 ConstantSDNode *N1C = isConstOrConstSplat(N1);
73877394
73887395 // fold (sra c1, c2) -> c1 >>s c2
7396 // TODO - support non-uniform vector shift amounts.
73897397 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
73907398 if (N0C && N1C && !N1C->isOpaque())
73917399 return DAG.FoldConstantArithmetic(ISD::SRA, SDLoc(N), VT, N0C, N1C);
74847492
74857493 // fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2))
74867494 // if c1 is equal to the number of bits the trunc removes
7495 // TODO - support non-uniform vector shift amounts.
74877496 if (N0.getOpcode() == ISD::TRUNCATE &&
74887497 (N0.getOperand(0).getOpcode() == ISD::SRL ||
74897498 N0.getOperand(0).getOpcode() == ISD::SRA) &&
75087517 }
75097518
75107519 // Simplify, based on bits shifted out of the LHS.
7520 // TODO - support non-uniform vector shift amounts.
75117521 if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
75127522 return SDValue(N, 0);
75137523
75397549 ConstantSDNode *N1C = isConstOrConstSplat(N1);
75407550
75417551 // fold (srl c1, c2) -> c1 >>u c2
7552 // TODO - support non-uniform vector shift amounts.
75427553 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0);
75437554 if (N0C && N1C && !N1C->isOpaque())
75447555 return DAG.FoldConstantArithmetic(ISD::SRL, SDLoc(N), VT, N0C, N1C);
75797590 }
75807591
75817592 // fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2)))
7593 // TODO - support non-uniform vector shift amounts.
75827594 if (N1C && N0.getOpcode() == ISD::TRUNCATE &&
75837595 N0.getOperand(0).getOpcode() == ISD::SRL) {
75847596 if (auto N001C = isConstOrConstSplat(N0.getOperand(0).getOperand(1))) {
76137625 }
76147626
76157627 // fold (srl (anyextend x), c) -> (and (anyextend (srl x, c)), mask)
7628 // TODO - support non-uniform vector shift amounts.
76167629 if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
76177630 // Shifting in all undef bits?
76187631 EVT SmallVT = N0.getOperand(0).getValueType();
76897702
76907703 // fold operands of srl based on knowledge that the low bits are not
76917704 // demanded.
7705 // TODO - support non-uniform vector shift amounts.
76927706 if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
76937707 return SDValue(N, 0);
76947708
77517765 return V.isUndef() || isNullOrNullSplat(V, /*AllowUndefs*/ true);
77527766 };
77537767
7768 // TODO - support non-uniform vector shift amounts.
77547769 if (ConstantSDNode *Cst = isConstOrConstSplat(N2)) {
77557770 EVT ShAmtTy = N2.getValueType();
77567771