llvm.org GIT mirror
[DAGCombine] Add TODOs for some combines that should support non-uniform vectors. We tend to only test for scalar/scalar constants when really we could support non-uniform vectors using ISD::matchUnaryPredicate/matchBinaryPredicate etc. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@363924 91177308-0d34-0410-b5e6-96231b3b80d8 — Simon Pilgrim, 2 months ago
1 changed file(s) with 15 addition(s) and 0 deletion(s).
 2201 2201 2202 2202 // We need a constant operand for the add/sub, and the other operand is a 2203 2203 // logical shift right: add (srl), C or sub C, (srl). 2204 // TODO - support non-uniform vector amounts. 2204 2205 bool IsAdd = N->getOpcode() == ISD::ADD; 2205 2206 SDValue ConstantOp = IsAdd ? N->getOperand(1) : N->getOperand(0); 2206 2207 SDValue ShiftOp = IsAdd ? N->getOperand(0) : N->getOperand(1); 4491 4492 } 4492 4493 4493 4494 // Turn compare of constants whose difference is 1 bit into add+and+setcc. 4495 // TODO - support non-uniform vector amounts. 4494 4496 if ((IsAnd && CC1 == ISD::SETNE) || (!IsAnd && CC1 == ISD::SETEQ)) { 4495 4497 // Match a shared variable operand and 2 non-opaque constant operands. 4496 4498 ConstantSDNode *C0 = isConstOrConstSplat(LR); 7080 7082 } 7081 7083 7082 7084 // fold (rot x, c) -> (rot x, c % BitSize) 7085 // TODO - support non-uniform vector amounts. 7083 7086 if (ConstantSDNode *Cst = isConstOrConstSplat(N1)) { 7084 7087 if (Cst->getAPIntValue().uge(Bitsize)) { 7085 7088 uint64_t RotAmt = Cst->getAPIntValue().urem(Bitsize); 7156 7159 ConstantSDNode *N1C = isConstOrConstSplat(N1); 7157 7160 7158 7161 // fold (shl c1, c2) -> c1<<c2 7162 // TODO - support non-uniform vector shift amounts. 7159 7163 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 7160 7164 if (N0C && N1C && !N1C->isOpaque()) 7161 7165 return DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT, N0C, N1C); 7175 7179 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, NewOp1); 7176 7180 } 7177 7181 7182 // TODO - support non-uniform vector shift amounts. 7178 7183 if (N1C && SimplifyDemandedBits(SDValue(N, 0))) 7179 7184 return SDValue(N, 0); 7180 7185 7253 7258 // fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C)) 7254 7259 // Only fold this if the inner zext has no other uses to avoid increasing 7255 7260 // the total number of instructions. 7261 // TODO - support non-uniform vector shift amounts. 
7256 7262 if (N1C && N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() && 7257 7263 N0.getOperand(0).getOpcode() == ISD::SRL) { 7258 7264 SDValue N0Op0 = N0.getOperand(0); 7276 7282 7277 7283 // fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2 7278 7284 // fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C2-C1)) if C1 > C2 7285 // TODO - support non-uniform vector shift amounts. 7279 7286 if (N1C && (N0.getOpcode() == ISD::SRL || N0.getOpcode() == ISD::SRA) && 7280 7287 N0->getFlags().hasExact()) { 7281 7288 if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) { 7386 7393 ConstantSDNode *N1C = isConstOrConstSplat(N1); 7387 7394 7388 7395 // fold (sra c1, c2) -> (sra c1, c2) 7396 // TODO - support non-uniform vector shift amounts. 7389 7397 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 7390 7398 if (N0C && N1C && !N1C->isOpaque()) 7391 7399 return DAG.FoldConstantArithmetic(ISD::SRA, SDLoc(N), VT, N0C, N1C); 7484 7492 7485 7493 // fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2)) 7486 7494 // if c1 is equal to the number of bits the trunc removes 7495 // TODO - support non-uniform vector shift amounts. 7487 7496 if (N0.getOpcode() == ISD::TRUNCATE && 7488 7497 (N0.getOperand(0).getOpcode() == ISD::SRL || 7489 7498 N0.getOperand(0).getOpcode() == ISD::SRA) && 7508 7517 } 7509 7518 7510 7519 // Simplify, based on bits shifted out of the LHS. 7520 // TODO - support non-uniform vector shift amounts. 7511 7521 if (N1C && SimplifyDemandedBits(SDValue(N, 0))) 7512 7522 return SDValue(N, 0); 7513 7523 7539 7549 ConstantSDNode *N1C = isConstOrConstSplat(N1); 7540 7550 7541 7551 // fold (srl c1, c2) -> c1 >>u c2 7552 // TODO - support non-uniform vector shift amounts. 
7542 7553 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 7543 7554 if (N0C && N1C && !N1C->isOpaque()) 7544 7555 return DAG.FoldConstantArithmetic(ISD::SRL, SDLoc(N), VT, N0C, N1C); 7579 7590 } 7580 7591 7581 7592 // fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2))) 7593 // TODO - support non-uniform vector shift amounts. 7582 7594 if (N1C && N0.getOpcode() == ISD::TRUNCATE && 7583 7595 N0.getOperand(0).getOpcode() == ISD::SRL) { 7584 7596 if (auto N001C = isConstOrConstSplat(N0.getOperand(0).getOperand(1))) { 7613 7625 } 7614 7626 7615 7627 // fold (srl (anyextend x), c) -> (and (anyextend (srl x, c)), mask) 7628 // TODO - support non-uniform vector shift amounts. 7616 7629 if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) { 7617 7630 // Shifting in all undef bits? 7618 7631 EVT SmallVT = N0.getOperand(0).getValueType(); 7689 7702 7690 7703 // fold operands of srl based on knowledge that the low bits are not 7691 7704 // demanded. 7705 // TODO - support non-uniform vector shift amounts. 7692 7706 if (N1C && SimplifyDemandedBits(SDValue(N, 0))) 7693 7707 return SDValue(N, 0); 7694 7708 7751 7765 return V.isUndef() || isNullOrNullSplat(V, /*AllowUndefs*/ true); 7752 7766 }; 7753 7767 7768 // TODO - support non-uniform vector shift amounts. 7754 7769 if (ConstantSDNode *Cst = isConstOrConstSplat(N2)) { 7755 7770 EVT ShAmtTy = N2.getValueType(); 7756 7771