llvm.org GIT mirror llvm / 5f4e6e1
[FastISel][X86] Optimize selects when the condition comes from a compare.

Optimize the select instruction sequence to use EFLAGS directly from a compare when possible.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@211543 91177308-0d34-0410-b5e6-96231b3b80d8
Juergen Ributzka, 5 years ago
6 changed file(s) with 476 addition(s) and 44 deletion(s).
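For context, here is a minimal sketch of what the commit changes for a select whose condition comes from a compare in the same block. The IR mirrors select_cmp_cmov_i64 from the new test added below; the "before" assembly is an approximation of the previous FastISel lowering (which materialized the i1 result of the compare and re-tested it), while the "after" assembly matches the CHECK lines of that test.

define i64 @example_select_ult(i64 %a, i64 %b) {
  %cmp = icmp ult i64 %a, %b
  %sel = select i1 %cmp, i64 %a, i64 %b
  ret i64 %sel
}

; Before (approximate): the i1 produced by the icmp is materialized into
; an 8-bit register and re-tested before a plain cmove:
;   cmpq   %rsi, %rdi
;   setb   %cl
;   testb  $1, %cl
;   cmoveq ...
; After: the cmov reads EFLAGS directly from the compare, so the setcc
; and test disappear:
;   cmpq   %rsi, %rdi
;   cmovbq %rdi, %rsi
;   movq   %rsi, %rax
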
109109 bool X86SelectShift(const Instruction *I);
110110
111111 bool X86SelectDivRem(const Instruction *I);
112
113 bool X86FastEmitCMoveSelect(const Instruction *I);
112114
113115 bool X86SelectSelect(const Instruction *I);
114116
16101612 return true;
16111613 }
16121614
1613 bool X86FastISel::X86SelectSelect(const Instruction *I) {
1614 MVT VT;
1615 if (!isTypeLegal(I->getType(), VT))
1616 return false;
1617
1618 // We only use cmov here; if we don't have a cmov instruction, bail.
1619 if (!Subtarget->hasCMov()) return false;
1620
1621 unsigned Opc = 0;
1622 const TargetRegisterClass *RC = nullptr;
1623 if (VT == MVT::i16) {
1624 Opc = X86::CMOVE16rr;
1625 RC = &X86::GR16RegClass;
1626 } else if (VT == MVT::i32) {
1627 Opc = X86::CMOVE32rr;
1628 RC = &X86::GR32RegClass;
1629 } else if (VT == MVT::i64) {
1630 Opc = X86::CMOVE64rr;
1631 RC = &X86::GR64RegClass;
1632 } else {
1633 return false;
1634 }
1635
1636 unsigned Op0Reg = getRegForValue(I->getOperand(0));
1637 if (Op0Reg == 0) return false;
1638 unsigned Op1Reg = getRegForValue(I->getOperand(1));
1639 if (Op1Reg == 0) return false;
1640 unsigned Op2Reg = getRegForValue(I->getOperand(2));
1641 if (Op2Reg == 0) return false;
1642
1643 // Selects operate on i1, however, Op0Reg is 8 bits wide and may contain
1644 // garbage. Indeed, only the least significant bit is supposed to be accurate.
1645 // If we read more than the lsb, we may see non-zero values whereas the lsb
1646 // is zero. Therefore, we have to truncate Op0Reg to i1 for the select.
1647 // This is achieved by performing TEST against 1.
1648 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
1649 .addReg(Op0Reg).addImm(1);
1650 unsigned ResultReg = createResultReg(RC);
1651 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)
1652 .addReg(Op1Reg).addReg(Op2Reg);
1615 /// \brief Emit a conditional move instruction (if they are supported) to lower
1616 /// the select.
1617 bool X86FastISel::X86FastEmitCMoveSelect(const Instruction *I) {
1618 MVT RetVT;
1619 if (!isTypeLegal(I->getType(), RetVT))
1620 return false;
1621
1622 // Check if the subtarget supports these instructions.
1623 if (!Subtarget->hasCMov())
1624 return false;
1625
1626 // FIXME: Add support for i8.
1627 unsigned Opc;
1628 switch (RetVT.SimpleTy) {
1629 default: return false;
1630 case MVT::i16: Opc = X86::CMOVNE16rr; break;
1631 case MVT::i32: Opc = X86::CMOVNE32rr; break;
1632 case MVT::i64: Opc = X86::CMOVNE64rr; break;
1633 }
1634
1635 const Value *Cond = I->getOperand(0);
1636 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
1637 bool NeedTest = true;
1638
1639 // Optimize conditions coming from a compare.
1640 if (const auto *CI = dyn_cast<CmpInst>(Cond)) {
1641 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
1642
1643 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
1644 static unsigned SETFOpcTable[2][3] = {
1645 { X86::SETNPr, X86::SETEr , X86::TEST8rr },
1646 { X86::SETPr, X86::SETNEr, X86::OR8rr }
1647 };
1648 unsigned *SETFOpc = nullptr;
1649 switch (Predicate) {
1650 default: break;
1651 case CmpInst::FCMP_OEQ:
1652 SETFOpc = &SETFOpcTable[0][0];
1653 Predicate = CmpInst::ICMP_NE;
1654 break;
1655 case CmpInst::FCMP_UNE:
1656 SETFOpc = &SETFOpcTable[1][0];
1657 Predicate = CmpInst::ICMP_NE;
1658 break;
1659 }
1660
1661 X86::CondCode CC;
1662 bool NeedSwap;
1663 std::tie(CC, NeedSwap) = getX86ConditonCode(Predicate);
1664 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
1665 Opc = X86::getCMovFromCond(CC, RC->getSize());
1666
1667 const Value *CmpLHS = CI->getOperand(0);
1668 const Value *CmpRHS = CI->getOperand(1);
1669 if (NeedSwap)
1670 std::swap(CmpLHS, CmpRHS);
1671
1672 EVT CmpVT = TLI.getValueType(CmpLHS->getType());
1673 // Emit a compare of the LHS and RHS, setting the flags.
1674 if (!X86FastEmitCompare(CmpLHS, CmpRHS, CmpVT))
1675 return false;
1676
1677 if (SETFOpc) {
1678 unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
1679 unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
1680 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[0]),
1681 FlagReg1);
1682 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[1]),
1683 FlagReg2);
1684 auto const &II = TII.get(SETFOpc[2]);
1685 if (II.getNumDefs()) {
1686 unsigned TmpReg = createResultReg(&X86::GR8RegClass);
1687 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, TmpReg)
1688 .addReg(FlagReg2).addReg(FlagReg1);
1689 } else {
1690 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
1691 .addReg(FlagReg2).addReg(FlagReg1);
1692 }
1693 }
1694 NeedTest = false;
1695 }
1696
1697 if (NeedTest) {
1698 // Selects operate on i1, however, CondReg is 8 bits wide and may contain
1699 // garbage. Indeed, only the least significant bit is supposed to be
1700 // accurate. If we read more than the lsb, we may see non-zero values
1701 // whereas the lsb is zero. Therefore, we have to truncate CondReg to i1 for
1702 // the select. This is achieved by performing TEST against 1.
1703 unsigned CondReg = getRegForValue(Cond);
1704 if (CondReg == 0)
1705 return false;
1706 bool CondIsKill = hasTrivialKill(Cond);
1707
1708 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::TEST8ri))
1709 .addReg(CondReg, getKillRegState(CondIsKill)).addImm(1);
1710 }
1711
1712 const Value *LHS = I->getOperand(1);
1713 const Value *RHS = I->getOperand(2);
1714
1715 unsigned RHSReg = getRegForValue(RHS);
1716 bool RHSIsKill = hasTrivialKill(RHS);
1717
1718 unsigned LHSReg = getRegForValue(LHS);
1719 bool LHSIsKill = hasTrivialKill(LHS);
1720
1721 if (!LHSReg || !RHSReg)
1722 return false;
1723
1724 unsigned ResultReg = FastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
1725 LHSReg, LHSIsKill);
16531726 UpdateValueMap(I, ResultReg);
16541727 return true;
1728 }
1729
1730 bool X86FastISel::X86SelectSelect(const Instruction *I) {
1731 MVT RetVT;
1732 if (!isTypeLegal(I->getType(), RetVT))
1733 return false;
1734
1735 // Check if we can fold the select.
1736 if (const auto *CI = dyn_cast<CmpInst>(I->getOperand(0))) {
1737 CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
1738 const Value *Opnd = nullptr;
1739 switch (Predicate) {
1740 default: break;
1741 case CmpInst::FCMP_FALSE: Opnd = I->getOperand(2); break;
1742 case CmpInst::FCMP_TRUE: Opnd = I->getOperand(1); break;
1743 }
1744 // No need for a select anymore - this is an unconditional move.
1745 if (Opnd) {
1746 unsigned OpReg = getRegForValue(Opnd);
1747 if (OpReg == 0)
1748 return false;
1749 bool OpIsKill = hasTrivialKill(Opnd);
1750 const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
1751 unsigned ResultReg = createResultReg(RC);
1752 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1753 TII.get(TargetOpcode::COPY), ResultReg)
1754 .addReg(OpReg, getKillRegState(OpIsKill));
1755 UpdateValueMap(I, ResultReg);
1756 return true;
1757 }
1758 }
1759
1760 // First try to use real conditional move instructions.
1761 if (X86FastEmitCMoveSelect(I))
1762 return true;
1763
1764 return false;
16551765 }
16561766
16571767 bool X86FastISel::X86SelectFPExt(const Instruction *I) {
26952695
26962696 /// getCMovFromCond - Return a cmov opcode for the given condition,
26972697 /// register size in bytes, and operand type.
2698 static unsigned getCMovFromCond(X86::CondCode CC, unsigned RegBytes,
2699 bool HasMemoryOperand) {
2698 unsigned X86::getCMovFromCond(CondCode CC, unsigned RegBytes,
2699 bool HasMemoryOperand) {
27002700 static const uint16_t Opc[32][3] = {
27012701 { X86::CMOVA16rr, X86::CMOVA32rr, X86::CMOVA64rr },
27022702 { X86::CMOVAE16rr, X86::CMOVAE32rr, X86::CMOVAE64rr },
6464 /// \brief Return a set opcode for the given condition and whether it has
6565 /// a memory operand.
6666 unsigned getSETFromCond(CondCode CC, bool HasMemoryOperand = false);
67
68 /// \brief Return a cmov opcode for the given condition, register size in
69 /// bytes, and operand type.
70 unsigned getCMovFromCond(CondCode CC, unsigned RegBytes,
71 bool HasMemoryOperand = false);
6772
6873 // Turn CMov opcode into condition code.
6974 CondCode getCondFromCMovOpc(unsigned Opc);
0 ; RUN: llc < %s -fast-isel -fast-isel-abort -mtriple=x86_64-apple-darwin10 | FileCheck %s
1
2 ; Test conditional move for the supported types (i16, i32, and i64) and
3 ; condition input (argument or cmp). Currently i8 is not supported.
4
5 define zeroext i16 @select_cmov_i16(i1 zeroext %cond, i16 zeroext %a, i16 zeroext %b) {
6 ; CHECK-LABEL: select_cmov_i16
7 ; CHECK: testb $1, %dil
8 ; CHECK-NEXT: cmovew %dx, %si
9 ; CHECK-NEXT: movzwl %si, %eax
10 %1 = select i1 %cond, i16 %a, i16 %b
11 ret i16 %1
12 }
13
14 define zeroext i16 @select_cmp_cmov_i16(i16 zeroext %a, i16 zeroext %b) {
15 ; CHECK-LABEL: select_cmp_cmov_i16
16 ; CHECK: cmpw %si, %di
17 ; CHECK-NEXT: cmovbw %di, %si
18 ; CHECK-NEXT: movzwl %si, %eax
19 %1 = icmp ult i16 %a, %b
20 %2 = select i1 %1, i16 %a, i16 %b
21 ret i16 %2
22 }
23
24 define i32 @select_cmov_i32(i1 zeroext %cond, i32 %a, i32 %b) {
25 ; CHECK-LABEL: select_cmov_i32
26 ; CHECK: testb $1, %dil
27 ; CHECK-NEXT: cmovel %edx, %esi
28 ; CHECK-NEXT: movl %esi, %eax
29 %1 = select i1 %cond, i32 %a, i32 %b
30 ret i32 %1
31 }
32
33 define i32 @select_cmp_cmov_i32(i32 %a, i32 %b) {
34 ; CHECK-LABEL: select_cmp_cmov_i32
35 ; CHECK: cmpl %esi, %edi
36 ; CHECK-NEXT: cmovbl %edi, %esi
37 ; CHECK-NEXT: movl %esi, %eax
38 %1 = icmp ult i32 %a, %b
39 %2 = select i1 %1, i32 %a, i32 %b
40 ret i32 %2
41 }
42
43 define i64 @select_cmov_i64(i1 zeroext %cond, i64 %a, i64 %b) {
44 ; CHECK-LABEL: select_cmov_i64
45 ; CHECK: testb $1, %dil
46 ; CHECK-NEXT: cmoveq %rdx, %rsi
47 ; CHECK-NEXT: movq %rsi, %rax
48 %1 = select i1 %cond, i64 %a, i64 %b
49 ret i64 %1
50 }
51
52 define i64 @select_cmp_cmov_i64(i64 %a, i64 %b) {
53 ; CHECK-LABEL: select_cmp_cmov_i64
54 ; CHECK: cmpq %rsi, %rdi
55 ; CHECK-NEXT: cmovbq %rdi, %rsi
56 ; CHECK-NEXT: movq %rsi, %rax
57 %1 = icmp ult i64 %a, %b
58 %2 = select i1 %1, i64 %a, i64 %b
59 ret i64 %2
60 }
61
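The tests above cover i16, i32, and i64. As the FIXME in X86FastEmitCMoveSelect notes, i8 is not yet handled by the cmov path, so no i8 test is part of this commit; a hypothetical i8 variant, shown here only to mark the gap, would look like:

define zeroext i8 @select_cmov_i8(i1 zeroext %cond, i8 zeroext %a, i8 zeroext %b) {
  ; Hypothetical case (not in this commit): the new code bails out for i8,
  ; so no cmov-based CHECK lines are claimed here.
  %1 = select i1 %cond, i8 %a, i8 %b
  ret i8 %1
}
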
0 ; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
1 ; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort | FileCheck %s
2
3 ; Test all the cmp predicates that can feed an integer conditional move.
4
5 define i64 @select_fcmp_false_cmov(double %a, double %b, i64 %c, i64 %d) {
6 ; CHECK-LABEL: select_fcmp_false_cmov
7 ; CHECK: movq %rsi, %rax
8 ; CHECK-NEXT: retq
9 %1 = fcmp false double %a, %b
10 %2 = select i1 %1, i64 %c, i64 %d
11 ret i64 %2
12 }
13
14 define i64 @select_fcmp_oeq_cmov(double %a, double %b, i64 %c, i64 %d) {
15 ; CHECK-LABEL: select_fcmp_oeq_cmov
16 ; CHECK: ucomisd %xmm1, %xmm0
17 ; CHECK-NEXT: setnp %al
18 ; CHECK-NEXT: sete %cl
19 ; CHECK-NEXT: testb %al, %cl
20 ; CHECK-NEXT: cmoveq %rsi, %rdi
21 %1 = fcmp oeq double %a, %b
22 %2 = select i1 %1, i64 %c, i64 %d
23 ret i64 %2
24 }
25
26 define i64 @select_fcmp_ogt_cmov(double %a, double %b, i64 %c, i64 %d) {
27 ; CHECK-LABEL: select_fcmp_ogt_cmov
28 ; CHECK: ucomisd %xmm1, %xmm0
29 ; CHECK-NEXT: cmovbeq %rsi, %rdi
30 %1 = fcmp ogt double %a, %b
31 %2 = select i1 %1, i64 %c, i64 %d
32 ret i64 %2
33 }
34
35 define i64 @select_fcmp_oge_cmov(double %a, double %b, i64 %c, i64 %d) {
36 ; CHECK-LABEL: select_fcmp_oge_cmov
37 ; CHECK: ucomisd %xmm1, %xmm0
38 ; CHECK-NEXT: cmovbq %rsi, %rdi
39 %1 = fcmp oge double %a, %b
40 %2 = select i1 %1, i64 %c, i64 %d
41 ret i64 %2
42 }
43
44 define i64 @select_fcmp_olt_cmov(double %a, double %b, i64 %c, i64 %d) {
45 ; CHECK-LABEL: select_fcmp_olt_cmov
46 ; CHECK: ucomisd %xmm0, %xmm1
47 ; CHECK-NEXT: cmovbeq %rsi, %rdi
48 %1 = fcmp olt double %a, %b
49 %2 = select i1 %1, i64 %c, i64 %d
50 ret i64 %2
51 }
52
53 define i64 @select_fcmp_ole_cmov(double %a, double %b, i64 %c, i64 %d) {
54 ; CHECK-LABEL: select_fcmp_ole_cmov
55 ; CHECK: ucomisd %xmm0, %xmm1
56 ; CHECK-NEXT: cmovbq %rsi, %rdi
57 %1 = fcmp ole double %a, %b
58 %2 = select i1 %1, i64 %c, i64 %d
59 ret i64 %2
60 }
61
62 define i64 @select_fcmp_one_cmov(double %a, double %b, i64 %c, i64 %d) {
63 ; CHECK-LABEL: select_fcmp_one_cmov
64 ; CHECK: ucomisd %xmm1, %xmm0
65 ; CHECK-NEXT: cmoveq %rsi, %rdi
66 %1 = fcmp one double %a, %b
67 %2 = select i1 %1, i64 %c, i64 %d
68 ret i64 %2
69 }
70
71 define i64 @select_fcmp_ord_cmov(double %a, double %b, i64 %c, i64 %d) {
72 ; CHECK-LABEL: select_fcmp_ord_cmov
73 ; CHECK: ucomisd %xmm1, %xmm0
74 ; CHECK-NEXT: cmovpq %rsi, %rdi
75 %1 = fcmp ord double %a, %b
76 %2 = select i1 %1, i64 %c, i64 %d
77 ret i64 %2
78 }
79
80 define i64 @select_fcmp_uno_cmov(double %a, double %b, i64 %c, i64 %d) {
81 ; CHECK-LABEL: select_fcmp_uno_cmov
82 ; CHECK: ucomisd %xmm1, %xmm0
83 ; CHECK-NEXT: cmovnpq %rsi, %rdi
84 %1 = fcmp uno double %a, %b
85 %2 = select i1 %1, i64 %c, i64 %d
86 ret i64 %2
87 }
88
89 define i64 @select_fcmp_ueq_cmov(double %a, double %b, i64 %c, i64 %d) {
90 ; CHECK-LABEL: select_fcmp_ueq_cmov
91 ; CHECK: ucomisd %xmm1, %xmm0
92 ; CHECK-NEXT: cmovneq %rsi, %rdi
93 %1 = fcmp ueq double %a, %b
94 %2 = select i1 %1, i64 %c, i64 %d
95 ret i64 %2
96 }
97
98 define i64 @select_fcmp_ugt_cmov(double %a, double %b, i64 %c, i64 %d) {
99 ; CHECK-LABEL: select_fcmp_ugt_cmov
100 ; CHECK: ucomisd %xmm0, %xmm1
101 ; CHECK-NEXT: cmovaeq %rsi, %rdi
102 %1 = fcmp ugt double %a, %b
103 %2 = select i1 %1, i64 %c, i64 %d
104 ret i64 %2
105 }
106
107 define i64 @select_fcmp_uge_cmov(double %a, double %b, i64 %c, i64 %d) {
108 ; CHECK-LABEL: select_fcmp_uge_cmov
109 ; CHECK: ucomisd %xmm0, %xmm1
110 ; CHECK-NEXT: cmovaq %rsi, %rdi
111 %1 = fcmp uge double %a, %b
112 %2 = select i1 %1, i64 %c, i64 %d
113 ret i64 %2
114 }
115
116 define i64 @select_fcmp_ult_cmov(double %a, double %b, i64 %c, i64 %d) {
117 ; CHECK-LABEL: select_fcmp_ult_cmov
118 ; CHECK: ucomisd %xmm1, %xmm0
119 ; CHECK-NEXT: cmovaeq %rsi, %rdi
120 %1 = fcmp ult double %a, %b
121 %2 = select i1 %1, i64 %c, i64 %d
122 ret i64 %2
123 }
124
125 define i64 @select_fcmp_ule_cmov(double %a, double %b, i64 %c, i64 %d) {
126 ; CHECK-LABEL: select_fcmp_ule_cmov
127 ; CHECK: ucomisd %xmm1, %xmm0
128 ; CHECK-NEXT: cmovaq %rsi, %rdi
129 %1 = fcmp ule double %a, %b
130 %2 = select i1 %1, i64 %c, i64 %d
131 ret i64 %2
132 }
133
134 define i64 @select_fcmp_une_cmov(double %a, double %b, i64 %c, i64 %d) {
135 ; CHECK-LABEL: select_fcmp_une_cmov
136 ; CHECK: ucomisd %xmm1, %xmm0
137 ; CHECK-NEXT: setp %al
138 ; CHECK-NEXT: setne %cl
139 ; CHECK-NEXT: orb %al, %cl
140 ; CHECK-NEXT: cmoveq %rsi, %rdi
141 %1 = fcmp une double %a, %b
142 %2 = select i1 %1, i64 %c, i64 %d
143 ret i64 %2
144 }
145
146 define i64 @select_fcmp_true_cmov(double %a, double %b, i64 %c, i64 %d) {
147 ; CHECK-LABEL: select_fcmp_true_cmov
148 ; CHECK: movq %rdi, %rax
149 %1 = fcmp true double %a, %b
150 %2 = select i1 %1, i64 %c, i64 %d
151 ret i64 %2
152 }
153
154 define i64 @select_icmp_eq_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
155 ; CHECK-LABEL: select_icmp_eq_cmov
156 ; CHECK: cmpq %rsi, %rdi
157 ; CHECK-NEXT: cmovneq %rcx, %rdx
158 ; CHECK-NEXT: movq %rdx, %rax
159 %1 = icmp eq i64 %a, %b
160 %2 = select i1 %1, i64 %c, i64 %d
161 ret i64 %2
162 }
163
164 define i64 @select_icmp_ne_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
165 ; CHECK-LABEL: select_icmp_ne_cmov
166 ; CHECK: cmpq %rsi, %rdi
167 ; CHECK-NEXT: cmoveq %rcx, %rdx
168 ; CHECK-NEXT: movq %rdx, %rax
169 %1 = icmp ne i64 %a, %b
170 %2 = select i1 %1, i64 %c, i64 %d
171 ret i64 %2
172 }
173
174 define i64 @select_icmp_ugt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
175 ; CHECK-LABEL: select_icmp_ugt_cmov
176 ; CHECK: cmpq %rsi, %rdi
177 ; CHECK-NEXT: cmovbeq %rcx, %rdx
178 ; CHECK-NEXT: movq %rdx, %rax
179 %1 = icmp ugt i64 %a, %b
180 %2 = select i1 %1, i64 %c, i64 %d
181 ret i64 %2
182 }
183
184
185 define i64 @select_icmp_uge_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
186 ; CHECK-LABEL: select_icmp_uge_cmov
187 ; CHECK: cmpq %rsi, %rdi
188 ; CHECK-NEXT: cmovbq %rcx, %rdx
189 ; CHECK-NEXT: movq %rdx, %rax
190 %1 = icmp uge i64 %a, %b
191 %2 = select i1 %1, i64 %c, i64 %d
192 ret i64 %2
193 }
194
195 define i64 @select_icmp_ult_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
196 ; CHECK-LABEL: select_icmp_ult_cmov
197 ; CHECK: cmpq %rsi, %rdi
198 ; CHECK-NEXT: cmovaeq %rcx, %rdx
199 ; CHECK-NEXT: movq %rdx, %rax
200 %1 = icmp ult i64 %a, %b
201 %2 = select i1 %1, i64 %c, i64 %d
202 ret i64 %2
203 }
204
205 define i64 @select_icmp_ule_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
206 ; CHECK-LABEL: select_icmp_ule_cmov
207 ; CHECK: cmpq %rsi, %rdi
208 ; CHECK-NEXT: cmovaq %rcx, %rdx
209 ; CHECK-NEXT: movq %rdx, %rax
210 %1 = icmp ule i64 %a, %b
211 %2 = select i1 %1, i64 %c, i64 %d
212 ret i64 %2
213 }
214
215 define i64 @select_icmp_sgt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
216 ; CHECK-LABEL: select_icmp_sgt_cmov
217 ; CHECK: cmpq %rsi, %rdi
218 ; CHECK-NEXT: cmovleq %rcx, %rdx
219 ; CHECK-NEXT: movq %rdx, %rax
220 %1 = icmp sgt i64 %a, %b
221 %2 = select i1 %1, i64 %c, i64 %d
222 ret i64 %2
223 }
224
225 define i64 @select_icmp_sge_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
226 ; CHECK-LABEL: select_icmp_sge_cmov
227 ; CHECK: cmpq %rsi, %rdi
228 ; CHECK-NEXT: cmovlq %rcx, %rdx
229 ; CHECK-NEXT: movq %rdx, %rax
230 %1 = icmp sge i64 %a, %b
231 %2 = select i1 %1, i64 %c, i64 %d
232 ret i64 %2
233 }
234
235 define i64 @select_icmp_slt_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
236 ; CHECK-LABEL: select_icmp_slt_cmov
237 ; CHECK: cmpq %rsi, %rdi
238 ; CHECK-NEXT: cmovgeq %rcx, %rdx
239 ; CHECK-NEXT: movq %rdx, %rax
240 %1 = icmp slt i64 %a, %b
241 %2 = select i1 %1, i64 %c, i64 %d
242 ret i64 %2
243 }
244
245 define i64 @select_icmp_sle_cmov(i64 %a, i64 %b, i64 %c, i64 %d) {
246 ; CHECK-LABEL: select_icmp_sle_cmov
247 ; CHECK: cmpq %rsi, %rdi
248 ; CHECK-NEXT: cmovgq %rcx, %rdx
249 ; CHECK-NEXT: movq %rdx, %rax
250 %1 = icmp sle i64 %a, %b
251 %2 = select i1 %1, i64 %c, i64 %d
252 ret i64 %2
253 }
254
33 ; lsb is zero.
44 ;
55
6 ; CHECK-LABEL: fastisel_select:
6 ; CHECK-LABEL: fastisel_select:
77 ; CHECK: subb {{%[a-z0-9]+}}, [[RES:%[a-z0-9]+]]
88 ; CHECK: testb $1, [[RES]]
9 ; CHECK: cmovel
9 ; CHECK: cmovnel %edi, %esi
1010 define i32 @fastisel_select(i1 %exchSub2211_, i1 %trunc_8766) {
1111 %shuffleInternal15257_8932 = sub i1 %exchSub2211_, %trunc_8766
1212 %counter_diff1345 = select i1 %shuffleInternal15257_8932, i32 1204476887, i32 0