llvm.org GIT mirror llvm / d8286e4
[X86] Merge the different SETcc instructions for each condition code into single instructions that store the condition code as an operand.

Summary:
This avoids needing an isel pattern for each condition code. And it removes
translation switches for converting between SETcc instructions and condition
codes.

Now the printer, encoder and disassembler take care of converting the immediate.
We use InstAliases to handle the assembly matching. But we print using the asm
string in the instruction definition. The instruction itself is marked
IsCodeGenOnly=1 to hide it from the assembly parser.

Reviewers: andreadb, courbet, RKSimon, spatel, lebedev.ri

Reviewed By: andreadb

Subscribers: hiraditya, lebedev.ri, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D60138

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@357801 91177308-0d34-0410-b5e6-96231b3b80d8

Craig Topper, 1 year, 7 months ago
29 changed files with 468 additions and 417 deletions.
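The mechanical effect of the change is easiest to see at an instruction-creation site. The following is a minimal, illustrative C++ sketch, not taken verbatim from the patch; it assumes the usual LLVM X86 backend headers and a MachineBasicBlock, insertion point and DebugLoc in scope. The opcode is now always X86::SETCCr and the condition travels as a trailing immediate, where it used to be baked into a per-condition opcode such as X86::SETAr.

    #include "X86InstrInfo.h"                       // X86 backend private header (assumed include path)
    #include "llvm/CodeGen/MachineInstrBuilder.h"

    using namespace llvm;

    // Before this commit (illustrative): one opcode per condition.
    //   BuildMI(MBB, I, DL, TII.get(X86::SETAr), DstReg);
    // After this commit: one opcode, condition code as the last operand.
    static void emitSetA(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                         const DebugLoc &DL, const TargetInstrInfo &TII,
                         unsigned DstReg) {
      BuildMI(MBB, I, DL, TII.get(X86::SETCCr), DstReg).addImm(X86::COND_A);
    }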
351351 MRMSrcMemCC = 36,
352352
353353 /// MRMXm - This form is used for instructions that use the Mod/RM byte
354 /// to specify a memory source, but doesn't use the middle field. And has
355 /// a condition code.
356 ///
357 MRMXmCC = 38,
358
359 /// MRMXm - This form is used for instructions that use the Mod/RM byte
354360 /// to specify a memory source, but doesn't use the middle field.
355361 ///
356 MRMXm = 39, // Instruction that uses Mod/RM but not the middle field.
362 MRMXm = 39,
357363
358364 // Next, instructions that operate on a memory r/m operand...
359365 MRM0m = 40, MRM1m = 41, MRM2m = 42, MRM3m = 43, // Format /0 /1 /2 /3
384390 ///
385391 MRMSrcRegCC = 52,
386392
393 /// MRMXrCC - This form is used for instructions that use the Mod/RM byte
394 /// to specify a register source, but doesn't use the middle field. And has
395 /// a condition code.
396 ///
397 MRMXrCC = 54,
398
387399 /// MRMXr - This form is used for instructions that use the Mod/RM byte
388400 /// to specify a register source, but doesn't use the middle field.
389401 ///
390 MRMXr = 55, // Instruction that uses Mod/RM but not the middle field.
402 MRMXr = 55,
391403
392404 // Instructions that operate on a register r/m operand...
393405 MRM0r = 56, MRM1r = 57, MRM2r = 58, MRM3r = 59, // Format /0 /1 /2 /3
778790 case X86II::MRMSrcReg4VOp3:
779791 case X86II::MRMSrcRegOp4:
780792 case X86II::MRMSrcRegCC:
793 case X86II::MRMXrCC:
781794 case X86II::MRMXr:
782795 case X86II::MRM0r: case X86II::MRM1r:
783796 case X86II::MRM2r: case X86II::MRM3r:
784797 case X86II::MRM4r: case X86II::MRM5r:
785798 case X86II::MRM6r: case X86II::MRM7r:
786799 return -1;
800 case X86II::MRMXmCC:
787801 case X86II::MRMXm:
788802 case X86II::MRM0m: case X86II::MRM1m:
789803 case X86II::MRM2m: case X86II::MRM3m:
10801080 CurOp += X86::AddrNumOperands;
10811081 REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
10821082 break;
1083 case X86II::MRMXm:
1083 case X86II::MRMXmCC: case X86II::MRMXm:
10841084 case X86II::MRM0m: case X86II::MRM1m:
10851085 case X86II::MRM2m: case X86II::MRM3m:
10861086 case X86II::MRM4m: case X86II::MRM5m:
10881088 REX |= isREXExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
10891089 REX |= isREXExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
10901090 break;
1091 case X86II::MRMXr:
1091 case X86II::MRMXrCC: case X86II::MRMXr:
10921092 case X86II::MRM0r: case X86II::MRM1r:
10931093 case X86II::MRM2r: case X86II::MRM3r:
10941094 case X86II::MRM4r: case X86II::MRM5r:
15051505 break;
15061506 }
15071507
1508 case X86II::MRMXrCC: {
1509 unsigned RegOp = CurOp++;
1510
1511 unsigned CC = MI.getOperand(CurOp++).getImm();
1512 EmitByte(BaseOpcode + CC, CurByte, OS);
1513 EmitRegModRMByte(MI.getOperand(RegOp), 0, CurByte, OS);
1514 break;
1515 }
1516
15081517 case X86II::MRMXr:
15091518 case X86II::MRM0r: case X86II::MRM1r:
15101519 case X86II::MRM2r: case X86II::MRM3r:
15191528 (Form == X86II::MRMXr) ? 0 : Form-X86II::MRM0r,
15201529 CurByte, OS);
15211530 break;
1531
1532 case X86II::MRMXmCC: {
1533 unsigned FirstMemOp = CurOp;
1534 CurOp = FirstMemOp + X86::AddrNumOperands;
1535
1536 unsigned CC = MI.getOperand(CurOp++).getImm();
1537 EmitByte(BaseOpcode + CC, CurByte, OS);
1538
1539 emitMemModRMByte(MI, FirstMemOp, 0, TSFlags, Rex, CurByte, OS, Fixups, STI);
1540 break;
1541 }
15221542
15231543 case X86II::MRMXm:
15241544 case X86II::MRM0m: case X86II::MRM1m:
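The two new encoder cases above rely on the SETcc family occupying opcodes 0x0F 0x90 through 0x0F 0x9F, so the emitter can simply add the condition-code immediate to the base opcode (EmitByte(BaseOpcode + CC, ...)). A small self-contained sketch of that arithmetic, using condition-code values from the X86::CondCode enum:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint8_t BaseOpcode = 0x90;          // SETCC base, after the 0x0F (TB) escape
      const unsigned COND_E = 4, COND_A = 7;    // values from X86::CondCode
      std::printf("sete -> 0x0f 0x%02x\n", BaseOpcode + COND_E); // 0x94
      std::printf("seta -> 0x0f 0x%02x\n", BaseOpcode + COND_A); // 0x97
      return 0;
    }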
14791479
14801480 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
14811481 static const uint16_t SETFOpcTable[2][3] = {
1482 { X86::SETEr, X86::SETNPr, X86::AND8rr },
1483 { X86::SETNEr, X86::SETPr, X86::OR8rr }
1482 { X86::COND_E, X86::COND_NP, X86::AND8rr },
1483 { X86::COND_NE, X86::COND_P, X86::OR8rr }
14841484 };
14851485 const uint16_t *SETFOpc = nullptr;
14861486 switch (Predicate) {
14961496
14971497 unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
14981498 unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
1499 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[0]),
1500 FlagReg1);
1501 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[1]),
1502 FlagReg2);
1499 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
1500 FlagReg1).addImm(SETFOpc[0]);
1501 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
1502 FlagReg2).addImm(SETFOpc[1]);
15031503 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[2]),
15041504 ResultReg).addReg(FlagReg1).addReg(FlagReg2);
15051505 updateValueMap(I, ResultReg);
15101510 bool SwapArgs;
15111511 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
15121512 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
1513 unsigned Opc = X86::getSETFromCond(CC);
15141513
15151514 if (SwapArgs)
15161515 std::swap(LHS, RHS);
15191518 if (!X86FastEmitCompare(LHS, RHS, VT, I->getDebugLoc()))
15201519 return false;
15211520
1522 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg);
1521 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
1522 ResultReg).addImm(CC);
15231523 updateValueMap(I, ResultReg);
15241524 return true;
15251525 }
20462046
20472047 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
20482048 static const uint16_t SETFOpcTable[2][3] = {
2049 { X86::SETNPr, X86::SETEr , X86::TEST8rr },
2050 { X86::SETPr, X86::SETNEr, X86::OR8rr }
2049 { X86::COND_NP, X86::COND_E, X86::TEST8rr },
2050 { X86::COND_P, X86::COND_NE, X86::OR8rr }
20512051 };
20522052 const uint16_t *SETFOpc = nullptr;
20532053 switch (Predicate) {
20792079 if (SETFOpc) {
20802080 unsigned FlagReg1 = createResultReg(&X86::GR8RegClass);
20812081 unsigned FlagReg2 = createResultReg(&X86::GR8RegClass);
2082 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[0]),
2083 FlagReg1);
2084 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(SETFOpc[1]),
2085 FlagReg2);
2082 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
2083 FlagReg1).addImm(SETFOpc[0]);
2084 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
2085 FlagReg2).addImm(SETFOpc[1]);
20862086 auto const &II = TII.get(SETFOpc[2]);
20872087 if (II.getNumDefs()) {
20882088 unsigned TmpReg = createResultReg(&X86::GR8RegClass);
28962896 isCommutativeIntrinsic(II))
28972897 std::swap(LHS, RHS);
28982898
2899 unsigned BaseOpc, CondOpc;
2899 unsigned BaseOpc, CondCode;
29002900 switch (II->getIntrinsicID()) {
29012901 default: llvm_unreachable("Unexpected intrinsic!");
29022902 case Intrinsic::sadd_with_overflow:
2903 BaseOpc = ISD::ADD; CondOpc = X86::SETOr; break;
2903 BaseOpc = ISD::ADD; CondCode = X86::COND_O; break;
29042904 case Intrinsic::uadd_with_overflow:
2905 BaseOpc = ISD::ADD; CondOpc = X86::SETBr; break;
2905 BaseOpc = ISD::ADD; CondCode = X86::COND_B; break;
29062906 case Intrinsic::ssub_with_overflow:
2907 BaseOpc = ISD::SUB; CondOpc = X86::SETOr; break;
2907 BaseOpc = ISD::SUB; CondCode = X86::COND_O; break;
29082908 case Intrinsic::usub_with_overflow:
2909 BaseOpc = ISD::SUB; CondOpc = X86::SETBr; break;
2909 BaseOpc = ISD::SUB; CondCode = X86::COND_B; break;
29102910 case Intrinsic::smul_with_overflow:
2911 BaseOpc = X86ISD::SMUL; CondOpc = X86::SETOr; break;
2911 BaseOpc = X86ISD::SMUL; CondCode = X86::COND_O; break;
29122912 case Intrinsic::umul_with_overflow:
2913 BaseOpc = X86ISD::UMUL; CondOpc = X86::SETOr; break;
2913 BaseOpc = X86ISD::UMUL; CondCode = X86::COND_O; break;
29142914 }
29152915
29162916 unsigned LHSReg = getRegForValue(LHS);
29272927 };
29282928
29292929 if (CI->isOne() && (BaseOpc == ISD::ADD || BaseOpc == ISD::SUB) &&
2930 CondOpc == X86::SETOr) {
2930 CondCode == X86::COND_O) {
29312931 // We can use INC/DEC.
29322932 ResultReg = createResultReg(TLI.getRegClassFor(VT));
29332933 bool IsDec = BaseOpc == ISD::SUB;
29862986 // Assign to a GPR since the overflow return value is lowered to a SETcc.
29872987 unsigned ResultReg2 = createResultReg(&X86::GR8RegClass);
29882988 assert((ResultReg+1) == ResultReg2 && "Nonconsecutive result registers.");
2989 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(CondOpc),
2990 ResultReg2);
2989 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(X86::SETCCr),
2990 ResultReg2).addImm(CondCode);
29912991
29922992 updateValueMap(II, ResultReg, 2);
29932993 return true;
6666
6767 FunctionPass *llvm::createX86FixupSetCC() { return new X86FixupSetCCPass(); }
6868
69 bool X86FixupSetCCPass::isSetCCr(unsigned Opcode) {
70 switch (Opcode) {
71 default:
72 return false;
73 case X86::SETOr:
74 case X86::SETNOr:
75 case X86::SETBr:
76 case X86::SETAEr:
77 case X86::SETEr:
78 case X86::SETNEr:
79 case X86::SETBEr:
80 case X86::SETAr:
81 case X86::SETSr:
82 case X86::SETNSr:
83 case X86::SETPr:
84 case X86::SETNPr:
85 case X86::SETLr:
86 case X86::SETGEr:
87 case X86::SETLEr:
88 case X86::SETGr:
89 return true;
90 }
91 }
92
9369 // We expect the instruction *immediately* before the setcc to imp-def
9470 // EFLAGS (because of scheduling glue). To make this less brittle w.r.t
9571 // scheduling, look backwards until we hit the beginning of the
127103 // Find a setcc that is used by a zext.
128104 // This doesn't have to be the only use, the transformation is safe
129105 // regardless.
130 if (!isSetCCr(MI.getOpcode()))
106 if (MI.getOpcode() != X86::SETCCr)
131107 continue;
132108
133109 MachineInstr *ZExt = nullptr;
600600 // Otherwise we can just rewrite in-place.
601601 if (X86::getCondFromCMov(MI) != X86::COND_INVALID) {
602602 rewriteCMov(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
603 } else if (X86::getCondFromSETOpc(MI.getOpcode()) !=
604 X86::COND_INVALID) {
603 } else if (X86::getCondFromSETCC(MI) != X86::COND_INVALID) {
605604 rewriteSetCC(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
606605 } else if (MI.getOpcode() == TargetOpcode::COPY) {
607606 rewriteCopy(MI, *FlagUse, CopyDefI);
728727 // Scan backwards across the range of instructions with live EFLAGS.
729728 for (MachineInstr &MI :
730729 llvm::reverse(llvm::make_range(MBB.begin(), TestPos))) {
731 X86::CondCode Cond = X86::getCondFromSETOpc(MI.getOpcode());
730 X86::CondCode Cond = X86::getCondFromSETCC(MI);
732731 if (Cond != X86::COND_INVALID && !MI.mayStore() && MI.getOperand(0).isReg() &&
733732 TRI->isVirtualRegister(MI.getOperand(0).getReg())) {
734733 assert(MI.getOperand(0).isDef() &&
749748 DebugLoc TestLoc, X86::CondCode Cond) {
750749 unsigned Reg = MRI->createVirtualRegister(PromoteRC);
751750 auto SetI = BuildMI(TestMBB, TestPos, TestLoc,
752 TII->get(X86::getSETFromCond(Cond)), Reg);
751 TII->get(X86::SETCCr), Reg).addImm(Cond);
753752 (void)SetI;
754753 LLVM_DEBUG(dbgs() << " save cond: "; SetI->dump());
755754 ++NumSetCCsInserted;
10221021 MachineInstr &SetCCI,
10231022 MachineOperand &FlagUse,
10241023 CondRegArray &CondRegs) {
1025 X86::CondCode Cond = X86::getCondFromSETOpc(SetCCI.getOpcode());
1024 X86::CondCode Cond = X86::getCondFromSETCC(SetCCI);
10261025 // Note that we can't usefully rewrite this to the inverse without complex
10271026 // analysis of the users of the setCC. Largely we rely on duplicates which
10281027 // could have been avoided already being avoided here.
23252325 X86::CondCode CC = X86::COND_INVALID;
23262326 if (CC == X86::COND_INVALID)
23272327 CC = X86::getCondFromBranchOpc(N->getMachineOpcode());
2328 if (CC == X86::COND_INVALID)
2329 CC = X86::getCondFromSETOpc(N->getMachineOpcode());
23302328 if (CC == X86::COND_INVALID) {
23312329 unsigned Opc = N->getMachineOpcode();
2332 if (Opc == X86::CMOV16rr || Opc == X86::CMOV32rr || Opc == X86::CMOV64rr)
2330 if (Opc == X86::SETCCr)
2331 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(0));
2332 else if (Opc == X86::SETCCm)
2333 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(5));
2334 else if (Opc == X86::CMOV16rr || Opc == X86::CMOV32rr ||
2335 Opc == X86::CMOV64rr)
23332336 CC = static_cast<X86::CondCode>(N->getConstantOperandVal(2));
23342337 else if (Opc == X86::CMOV16rm || Opc == X86::CMOV32rm ||
23352338 Opc == X86::CMOV64rm)
5454 } // Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst"
5555 } // isCodeGenOnly = 1, ForceDisassemble = 1
5656
57 multiclass CMOV_Aliases<string Name, int CC> {
58 def : InstAlias<Name#"{w}\t{$src, $dst|$dst, $src}",
57 // SetCC instructions.
58 let Uses = [EFLAGS], isCodeGenOnly = 1, ForceDisassemble = 1 in {
59 def SETCCr : I<0x90, MRMXrCC, (outs GR8:$dst), (ins ccode:$cond),
60 "set${cond}\t$dst",
61 [(set GR8:$dst, (X86setcc imm:$cond, EFLAGS))]>,
62 TB, Sched<[WriteSETCC]>;
63 def SETCCm : I<0x90, MRMXmCC, (outs), (ins i8mem:$dst, ccode:$cond),
64 "set${cond}\t$dst",
65 [(store (X86setcc imm:$cond, EFLAGS), addr:$dst)]>,
66 TB, Sched<[WriteSETCCStore]>;
67 } // Uses = [EFLAGS]
68
69 multiclass CMOV_SETCC_Aliases<string Cond, int CC> {
70 def : InstAlias<"cmov"#Cond#"{w}\t{$src, $dst|$dst, $src}",
5971 (CMOV16rr GR16:$dst, GR16:$src, CC), 0>;
60 def : InstAlias<Name#"{w}\t{$src, $dst|$dst, $src}",
72 def : InstAlias<"cmov"#Cond#"{w}\t{$src, $dst|$dst, $src}",
6173 (CMOV16rm GR16:$dst, i16mem:$src, CC), 0>;
62 def : InstAlias<Name#"{l}\t{$src, $dst|$dst, $src}",
74 def : InstAlias<"cmov"#Cond#"{l}\t{$src, $dst|$dst, $src}",
6375 (CMOV32rr GR32:$dst, GR32:$src, CC), 0>;
64 def : InstAlias<Name#"{l}\t{$src, $dst|$dst, $src}",
76 def : InstAlias<"cmov"#Cond#"{l}\t{$src, $dst|$dst, $src}",
6577 (CMOV32rm GR32:$dst, i32mem:$src, CC), 0>;
66 def : InstAlias<Name#"{q}\t{$src, $dst|$dst, $src}",
78 def : InstAlias<"cmov"#Cond#"{q}\t{$src, $dst|$dst, $src}",
6779 (CMOV64rr GR64:$dst, GR64:$src, CC), 0>;
68 def : InstAlias<Name#"{q}\t{$src, $dst|$dst, $src}",
80 def : InstAlias<"cmov"#Cond#"{q}\t{$src, $dst|$dst, $src}",
6981 (CMOV64rm GR64:$dst, i64mem:$src, CC), 0>;
82
83 def : InstAlias<"set"#Cond#"\t$dst", (SETCCr GR8:$dst, CC), 0>;
84 def : InstAlias<"set"#Cond#"\t$dst", (SETCCm i8mem:$dst, CC), 0>;
7085 }
7186
72 defm : CMOV_Aliases<"cmovo" , 0>;
73 defm : CMOV_Aliases<"cmovno", 1>;
74 defm : CMOV_Aliases<"cmovb" , 2>;
75 defm : CMOV_Aliases<"cmovae", 3>;
76 defm : CMOV_Aliases<"cmove" , 4>;
77 defm : CMOV_Aliases<"cmovne", 5>;
78 defm : CMOV_Aliases<"cmovbe", 6>;
79 defm : CMOV_Aliases<"cmova" , 7>;
80 defm : CMOV_Aliases<"cmovs" , 8>;
81 defm : CMOV_Aliases<"cmovns", 9>;
82 defm : CMOV_Aliases<"cmovp" , 10>;
83 defm : CMOV_Aliases<"cmovnp", 11>;
84 defm : CMOV_Aliases<"cmovl" , 12>;
85 defm : CMOV_Aliases<"cmovge", 13>;
86 defm : CMOV_Aliases<"cmovle", 14>;
87 defm : CMOV_Aliases<"cmovg" , 15>;
88
89
90 // SetCC instructions.
91 multiclass SETCC<bits<8> opc, string Mnemonic, PatLeaf OpNode> {
92 let Uses = [EFLAGS] in {
93 def r : I<opc, MRMXr, (outs GR8:$dst), (ins),
94 !strconcat(Mnemonic, "\t$dst"),
95 [(set GR8:$dst, (X86setcc OpNode, EFLAGS))]>,
96 TB, Sched<[WriteSETCC]>;
97 def m : I<opc, MRMXm, (outs), (ins i8mem:$dst),
98 !strconcat(Mnemonic, "\t$dst"),
99 [(store (X86setcc OpNode, EFLAGS), addr:$dst)]>,
100 TB, Sched<[WriteSETCCStore]>;
101 } // Uses = [EFLAGS]
102 }
103
104 defm SETO : SETCC<0x90, "seto", X86_COND_O>; // is overflow bit set
105 defm SETNO : SETCC<0x91, "setno", X86_COND_NO>; // is overflow bit not set
106 defm SETB : SETCC<0x92, "setb", X86_COND_B>; // unsigned less than
107 defm SETAE : SETCC<0x93, "setae", X86_COND_AE>; // unsigned greater or equal
108 defm SETE : SETCC<0x94, "sete", X86_COND_E>; // equal to
109 defm SETNE : SETCC<0x95, "setne", X86_COND_NE>; // not equal to
110 defm SETBE : SETCC<0x96, "setbe", X86_COND_BE>; // unsigned less than or equal
111 defm SETA : SETCC<0x97, "seta", X86_COND_A>; // unsigned greater than
112 defm SETS : SETCC<0x98, "sets", X86_COND_S>; // is signed bit set
113 defm SETNS : SETCC<0x99, "setns", X86_COND_NS>; // is not signed
114 defm SETP : SETCC<0x9A, "setp", X86_COND_P>; // is parity bit set
115 defm SETNP : SETCC<0x9B, "setnp", X86_COND_NP>; // is parity bit not set
116 defm SETL : SETCC<0x9C, "setl", X86_COND_L>; // signed less than
117 defm SETGE : SETCC<0x9D, "setge", X86_COND_GE>; // signed greater or equal
118 defm SETLE : SETCC<0x9E, "setle", X86_COND_LE>; // signed less than or equal
119 defm SETG : SETCC<0x9F, "setg", X86_COND_G>; // signed greater than
87 defm : CMOV_SETCC_Aliases<"o" , 0>;
88 defm : CMOV_SETCC_Aliases<"no", 1>;
89 defm : CMOV_SETCC_Aliases<"b" , 2>;
90 defm : CMOV_SETCC_Aliases<"ae", 3>;
91 defm : CMOV_SETCC_Aliases<"e" , 4>;
92 defm : CMOV_SETCC_Aliases<"ne", 5>;
93 defm : CMOV_SETCC_Aliases<"be", 6>;
94 defm : CMOV_SETCC_Aliases<"a" , 7>;
95 defm : CMOV_SETCC_Aliases<"s" , 8>;
96 defm : CMOV_SETCC_Aliases<"ns", 9>;
97 defm : CMOV_SETCC_Aliases<"p" , 10>;
98 defm : CMOV_SETCC_Aliases<"np", 11>;
99 defm : CMOV_SETCC_Aliases<"l" , 12>;
100 defm : CMOV_SETCC_Aliases<"ge", 13>;
101 defm : CMOV_SETCC_Aliases<"le", 14>;
102 defm : CMOV_SETCC_Aliases<"g" , 15>;
120103
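Since SETCCr/SETCCm are isCodeGenOnly and hidden from the assembly parser, the defm list above is what maps each printed mnemonic suffix to a condition-code value from 0 (o) through 15 (g). A tiny standalone illustration of that mapping, with the suffixes and values copied from the list:

    #include <cstdio>

    int main() {
      const char *Suffix[16] = {"o",  "no", "b",  "ae", "e",  "ne", "be", "a",
                                "s",  "ns", "p",  "np", "l",  "ge", "le", "g"};
      for (int CC = 0; CC < 16; ++CC)
        std::printf("set%-2s -> SETCCr with $cond = %d\n", Suffix[CC], CC);
      return 0;
    }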
121104 // SALC is an undocumented instruction. Information for this instruction can be found
122105 // here http://www.rcollins.org/secrets/opcodes/SALC.html
353353 // this happens, it is great. However, if we are left with an 8-bit sbb and an
354354 // and, we might as well just match it as a setb.
355355 def : Pat<(and (i8 (X86setcc_c X86_COND_B, EFLAGS)), 1),
356 (SETBr)>;
356 (SETCCr (i8 2))>;
357357
358358 // Patterns to give priority when both inputs are zero so that we don't use
359359 // an immediate for the RHS.
321321 { X86::PUSH16r, X86::PUSH16rmm, TB_FOLDED_LOAD },
322322 { X86::PUSH32r, X86::PUSH32rmm, TB_FOLDED_LOAD },
323323 { X86::PUSH64r, X86::PUSH64rmm, TB_FOLDED_LOAD },
324 { X86::SETAEr, X86::SETAEm, TB_FOLDED_STORE },
325 { X86::SETAr, X86::SETAm, TB_FOLDED_STORE },
326 { X86::SETBEr, X86::SETBEm, TB_FOLDED_STORE },
327 { X86::SETBr, X86::SETBm, TB_FOLDED_STORE },
328 { X86::SETEr, X86::SETEm, TB_FOLDED_STORE },
329 { X86::SETGEr, X86::SETGEm, TB_FOLDED_STORE },
330 { X86::SETGr, X86::SETGm, TB_FOLDED_STORE },
331 { X86::SETLEr, X86::SETLEm, TB_FOLDED_STORE },
332 { X86::SETLr, X86::SETLm, TB_FOLDED_STORE },
333 { X86::SETNEr, X86::SETNEm, TB_FOLDED_STORE },
334 { X86::SETNOr, X86::SETNOm, TB_FOLDED_STORE },
335 { X86::SETNPr, X86::SETNPm, TB_FOLDED_STORE },
336 { X86::SETNSr, X86::SETNSm, TB_FOLDED_STORE },
337 { X86::SETOr, X86::SETOm, TB_FOLDED_STORE },
338 { X86::SETPr, X86::SETPm, TB_FOLDED_STORE },
339 { X86::SETSr, X86::SETSm, TB_FOLDED_STORE },
324 { X86::SETCCr, X86::SETCCm, TB_FOLDED_STORE },
340325 { X86::TAILJMPr, X86::TAILJMPm, TB_FOLDED_LOAD },
341326 { X86::TAILJMPr64, X86::TAILJMPm64, TB_FOLDED_LOAD },
342327 { X86::TAILJMPr64_REX, X86::TAILJMPm64_REX, TB_FOLDED_LOAD },
3030 def MRMSrcMem4VOp3 : Format<34>;
3131 def MRMSrcMemOp4 : Format<35>;
3232 def MRMSrcMemCC : Format<36>;
33 def MRMXmCC: Format<38>;
3334 def MRMXm : Format<39>;
3435 def MRM0m : Format<40>; def MRM1m : Format<41>; def MRM2m : Format<42>;
3536 def MRM3m : Format<43>; def MRM4m : Format<44>; def MRM5m : Format<45>;
3940 def MRMSrcReg4VOp3 : Format<50>;
4041 def MRMSrcRegOp4 : Format<51>;
4142 def MRMSrcRegCC : Format<52>;
43 def MRMXrCC: Format<54>;
4244 def MRMXr : Format<55>;
4345 def MRM0r : Format<56>; def MRM1r : Format<57>; def MRM2r : Format<58>;
4446 def MRM3r : Format<59>; def MRM4r : Format<60>; def MRM5r : Format<61>;
20002000 }
20012001 }
20022002
2003 /// Return condition code of a SET opcode.
2004 X86::CondCode X86::getCondFromSETOpc(unsigned Opc) {
2005 switch (Opc) {
2003 /// Return condition code of a SETCC opcode.
2004 X86::CondCode X86::getCondFromSETCC(const MachineInstr &MI) {
2005 switch (MI.getOpcode()) {
20062006 default: return X86::COND_INVALID;
2007 case X86::SETAr: case X86::SETAm: return X86::COND_A;
2008 case X86::SETAEr: case X86::SETAEm: return X86::COND_AE;
2009 case X86::SETBr: case X86::SETBm: return X86::COND_B;
2010 case X86::SETBEr: case X86::SETBEm: return X86::COND_BE;
2011 case X86::SETEr: case X86::SETEm: return X86::COND_E;
2012 case X86::SETGr: case X86::SETGm: return X86::COND_G;
2013 case X86::SETGEr: case X86::SETGEm: return X86::COND_GE;
2014 case X86::SETLr: case X86::SETLm: return X86::COND_L;
2015 case X86::SETLEr: case X86::SETLEm: return X86::COND_LE;
2016 case X86::SETNEr: case X86::SETNEm: return X86::COND_NE;
2017 case X86::SETNOr: case X86::SETNOm: return X86::COND_NO;
2018 case X86::SETNPr: case X86::SETNPm: return X86::COND_NP;
2019 case X86::SETNSr: case X86::SETNSm: return X86::COND_NS;
2020 case X86::SETOr: case X86::SETOm: return X86::COND_O;
2021 case X86::SETPr: case X86::SETPm: return X86::COND_P;
2022 case X86::SETSr: case X86::SETSm: return X86::COND_S;
2007 case X86::SETCCr: case X86::SETCCm:
2008 return static_cast<X86::CondCode>(
2009 MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
20232010 }
20242011 }
20252012
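Callers that previously switched over sixteen opcodes now just read the trailing immediate via the helper above. A short sketch of the new query pattern (assumes the X86 backend headers; this is not code from the patch itself):

    #include "X86InstrInfo.h"   // assumed X86 backend private header

    using namespace llvm;

    static bool isSetCCWith(const MachineInstr &MI, X86::CondCode Wanted) {
      // Works for both SETCCr and SETCCm; anything else yields COND_INVALID.
      return X86::getCondFromSETCC(MI) == Wanted;
    }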
21382125 return std::make_pair(CC, NeedSwap);
21392126 }
21402127
2141 /// Return a set opcode for the given condition and
2142 /// whether it has memory operand.
2143 unsigned X86::getSETFromCond(CondCode CC, bool HasMemoryOperand) {
2144 static const uint16_t Opc[16][2] = {
2145 { X86::SETOr, X86::SETOm },
2146 { X86::SETNOr, X86::SETNOm },
2147 { X86::SETBr, X86::SETBm },
2148 { X86::SETAEr, X86::SETAEm },
2149 { X86::SETEr, X86::SETEm },
2150 { X86::SETNEr, X86::SETNEm },
2151 { X86::SETBEr, X86::SETBEm },
2152 { X86::SETAr, X86::SETAm },
2153 { X86::SETSr, X86::SETSm },
2154 { X86::SETNSr, X86::SETNSm },
2155 { X86::SETPr, X86::SETPm },
2156 { X86::SETNPr, X86::SETNPm },
2157 { X86::SETLr, X86::SETLm },
2158 { X86::SETGEr, X86::SETGEm },
2159 { X86::SETLEr, X86::SETLEm },
2160 { X86::SETGr, X86::SETGm },
2161 };
2162
2163 assert(CC <= LAST_VALID_COND && "Can only handle standard cond codes");
2164 return Opc[CC][HasMemoryOperand ? 1 : 0];
2128 /// Return a setcc opcode based on whether it has a memory operand.
2129 unsigned X86::getSETOpc(bool HasMemoryOperand) {
2130 return HasMemoryOperand ? X86::SETCCm : X86::SETCCr;
21652131 }
21662132
21672133 /// Return a cmov opcode for the given register size in bytes, and operand type.
35543520 // If we are done with the basic block, we need to check whether EFLAGS is
35553521 // live-out.
35563522 bool IsSafe = false;
3557 SmallVector<std::pair<MachineInstr *, unsigned /*NewOpc*/>, 4> OpsToUpdate;
3523 SmallVector<std::pair<MachineInstr *, X86::CondCode>, 4> OpsToUpdate;
35583524 MachineBasicBlock::iterator E = CmpInstr.getParent()->end();
35593525 for (++I; I != E; ++I) {
35603526 const MachineInstr &Instr = *I;
35713537
35723538 // EFLAGS is used by this instruction.
35733539 X86::CondCode OldCC = X86::COND_INVALID;
3574 bool OpcIsSET = false;
35753540 if (IsCmpZero || IsSwapped) {
35763541 // We decode the condition code from opcode.
35773542 if (Instr.isBranch())
35783543 OldCC = X86::getCondFromBranchOpc(Instr.getOpcode());
35793544 else {
3580 OldCC = X86::getCondFromSETOpc(Instr.getOpcode());
3581 if (OldCC != X86::COND_INVALID)
3582 OpcIsSET = true;
3583 else
3545 OldCC = X86::getCondFromSETCC(Instr);
3546 if (OldCC == X86::COND_INVALID)
35843547 OldCC = X86::getCondFromCMov(Instr);
35853548 }
35863549 if (OldCC == X86::COND_INVALID) return false;
36263589 }
36273590
36283591 if ((ShouldUpdateCC || IsSwapped) && ReplacementCC != OldCC) {
3629 // Synthesize the new opcode.
3630 bool HasMemoryOperand = Instr.hasOneMemOperand();
3631 unsigned NewOpc;
3632 if (Instr.isBranch())
3633 NewOpc = GetCondBranchFromCond(ReplacementCC);
3634 else if(OpcIsSET)
3635 NewOpc = getSETFromCond(ReplacementCC, HasMemoryOperand);
3636 else {
3637 NewOpc = ReplacementCC;
3638 }
3639
36403592 // Push the MachineInstr to OpsToUpdate.
36413593 // If it is safe to remove CmpInstr, the condition code of these
36423594 // instructions will be modified.
3643 OpsToUpdate.push_back(std::make_pair(&*I, NewOpc));
3595 OpsToUpdate.push_back(std::make_pair(&*I, ReplacementCC));
36443596 }
36453597 if (ModifyEFLAGS || Instr.killsRegister(X86::EFLAGS, TRI)) {
36463598 // It is safe to remove CmpInstr if EFLAGS is updated again or killed.
36953647
36963648 // Modify the condition code of instructions in OpsToUpdate.
36973649 for (auto &Op : OpsToUpdate) {
3698 if (X86::getCondFromCMov(*Op.first) != X86::COND_INVALID)
3650 if (Op.first->isBranch())
3651 Op.first->setDesc(get(GetCondBranchFromCond(Op.second)));
3652 else
36993653 Op.first->getOperand(Op.first->getDesc().getNumOperands() - 1)
37003654 .setImm(Op.second);
3701 else
3702 Op.first->setDesc(get(Op.second));
37033655 }
37043656 return true;
37053657 }
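The OpsToUpdate rewrite above now distinguishes only two cases: branches still need a new descriptor, while setcc and cmov keep their opcode and only have the condition-code immediate (their last explicit operand) patched. A condensed sketch of that step, using the same helper name the hunk calls (illustrative, not a drop-in function from the patch):

    #include "X86InstrInfo.h"   // assumed X86 backend private header

    using namespace llvm;

    static void rewriteCondUser(MachineInstr &MI, X86::CondCode NewCC,
                                const X86InstrInfo &TII) {
      if (MI.isBranch()) {
        MI.setDesc(TII.get(GetCondBranchFromCond(NewCC))); // same helper the hunk above uses
        return;
      }
      // SETCCr/SETCCm/CMOVxx: the condition code is the last explicit operand.
      MI.getOperand(MI.getDesc().getNumOperands() - 1).setImm(NewCC);
    }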
4141 /// the instruction operands should be swapped to match the condition code.
4242 std::pair<CondCode, bool> getX86ConditionCode(CmpInst::Predicate Predicate);
4343
44 /// Return a set opcode for the given condition and whether it has
45 /// a memory operand.
46 unsigned getSETFromCond(CondCode CC, bool HasMemoryOperand = false);
44 /// Return a setcc opcode based on whether it has a memory operand.
45 unsigned getSETOpc(bool HasMemoryOperand = false);
4746
4847 /// Return a cmov opcode for the given register size in bytes, and operand type.
4948 unsigned getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand = false);
5251 CondCode getCondFromBranchOpc(unsigned Opc);
5352
5453 // Turn setCC opcode into condition code.
55 CondCode getCondFromSETOpc(unsigned Opc);
54 CondCode getCondFromSETCC(const MachineInstr &MI);
5655
5756 // Turn CMov opcode into condition code.
5857 CondCode getCondFromCMov(const MachineInstr &MI);
946946 bool SwapArgs;
947947 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
948948 (CmpInst::Predicate)I.getOperand(1).getPredicate());
949 unsigned OpSet = X86::getSETFromCond(CC);
950949
951950 unsigned LHS = I.getOperand(2).getReg();
952951 unsigned RHS = I.getOperand(3).getReg();
980979 .addReg(RHS);
981980
982981 MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
983 TII.get(OpSet), I.getOperand(0).getReg());
982 TII.get(X86::SETCCr), I.getOperand(0).getReg()).addImm(CC);
984983
985984 constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
986985 constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);
10011000
10021001 // FCMP_OEQ and FCMP_UNE cannot be checked with a single instruction.
10031002 static const uint16_t SETFOpcTable[2][3] = {
1004 {X86::SETEr, X86::SETNPr, X86::AND8rr},
1005 {X86::SETNEr, X86::SETPr, X86::OR8rr}};
1003 {X86::COND_E, X86::COND_NP, X86::AND8rr},
1004 {X86::COND_NE, X86::COND_P, X86::OR8rr}};
10061005 const uint16_t *SETFOpc = nullptr;
10071006 switch (Predicate) {
10081007 default:
10421041 unsigned FlagReg1 = MRI.createVirtualRegister(&X86::GR8RegClass);
10431042 unsigned FlagReg2 = MRI.createVirtualRegister(&X86::GR8RegClass);
10441043 MachineInstr &Set1 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1045 TII.get(SETFOpc[0]), FlagReg1);
1044 TII.get(X86::SETCCr), FlagReg1).addImm(SETFOpc[0]);
10461045 MachineInstr &Set2 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
1047 TII.get(SETFOpc[1]), FlagReg2);
1046 TII.get(X86::SETCCr), FlagReg2).addImm(SETFOpc[1]);
10481047 MachineInstr &Set3 = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
10491048 TII.get(SETFOpc[2]), ResultReg)
10501049 .addReg(FlagReg1)
10621061 bool SwapArgs;
10631062 std::tie(CC, SwapArgs) = X86::getX86ConditionCode(Predicate);
10641063 assert(CC <= X86::LAST_VALID_COND && "Unexpected condition code.");
1065 unsigned Opc = X86::getSETFromCond(CC);
10661064
10671065 if (SwapArgs)
10681066 std::swap(LhsReg, RhsReg);
10741072 .addReg(RhsReg);
10751073
10761074 MachineInstr &Set =
1077 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opc), ResultReg);
1075 *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::SETCCr), ResultReg).addImm(CC);
10781076 constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
10791077 constrainSelectedInstRegOperands(Set, TII, TRI, RBI);
10801078 I.eraseFromParent();
735735 ADC16i16, SBB16i16,
736736 ADC32i32, SBB32i32,
737737 ADC64i32, SBB64i32)>;
738 def: InstRW<[BWWriteResGroup20], (instregex "SET(A|BE)r")>;
739738
740739 def BWWriteResGroup22 : SchedWriteRes<[BWPort4,BWPort6,BWPort237]> {
741740 let Latency = 2;
814813 let ResourceCycles = [1,1,1,1];
815814 }
816815 def: InstRW<[BWWriteResGroup38], (instrs CALL64pcrel32)>;
817 def: InstRW<[BWWriteResGroup38], (instregex "SET(A|BE)m")>;
818816
819817 def BWWriteResGroup39 : SchedWriteRes<[BWPort0,BWPort1]> {
820818 let Latency = 4;
16261624 def : InstRW<[BWCMOVA_CMOVBErr], (instrs CMOV16rr, CMOV32rr, CMOV64rr)>;
16271625 def : InstRW<[BWCMOVA_CMOVBErm], (instrs CMOV16rm, CMOV32rm, CMOV64rm)>;
16281626
1627 // SETCCs that use both Z and C flag require an extra uop.
1628 def BWWriteSETA_SETBEr : SchedWriteRes<[BWPort06,BWPort0156]> {
1629 let Latency = 2;
1630 let ResourceCycles = [1,1];
1631 let NumMicroOps = 2;
1632 }
1633
1634 def BWWriteSETA_SETBEm : SchedWriteRes<[BWPort4,BWPort237,BWPort06,BWPort0156]> {
1635 let Latency = 3;
1636 let ResourceCycles = [1,1,1,1];
1637 let NumMicroOps = 4;
1638 }
1639
1640 def BWSETA_SETBErr : SchedWriteVariant<[
1641 SchedVar<MCSchedPredicate<IsSETAr_Or_SETBEr>, [BWWriteSETA_SETBEr]>,
1642 SchedVar<NoSchedPred, [WriteSETCC]>
1643 ]>;
1644
1645 def BWSETA_SETBErm : SchedWriteVariant<[
1646 SchedVar<MCSchedPredicate<IsSETAm_Or_SETBEm>, [BWWriteSETA_SETBEm]>,
1647 SchedVar<NoSchedPred, [WriteSETCCStore]>
1648 ]>;
1649
1650 def : InstRW<[BWSETA_SETBErr], (instrs SETCCr)>;
1651 def : InstRW<[BWSETA_SETBErm], (instrs SETCCm)>;
1652
16291653 } // SchedModel
11251125 let ResourceCycles = [1,1];
11261126 }
11271127 def: InstRW<[HWWriteResGroup35], (instrs CWD, JCXZ, JECXZ, JRCXZ)>;
1128 def: InstRW<[HWWriteResGroup35], (instregex "SET(A|BE)r")>;
11291128
11301129 def HWWriteResGroup36_2 : SchedWriteRes<[HWPort5,HWPort23]> {
11311130 let Latency = 7;
11711170 let ResourceCycles = [1,1,1,1];
11721171 }
11731172 def: InstRW<[HWWriteResGroup45], (instrs CALL64pcrel32)>;
1174 def: InstRW<[HWWriteResGroup45], (instregex "SET(A|BE)m")>;
11751173
11761174 def HWWriteResGroup46 : SchedWriteRes<[HWPort4,HWPort23,HWPort237,HWPort06]> {
11771175 let Latency = 8;
19101908 def : InstRW<[HWCMOVA_CMOVBErr], (instrs CMOV16rr, CMOV32rr, CMOV64rr)>;
19111909 def : InstRW<[HWCMOVA_CMOVBErm], (instrs CMOV16rm, CMOV32rm, CMOV64rm)>;
19121910
1911 // SETCCs that use both Z and C flag require an extra uop.
1912 def HWWriteSETA_SETBEr : SchedWriteRes<[HWPort06,HWPort0156]> {
1913 let Latency = 2;
1914 let ResourceCycles = [1,1];
1915 let NumMicroOps = 2;
1916 }
1917
1918 def HWWriteSETA_SETBEm : SchedWriteRes<[HWPort4,HWPort237,HWPort06,HWPort0156]> {
1919 let Latency = 3;
1920 let ResourceCycles = [1,1,1,1];
1921 let NumMicroOps = 4;
1922 }
1923
1924 def HWSETA_SETBErr : SchedWriteVariant<[
1925 SchedVar<MCSchedPredicate<IsSETAr_Or_SETBEr>, [HWWriteSETA_SETBEr]>,
1926 SchedVar<NoSchedPred, [WriteSETCC]>
1927 ]>;
1928
1929 def HWSETA_SETBErm : SchedWriteVariant<[
1930 SchedVar<MCSchedPredicate<IsSETAm_Or_SETBEm>, [HWWriteSETA_SETBEm]>,
1931 SchedVar<NoSchedPred, [WriteSETCCStore]>
1932 ]>;
1933
1934 def : InstRW<[HWSETA_SETBErr], (instrs SETCCr)>;
1935 def : InstRW<[HWSETA_SETBErm], (instrs SETCCm)>;
1936
19131937 } // SchedModel
7171 CheckImmOperand_s<7, "X86::COND_A">,
7272 CheckImmOperand_s<7, "X86::COND_BE">
7373 ]>;
74
75 // A predicate to check for COND_A and COND_BE SETCCs which have an extra uop
76 // on recent Intel CPUs.
77 def IsSETAr_Or_SETBEr : CheckAny<[
78 CheckImmOperand_s<1, "X86::COND_A">,
79 CheckImmOperand_s<1, "X86::COND_BE">
80 ]>;
81
82 def IsSETAm_Or_SETBEm : CheckAny<[
83 CheckImmOperand_s<5, "X86::COND_A">,
84 CheckImmOperand_s<5, "X86::COND_BE">
85 ]>;
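The operand indices checked above fall out of the operand layout in the new instruction definitions: SETCCr is (outs GR8:$dst), (ins ccode:$cond), so the condition code is operand 1, while SETCCm carries the five memory-address operands first, so its condition code is operand 5. A small sketch of the equivalent MachineInstr-level lookup (assumes the X86 backend headers; illustrative only):

    #include "X86InstrInfo.h"   // assumed X86 backend private header

    using namespace llvm;

    static int64_t getSetCCCondImm(const MachineInstr &MI) {
      // SETCCr: operand 0 = GR8 dst, operand 1 = condition code.
      // SETCCm: operands 0-4 = memory address, operand 5 = condition code.
      unsigned Idx = (MI.getOpcode() == X86::SETCCm) ? 5 : 1;
      return MI.getOperand(Idx).getImm();
    }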
614614 MMX_PSIGNDrr,
615615 MMX_PSIGNWrr)>;
616616
617 def SBWriteResGroup9 : SchedWriteRes<[SBPort05]> {
618 let Latency = 2;
619 let NumMicroOps = 2;
620 let ResourceCycles = [2];
621 }
622 def: InstRW<[SBWriteResGroup9], (instregex "SET(A|BE)r")>;
623
624617 def SBWriteResGroup11 : SchedWriteRes<[SBPort015]> {
625618 let Latency = 2;
626619 let NumMicroOps = 2;
770763 let ResourceCycles = [1,3];
771764 }
772765 def: InstRW<[SBWriteResGroup41], (instrs FNINIT)>;
773
774 def SBWriteResGroup43 : SchedWriteRes<[SBPort4,SBPort23,SBPort05]> {
775 let Latency = 3;
776 let NumMicroOps = 4;
777 let ResourceCycles = [1,1,2];
778 }
779 def: InstRW<[SBWriteResGroup43], (instregex "SET(A|BE)m")>;
780766
781767 def SBWriteResGroup45 : SchedWriteRes<[SBPort0,SBPort4,SBPort23,SBPort15]> {
782768 let Latency = 5;
11971183 def : InstRW<[SBCMOVA_CMOVBErr], (instrs CMOV16rr, CMOV32rr, CMOV64rr)>;
11981184 def : InstRW<[SBCMOVA_CMOVBErm], (instrs CMOV16rm, CMOV32rm, CMOV64rm)>;
11991185
1186 // SETCCs that use both Z and C flag require an extra uop.
1187 def SBWriteSETA_SETBEr : SchedWriteRes<[SBPort05]> {
1188 let Latency = 2;
1189 let ResourceCycles = [2];
1190 let NumMicroOps = 2;
1191 }
1192
1193 def SBWriteSETA_SETBEm : SchedWriteRes<[SBPort4,SBPort23,SBPort05]> {
1194 let Latency = 3;
1195 let ResourceCycles = [1,1,2];
1196 let NumMicroOps = 4;
1197 }
1198
1199 def SBSETA_SETBErr : SchedWriteVariant<[
1200 SchedVar<MCSchedPredicate<IsSETAr_Or_SETBEr>, [SBWriteSETA_SETBEr]>,
1201 SchedVar<NoSchedPred, [WriteSETCC]>
1202 ]>;
1203
1204 def SBSETA_SETBErm : SchedWriteVariant<[
1205 SchedVar<MCSchedPredicate<IsSETAm_Or_SETBEm>, [SBWriteSETA_SETBEm]>,
1206 SchedVar<NoSchedPred, [WriteSETCCStore]>
1207 ]>;
1208
1209 def : InstRW<[SBSETA_SETBErr], (instrs SETCCr)>;
1210 def : InstRW<[SBSETA_SETBErm], (instrs SETCCm)>;
1211
12001212 } // SchedModel
697697 def: InstRW<[SKLWriteResGroup14], (instrs FDECSTP,
698698 MMX_MOVDQ2Qrr)>;
699699
700 def SKLWriteResGroup15 : SchedWriteRes<[SKLPort06]> {
701 let Latency = 2;
702 let NumMicroOps = 2;
703 let ResourceCycles = [2];
704 }
705 def: InstRW<[SKLWriteResGroup15], (instregex "SET(A|BE)r")>;
706
707700 def SKLWriteResGroup17 : SchedWriteRes<[SKLPort0156]> {
708701 let Latency = 2;
709702 let NumMicroOps = 2;
838831 let ResourceCycles = [1,1,1];
839832 }
840833 def: InstRW<[SKLWriteResGroup43], (instrs FNSTSWm)>;
841
842 def SKLWriteResGroup44 : SchedWriteRes<[SKLPort4,SKLPort237,SKLPort06]> {
843 let Latency = 3;
844 let NumMicroOps = 4;
845 let ResourceCycles = [1,1,2];
846 }
847 def: InstRW<[SKLWriteResGroup44], (instregex "SET(A|BE)m")>;
848834
849835 def SKLWriteResGroup45 : SchedWriteRes<[SKLPort4,SKLPort6,SKLPort237,SKLPort0156]> {
850836 let Latency = 3;
17811767 def : InstRW<[SKLCMOVA_CMOVBErr], (instrs CMOV16rr, CMOV32rr, CMOV64rr)>;
17821768 def : InstRW<[SKLCMOVA_CMOVBErm], (instrs CMOV16rm, CMOV32rm, CMOV64rm)>;
17831769
1770 // SETCCs that use both Z and C flag require an extra uop.
1771 def SKLWriteSETA_SETBEr : SchedWriteRes<[SKLPort06]> {
1772 let Latency = 2;
1773 let ResourceCycles = [2];
1774 let NumMicroOps = 2;
1775 }
1776
1777 def SKLWriteSETA_SETBEm : SchedWriteRes<[SKLPort4,SKLPort237,SKLPort06]> {
1778 let Latency = 3;
1779 let ResourceCycles = [1,1,2];
1780 let NumMicroOps = 4;
1781 }
1782
1783 def SKLSETA_SETBErr : SchedWriteVariant<[
1784 SchedVar<MCSchedPredicate<IsSETAr_Or_SETBEr>, [SKLWriteSETA_SETBEr]>,
1785 SchedVar<NoSchedPred, [WriteSETCC]>
1786 ]>;
1787
1788 def SKLSETA_SETBErm : SchedWriteVariant<[
1789 SchedVar<MCSchedPredicate<IsSETAm_Or_SETBEm>, [SKLWriteSETA_SETBEm]>,
1790 SchedVar<NoSchedPred, [WriteSETCCStore]>
1791 ]>;
1792
1793 def : InstRW<[SKLSETA_SETBErr], (instrs SETCCr)>;
1794 def : InstRW<[SKLSETA_SETBErm], (instrs SETCCm)>;
1795
17841796 } // SchedModel
721721 def: InstRW<[SKXWriteResGroup14], (instrs FDECSTP,
722722 MMX_MOVDQ2Qrr)>;
723723
724 def SKXWriteResGroup15 : SchedWriteRes<[SKXPort06]> {
725 let Latency = 2;
726 let NumMicroOps = 2;
727 let ResourceCycles = [2];
728 }
729 def: InstRW<[SKXWriteResGroup15], (instregex "SET(A|BE)r")>;
730
731724 def SKXWriteResGroup17 : SchedWriteRes<[SKXPort0156]> {
732725 let Latency = 2;
733726 let NumMicroOps = 2;
899892 let ResourceCycles = [1,1,1];
900893 }
901894 def: InstRW<[SKXWriteResGroup45], (instrs FNSTSWm)>;
902
903 def SKXWriteResGroup46 : SchedWriteRes<[SKXPort4,SKXPort237,SKXPort06]> {
904 let Latency = 3;
905 let NumMicroOps = 4;
906 let ResourceCycles = [1,1,2];
907 }
908 def: InstRW<[SKXWriteResGroup46], (instregex "SET(A|BE)m")>;
909895
910896 def SKXWriteResGroup47 : SchedWriteRes<[SKXPort4,SKXPort6,SKXPort237,SKXPort0156]> {
911897 let Latency = 3;
24972483 def : InstRW<[SKXCMOVA_CMOVBErr], (instrs CMOV16rr, CMOV32rr, CMOV64rr)>;
24982484 def : InstRW<[SKXCMOVA_CMOVBErm], (instrs CMOV16rm, CMOV32rm, CMOV64rm)>;
24992485
2486 // SETCCs that use both Z and C flag require an extra uop.
2487 def SKXWriteSETA_SETBEr : SchedWriteRes<[SKXPort06]> {
2488 let Latency = 2;
2489 let ResourceCycles = [2];
2490 let NumMicroOps = 2;
2491 }
2492
2493 def SKXWriteSETA_SETBEm : SchedWriteRes<[SKXPort4,SKXPort237,SKXPort06]> {
2494 let Latency = 3;
2495 let ResourceCycles = [1,1,2];
2496 let NumMicroOps = 4;
2497 }
2498
2499 def SKXSETA_SETBErr : SchedWriteVariant<[
2500 SchedVar<MCSchedPredicate<IsSETAr_Or_SETBEr>, [SKXWriteSETA_SETBEr]>,
2501 SchedVar<NoSchedPred, [WriteSETCC]>
2502 ]>;
2503
2504 def SKXSETA_SETBErm : SchedWriteVariant<[
2505 SchedVar<MCSchedPredicate<IsSETAm_Or_SETBEm>, [SKXWriteSETA_SETBEm]>,
2506 SchedVar<NoSchedPred, [WriteSETCCStore]>
2507 ]>;
2508
2509 def : InstRW<[SKXSETA_SETBErr], (instrs SETCCr)>;
2510 def : InstRW<[SKXSETA_SETBErm], (instrs SETCCm)>;
2511
25002512 } // SchedModel
471471 let ResourceCycles = [2];
472472 let NumMicroOps = 2;
473473 }
474 def : InstRW<[PdWriteSETGEmSETGmSETLEmSETLm], (instrs SETGEm, SETGm,
475 SETLEm, SETLm)>;
474
475 def PdSETGEmSETGmSETLEmSETLm : SchedWriteVariant<[
476 SchedVar<MCSchedPredicate<CheckImmOperand_s<5, "X86::COND_GE">>, [PdWriteSETGEmSETGmSETLEmSETLm]>,
477 SchedVar<MCSchedPredicate<CheckImmOperand_s<5, "X86::COND_G">>, [PdWriteSETGEmSETGmSETLEmSETLm]>,
478 SchedVar<MCSchedPredicate<CheckImmOperand_s<5, "X86::COND_LE">>, [PdWriteSETGEmSETGmSETLEmSETLm]>,
479 SchedVar<MCSchedPredicate<CheckImmOperand_s<5, "X86::COND_L">>, [PdWriteSETGEmSETGmSETLEmSETLm]>,
480 SchedVar<NoSchedPred, [WriteSETCCStore]>
481 ]>;
482 def : InstRW<[PdSETGEmSETGmSETLEmSETLm], (instrs SETCCm)>;
476483
477484 defm : PdWriteRes;
478485
9898 ; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY $dil
9999 ; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY $sil
100100 ; CHECK: CMP8rr [[COPY]], [[COPY1]], implicit-def $eflags
101 ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
102 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit
101 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
102 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit
103103 ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
104104 ; CHECK: $eax = COPY [[AND32ri8_]]
105105 ; CHECK: RET 0, implicit $eax
129129 ; CHECK: [[COPY:%[0-9]+]]:gr16 = COPY $di
130130 ; CHECK: [[COPY1:%[0-9]+]]:gr16 = COPY $si
131131 ; CHECK: CMP16rr [[COPY]], [[COPY1]], implicit-def $eflags
132 ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
133 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit
132 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
133 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit
134134 ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
135135 ; CHECK: $eax = COPY [[AND32ri8_]]
136136 ; CHECK: RET 0, implicit $eax
160160 ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
161161 ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
162162 ; CHECK: CMP64rr [[COPY]], [[COPY1]], implicit-def $eflags
163 ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
164 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit
163 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
164 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit
165165 ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
166166 ; CHECK: $eax = COPY [[AND32ri8_]]
167167 ; CHECK: RET 0, implicit $eax
191191 ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
192192 ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
193193 ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
194 ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
195 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit
194 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
195 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit
196196 ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
197197 ; CHECK: $eax = COPY [[AND32ri8_]]
198198 ; CHECK: RET 0, implicit $eax
222222 ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
223223 ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
224224 ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
225 ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit $eflags
226 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETNEr]], %subreg.sub_8bit
225 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 5, implicit $eflags
226 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit
227227 ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
228228 ; CHECK: $eax = COPY [[AND32ri8_]]
229229 ; CHECK: RET 0, implicit $eax
253253 ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
254254 ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
255255 ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
256 ; CHECK: [[SETAr:%[0-9]+]]:gr8 = SETAr implicit $eflags
257 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETAr]], %subreg.sub_8bit
256 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 7, implicit $eflags
257 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit
258258 ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
259259 ; CHECK: $eax = COPY [[AND32ri8_]]
260260 ; CHECK: RET 0, implicit $eax
284284 ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
285285 ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
286286 ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
287 ; CHECK: [[SETAEr:%[0-9]+]]:gr8 = SETAEr implicit $eflags
288 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETAEr]], %subreg.sub_8bit
287 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 3, implicit $eflags
288 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit
289289 ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
290290 ; CHECK: $eax = COPY [[AND32ri8_]]
291291 ; CHECK: RET 0, implicit $eax
315315 ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
316316 ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
317317 ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
318 ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit $eflags
319 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETBr]], %subreg.sub_8bit
318 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit $eflags
319 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit
320320 ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
321321 ; CHECK: $eax = COPY [[AND32ri8_]]
322322 ; CHECK: RET 0, implicit $eax
346346 ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
347347 ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
348348 ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
349 ; CHECK: [[SETBEr:%[0-9]+]]:gr8 = SETBEr implicit $eflags
350 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETBEr]], %subreg.sub_8bit
349 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 6, implicit $eflags
350 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit
351351 ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
352352 ; CHECK: $eax = COPY [[AND32ri8_]]
353353 ; CHECK: RET 0, implicit $eax
377377 ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
378378 ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
379379 ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
380 ; CHECK: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
381 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETGr]], %subreg.sub_8bit
380 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 15, implicit $eflags
381 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit
382382 ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
383383 ; CHECK: $eax = COPY [[AND32ri8_]]
384384 ; CHECK: RET 0, implicit $eax
408408 ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
409409 ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
410410 ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
411 ; CHECK: [[SETGEr:%[0-9]+]]:gr8 = SETGEr implicit $eflags
412 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETGEr]], %subreg.sub_8bit
411 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 13, implicit $eflags
412 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit
413413 ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
414414 ; CHECK: $eax = COPY [[AND32ri8_]]
415415 ; CHECK: RET 0, implicit $eax
439439 ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
440440 ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
441441 ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
442 ; CHECK: [[SETLr:%[0-9]+]]:gr8 = SETLr implicit $eflags
443 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETLr]], %subreg.sub_8bit
442 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 12, implicit $eflags
443 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit
444444 ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
445445 ; CHECK: $eax = COPY [[AND32ri8_]]
446446 ; CHECK: RET 0, implicit $eax
470470 ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
471471 ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
472472 ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
473 ; CHECK: [[SETLEr:%[0-9]+]]:gr8 = SETLEr implicit $eflags
474 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETLEr]], %subreg.sub_8bit
473 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 14, implicit $eflags
474 ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETCCr]], %subreg.sub_8bit
475475 ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
476476 ; CHECK: $eax = COPY [[AND32ri8_]]
477477 ; CHECK: RET 0, implicit $eax
126126 ; ALL: [[COPY4:%[0-9]+]]:gr8 = COPY [[COPY3]].sub_8bit
127127 ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
128128 ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
129 ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
130 ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
129 ; ALL: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 15, implicit $eflags
130 ; ALL: TEST8ri [[SETCCr]], 1, implicit-def $eflags
131131 ; ALL: JNE_1 %bb.2, implicit $eflags
132132 ; ALL: bb.1.cond.false:
133133 ; ALL: successors: %bb.2(0x80000000)
185185 ; ALL: [[COPY4:%[0-9]+]]:gr16 = COPY [[COPY3]].sub_16bit
186186 ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
187187 ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
188 ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
189 ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
188 ; ALL: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 15, implicit $eflags
189 ; ALL: TEST8ri [[SETCCr]], 1, implicit-def $eflags
190190 ; ALL: JNE_1 %bb.2, implicit $eflags
191191 ; ALL: bb.1.cond.false:
192192 ; ALL: successors: %bb.2(0x80000000)
240240 ; ALL: [[COPY2:%[0-9]+]]:gr32 = COPY $edx
241241 ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
242242 ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
243 ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
244 ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
243 ; ALL: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 15, implicit $eflags
244 ; ALL: TEST8ri [[SETCCr]], 1, implicit-def $eflags
245245 ; ALL: JNE_1 %bb.1, implicit $eflags
246246 ; ALL: JMP_1 %bb.2
247247 ; ALL: bb.1.cond.true:
303303 ; ALL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdx
304304 ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
305305 ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
306 ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
307 ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
306 ; ALL: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 15, implicit $eflags
307 ; ALL: TEST8ri [[SETCCr]], 1, implicit-def $eflags
308308 ; ALL: JNE_1 %bb.1, implicit $eflags
309309 ; ALL: JMP_1 %bb.2
310310 ; ALL: bb.1.cond.true:
375375 ; ALL: [[COPY4:%[0-9]+]]:fr32 = COPY [[COPY3]]
376376 ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
377377 ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
378 ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
379 ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
378 ; ALL: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 15, implicit $eflags
379 ; ALL: TEST8ri [[SETCCr]], 1, implicit-def $eflags
380380 ; ALL: JNE_1 %bb.2, implicit $eflags
381381 ; ALL: bb.1.cond.false:
382382 ; ALL: successors: %bb.2(0x80000000)
436436 ; ALL: [[COPY4:%[0-9]+]]:fr64 = COPY [[COPY3]]
437437 ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
438438 ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
439 ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
440 ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
439 ; ALL: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 15, implicit $eflags
440 ; ALL: TEST8ri [[SETCCr]], 1, implicit-def $eflags
441441 ; ALL: JNE_1 %bb.2, implicit $eflags
442442 ; ALL: bb.1.cond.false:
443443 ; ALL: successors: %bb.2(0x80000000)
168168 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
169169 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
170170 ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags
171 ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
172 ; CHECK: [[SETNPr:%[0-9]+]]:gr8 = SETNPr implicit $eflags
173 ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[SETEr]], [[SETNPr]], implicit-def $eflags
171 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
172 ; CHECK: [[SETCCr1:%[0-9]+]]:gr8 = SETCCr 11, implicit $eflags
173 ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[SETCCr]], [[SETCCr1]], implicit-def $eflags
174174 ; CHECK: $al = COPY [[AND8rr]]
175175 ; CHECK: RET 0, implicit $al
176176 %2:vecr(s128) = COPY $xmm0
208208 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
209209 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
210210 ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags
211 ; CHECK: [[SETAr:%[0-9]+]]:gr8 = SETAr implicit $eflags
212 ; CHECK: $al = COPY [[SETAr]]
211 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 7, implicit $eflags
212 ; CHECK: $al = COPY [[SETCCr]]
213213 ; CHECK: RET 0, implicit $al
214214 %2:vecr(s128) = COPY $xmm0
215215 %0:vecr(s32) = G_TRUNC %2(s128)
246246 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
247247 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
248248 ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags
249 ; CHECK: [[SETAEr:%[0-9]+]]:gr8 = SETAEr implicit $eflags
250 ; CHECK: $al = COPY [[SETAEr]]
249 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 3, implicit $eflags
250 ; CHECK: $al = COPY [[SETCCr]]
251251 ; CHECK: RET 0, implicit $al
252252 %2:vecr(s128) = COPY $xmm0
253253 %0:vecr(s32) = G_TRUNC %2(s128)
284284 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
285285 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
286286 ; CHECK: UCOMISSrr [[COPY3]], [[COPY1]], implicit-def $eflags
287 ; CHECK: [[SETAr:%[0-9]+]]:gr8 = SETAr implicit $eflags
288 ; CHECK: $al = COPY [[SETAr]]
287 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 7, implicit $eflags
288 ; CHECK: $al = COPY [[SETCCr]]
289289 ; CHECK: RET 0, implicit $al
290290 %2:vecr(s128) = COPY $xmm0
291291 %0:vecr(s32) = G_TRUNC %2(s128)
322322 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
323323 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
324324 ; CHECK: UCOMISSrr [[COPY3]], [[COPY1]], implicit-def $eflags
325 ; CHECK: [[SETAEr:%[0-9]+]]:gr8 = SETAEr implicit $eflags
326 ; CHECK: $al = COPY [[SETAEr]]
325 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 3, implicit $eflags
326 ; CHECK: $al = COPY [[SETCCr]]
327327 ; CHECK: RET 0, implicit $al
328328 %2:vecr(s128) = COPY $xmm0
329329 %0:vecr(s32) = G_TRUNC %2(s128)
360360 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
361361 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
362362 ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags
363 ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit $eflags
364 ; CHECK: $al = COPY [[SETNEr]]
363 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 5, implicit $eflags
364 ; CHECK: $al = COPY [[SETCCr]]
365365 ; CHECK: RET 0, implicit $al
366366 %2:vecr(s128) = COPY $xmm0
367367 %0:vecr(s32) = G_TRUNC %2(s128)
398398 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
399399 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
400400 ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags
401 ; CHECK: [[SETNPr:%[0-9]+]]:gr8 = SETNPr implicit $eflags
402 ; CHECK: $al = COPY [[SETNPr]]
401 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 11, implicit $eflags
402 ; CHECK: $al = COPY [[SETCCr]]
403403 ; CHECK: RET 0, implicit $al
404404 %2:vecr(s128) = COPY $xmm0
405405 %0:vecr(s32) = G_TRUNC %2(s128)
436436 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
437437 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
438438 ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags
439 ; CHECK: [[SETPr:%[0-9]+]]:gr8 = SETPr implicit $eflags
440 ; CHECK: $al = COPY [[SETPr]]
439 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 10, implicit $eflags
440 ; CHECK: $al = COPY [[SETCCr]]
441441 ; CHECK: RET 0, implicit $al
442442 %2:vecr(s128) = COPY $xmm0
443443 %0:vecr(s32) = G_TRUNC %2(s128)
474474 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
475475 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
476476 ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags
477 ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
478 ; CHECK: $al = COPY [[SETEr]]
477 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
478 ; CHECK: $al = COPY [[SETCCr]]
479479 ; CHECK: RET 0, implicit $al
480480 %2:vecr(s128) = COPY $xmm0
481481 %0:vecr(s32) = G_TRUNC %2(s128)
512512 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
513513 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
514514 ; CHECK: UCOMISSrr [[COPY3]], [[COPY1]], implicit-def $eflags
515 ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit $eflags
516 ; CHECK: $al = COPY [[SETBr]]
515 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit $eflags
516 ; CHECK: $al = COPY [[SETCCr]]
517517 ; CHECK: RET 0, implicit $al
518518 %2:vecr(s128) = COPY $xmm0
519519 %0:vecr(s32) = G_TRUNC %2(s128)
550550 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
551551 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
552552 ; CHECK: UCOMISSrr [[COPY3]], [[COPY1]], implicit-def $eflags
553 ; CHECK: [[SETBEr:%[0-9]+]]:gr8 = SETBEr implicit $eflags
554 ; CHECK: $al = COPY [[SETBEr]]
553 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 6, implicit $eflags
554 ; CHECK: $al = COPY [[SETCCr]]
555555 ; CHECK: RET 0, implicit $al
556556 %2:vecr(s128) = COPY $xmm0
557557 %0:vecr(s32) = G_TRUNC %2(s128)
588588 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
589589 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
590590 ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags
591 ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit $eflags
592 ; CHECK: $al = COPY [[SETBr]]
591 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit $eflags
592 ; CHECK: $al = COPY [[SETCCr]]
593593 ; CHECK: RET 0, implicit $al
594594 %2:vecr(s128) = COPY $xmm0
595595 %0:vecr(s32) = G_TRUNC %2(s128)
626626 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
627627 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
628628 ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags
629 ; CHECK: [[SETBEr:%[0-9]+]]:gr8 = SETBEr implicit $eflags
630 ; CHECK: $al = COPY [[SETBEr]]
629 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 6, implicit $eflags
630 ; CHECK: $al = COPY [[SETCCr]]
631631 ; CHECK: RET 0, implicit $al
632632 %2:vecr(s128) = COPY $xmm0
633633 %0:vecr(s32) = G_TRUNC %2(s128)
664664 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
665665 ; CHECK: [[COPY3:%[0-9]+]]:fr32 = COPY [[COPY2]]
666666 ; CHECK: UCOMISSrr [[COPY1]], [[COPY3]], implicit-def $eflags
667 ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit $eflags
668 ; CHECK: [[SETPr:%[0-9]+]]:gr8 = SETPr implicit $eflags
669 ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[SETNEr]], [[SETPr]], implicit-def $eflags
667 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 5, implicit $eflags
668 ; CHECK: [[SETCCr1:%[0-9]+]]:gr8 = SETCCr 10, implicit $eflags
669 ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[SETCCr]], [[SETCCr1]], implicit-def $eflags
670670 ; CHECK: $al = COPY [[OR8rr]]
671671 ; CHECK: RET 0, implicit $al
672672 %2:vecr(s128) = COPY $xmm0
704704 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
705705 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
706706 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
707 ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
708 ; CHECK: [[SETNPr:%[0-9]+]]:gr8 = SETNPr implicit $eflags
709 ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[SETEr]], [[SETNPr]], implicit-def $eflags
707 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
708 ; CHECK: [[SETCCr1:%[0-9]+]]:gr8 = SETCCr 11, implicit $eflags
709 ; CHECK: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[SETCCr]], [[SETCCr1]], implicit-def $eflags
710710 ; CHECK: $al = COPY [[AND8rr]]
711711 ; CHECK: RET 0, implicit $al
712712 %2:vecr(s128) = COPY $xmm0
744744 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
745745 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
746746 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
747 ; CHECK: [[SETAr:%[0-9]+]]:gr8 = SETAr implicit $eflags
748 ; CHECK: $al = COPY [[SETAr]]
747 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 7, implicit $eflags
748 ; CHECK: $al = COPY [[SETCCr]]
749749 ; CHECK: RET 0, implicit $al
750750 %2:vecr(s128) = COPY $xmm0
751751 %0:vecr(s64) = G_TRUNC %2(s128)
782782 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
783783 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
784784 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
785 ; CHECK: [[SETAEr:%[0-9]+]]:gr8 = SETAEr implicit $eflags
786 ; CHECK: $al = COPY [[SETAEr]]
785 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 3, implicit $eflags
786 ; CHECK: $al = COPY [[SETCCr]]
787787 ; CHECK: RET 0, implicit $al
788788 %2:vecr(s128) = COPY $xmm0
789789 %0:vecr(s64) = G_TRUNC %2(s128)
820820 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
821821 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
822822 ; CHECK: UCOMISDrr [[COPY3]], [[COPY1]], implicit-def $eflags
823 ; CHECK: [[SETAr:%[0-9]+]]:gr8 = SETAr implicit $eflags
824 ; CHECK: $al = COPY [[SETAr]]
823 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 7, implicit $eflags
824 ; CHECK: $al = COPY [[SETCCr]]
825825 ; CHECK: RET 0, implicit $al
826826 %2:vecr(s128) = COPY $xmm0
827827 %0:vecr(s64) = G_TRUNC %2(s128)
858858 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
859859 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
860860 ; CHECK: UCOMISDrr [[COPY3]], [[COPY1]], implicit-def $eflags
861 ; CHECK: [[SETAEr:%[0-9]+]]:gr8 = SETAEr implicit $eflags
862 ; CHECK: $al = COPY [[SETAEr]]
861 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 3, implicit $eflags
862 ; CHECK: $al = COPY [[SETCCr]]
863863 ; CHECK: RET 0, implicit $al
864864 %2:vecr(s128) = COPY $xmm0
865865 %0:vecr(s64) = G_TRUNC %2(s128)
896896 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
897897 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
898898 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
899 ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit $eflags
900 ; CHECK: $al = COPY [[SETNEr]]
899 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 5, implicit $eflags
900 ; CHECK: $al = COPY [[SETCCr]]
901901 ; CHECK: RET 0, implicit $al
902902 %2:vecr(s128) = COPY $xmm0
903903 %0:vecr(s64) = G_TRUNC %2(s128)
934934 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
935935 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
936936 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
937 ; CHECK: [[SETNPr:%[0-9]+]]:gr8 = SETNPr implicit $eflags
938 ; CHECK: $al = COPY [[SETNPr]]
937 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 11, implicit $eflags
938 ; CHECK: $al = COPY [[SETCCr]]
939939 ; CHECK: RET 0, implicit $al
940940 %2:vecr(s128) = COPY $xmm0
941941 %0:vecr(s64) = G_TRUNC %2(s128)
972972 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
973973 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
974974 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
975 ; CHECK: [[SETPr:%[0-9]+]]:gr8 = SETPr implicit $eflags
976 ; CHECK: $al = COPY [[SETPr]]
975 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 10, implicit $eflags
976 ; CHECK: $al = COPY [[SETCCr]]
977977 ; CHECK: RET 0, implicit $al
978978 %2:vecr(s128) = COPY $xmm0
979979 %0:vecr(s64) = G_TRUNC %2(s128)
10101010 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
10111011 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
10121012 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
1013 ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
1014 ; CHECK: $al = COPY [[SETEr]]
1013 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
1014 ; CHECK: $al = COPY [[SETCCr]]
10151015 ; CHECK: RET 0, implicit $al
10161016 %2:vecr(s128) = COPY $xmm0
10171017 %0:vecr(s64) = G_TRUNC %2(s128)
10481048 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
10491049 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
10501050 ; CHECK: UCOMISDrr [[COPY3]], [[COPY1]], implicit-def $eflags
1051 ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit $eflags
1052 ; CHECK: $al = COPY [[SETBr]]
1051 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit $eflags
1052 ; CHECK: $al = COPY [[SETCCr]]
10531053 ; CHECK: RET 0, implicit $al
10541054 %2:vecr(s128) = COPY $xmm0
10551055 %0:vecr(s64) = G_TRUNC %2(s128)
10861086 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
10871087 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
10881088 ; CHECK: UCOMISDrr [[COPY3]], [[COPY1]], implicit-def $eflags
1089 ; CHECK: [[SETBEr:%[0-9]+]]:gr8 = SETBEr implicit $eflags
1090 ; CHECK: $al = COPY [[SETBEr]]
1089 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 6, implicit $eflags
1090 ; CHECK: $al = COPY [[SETCCr]]
10911091 ; CHECK: RET 0, implicit $al
10921092 %2:vecr(s128) = COPY $xmm0
10931093 %0:vecr(s64) = G_TRUNC %2(s128)
11241124 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
11251125 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
11261126 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
1127 ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit $eflags
1128 ; CHECK: $al = COPY [[SETBr]]
1127 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit $eflags
1128 ; CHECK: $al = COPY [[SETCCr]]
11291129 ; CHECK: RET 0, implicit $al
11301130 %2:vecr(s128) = COPY $xmm0
11311131 %0:vecr(s64) = G_TRUNC %2(s128)
11621162 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
11631163 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
11641164 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
1165 ; CHECK: [[SETBEr:%[0-9]+]]:gr8 = SETBEr implicit $eflags
1166 ; CHECK: $al = COPY [[SETBEr]]
1165 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 6, implicit $eflags
1166 ; CHECK: $al = COPY [[SETCCr]]
11671167 ; CHECK: RET 0, implicit $al
11681168 %2:vecr(s128) = COPY $xmm0
11691169 %0:vecr(s64) = G_TRUNC %2(s128)
12001200 ; CHECK: [[COPY2:%[0-9]+]]:vr128 = COPY $xmm1
12011201 ; CHECK: [[COPY3:%[0-9]+]]:fr64 = COPY [[COPY2]]
12021202 ; CHECK: UCOMISDrr [[COPY1]], [[COPY3]], implicit-def $eflags
1203 ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit $eflags
1204 ; CHECK: [[SETPr:%[0-9]+]]:gr8 = SETPr implicit $eflags
1205 ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[SETNEr]], [[SETPr]], implicit-def $eflags
1203 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 5, implicit $eflags
1204 ; CHECK: [[SETCCr1:%[0-9]+]]:gr8 = SETCCr 10, implicit $eflags
1205 ; CHECK: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[SETCCr]], [[SETCCr1]], implicit-def $eflags
12061206 ; CHECK: $al = COPY [[OR8rr]]
12071207 ; CHECK: RET 0, implicit $al
12081208 %2:vecr(s128) = COPY $xmm0
118118 CMP64rr %0, %1, implicit-def $eflags
119119 %2:gr64 = COPY $eflags
120120 ; CHECK-NOT: COPY{{( killed)?}} $eflags
121 ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETAr implicit $eflags
122 ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit $eflags
121 ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
122 ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
123123 ; CHECK-NOT: COPY{{( killed)?}} $eflags
124124
125125 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
174174 CMP64rr %0, %1, implicit-def $eflags
175175 %2:gr64 = COPY $eflags
176176 ; CHECK-NOT: COPY{{( killed)?}} $eflags
177 ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETAr implicit $eflags
178 ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit $eflags
177 ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
178 ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
179179 ; CHECK-NOT: COPY{{( killed)?}} $eflags
180180
181181 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
229229 CMP64rr %0, %1, implicit-def $eflags
230230 %2:gr64 = COPY $eflags
231231 ; CHECK-NOT: COPY{{( killed)?}} $eflags
232 ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETAr implicit $eflags
233 ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit $eflags
234 ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETEr implicit $eflags
235 ; CHECK-NEXT: %[[NE_REG:[^:]*]]:gr8 = SETNEr implicit $eflags
232 ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
233 ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
234 ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETCCr 4, implicit $eflags
235 ; CHECK-NEXT: %[[NE_REG:[^:]*]]:gr8 = SETCCr 5, implicit $eflags
236236 ; CHECK-NOT: COPY{{( killed)?}} $eflags
237237
238238 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
240240 ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
241241
242242 $eflags = COPY %2
243 %3:gr8 = SETAr implicit $eflags
244 %4:gr8 = SETBr implicit $eflags
245 %5:gr8 = SETEr implicit $eflags
246 SETNEm $rsp, 1, $noreg, -16, $noreg, implicit killed $eflags
243 %3:gr8 = SETCCr 7, implicit $eflags
244 %4:gr8 = SETCCr 2, implicit $eflags
245 %5:gr8 = SETCCr 4, implicit $eflags
246 SETCCm $rsp, 1, $noreg, -16, $noreg, 5, implicit killed $eflags
247247 MOV8mr $rsp, 1, $noreg, -16, $noreg, killed %3
248248 MOV8mr $rsp, 1, $noreg, -16, $noreg, killed %4
249249 MOV8mr $rsp, 1, $noreg, -16, $noreg, killed %5
272272 CMP64rr %0, %1, implicit-def $eflags
273273 %2:gr64 = COPY $eflags
274274 ; CHECK-NOT: COPY{{( killed)?}} $eflags
275 ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETAr implicit $eflags
276 ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit $eflags
277 ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETEr implicit $eflags
275 ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
276 ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
277 ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETCCr 4, implicit $eflags
278278 ; CHECK-NOT: COPY{{( killed)?}} $eflags
279279
280280 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
318318 %2:gr64 = ADD64rr %0, %1, implicit-def $eflags
319319 %3:gr64 = COPY $eflags
320320 ; CHECK-NOT: COPY{{( killed)?}} $eflags
321 ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit $eflags
321 ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
322322 ; CHECK-NOT: COPY{{( killed)?}} $eflags
323323
324324 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
352352 %2:gr64 = SUB64rr %0, %1, implicit-def $eflags
353353 %3:gr64 = COPY killed $eflags
354354 ; CHECK-NOT: COPY{{( killed)?}} $eflags
355 ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit $eflags
355 ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
356356 ; CHECK-NOT: COPY{{( killed)?}} $eflags
357357
358358 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
386386 %2:gr64 = ADD64rr %0, %1, implicit-def $eflags
387387 %3:gr64 = COPY $eflags
388388 ; CHECK-NOT: COPY{{( killed)?}} $eflags
389 ; CHECK: %[[E_REG:[^:]*]]:gr8 = SETEr implicit $eflags
390 ; CHECK-NEXT: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit $eflags
389 ; CHECK: %[[E_REG:[^:]*]]:gr8 = SETCCr 4, implicit $eflags
390 ; CHECK-NEXT: %[[CF_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
391391 ; CHECK-NOT: COPY{{( killed)?}} $eflags
392392
393393 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
425425 %2:gr64 = ADD64rr %0, %1, implicit-def $eflags
426426 %3:gr64 = COPY $eflags
427427 ; CHECK-NOT: COPY{{( killed)?}} $eflags
428 ; CHECK: %[[E_REG:[^:]*]]:gr8 = SETEr implicit $eflags
429 ; CHECK-NEXT: %[[OF_REG:[^:]*]]:gr8 = SETOr implicit $eflags
428 ; CHECK: %[[E_REG:[^:]*]]:gr8 = SETCCr 4, implicit $eflags
429 ; CHECK-NEXT: %[[OF_REG:[^:]*]]:gr8 = SETCCr 0, implicit $eflags
430430 ; CHECK-NOT: COPY{{( killed)?}} $eflags
431431
432432 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
464464 %2:gr64 = ADD64rr %0, %1, implicit-def $eflags
465465 %3:gr64 = COPY $eflags
466466 ; CHECK-NOT: COPY{{( killed)?}} $eflags
467 ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit $eflags
467 ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
468468 ; CHECK-NOT: COPY{{( killed)?}} $eflags
469469
470470 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
498498 %2:gr64 = ADD64rr %0, %1, implicit-def $eflags
499499 %3:gr64 = COPY $eflags
500500 ; CHECK-NOT: COPY{{( killed)?}} $eflags
501 ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit $eflags
501 ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
502502 ; CHECK-NOT: COPY{{( killed)?}} $eflags
503503
504504 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
532532 %2:gr64 = ADD64rr %0, %1, implicit-def $eflags
533533 %3:gr64 = COPY $eflags
534534 ; CHECK-NOT: COPY{{( killed)?}} $eflags
535 ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETBr implicit $eflags
535 ; CHECK: %[[CF_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
536536 ; CHECK-NOT: COPY{{( killed)?}} $eflags
537537
538538 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
598598 CMP64rr %0, %1, implicit-def $eflags
599599 %2:gr64 = COPY $eflags
600600 ; CHECK-NOT: COPY{{( killed)?}} $eflags
601 ; CHECK: %[[S_REG:[^:]*]]:gr8 = SETSr implicit $eflags
602 ; CHECK-NEXT: %[[NE_REG:[^:]*]]:gr8 = SETNEr implicit $eflags
603 ; CHECK-NEXT: %[[A_REG:[^:]*]]:gr8 = SETAr implicit $eflags
604 ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit $eflags
601 ; CHECK: %[[S_REG:[^:]*]]:gr8 = SETCCr 8, implicit $eflags
602 ; CHECK-NEXT: %[[NE_REG:[^:]*]]:gr8 = SETCCr 5, implicit $eflags
603 ; CHECK-NEXT: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
604 ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
605605 ; CHECK-NOT: COPY{{( killed)?}} $eflags
606606
607607 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
671671 CMP64rr %0, %1, implicit-def $eflags
672672 %2:gr64 = COPY $eflags
673673 ; CHECK-NOT: COPY{{( killed)?}} $eflags
674 ; CHECK: %[[S_REG:[^:]*]]:gr8 = SETSr implicit $eflags
675 ; CHECK-NEXT: %[[P_REG:[^:]*]]:gr8 = SETPr implicit $eflags
676 ; CHECK-NEXT: %[[NE_REG:[^:]*]]:gr8 = SETNEr implicit $eflags
677 ; CHECK-NEXT: %[[A_REG:[^:]*]]:gr8 = SETAr implicit $eflags
678 ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit $eflags
679 ; CHECK-NEXT: %[[O_REG:[^:]*]]:gr8 = SETOr implicit $eflags
674 ; CHECK: %[[S_REG:[^:]*]]:gr8 = SETCCr 8, implicit $eflags
675 ; CHECK-NEXT: %[[P_REG:[^:]*]]:gr8 = SETCCr 10, implicit $eflags
676 ; CHECK-NEXT: %[[NE_REG:[^:]*]]:gr8 = SETCCr 5, implicit $eflags
677 ; CHECK-NEXT: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
678 ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
679 ; CHECK-NEXT: %[[O_REG:[^:]*]]:gr8 = SETCCr 0, implicit $eflags
680680 ; CHECK-NOT: COPY{{( killed)?}} $eflags
681681
682682 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
823823 JMP_1 %bb.4
824824 ; CHECK: bb.1:
825825 ; CHECK-NOT: COPY{{( killed)?}} $eflags
826 ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETAr implicit $eflags
827 ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETEr implicit $eflags
828 ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETBr implicit $eflags
826 ; CHECK: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
827 ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETCCr 4, implicit $eflags
828 ; CHECK-NEXT: %[[B_REG:[^:]*]]:gr8 = SETCCr 2, implicit $eflags
829829 ; CHECK-NOT: COPY{{( killed)?}} $eflags
830830
831831 bb.2:
961961 %0:gr64 = COPY $rdi
962962 %1:gr64 = COPY $rsi
963963 CMP64rr %0, %1, implicit-def $eflags
964 %2:gr8 = SETAr implicit $eflags
965 %3:gr8 = SETAEr implicit $eflags
964 %2:gr8 = SETCCr 7, implicit $eflags
965 %3:gr8 = SETCCr 3, implicit $eflags
966966 %4:gr64 = COPY $eflags
967967 ; CHECK: CMP64rr %0, %1, implicit-def $eflags
968 ; CHECK-NEXT: %[[A_REG:[^:]*]]:gr8 = SETAr implicit $eflags
969 ; CHECK-NEXT: %[[AE_REG:[^:]*]]:gr8 = SETAEr implicit $eflags
968 ; CHECK-NEXT: %[[A_REG:[^:]*]]:gr8 = SETCCr 7, implicit $eflags
969 ; CHECK-NEXT: %[[AE_REG:[^:]*]]:gr8 = SETCCr 3, implicit $eflags
970970 ; CHECK-NOT: COPY{{( killed)?}} $eflags
971971
972972 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
10191019 %0:gr64 = COPY $rdi
10201020 %1:gr64 = COPY $rsi
10211021 CMP64rr %0, %1, implicit-def $eflags
1022 SETEm %0, 1, $noreg, -16, $noreg, implicit $eflags
1022 SETCCm %0, 1, $noreg, -16, $noreg, 4, implicit $eflags
10231023 %2:gr64 = COPY $eflags
10241024 ; CHECK: CMP64rr %0, %1, implicit-def $eflags
10251025 ; We cannot reuse this SETE because it stores the flag directly to memory,
10261026 ; so we have two SETEs here. FIXME: It'd be great if something could fold
10271027 ; these automatically. If not, maybe we want to unfold SETcc instructions
10281028 ; writing to memory so we can reuse them.
1029 ; CHECK-NEXT: SETEm {{.*}} implicit $eflags
1030 ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETEr implicit $eflags
1029 ; CHECK-NEXT: SETCCm {{.*}} 4, implicit $eflags
1030 ; CHECK-NEXT: %[[E_REG:[^:]*]]:gr8 = SETCCr 4, implicit $eflags
10311031 ; CHECK-NOT: COPY{{( killed)?}} $eflags
10321032
10331033 ADJCALLSTACKDOWN64 0, 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
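The FIXME in the comment above suggests unfolding a SETcc that writes directly to memory so that its flag value can be reused from a register. A hedged toy sketch of that rewrite on plain strings rather than MachineInstrs; the container and helper names are illustrative only.

// Toy model of the unfold suggested by the FIXME: replace
// "SETCCm <addr>, cc" with "vreg = SETCCr cc" plus an ordinary 8-bit store,
// so later users of the same condition can read vreg instead of re-setting.
#include <cstdio>
#include <string>
#include <vector>

static void unfoldSetccm(std::vector<std::string> &Block, size_t Idx,
                         const std::string &Addr, int CC) {
  Block[Idx] = "%v:gr8 = SETCCr " + std::to_string(CC) + ", implicit $eflags";
  Block.insert(Block.begin() + Idx + 1, "MOV8mr " + Addr + ", killed %v");
}

int main() {
  std::vector<std::string> BB = {
      "SETCCm $rsp, 1, $noreg, -16, $noreg, 4, implicit $eflags"};
  unfoldSetccm(BB, 0, "$rsp, 1, $noreg, -16, $noreg", 4);
  for (const std::string &I : BB)
    std::printf("%s\n", I.c_str());
  return 0;
}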
4444 $ebp = SHR32rCL killed $ebp, implicit-def dead $eflags, implicit $cl
4545 $ebp = XOR32rr killed $ebp, killed $ebx, implicit-def dead $eflags
4646 TEST32rr $edx, $edx, implicit-def $eflags
47 $cl = SETNEr implicit $eflags
47 $cl = SETCCr 5, implicit $eflags
4848 ; This %bl def is antidependent on the above use of $ebx
4949 $bl = MOV8rm $esp, 1, $noreg, 3, _ ; :: (load 1 from %stack.0)
5050 $cl = OR8rr killed $cl, $bl, implicit-def dead $eflags
5353 $ecx = MOV32rm $esp, 1, $noreg, 24, _ ; :: (load 4 from %stack.2)
5454 $edx = SAR32rCL killed $edx, implicit-def dead $eflags, implicit $cl
5555 TEST32rr killed $edx, $edx, implicit-def $eflags
56 $cl = SETNEr implicit $eflags
56 $cl = SETCCr 5, implicit $eflags
5757 ; Verify that removal of the $bl antidependence does not use $ch
5858 ; as a replacement register.
5959 ; CHECK: $cl = AND8rr killed $cl, killed $b
6666 liveins: $cl, $eax, $ebp, $esi
6767
6868 OR32mr $esp, 1, $noreg, 8, $noreg, killed $eax, implicit-def $eflags ; :: (store 4 into %stack.1)
69 $dl = SETNEr implicit $eflags, implicit-def $edx
69 $dl = SETCCr 5, implicit $eflags, implicit-def $edx
7070
7171 bb.3:
7272 liveins: $cl, $ebp, $edx, $esi
9292 ; CHECK: dead [[MOV32rm]].sub_8bit:gr32 = ADD8ri [[MOV32rm]].sub_8bit, -1, implicit-def $eflags
9393 ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %stack.2, 1, $noreg, 0, $noreg :: (load 4 from %stack.2)
9494 ; CHECK: [[ADCX32rm:%[0-9]+]]:gr32 = ADCX32rm [[ADCX32rm]], %stack.1, 1, $noreg, 0, $noreg, implicit-def $eflags, implicit killed $eflags :: (load 4 from %stack.1)
95 ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit killed $eflags
95 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit killed $eflags
9696 ; CHECK: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm %stack.0, 1, $noreg, 0, $noreg :: (load 8 from %stack.0)
9797 ; CHECK: MOV32mr [[MOV64rm]], 1, $noreg, 0, $noreg, [[ADCX32rm]] :: (store 4 into %ir.4, align 1)
98 ; CHECK: $al = COPY [[SETBr]]
98 ; CHECK: $al = COPY [[SETCCr]]
9999 ; CHECK: RET 0, $al
100100 %3:gr64 = COPY $rcx
101101 %2:gr32 = COPY $edx
104104 INLINEASM &nop, 1, 3145738, def dead %4, 12, implicit-def dead early-clobber $rax, 12, implicit-def dead early-clobber $rbx, 12, implicit-def dead early-clobber $rcx, 12, implicit-def dead early-clobber $rdx, 12, implicit-def dead early-clobber $rsi, 12, implicit-def dead early-clobber $rdi, 12, implicit-def dead early-clobber $rbp, 12, implicit-def dead early-clobber $r8, 12, implicit-def dead early-clobber $r9, 12, implicit-def dead early-clobber $r10, 12, implicit-def dead early-clobber $r11, 12, implicit-def dead early-clobber $r12, 12, implicit-def dead early-clobber $r13, 12, implicit-def dead early-clobber $r14, 12, implicit-def dead early-clobber $r15
105105 dead %0.sub_8bit:gr32 = ADD8ri %0.sub_8bit, -1, implicit-def $eflags
106106 %7:gr32 = ADCX32rr %7, %2, implicit-def $eflags, implicit killed $eflags
107 %8:gr8 = SETBr implicit killed $eflags
107 %8:gr8 = SETCCr 2, implicit killed $eflags
108108 MOV32mr %3, 1, $noreg, 0, $noreg, %7 :: (store 4 into %ir.4, align 1)
109109 $al = COPY %8
110110 RET 0, killed $al
144144 ; CHECK: dead [[MOV32rm]].sub_8bit:gr32 = ADD8ri [[MOV32rm]].sub_8bit, -1, implicit-def $eflags
145145 ; CHECK: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm %stack.2, 1, $noreg, 0, $noreg :: (load 8 from %stack.2)
146146 ; CHECK: [[ADCX64rm:%[0-9]+]]:gr64 = ADCX64rm [[ADCX64rm]], %stack.1, 1, $noreg, 0, $noreg, implicit-def $eflags, implicit killed $eflags :: (load 8 from %stack.1)
147 ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit killed $eflags
147 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit killed $eflags
148148 ; CHECK: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm %stack.0, 1, $noreg, 0, $noreg :: (load 8 from %stack.0)
149149 ; CHECK: MOV64mr [[MOV64rm]], 1, $noreg, 0, $noreg, [[ADCX64rm]] :: (store 8 into %ir.4, align 1)
150 ; CHECK: $al = COPY [[SETBr]]
150 ; CHECK: $al = COPY [[SETCCr]]
151151 ; CHECK: RET 0, $al
152152 %3:gr64 = COPY $rcx
153153 %2:gr64 = COPY $rdx
156156 INLINEASM &nop, 1, 3145738, def dead %4, 12, implicit-def dead early-clobber $rax, 12, implicit-def dead early-clobber $rbx, 12, implicit-def dead early-clobber $rcx, 12, implicit-def dead early-clobber $rdx, 12, implicit-def dead early-clobber $rsi, 12, implicit-def dead early-clobber $rdi, 12, implicit-def dead early-clobber $rbp, 12, implicit-def dead early-clobber $r8, 12, implicit-def dead early-clobber $r9, 12, implicit-def dead early-clobber $r10, 12, implicit-def dead early-clobber $r11, 12, implicit-def dead early-clobber $r12, 12, implicit-def dead early-clobber $r13, 12, implicit-def dead early-clobber $r14, 12, implicit-def dead early-clobber $r15
157157 dead %0.sub_8bit:gr32 = ADD8ri %0.sub_8bit, -1, implicit-def $eflags
158158 %7:gr64 = ADCX64rr %7, %2, implicit-def $eflags, implicit killed $eflags
159 %8:gr8 = SETBr implicit killed $eflags
159 %8:gr8 = SETCCr 2, implicit killed $eflags
160160 MOV64mr %3, 1, $noreg, 0, $noreg, %7 :: (store 8 into %ir.4, align 1)
161161 $al = COPY %8
162162 RET 0, killed $al
196196 ; CHECK: dead [[MOV32rm]].sub_8bit:gr32 = ADD8ri [[MOV32rm]].sub_8bit, 127, implicit-def $eflags
197197 ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %stack.2, 1, $noreg, 0, $noreg :: (load 4 from %stack.2)
198198 ; CHECK: [[ADOX32rm:%[0-9]+]]:gr32 = ADOX32rm [[ADOX32rm]], %stack.1, 1, $noreg, 0, $noreg, implicit-def $eflags, implicit killed $eflags :: (load 4 from %stack.1)
199 ; CHECK: [[SETOr:%[0-9]+]]:gr8 = SETOr implicit killed $eflags
199 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 0, implicit killed $eflags
200200 ; CHECK: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm %stack.0, 1, $noreg, 0, $noreg :: (load 8 from %stack.0)
201201 ; CHECK: MOV32mr [[MOV64rm]], 1, $noreg, 0, $noreg, [[ADOX32rm]] :: (store 4 into %ir.4, align 1)
202 ; CHECK: $al = COPY [[SETOr]]
202 ; CHECK: $al = COPY [[SETCCr]]
203203 ; CHECK: RET 0, $al
204204 %3:gr64 = COPY $rcx
205205 %2:gr32 = COPY $edx
208208 INLINEASM &nop, 1, 3145738, def dead %4, 12, implicit-def dead early-clobber $rax, 12, implicit-def dead early-clobber $rbx, 12, implicit-def dead early-clobber $rcx, 12, implicit-def dead early-clobber $rdx, 12, implicit-def dead early-clobber $rsi, 12, implicit-def dead early-clobber $rdi, 12, implicit-def dead early-clobber $rbp, 12, implicit-def dead early-clobber $r8, 12, implicit-def dead early-clobber $r9, 12, implicit-def dead early-clobber $r10, 12, implicit-def dead early-clobber $r11, 12, implicit-def dead early-clobber $r12, 12, implicit-def dead early-clobber $r13, 12, implicit-def dead early-clobber $r14, 12, implicit-def dead early-clobber $r15
209209 dead %0.sub_8bit:gr32 = ADD8ri %0.sub_8bit, 127, implicit-def $eflags
210210 %7:gr32 = ADOX32rr %7, %2, implicit-def $eflags, implicit killed $eflags
211 %8:gr8 = SETOr implicit killed $eflags
211 %8:gr8 = SETCCr 0, implicit killed $eflags
212212 MOV32mr %3, 1, $noreg, 0, $noreg, %7 :: (store 4 into %ir.4, align 1)
213213 $al = COPY %8
214214 RET 0, killed $al
248248 ; CHECK: dead [[MOV32rm]].sub_8bit:gr32 = ADD8ri [[MOV32rm]].sub_8bit, 127, implicit-def $eflags
249249 ; CHECK: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm %stack.2, 1, $noreg, 0, $noreg :: (load 8 from %stack.2)
250250 ; CHECK: [[ADOX64rm:%[0-9]+]]:gr64 = ADOX64rm [[ADOX64rm]], %stack.1, 1, $noreg, 0, $noreg, implicit-def $eflags, implicit killed $eflags :: (load 8 from %stack.1)
251 ; CHECK: [[SETOr:%[0-9]+]]:gr8 = SETOr implicit killed $eflags
251 ; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 0, implicit killed $eflags
252252 ; CHECK: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm %stack.0, 1, $noreg, 0, $noreg :: (load 8 from %stack.0)
253253 ; CHECK: MOV64mr [[MOV64rm]], 1, $noreg, 0, $noreg, [[ADOX64rm]] :: (store 8 into %ir.4, align 1)
254 ; CHECK: $al = COPY [[SETOr]]
254 ; CHECK: $al = COPY [[SETCCr]]
255255 ; CHECK: RET 0, $al
256256 %3:gr64 = COPY $rcx
257257 %2:gr64 = COPY $rdx
260260 INLINEASM &nop, 1, 3145738, def dead %4, 12, implicit-def dead early-clobber $rax, 12, implicit-def dead early-clobber $rbx, 12, implicit-def dead early-clobber $rcx, 12, implicit-def dead early-clobber $rdx, 12, implicit-def dead early-clobber $rsi, 12, implicit-def dead early-clobber $rdi, 12, implicit-def dead early-clobber $rbp, 12, implicit-def dead early-clobber $r8, 12, implicit-def dead early-clobber $r9, 12, implicit-def dead early-clobber $r10, 12, implicit-def dead early-clobber $r11, 12, implicit-def dead early-clobber $r12, 12, implicit-def dead early-clobber $r13, 12, implicit-def dead early-clobber $r14, 12, implicit-def dead early-clobber $r15
261261 dead %0.sub_8bit:gr32 = ADD8ri %0.sub_8bit, 127, implicit-def $eflags
262262 %7:gr64 = ADOX64rr %7, %2, implicit-def $eflags, implicit killed $eflags
263 %8:gr8 = SETOr implicit killed $eflags
263 %8:gr8 = SETCCr 0, implicit killed $eflags
264264 MOV64mr %3, 1, $noreg, 0, $noreg, %7 :: (store 8 into %ir.4, align 1)
265265 $al = COPY %8
266266 RET 0, killed $al
3232 case X86II::MRMSrcReg4VOp3:
3333 case X86II::MRMSrcRegOp4:
3434 case X86II::MRMSrcRegCC:
35 case X86II::MRMXrCC:
3536 case X86II::MRMXr:
3637 case X86II::MRM0r:
3738 case X86II::MRM1r:
119120 case X86II::MRMSrcMem4VOp3:
120121 case X86II::MRMSrcMemOp4:
121122 case X86II::MRMSrcMemCC:
123 case X86II::MRMXmCC:
122124 case X86II::MRMXm:
123125 case X86II::MRM0m:
124126 case X86II::MRM1m:
633633 HANDLE_OPERAND(memory)
634634 HANDLE_OPERAND(opcodeModifier)
635635 break;
636 case X86Local::MRMXrCC:
637 assert(numPhysicalOperands == 2 &&
638 "Unexpected number of operands for MRMXrCC");
639 HANDLE_OPERAND(rmRegister)
640 HANDLE_OPERAND(opcodeModifier)
641 break;
636642 case X86Local::MRMXr:
637643 case X86Local::MRM0r:
638644 case X86Local::MRM1r:
658664 HANDLE_OPTIONAL(relocation)
659665 HANDLE_OPTIONAL(immediate)
660666 break;
667 case X86Local::MRMXmCC:
668 assert(numPhysicalOperands == 2 &&
669 "Unexpected number of operands for MRMXm");
670 HANDLE_OPERAND(memory)
671 HANDLE_OPERAND(opcodeModifier)
672 break;
661673 case X86Local::MRMXm:
662674 case X86Local::MRM0m:
663675 case X86Local::MRM1m:
743755 case X86Local::MRMSrcReg4VOp3:
744756 case X86Local::MRMSrcRegOp4:
745757 case X86Local::MRMSrcRegCC:
758 case X86Local::MRMXrCC:
746759 case X86Local::MRMXr:
747760 filter = llvm::make_unique<ModFilter>(true);
748761 break;
751764 case X86Local::MRMSrcMem4VOp3:
752765 case X86Local::MRMSrcMemOp4:
753766 case X86Local::MRMSrcMemCC:
767 case X86Local::MRMXmCC:
754768 case X86Local::MRMXm:
755769 filter = llvm::make_unique<ModFilter>(false);
756770 break;
784798 assert(filter && "Filter not set");
785799
786800 if (Form == X86Local::AddRegFrm || Form == X86Local::MRMSrcRegCC ||
787 Form == X86Local::MRMSrcMemCC) {
801 Form == X86Local::MRMSrcMemCC || Form == X86Local::MRMXrCC ||
802 Form == X86Local::MRMXmCC) {
788803 unsigned Count = Form == X86Local::AddRegFrm ? 8 : 16;
789804 assert(((opcodeToSet % Count) == 0) && "ADDREG_FRM opcode not aligned");
790805
105105 MRMSrcMem4VOp3 = 34,
106106 MRMSrcMemOp4 = 35,
107107 MRMSrcMemCC = 36,
108 MRMXm = 39,
108 MRMXmCC = 38, MRMXm = 39,
109109 MRM0m = 40, MRM1m = 41, MRM2m = 42, MRM3m = 43,
110110 MRM4m = 44, MRM5m = 45, MRM6m = 46, MRM7m = 47,
111111 MRMDestReg = 48,
113113 MRMSrcReg4VOp3 = 50,
114114 MRMSrcRegOp4 = 51,
115115 MRMSrcRegCC = 52,
116 MRMXr = 55,
116 MRMXrCC = 54, MRMXr = 55,
117117 MRM0r = 56, MRM1r = 57, MRM2r = 58, MRM3r = 59,
118118 MRM4r = 60, MRM5r = 61, MRM6r = 62, MRM7r = 63,
119119 #define MAP(from, to) MRM_##from = to,