llvm.org GIT mirror: llvm @ d849074
[X86] Merge the different CMOV instructions for each condition code into single instructions that store the condition code as an immediate.

Summary:
Reorder the condition code enum to match the hardware encodings, and move it to the MC layer so it can be used by the scheduler models.

This avoids needing an isel pattern for each condition code, and it removes the translation switches for converting between CMOV instructions and condition codes. Now the printer, encoder and disassembler take care of converting the immediate. We use InstAliases to handle the assembly matching, but we print using the asm string in the instruction definition. The instruction itself is marked isCodeGenOnly = 1 to hide it from the assembly parser.

This does complicate the scheduler models a little, since we can't assign the A and BE instructions to a separate class now.

I plan to make similar changes for SETcc and Jcc.

Reviewers: RKSimon, spatel, lebedev.ri, andreadb, courbet

Reviewed By: RKSimon

Subscribers: gchatelet, hiraditya, kristina, lebedev.ri, jdoerfert, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D60041

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@357800 91177308-0d34-0410-b5e6-96231b3b80d8

Craig Topper, 1 year, 3 months ago
40 changed files with 536 additions and 474 deletions.
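At the MachineInstr level, the before/after of this merge looks like the following pair (taken from the flags-copy-lowering.mir update at the end of this diff; 5 is X86::COND_NE in the reordered enum):

    ; before: one opcode per condition code
    %3:gr64 = CMOVNE64rr %0, %1, implicit $eflags
    ; after: one opcode per operand size, condition as a trailing immediate
    %3:gr64 = CMOV64rr %0, %1, 5, implicit $eflags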
   ENUM_ENTRY(ENCODING_IRC, "Immediate for static rounding control") \
   ENUM_ENTRY(ENCODING_Rv,  "Register code of operand size added to the " \
                            "opcode byte") \
+  ENUM_ENTRY(ENCODING_CC,  "Condition code encoded in opcode") \
   ENUM_ENTRY(ENCODING_DUP, "Duplicate of another operand; ID is encoded " \
                            "in type") \
   ENUM_ENTRY(ENCODING_SI,  "Source index; encoded in OpSize/Adsize prefix") \
   case ENCODING_Rv:
     translateRegister(mcInst, insn.opcodeRegister);
     return false;
+  case ENCODING_CC:
+    mcInst.addOperand(MCOperand::createImm(insn.immediates[0]));
+    return false;
   case ENCODING_FP:
     translateFPRegister(mcInst, insn.modRM & 7);
     return false;
     if (readOpcodeRegister(insn, 0))
       return -1;
     break;
+  case ENCODING_CC:
+    insn->immediates[0] = insn->opcode & 0xf;
+    break;
   case ENCODING_FP:
     break;
   case ENCODING_VVVV:
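Those two decoder hunks are the entire disassembler side of the change: ENCODING_CC pulls the condition out of the opcode byte and re-emits it as an immediate operand. A standalone sketch of that arithmetic (a hypothetical helper, not code from this patch):

    #include <cassert>
    #include <cstdint>

    // CMOVcc is encoded as 0x0F 0x40+cc, so the low nibble of the second
    // opcode byte is the condition code that becomes the trailing immediate.
    unsigned decodeCMovCondition(uint8_t OpcodeByte) {
      assert(OpcodeByte >= 0x40 && OpcodeByte <= 0x4F && "not a CMOVcc opcode");
      return OpcodeByte & 0xF; // 0 = o, 1 = no, 2 = b, ..., 0xf = g
    }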
 #include
 
 using namespace llvm;
+
+void X86InstPrinterCommon::printCondCode(const MCInst *MI, unsigned Op,
+                                         raw_ostream &O) {
+  int64_t Imm = MI->getOperand(Op).getImm();
+  switch (Imm) {
+  default: llvm_unreachable("Invalid condcode argument!");
+  case 0:   O << "o";  break;
+  case 1:   O << "no"; break;
+  case 2:   O << "b";  break;
+  case 3:   O << "ae"; break;
+  case 4:   O << "e";  break;
+  case 5:   O << "ne"; break;
+  case 6:   O << "be"; break;
+  case 7:   O << "a";  break;
+  case 8:   O << "s";  break;
+  case 9:   O << "ns"; break;
+  case 0xa: O << "p";  break;
+  case 0xb: O << "np"; break;
+  case 0xc: O << "l";  break;
+  case 0xd: O << "ge"; break;
+  case 0xe: O << "le"; break;
+  case 0xf: O << "g";  break;
+  }
+}
 
 void X86InstPrinterCommon::printSSEAVXCC(const MCInst *MI, unsigned Op,
                                          raw_ostream &O) {
   using MCInstPrinter::MCInstPrinter;
 
   virtual void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O) = 0;
+  void printCondCode(const MCInst *MI, unsigned Op, raw_ostream &OS);
   void printSSEAVXCC(const MCInst *MI, unsigned Op, raw_ostream &OS);
   void printVPCOMMnemonic(const MCInst *MI, raw_ostream &OS);
   void printVPCMPMnemonic(const MCInst *MI, raw_ostream &OS);
   enum OperandType : unsigned {
     /// AVX512 embedded rounding control. This should only have values 0-3.
     OPERAND_ROUNDING_CONTROL = MCOI::OPERAND_FIRST_TARGET,
+    OPERAND_COND_CODE,
+  };
+
+  // X86 specific condition code. These correspond to X86_*_COND in
+  // X86InstrInfo.td. They must be kept in synch.
+  enum CondCode {
+    COND_O = 0,
+    COND_NO = 1,
+    COND_B = 2,
+    COND_AE = 3,
+    COND_E = 4,
+    COND_NE = 5,
+    COND_BE = 6,
+    COND_A = 7,
+    COND_S = 8,
+    COND_NS = 9,
+    COND_P = 10,
+    COND_NP = 11,
+    COND_L = 12,
+    COND_GE = 13,
+    COND_LE = 14,
+    COND_G = 15,
+    LAST_VALID_COND = COND_G,
+
+    // Artificial condition codes. These are used by AnalyzeBranch
+    // to indicate a block terminated with two conditional branches that together
+    // form a compound condition. They occur in code using FCMP_OEQ or FCMP_UNE,
+    // which can't be represented on x86 with a single condition. These
+    // are never used in MachineInstrs and are inverses of one another.
+    COND_NE_OR_P,
+    COND_E_AND_NP,
+
+    COND_INVALID
   };
 } // end namespace X86;
 
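One property of the reordered enum is worth spelling out: because it now mirrors the hardware encoding, each condition and its inverse occupy an adjacent even/odd pair (o/no, b/ae, e/ne, ..., le/g). GetOppositeBranchCondition remains an explicit switch in LLVM, but the encoding guarantees the low-bit relationship this sketch relies on:

    // Sketch only: COND_E (4) ^ 1 == COND_NE (5), COND_B (2) ^ 1 == COND_AE (3).
    X86::CondCode invertCond(X86::CondCode CC) {
      assert(CC <= X86::LAST_VALID_COND && "artificial codes have no encoding");
      return static_cast<X86::CondCode>(CC ^ 1);
    }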
   ///
   MRMSrcMemOp4 = 35,
 
+  /// MRMSrcMemCC - This form is used for instructions that use the Mod/RM
+  /// byte to specify the operands and also encodes a condition code.
+  ///
+  MRMSrcMemCC = 36,
+
   /// MRMXm - This form is used for instructions that use the Mod/RM byte
   /// to specify a memory source, but doesn't use the middle field.
   ///
   /// byte to specify the fourth source, which in this case is a register.
   ///
   MRMSrcRegOp4 = 51,
+
+  /// MRMSrcRegCC - This form is used for instructions that use the Mod/RM
+  /// byte to specify the operands and also encodes a condition code.
+  ///
+  MRMSrcRegCC = 52,
 
   /// MRMXr - This form is used for instructions that use the Mod/RM byte
   /// to specify a register source, but doesn't use the middle field.
   case X86II::MRMSrcMemOp4:
     // Skip registers encoded in reg, VEX_VVVV, and I8IMM.
     return 3;
+  case X86II::MRMSrcMemCC:
+    // Start from 1, skip any registers encoded in VEX_VVVV or I8IMM, or a
+    // mask register.
+    return 1;
   case X86II::MRMDestReg:
   case X86II::MRMSrcReg:
   case X86II::MRMSrcReg4VOp3:
   case X86II::MRMSrcRegOp4:
+  case X86II::MRMSrcRegCC:
   case X86II::MRMXr:
   case X86II::MRM0r: case X86II::MRM1r:
   case X86II::MRM2r: case X86II::MRM3r:
     REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
     break;
   case X86II::MRMSrcReg:
+  case X86II::MRMSrcRegCC:
     REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
     REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
     break;
-  case X86II::MRMSrcMem: {
+  case X86II::MRMSrcMem:
+  case X86II::MRMSrcMemCC:
     REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
     REX |= isREXExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
     REX |= isREXExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
     CurOp += X86::AddrNumOperands;
     break;
-  }
   case X86II::MRMDestReg:
     REX |= isREXExtendedReg(MI, CurOp++) << 0; // REX.B
     REX |= isREXExtendedReg(MI, CurOp++) << 2; // REX.R
     CurOp = SrcRegNum + 1;
     break;
   }
+  case X86II::MRMSrcRegCC: {
+    unsigned FirstOp = CurOp++;
+    unsigned SecondOp = CurOp++;
+
+    unsigned CC = MI.getOperand(CurOp++).getImm();
+    EmitByte(BaseOpcode + CC, CurByte, OS);
+
+    EmitRegModRMByte(MI.getOperand(SecondOp),
+                     GetX86RegNum(MI.getOperand(FirstOp)), CurByte, OS);
+    break;
+  }
   case X86II::MRMSrcMem: {
     unsigned FirstMemOp = CurOp+1;
 
     emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
                      TSFlags, Rex, CurByte, OS, Fixups, STI);
     CurOp = FirstMemOp + X86::AddrNumOperands;
+    break;
+  }
+  case X86II::MRMSrcMemCC: {
+    unsigned RegOp = CurOp++;
+    unsigned FirstMemOp = CurOp;
+    CurOp = FirstMemOp + X86::AddrNumOperands;
+
+    unsigned CC = MI.getOperand(CurOp++).getImm();
+    EmitByte(BaseOpcode + CC, CurByte, OS);
+
+    emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(RegOp)),
+                     TSFlags, Rex, CurByte, OS, Fixups, STI);
     break;
   }
 
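A worked example of the EmitByte(BaseOpcode + CC, ...) lines above: the merged CMOV definitions use base opcode 0x40 under the mandatory 0x0F escape, so cmovne %ebx, %eax (CC = 5) encodes as

    0f 45 c3    ; 0x45 = 0x40 + COND_NE(5); ModRM 0xc3 = mod 11, reg = eax, rm = ebx

which is byte-for-byte what the old per-condition CMOVNE32rr definition produced.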
     // Skip debug instructions.
     if (I.isDebugInstr())
       continue;
-    X86::CondCode CC = X86::getCondFromCMovOpc(I.getOpcode());
+    X86::CondCode CC = X86::getCondFromCMov(I);
     // Check if we found a X86::CMOVrr instruction.
     if (CC != X86::COND_INVALID && (IncludeLoads || !I.mayLoad())) {
       if (Group.empty()) {
   }
 
   unsigned CondCost =
-      DepthMap[OperandToDefMap.lookup(&MI->getOperand(3))].Depth;
+      DepthMap[OperandToDefMap.lookup(&MI->getOperand(4))].Depth;
   unsigned ValCost = getDepthOfOptCmov(
       DepthMap[OperandToDefMap.lookup(&MI->getOperand(1))].Depth,
       DepthMap[OperandToDefMap.lookup(&MI->getOperand(2))].Depth);
 /// move all debug instructions to after the last CMOV instruction, making the
 /// CMOV group consecutive.
 static void packCmovGroup(MachineInstr *First, MachineInstr *Last) {
-  assert(X86::getCondFromCMovOpc(Last->getOpcode()) != X86::COND_INVALID &&
+  assert(X86::getCondFromCMov(*Last) != X86::COND_INVALID &&
          "Last instruction in a CMOV group must be a CMOV instruction");
 
   SmallVector<MachineInstr *, 2> DBGInstructions;
   MachineInstr *LastCMOV = Group.back();
   DebugLoc DL = MI.getDebugLoc();
 
-  X86::CondCode CC = X86::CondCode(X86::getCondFromCMovOpc(MI.getOpcode()));
+  X86::CondCode CC = X86::CondCode(X86::getCondFromCMov(MI));
   X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
   // Potentially swap the condition codes so that any memory operand to a CMOV
   // is in the *false* position instead of the *true* position. We can invert
   // any non-memory operand CMOV instructions to cope with this and we ensure
   // memory operand CMOVs are only included with a single condition code.
   if (llvm::any_of(Group, [&](MachineInstr *I) {
-        return I->mayLoad() && X86::getCondFromCMovOpc(I->getOpcode()) == CC;
+        return I->mayLoad() && X86::getCondFromCMov(*I) == CC;
       }))
     std::swap(CC, OppCC);
 
   if (!MI.mayLoad()) {
     // Remember the false-side register input.
     unsigned FalseReg =
-        MI.getOperand(X86::getCondFromCMovOpc(MI.getOpcode()) == CC ? 1 : 2)
-            .getReg();
+        MI.getOperand(X86::getCondFromCMov(MI) == CC ? 1 : 2).getReg();
     // Walk back through any intermediate cmovs referenced.
     while (true) {
       auto FRIt = FalseBBRegRewriteTable.find(FalseReg);
   // The condition must be the *opposite* of the one we've decided to branch
   // on as the branch will go *around* the load and the load should happen
   // when the CMOV condition is false.
-  assert(X86::getCondFromCMovOpc(MI.getOpcode()) == OppCC &&
+  assert(X86::getCondFromCMov(MI) == OppCC &&
          "Can only handle memory-operand cmov instructions with a condition "
          "opposite to the selected branch direction.");
 
   // Move the new CMOV to just before the old one and reset any impacted
   // iterator.
   auto *NewCMOV = NewMIs.pop_back_val();
-  assert(X86::getCondFromCMovOpc(NewCMOV->getOpcode()) == OppCC &&
+  assert(X86::getCondFromCMov(*NewCMOV) == OppCC &&
          "Last new instruction isn't the expected CMOV!");
   LLVM_DEBUG(dbgs() << "\tRewritten cmov: "; NewCMOV->dump());
   MBB->insert(MachineBasicBlock::iterator(MI), NewCMOV);
   // If this CMOV we are processing is the opposite condition from the jump we
   // generated, then we have to swap the operands for the PHI that is going to
   // be generated.
-  if (X86::getCondFromCMovOpc(MIIt->getOpcode()) == OppCC)
+  if (X86::getCondFromCMov(*MIIt) == OppCC)
     std::swap(Op1Reg, Op2Reg);
 
   auto Op1Itr = RegRewriteTable.find(Op1Reg);
     return false;
 
   const TargetRegisterInfo &TRI = *Subtarget->getRegisterInfo();
-  unsigned Opc = X86::getCMovFromCond(CC, TRI.getRegSizeInBits(*RC)/8);
-  unsigned ResultReg = fastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
-                                       LHSReg, LHSIsKill);
+  unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(*RC)/8);
+  unsigned ResultReg = fastEmitInst_rri(Opc, RC, RHSReg, RHSIsKill,
+                                        LHSReg, LHSIsKill, CC);
   updateValueMap(I, ResultReg);
   return true;
 }
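The FastISel hunk swaps fastEmitInst_rr for fastEmitInst_rri so the condition travels as the trailing immediate operand. The same construction pattern recurs throughout the patch; a condensed sketch (mirroring the insertSelect hunk further down):

    // dst = cond ? TrueReg : FalseReg, with the condition as an operand.
    unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(RC) / 8);
    BuildMI(MBB, I, DL, TII->get(Opc), DstReg)
        .addReg(FalseReg)
        .addReg(TrueReg)
        .addImm(CC); // e.g. X86::COND_NE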
   }
 
   // Otherwise we can just rewrite in-place.
-  if (X86::getCondFromCMovOpc(MI.getOpcode()) != X86::COND_INVALID) {
+  if (X86::getCondFromCMov(MI) != X86::COND_INVALID) {
     rewriteCMov(*TestMBB, TestPos, TestLoc, MI, *FlagUse, CondRegs);
   } else if (X86::getCondFromSETOpc(MI.getOpcode()) !=
              X86::COND_INVALID) {
                              MachineOperand &FlagUse,
                              CondRegArray &CondRegs) {
   // First get the register containing this specific condition.
-  X86::CondCode Cond = X86::getCondFromCMovOpc(CMovI.getOpcode());
+  X86::CondCode Cond = X86::getCondFromCMov(CMovI);
   unsigned CondReg;
   bool Inverted;
   std::tie(CondReg, Inverted) =
 
   // Insert a direct test of the saved register.
   insertTest(MBB, CMovI.getIterator(), CMovI.getDebugLoc(), CondReg);
 
-  // Rewrite the CMov to use the !ZF flag from the test (but match register
-  // size and memory operand), and then kill its use of the flags afterward.
-  auto &CMovRC = *MRI->getRegClass(CMovI.getOperand(0).getReg());
-  CMovI.setDesc(TII->get(X86::getCMovFromCond(
-      Inverted ? X86::COND_E : X86::COND_NE, TRI->getRegSizeInBits(CMovRC) / 8,
-      !CMovI.memoperands_empty())));
+  // Rewrite the CMov to use the !ZF flag from the test, and then kill its use
+  // of the flags afterward.
+  CMovI.getOperand(CMovI.getDesc().getNumOperands() - 1)
+      .setImm(Inverted ? X86::COND_E : X86::COND_NE);
   FlagUse.setIsKill(true);
   LLVM_DEBUG(dbgs() << "    fixed cmov: "; CMovI.dump());
 }
   BuildMI(&MBB, DL, TII.get(X86::SUB64rr), TestReg)
       .addReg(CopyReg)
       .addReg(SizeReg);
-  BuildMI(&MBB, DL, TII.get(X86::CMOVB64rr), FinalReg)
+  BuildMI(&MBB, DL, TII.get(X86::CMOV64rr), FinalReg)
       .addReg(TestReg)
-      .addReg(ZeroReg);
+      .addReg(ZeroReg)
+      .addImm(X86::COND_B);
 
   // FinalReg now holds final stack pointer value, or zero if
   // allocation would overflow. Compare against the current stack
          CR->getSignedMax().slt(1ull << Width);
 }
 
-static X86::CondCode getCondFromOpc(unsigned Opc) {
+static X86::CondCode getCondFromNode(SDNode *N) {
+  assert(N->isMachineOpcode() && "Unexpected node");
   X86::CondCode CC = X86::COND_INVALID;
   if (CC == X86::COND_INVALID)
-    CC = X86::getCondFromBranchOpc(Opc);
+    CC = X86::getCondFromBranchOpc(N->getMachineOpcode());
   if (CC == X86::COND_INVALID)
-    CC = X86::getCondFromSETOpc(Opc);
-  if (CC == X86::COND_INVALID)
-    CC = X86::getCondFromCMovOpc(Opc);
+    CC = X86::getCondFromSETOpc(N->getMachineOpcode());
+  if (CC == X86::COND_INVALID) {
+    unsigned Opc = N->getMachineOpcode();
+    if (Opc == X86::CMOV16rr || Opc == X86::CMOV32rr || Opc == X86::CMOV64rr)
+      CC = static_cast<X86::CondCode>(N->getConstantOperandVal(2));
+    else if (Opc == X86::CMOV16rm || Opc == X86::CMOV32rm ||
+             Opc == X86::CMOV64rm)
+      CC = static_cast<X86::CondCode>(N->getConstantOperandVal(6));
+  }
 
   return CC;
 }
     // Anything unusual: assume conservatively.
     if (!FlagUI->isMachineOpcode()) return false;
     // Examine the condition code of the user.
-    X86::CondCode CC = getCondFromOpc(FlagUI->getMachineOpcode());
+    X86::CondCode CC = getCondFromNode(*FlagUI);
 
     switch (CC) {
     // Comparisons which only use the zero flag.
     // Anything unusual: assume conservatively.
     if (!FlagUI->isMachineOpcode()) return false;
     // Examine the condition code of the user.
-    X86::CondCode CC = getCondFromOpc(FlagUI->getMachineOpcode());
+    X86::CondCode CC = getCondFromNode(*FlagUI);
 
     switch (CC) {
     // Comparisons which don't examine the SF flag.
     if (!FlagUI->isMachineOpcode())
       return false;
     // Examine the condition code of the user.
-    X86::CondCode CC = getCondFromOpc(FlagUI->getMachineOpcode());
+    X86::CondCode CC = getCondFromNode(*FlagUI);
 
     if (mayUseCarryFlag(CC))
       return false;
 
 
 // CMOV instructions.
-multiclass CMOV<bits<8> opc, string Mnemonic, X86FoldableSchedWrite Sched,
-                PatLeaf CondNode> {
-  let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst",
-      isCommutable = 1, SchedRW = [Sched] in {
-    def NAME#16rr
-      : I<opc, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-          !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
-          [(set GR16:$dst,
-                (X86cmov GR16:$src1, GR16:$src2, CondNode, EFLAGS))]>,
-        TB, OpSize16;
-    def NAME#32rr
-      : I<opc, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-          !strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
-          [(set GR32:$dst,
-                (X86cmov GR32:$src1, GR32:$src2, CondNode, EFLAGS))]>,
-        TB, OpSize32;
-    def NAME#64rr
-      :RI<opc, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-          !strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
-          [(set GR64:$dst,
-                (X86cmov GR64:$src1, GR64:$src2, CondNode, EFLAGS))]>, TB;
-  }
+let isCodeGenOnly = 1, ForceDisassemble = 1 in {
+  let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst",
+      isCommutable = 1, SchedRW = [WriteCMOV] in {
+    def CMOV16rr
+      : I<0x40, MRMSrcRegCC, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, ccode:$cond),
+          "cmov${cond}{w}\t{$src2, $dst|$dst, $src2}",
+          [(set GR16:$dst,
+                (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>,
+        TB, OpSize16;
+    def CMOV32rr
+      : I<0x40, MRMSrcRegCC, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, ccode:$cond),
+          "cmov${cond}{l}\t{$src2, $dst|$dst, $src2}",
+          [(set GR32:$dst,
+                (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>,
+        TB, OpSize32;
+    def CMOV64rr
+      :RI<0x40, MRMSrcRegCC, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, ccode:$cond),
+          "cmov${cond}{q}\t{$src2, $dst|$dst, $src2}",
+          [(set GR64:$dst,
+                (X86cmov GR64:$src1, GR64:$src2, imm:$cond, EFLAGS))]>, TB;
+  }
 
-  let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst",
-      SchedRW = [Sched.Folded, Sched.ReadAfterFold] in {
-    def NAME#16rm
-      : I<opc, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-          !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
-          [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                    CondNode, EFLAGS))]>, TB, OpSize16;
-    def NAME#32rm
-      : I<opc, MRMSrcMem, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-          !strconcat(Mnemonic, "{l}\t{$src2, $dst|$dst, $src2}"),
-          [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                    CondNode, EFLAGS))]>, TB, OpSize32;
-    def NAME#64rm
-      :RI<opc, MRMSrcMem, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-          !strconcat(Mnemonic, "{q}\t{$src2, $dst|$dst, $src2}"),
-          [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                    CondNode, EFLAGS))]>, TB;
-  } // Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst"
-} // end multiclass
+  let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst",
+      SchedRW = [WriteCMOV.Folded, WriteCMOV.ReadAfterFold] in {
+    def CMOV16rm
+      : I<0x40, MRMSrcMemCC, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2, ccode:$cond),
+          "cmov${cond}{w}\t{$src2, $dst|$dst, $src2}",
+          [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
+                                    imm:$cond, EFLAGS))]>, TB, OpSize16;
+    def CMOV32rm
+      : I<0x40, MRMSrcMemCC, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2, ccode:$cond),
+          "cmov${cond}{l}\t{$src2, $dst|$dst, $src2}",
+          [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
+                                    imm:$cond, EFLAGS))]>, TB, OpSize32;
+    def CMOV64rm
+      :RI<0x40, MRMSrcMemCC, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2, ccode:$cond),
+          "cmov${cond}{q}\t{$src2, $dst|$dst, $src2}",
+          [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
+                                    imm:$cond, EFLAGS))]>, TB;
+  } // Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst"
+} // isCodeGenOnly = 1, ForceDisassemble = 1
 
+multiclass CMOV_Aliases<string Cond, int CC> {
+  def : InstAlias<Cond#"{w}\t{$src, $dst|$dst, $src}",
+                  (CMOV16rr GR16:$dst, GR16:$src, CC), 0>;
+  def : InstAlias<Cond#"{w}\t{$src, $dst|$dst, $src}",
+                  (CMOV16rm GR16:$dst, i16mem:$src, CC), 0>;
+  def : InstAlias<Cond#"{l}\t{$src, $dst|$dst, $src}",
+                  (CMOV32rr GR32:$dst, GR32:$src, CC), 0>;
+  def : InstAlias<Cond#"{l}\t{$src, $dst|$dst, $src}",
+                  (CMOV32rm GR32:$dst, i32mem:$src, CC), 0>;
+  def : InstAlias<Cond#"{q}\t{$src, $dst|$dst, $src}",
+                  (CMOV64rr GR64:$dst, GR64:$src, CC), 0>;
+  def : InstAlias<Cond#"{q}\t{$src, $dst|$dst, $src}",
+                  (CMOV64rm GR64:$dst, i64mem:$src, CC), 0>;
+}
 
-// Conditional Moves.
-defm CMOVO  : CMOV<0x40, "cmovo" , WriteCMOV,  X86_COND_O>;
-defm CMOVNO : CMOV<0x41, "cmovno", WriteCMOV,  X86_COND_NO>;
-defm CMOVB  : CMOV<0x42, "cmovb" , WriteCMOV,  X86_COND_B>;
-defm CMOVAE : CMOV<0x43, "cmovae", WriteCMOV,  X86_COND_AE>;
-defm CMOVE  : CMOV<0x44, "cmove" , WriteCMOV,  X86_COND_E>;
-defm CMOVNE : CMOV<0x45, "cmovne", WriteCMOV,  X86_COND_NE>;
-defm CMOVBE : CMOV<0x46, "cmovbe", WriteCMOV2, X86_COND_BE>;
-defm CMOVA  : CMOV<0x47, "cmova" , WriteCMOV2, X86_COND_A>;
-defm CMOVS  : CMOV<0x48, "cmovs" , WriteCMOV,  X86_COND_S>;
-defm CMOVNS : CMOV<0x49, "cmovns", WriteCMOV,  X86_COND_NS>;
-defm CMOVP  : CMOV<0x4A, "cmovp" , WriteCMOV,  X86_COND_P>;
-defm CMOVNP : CMOV<0x4B, "cmovnp", WriteCMOV,  X86_COND_NP>;
-defm CMOVL  : CMOV<0x4C, "cmovl" , WriteCMOV,  X86_COND_L>;
-defm CMOVGE : CMOV<0x4D, "cmovge", WriteCMOV,  X86_COND_GE>;
-defm CMOVLE : CMOV<0x4E, "cmovle", WriteCMOV,  X86_COND_LE>;
-defm CMOVG  : CMOV<0x4F, "cmovg" , WriteCMOV,  X86_COND_G>;
+defm : CMOV_Aliases<"cmovo" , 0>;
+defm : CMOV_Aliases<"cmovno", 1>;
+defm : CMOV_Aliases<"cmovb" , 2>;
+defm : CMOV_Aliases<"cmovae", 3>;
+defm : CMOV_Aliases<"cmove" , 4>;
+defm : CMOV_Aliases<"cmovne", 5>;
+defm : CMOV_Aliases<"cmovbe", 6>;
+defm : CMOV_Aliases<"cmova" , 7>;
+defm : CMOV_Aliases<"cmovs" , 8>;
+defm : CMOV_Aliases<"cmovns", 9>;
+defm : CMOV_Aliases<"cmovp" , 10>;
+defm : CMOV_Aliases<"cmovnp", 11>;
+defm : CMOV_Aliases<"cmovl" , 12>;
+defm : CMOV_Aliases<"cmovge", 13>;
+defm : CMOV_Aliases<"cmovle", 14>;
+defm : CMOV_Aliases<"cmovg" , 15>;
 
 
 // SetCC instructions.
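The InstAliases plus the cmov${cond} asm string give a full assembler/printer round trip. Expected behavior with a stock llvm-mc (ordinary usage, not part of the patch):

    $ echo 'cmovne %ebx, %eax' | llvm-mc -triple=x86_64 -show-encoding
            cmovne  %ebx, %eax      # encoding: [0x0f,0x45,0xc3]

The parser matches the alias and fills in the condition immediate (5); the printer reconstitutes the "ne" suffix from that immediate via printCondCode.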
 def : Pat<(X86cmp GR64:$src1, 0),
           (TEST64rr GR64:$src1, GR64:$src1)>;
 
+def inv_cond_XFORM : SDNodeXForm<imm, [{
+  X86::CondCode CC = static_cast<X86::CondCode>(N->getZExtValue());
+  return CurDAG->getTargetConstant(X86::GetOppositeBranchCondition(CC),
+                                   SDLoc(N), MVT::i8);
+}]>;
+
 // Conditional moves with folded loads with operands swapped and conditions
 // inverted.
-multiclass CMOVmr<PatLeaf InvertedCond, Instruction Inst16, Instruction Inst32,
-                  Instruction Inst64> {
-  let Predicates = [HasCMov] in {
-    def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, InvertedCond, EFLAGS),
-              (Inst16 GR16:$src2, addr:$src1)>;
-    def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, InvertedCond, EFLAGS),
-              (Inst32 GR32:$src2, addr:$src1)>;
-    def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, InvertedCond, EFLAGS),
-              (Inst64 GR64:$src2, addr:$src1)>;
-  }
-}
-
-defm : CMOVmr<X86_COND_B , CMOVAE16rm, CMOVAE32rm, CMOVAE64rm>;
-defm : CMOVmr<X86_COND_AE, CMOVB16rm , CMOVB32rm , CMOVB64rm>;
-defm : CMOVmr<X86_COND_E , CMOVNE16rm, CMOVNE32rm, CMOVNE64rm>;
-defm : CMOVmr<X86_COND_NE, CMOVE16rm , CMOVE32rm , CMOVE64rm>;
-defm : CMOVmr<X86_COND_BE, CMOVA16rm , CMOVA32rm , CMOVA64rm>;
-defm : CMOVmr<X86_COND_A , CMOVBE16rm, CMOVBE32rm, CMOVBE64rm>;
-defm : CMOVmr<X86_COND_L , CMOVGE16rm, CMOVGE32rm, CMOVGE64rm>;
-defm : CMOVmr<X86_COND_GE, CMOVL16rm , CMOVL32rm , CMOVL64rm>;
-defm : CMOVmr<X86_COND_LE, CMOVG16rm , CMOVG32rm , CMOVG64rm>;
-defm : CMOVmr<X86_COND_G , CMOVLE16rm, CMOVLE32rm, CMOVLE64rm>;
-defm : CMOVmr<X86_COND_P , CMOVNP16rm, CMOVNP32rm, CMOVNP64rm>;
-defm : CMOVmr<X86_COND_NP, CMOVP16rm , CMOVP32rm , CMOVP64rm>;
-defm : CMOVmr<X86_COND_S , CMOVNS16rm, CMOVNS32rm, CMOVNS64rm>;
-defm : CMOVmr<X86_COND_NS, CMOVS16rm , CMOVS32rm , CMOVS64rm>;
-defm : CMOVmr<X86_COND_O , CMOVNO16rm, CMOVNO32rm, CMOVNO64rm>;
-defm : CMOVmr<X86_COND_NO, CMOVO16rm , CMOVO32rm , CMOVO64rm>;
+let Predicates = [HasCMov] in {
+  def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, imm:$cond, EFLAGS),
+            (CMOV16rm GR16:$src2, addr:$src1, (inv_cond_XFORM imm:$cond))>;
+  def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, imm:$cond, EFLAGS),
+            (CMOV32rm GR32:$src2, addr:$src1, (inv_cond_XFORM imm:$cond))>;
+  def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, imm:$cond, EFLAGS),
+            (CMOV64rm GR64:$src2, addr:$src1, (inv_cond_XFORM imm:$cond))>;
+}
 
 // zextload bool -> zextload byte
 // i1 stored in one byte in zero-extended form.
   { X86::BLENDPSrri,  X86::BLENDPSrmi,  TB_ALIGN_16 },
   { X86::BLENDVPDrr0, X86::BLENDVPDrm0, TB_ALIGN_16 },
   { X86::BLENDVPSrr0, X86::BLENDVPSrm0, TB_ALIGN_16 },
-  { X86::CMOVA16rr,   X86::CMOVA16rm,   0 },
-  { X86::CMOVA32rr,   X86::CMOVA32rm,   0 },
-  { X86::CMOVA64rr,   X86::CMOVA64rm,   0 },
-  { X86::CMOVAE16rr,  X86::CMOVAE16rm,  0 },
-  { X86::CMOVAE32rr,  X86::CMOVAE32rm,  0 },
-  { X86::CMOVAE64rr,  X86::CMOVAE64rm,  0 },
-  { X86::CMOVB16rr,   X86::CMOVB16rm,   0 },
-  { X86::CMOVB32rr,   X86::CMOVB32rm,   0 },
-  { X86::CMOVB64rr,   X86::CMOVB64rm,   0 },
-  { X86::CMOVBE16rr,  X86::CMOVBE16rm,  0 },
-  { X86::CMOVBE32rr,  X86::CMOVBE32rm,  0 },
-  { X86::CMOVBE64rr,  X86::CMOVBE64rm,  0 },
-  { X86::CMOVE16rr,   X86::CMOVE16rm,   0 },
-  { X86::CMOVE32rr,   X86::CMOVE32rm,   0 },
-  { X86::CMOVE64rr,   X86::CMOVE64rm,   0 },
-  { X86::CMOVG16rr,   X86::CMOVG16rm,   0 },
-  { X86::CMOVG32rr,   X86::CMOVG32rm,   0 },
-  { X86::CMOVG64rr,   X86::CMOVG64rm,   0 },
-  { X86::CMOVGE16rr,  X86::CMOVGE16rm,  0 },
-  { X86::CMOVGE32rr,  X86::CMOVGE32rm,  0 },
-  { X86::CMOVGE64rr,  X86::CMOVGE64rm,  0 },
-  { X86::CMOVL16rr,   X86::CMOVL16rm,   0 },
-  { X86::CMOVL32rr,   X86::CMOVL32rm,   0 },
-  { X86::CMOVL64rr,   X86::CMOVL64rm,   0 },
-  { X86::CMOVLE16rr,  X86::CMOVLE16rm,  0 },
-  { X86::CMOVLE32rr,  X86::CMOVLE32rm,  0 },
-  { X86::CMOVLE64rr,  X86::CMOVLE64rm,  0 },
-  { X86::CMOVNE16rr,  X86::CMOVNE16rm,  0 },
-  { X86::CMOVNE32rr,  X86::CMOVNE32rm,  0 },
-  { X86::CMOVNE64rr,  X86::CMOVNE64rm,  0 },
-  { X86::CMOVNO16rr,  X86::CMOVNO16rm,  0 },
-  { X86::CMOVNO32rr,  X86::CMOVNO32rm,  0 },
-  { X86::CMOVNO64rr,  X86::CMOVNO64rm,  0 },
-  { X86::CMOVNP16rr,  X86::CMOVNP16rm,  0 },
-  { X86::CMOVNP32rr,  X86::CMOVNP32rm,  0 },
-  { X86::CMOVNP64rr,  X86::CMOVNP64rm,  0 },
-  { X86::CMOVNS16rr,  X86::CMOVNS16rm,  0 },
-  { X86::CMOVNS32rr,  X86::CMOVNS32rm,  0 },
-  { X86::CMOVNS64rr,  X86::CMOVNS64rm,  0 },
-  { X86::CMOVO16rr,   X86::CMOVO16rm,   0 },
-  { X86::CMOVO32rr,   X86::CMOVO32rm,   0 },
-  { X86::CMOVO64rr,   X86::CMOVO64rm,   0 },
-  { X86::CMOVP16rr,   X86::CMOVP16rm,   0 },
-  { X86::CMOVP32rr,   X86::CMOVP32rm,   0 },
-  { X86::CMOVP64rr,   X86::CMOVP64rm,   0 },
-  { X86::CMOVS16rr,   X86::CMOVS16rm,   0 },
-  { X86::CMOVS32rr,   X86::CMOVS32rm,   0 },
-  { X86::CMOVS64rr,   X86::CMOVS64rm,   0 },
+  { X86::CMOV16rr,    X86::CMOV16rm,    0 },
+  { X86::CMOV32rr,    X86::CMOV32rm,    0 },
+  { X86::CMOV64rr,    X86::CMOV64rm,    0 },
   { X86::CMPPDrri,    X86::CMPPDrmi,    TB_ALIGN_16 },
   { X86::CMPPSrri,    X86::CMPPSrmi,    TB_ALIGN_16 },
   { X86::CMPSDrr,     X86::CMPSDrm,     0 },
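With the fold table collapsed from 48 CMOVcc entries to three, register-to-memory folding is condition-blind: the condition immediate is carried over unchanged to the rm form. A hypothetical spill scenario:

    cmovne %ecx, %eax       ; CMOV32rr ..., 5
    ; after folding a reload of the spilled %ecx:
    cmovne 8(%rsp), %eax    ; CMOV32rm ..., 5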
 def MRMSrcMem      : Format<33>;
 def MRMSrcMem4VOp3 : Format<34>;
 def MRMSrcMemOp4   : Format<35>;
+def MRMSrcMemCC    : Format<36>;
 def MRMXm          : Format<39>;
 def MRM0m : Format<40>; def MRM1m : Format<41>; def MRM2m : Format<42>;
 def MRM3m : Format<43>; def MRM4m : Format<44>; def MRM5m : Format<45>;
 def MRMSrcReg      : Format<49>;
 def MRMSrcReg4VOp3 : Format<50>;
 def MRMSrcRegOp4   : Format<51>;
+def MRMSrcRegCC    : Format<52>;
 def MRMXr          : Format<55>;
 def MRM0r : Format<56>; def MRM1r : Format<57>; def MRM2r : Format<58>;
 def MRM3r : Format<59>; def MRM4r : Format<60>; def MRM5r : Format<61>;
     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                    OpIdx1, OpIdx2);
   }
-  case X86::CMOVB16rr:  case X86::CMOVB32rr:  case X86::CMOVB64rr:
-  case X86::CMOVAE16rr: case X86::CMOVAE32rr: case X86::CMOVAE64rr:
-  case X86::CMOVE16rr:  case X86::CMOVE32rr:  case X86::CMOVE64rr:
-  case X86::CMOVNE16rr: case X86::CMOVNE32rr: case X86::CMOVNE64rr:
-  case X86::CMOVBE16rr: case X86::CMOVBE32rr: case X86::CMOVBE64rr:
-  case X86::CMOVA16rr:  case X86::CMOVA32rr:  case X86::CMOVA64rr:
-  case X86::CMOVL16rr:  case X86::CMOVL32rr:  case X86::CMOVL64rr:
-  case X86::CMOVGE16rr: case X86::CMOVGE32rr: case X86::CMOVGE64rr:
-  case X86::CMOVLE16rr: case X86::CMOVLE32rr: case X86::CMOVLE64rr:
-  case X86::CMOVG16rr:  case X86::CMOVG32rr:  case X86::CMOVG64rr:
-  case X86::CMOVS16rr:  case X86::CMOVS32rr:  case X86::CMOVS64rr:
-  case X86::CMOVNS16rr: case X86::CMOVNS32rr: case X86::CMOVNS64rr:
-  case X86::CMOVP16rr:  case X86::CMOVP32rr:  case X86::CMOVP64rr:
-  case X86::CMOVNP16rr: case X86::CMOVNP32rr: case X86::CMOVNP64rr:
-  case X86::CMOVO16rr:  case X86::CMOVO32rr:  case X86::CMOVO64rr:
-  case X86::CMOVNO16rr: case X86::CMOVNO32rr: case X86::CMOVNO64rr: {
-    unsigned Opc;
-    switch (MI.getOpcode()) {
-    default: llvm_unreachable("Unreachable!");
-    case X86::CMOVB16rr:  Opc = X86::CMOVAE16rr; break;
-    case X86::CMOVB32rr:  Opc = X86::CMOVAE32rr; break;
-    case X86::CMOVB64rr:  Opc = X86::CMOVAE64rr; break;
-    case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break;
-    case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break;
-    case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break;
-    case X86::CMOVE16rr:  Opc = X86::CMOVNE16rr; break;
-    case X86::CMOVE32rr:  Opc = X86::CMOVNE32rr; break;
-    case X86::CMOVE64rr:  Opc = X86::CMOVNE64rr; break;
-    case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break;
-    case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break;
-    case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break;
-    case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break;
-    case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break;
-    case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break;
-    case X86::CMOVA16rr:  Opc = X86::CMOVBE16rr; break;
-    case X86::CMOVA32rr:  Opc = X86::CMOVBE32rr; break;
-    case X86::CMOVA64rr:  Opc = X86::CMOVBE64rr; break;
-    case X86::CMOVL16rr:  Opc = X86::CMOVGE16rr; break;
-    case X86::CMOVL32rr:  Opc = X86::CMOVGE32rr; break;
-    case X86::CMOVL64rr:  Opc = X86::CMOVGE64rr; break;
-    case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break;
-    case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break;
-    case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break;
-    case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break;
-    case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break;
-    case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break;
-    case X86::CMOVG16rr:  Opc = X86::CMOVLE16rr; break;
-    case X86::CMOVG32rr:  Opc = X86::CMOVLE32rr; break;
-    case X86::CMOVG64rr:  Opc = X86::CMOVLE64rr; break;
-    case X86::CMOVS16rr:  Opc = X86::CMOVNS16rr; break;
-    case X86::CMOVS32rr:  Opc = X86::CMOVNS32rr; break;
-    case X86::CMOVS64rr:  Opc = X86::CMOVNS64rr; break;
-    case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break;
-    case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break;
-    case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break;
-    case X86::CMOVP16rr:  Opc = X86::CMOVNP16rr; break;
-    case X86::CMOVP32rr:  Opc = X86::CMOVNP32rr; break;
-    case X86::CMOVP64rr:  Opc = X86::CMOVNP64rr; break;
-    case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break;
-    case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break;
-    case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break;
-    case X86::CMOVO16rr:  Opc = X86::CMOVNO16rr; break;
-    case X86::CMOVO32rr:  Opc = X86::CMOVNO32rr; break;
-    case X86::CMOVO64rr:  Opc = X86::CMOVNO64rr; break;
-    case X86::CMOVNO16rr: Opc = X86::CMOVO16rr; break;
-    case X86::CMOVNO32rr: Opc = X86::CMOVO32rr; break;
-    case X86::CMOVNO64rr: Opc = X86::CMOVO64rr; break;
-    }
+  case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr: {
     auto &WorkingMI = cloneIfNew(MI);
-    WorkingMI.setDesc(get(Opc));
+    unsigned OpNo = MI.getDesc().getNumOperands() - 1;
+    X86::CondCode CC = static_cast<X86::CondCode>(MI.getOperand(OpNo).getImm());
+    WorkingMI.getOperand(OpNo).setImm(X86::GetOppositeBranchCondition(CC));
     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                    OpIdx1, OpIdx2);
   }
 }
 
 /// Return condition code of a CMov opcode.
-X86::CondCode X86::getCondFromCMovOpc(unsigned Opc) {
-  switch (Opc) {
+X86::CondCode X86::getCondFromCMov(const MachineInstr &MI) {
+  switch (MI.getOpcode()) {
   default: return X86::COND_INVALID;
-  case X86::CMOVA16rm:  case X86::CMOVA16rr:  case X86::CMOVA32rm:
-  case X86::CMOVA32rr:  case X86::CMOVA64rm:  case X86::CMOVA64rr:
-    return X86::COND_A;
-  case X86::CMOVAE16rm: case X86::CMOVAE16rr: case X86::CMOVAE32rm:
-  case X86::CMOVAE32rr: case X86::CMOVAE64rm: case X86::CMOVAE64rr:
-    return X86::COND_AE;
-  case X86::CMOVB16rm:  case X86::CMOVB16rr:  case X86::CMOVB32rm:
-  case X86::CMOVB32rr:  case X86::CMOVB64rm:  case X86::CMOVB64rr:
-    return X86::COND_B;
-  case X86::CMOVBE16rm: case X86::CMOVBE16rr: case X86::CMOVBE32rm:
-  case X86::CMOVBE32rr: case X86::CMOVBE64rm: case X86::CMOVBE64rr:
-    return X86::COND_BE;
-  case X86::CMOVE16rm:  case X86::CMOVE16rr:  case X86::CMOVE32rm:
-  case X86::CMOVE32rr:  case X86::CMOVE64rm:  case X86::CMOVE64rr:
-    return X86::COND_E;
-  case X86::CMOVG16rm:  case X86::CMOVG16rr:  case X86::CMOVG32rm:
-  case X86::CMOVG32rr:  case X86::CMOVG64rm:  case X86::CMOVG64rr:
-    return X86::COND_G;
-  case X86::CMOVGE16rm: case X86::CMOVGE16rr: case X86::CMOVGE32rm:
-  case X86::CMOVGE32rr: case X86::CMOVGE64rm: case X86::CMOVGE64rr:
-    return X86::COND_GE;
-  case X86::CMOVL16rm:  case X86::CMOVL16rr:  case X86::CMOVL32rm:
-  case X86::CMOVL32rr:  case X86::CMOVL64rm:  case X86::CMOVL64rr:
-    return X86::COND_L;
-  case X86::CMOVLE16rm: case X86::CMOVLE16rr: case X86::CMOVLE32rm:
-  case X86::CMOVLE32rr: case X86::CMOVLE64rm: case X86::CMOVLE64rr:
-    return X86::COND_LE;
-  case X86::CMOVNE16rm: case X86::CMOVNE16rr: case X86::CMOVNE32rm:
-  case X86::CMOVNE32rr: case X86::CMOVNE64rm: case X86::CMOVNE64rr:
-    return X86::COND_NE;
-  case X86::CMOVNO16rm: case X86::CMOVNO16rr: case X86::CMOVNO32rm:
-  case X86::CMOVNO32rr: case X86::CMOVNO64rm: case X86::CMOVNO64rr:
-    return X86::COND_NO;
-  case X86::CMOVNP16rm: case X86::CMOVNP16rr: case X86::CMOVNP32rm:
-  case X86::CMOVNP32rr: case X86::CMOVNP64rm: case X86::CMOVNP64rr:
-    return X86::COND_NP;
-  case X86::CMOVNS16rm: case X86::CMOVNS16rr: case X86::CMOVNS32rm:
-  case X86::CMOVNS32rr: case X86::CMOVNS64rm: case X86::CMOVNS64rr:
-    return X86::COND_NS;
-  case X86::CMOVO16rm:  case X86::CMOVO16rr:  case X86::CMOVO32rm:
-  case X86::CMOVO32rr:  case X86::CMOVO64rm:  case X86::CMOVO64rr:
-    return X86::COND_O;
-  case X86::CMOVP16rm:  case X86::CMOVP16rr:  case X86::CMOVP32rm:
-  case X86::CMOVP32rr:  case X86::CMOVP64rm:  case X86::CMOVP64rr:
-    return X86::COND_P;
-  case X86::CMOVS16rm:  case X86::CMOVS16rr:  case X86::CMOVS32rm:
-  case X86::CMOVS32rr:  case X86::CMOVS64rm:  case X86::CMOVS64rr:
-    return X86::COND_S;
+  case X86::CMOV16rr: case X86::CMOV32rr: case X86::CMOV64rr:
+  case X86::CMOV16rm: case X86::CMOV32rm: case X86::CMOV64rm:
+    return static_cast<X86::CondCode>(
+        MI.getOperand(MI.getDesc().getNumOperands() - 1).getImm());
   }
 }
 
 /// whether it has memory operand.
 unsigned X86::getSETFromCond(CondCode CC, bool HasMemoryOperand) {
   static const uint16_t Opc[16][2] = {
+    { X86::SETOr,  X86::SETOm  },
+    { X86::SETNOr, X86::SETNOm },
+    { X86::SETBr,  X86::SETBm  },
+    { X86::SETAEr, X86::SETAEm },
+    { X86::SETEr,  X86::SETEm  },
+    { X86::SETNEr, X86::SETNEm },
+    { X86::SETBEr, X86::SETBEm },
     { X86::SETAr,  X86::SETAm  },
-    { X86::SETAEr, X86::SETAEm },
-    { X86::SETBr,  X86::SETBm  },
-    { X86::SETBEr, X86::SETBEm },
-    { X86::SETEr,  X86::SETEm  },
+    { X86::SETSr,  X86::SETSm  },
+    { X86::SETNSr, X86::SETNSm },
+    { X86::SETPr,  X86::SETPm  },
+    { X86::SETNPr, X86::SETNPm },
+    { X86::SETLr,  X86::SETLm  },
+    { X86::SETGEr, X86::SETGEm },
+    { X86::SETLEr, X86::SETLEm },
     { X86::SETGr,  X86::SETGm  },
-    { X86::SETGEr, X86::SETGEm },
-    { X86::SETLr,  X86::SETLm  },
-    { X86::SETLEr, X86::SETLEm },
-    { X86::SETNEr, X86::SETNEm },
-    { X86::SETNOr, X86::SETNOm },
-    { X86::SETNPr, X86::SETNPm },
-    { X86::SETNSr, X86::SETNSm },
-    { X86::SETOr,  X86::SETOm  },
-    { X86::SETPr,  X86::SETPm  },
-    { X86::SETSr,  X86::SETSm  }
   };
 
   assert(CC <= LAST_VALID_COND && "Can only handle standard cond codes");
   return Opc[CC][HasMemoryOperand ? 1 : 0];
 }
 
-/// Return a cmov opcode for the given condition,
-/// register size in bytes, and operand type.
-unsigned X86::getCMovFromCond(CondCode CC, unsigned RegBytes,
-                              bool HasMemoryOperand) {
-  static const uint16_t Opc[32][3] = {
-    { X86::CMOVA16rr,  X86::CMOVA32rr,  X86::CMOVA64rr  },
-    { X86::CMOVAE16rr, X86::CMOVAE32rr, X86::CMOVAE64rr },
-    { X86::CMOVB16rr,  X86::CMOVB32rr,  X86::CMOVB64rr  },
-    { X86::CMOVBE16rr, X86::CMOVBE32rr, X86::CMOVBE64rr },
-    { X86::CMOVE16rr,  X86::CMOVE32rr,  X86::CMOVE64rr  },
-    { X86::CMOVG16rr,  X86::CMOVG32rr,  X86::CMOVG64rr  },
-    { X86::CMOVGE16rr, X86::CMOVGE32rr, X86::CMOVGE64rr },
-    { X86::CMOVL16rr,  X86::CMOVL32rr,  X86::CMOVL64rr  },
-    { X86::CMOVLE16rr, X86::CMOVLE32rr, X86::CMOVLE64rr },
-    { X86::CMOVNE16rr, X86::CMOVNE32rr, X86::CMOVNE64rr },
-    { X86::CMOVNO16rr, X86::CMOVNO32rr, X86::CMOVNO64rr },
-    { X86::CMOVNP16rr, X86::CMOVNP32rr, X86::CMOVNP64rr },
-    { X86::CMOVNS16rr, X86::CMOVNS32rr, X86::CMOVNS64rr },
-    { X86::CMOVO16rr,  X86::CMOVO32rr,  X86::CMOVO64rr  },
-    { X86::CMOVP16rr,  X86::CMOVP32rr,  X86::CMOVP64rr  },
-    { X86::CMOVS16rr,  X86::CMOVS32rr,  X86::CMOVS64rr  },
-    { X86::CMOVA16rm,  X86::CMOVA32rm,  X86::CMOVA64rm  },
-    { X86::CMOVAE16rm, X86::CMOVAE32rm, X86::CMOVAE64rm },
-    { X86::CMOVB16rm,  X86::CMOVB32rm,  X86::CMOVB64rm  },
-    { X86::CMOVBE16rm, X86::CMOVBE32rm, X86::CMOVBE64rm },
-    { X86::CMOVE16rm,  X86::CMOVE32rm,  X86::CMOVE64rm  },
-    { X86::CMOVG16rm,  X86::CMOVG32rm,  X86::CMOVG64rm  },
-    { X86::CMOVGE16rm, X86::CMOVGE32rm, X86::CMOVGE64rm },
-    { X86::CMOVL16rm,  X86::CMOVL32rm,  X86::CMOVL64rm  },
-    { X86::CMOVLE16rm, X86::CMOVLE32rm, X86::CMOVLE64rm },
-    { X86::CMOVNE16rm, X86::CMOVNE32rm, X86::CMOVNE64rm },
-    { X86::CMOVNO16rm, X86::CMOVNO32rm, X86::CMOVNO64rm },
-    { X86::CMOVNP16rm, X86::CMOVNP32rm, X86::CMOVNP64rm },
-    { X86::CMOVNS16rm, X86::CMOVNS32rm, X86::CMOVNS64rm },
-    { X86::CMOVO16rm,  X86::CMOVO32rm,  X86::CMOVO64rm  },
-    { X86::CMOVP16rm,  X86::CMOVP32rm,  X86::CMOVP64rm  },
-    { X86::CMOVS16rm,  X86::CMOVS32rm,  X86::CMOVS64rm  }
-  };
-
-  assert(CC < 16 && "Can only handle standard cond codes");
-  unsigned Idx = HasMemoryOperand ? 16+CC : CC;
+/// Return a cmov opcode for the given register size in bytes, and operand type.
+unsigned X86::getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand) {
   switch(RegBytes) {
   default: llvm_unreachable("Illegal register size!");
-  case 2: return Opc[Idx][0];
-  case 4: return Opc[Idx][1];
-  case 8: return Opc[Idx][2];
+  case 2: return HasMemoryOperand ? X86::CMOV16rm : X86::CMOV16rr;
+  case 4: return HasMemoryOperand ? X86::CMOV32rm : X86::CMOV32rr;
+  case 8: return HasMemoryOperand ? X86::CMOV64rm : X86::CMOV64rr;
   }
 }
 
   const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
   const TargetRegisterClass &RC = *MRI.getRegClass(DstReg);
   assert(Cond.size() == 1 && "Invalid Cond array");
-  unsigned Opc = getCMovFromCond((X86::CondCode)Cond[0].getImm(),
-                                 TRI.getRegSizeInBits(RC) / 8,
-                                 false /*HasMemoryOperand*/);
-  BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(FalseReg).addReg(TrueReg);
+  unsigned Opc = X86::getCMovOpcode(TRI.getRegSizeInBits(RC) / 8,
+                                    false /*HasMemoryOperand*/);
+  BuildMI(MBB, I, DL, get(Opc), DstReg)
+      .addReg(FalseReg)
+      .addReg(TrueReg)
+      .addImm(Cond[0].getImm());
 }
 
 /// Test if the given register is a physical h register.
       if (OldCC != X86::COND_INVALID)
         OpcIsSET = true;
       else
-        OldCC = X86::getCondFromCMovOpc(Instr.getOpcode());
+        OldCC = X86::getCondFromCMov(Instr);
     }
     if (OldCC == X86::COND_INVALID) return false;
   }
       else if(OpcIsSET)
         NewOpc = getSETFromCond(ReplacementCC, HasMemoryOperand);
       else {
-        unsigned DstReg = Instr.getOperand(0).getReg();
-        const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
-        NewOpc = getCMovFromCond(ReplacementCC, TRI->getRegSizeInBits(*DstRC)/8,
-                                 HasMemoryOperand);
+        NewOpc = ReplacementCC;
       }
 
       // Push the MachineInstr to OpsToUpdate.
   CmpInstr.eraseFromParent();
 
   // Modify the condition code of instructions in OpsToUpdate.
-  for (auto &Op : OpsToUpdate)
-    Op.first->setDesc(get(Op.second));
+  for (auto &Op : OpsToUpdate) {
+    if (X86::getCondFromCMov(*Op.first) != X86::COND_INVALID)
+      Op.first->getOperand(Op.first->getDesc().getNumOperands() - 1)
+          .setImm(Op.second);
+    else
+      Op.first->setDesc(get(Op.second));
+  }
   return true;
 }
 
   AC_EVEX_2_VEX = MachineInstr::TAsmComments
 };
 
-// X86 specific condition code. These correspond to X86_*_COND in
-// X86InstrInfo.td. They must be kept in synch.
-enum CondCode {
-  COND_A = 0,
-  COND_AE = 1,
-  COND_B = 2,
-  COND_BE = 3,
-  COND_E = 4,
-  COND_G = 5,
-  COND_GE = 6,
-  COND_L = 7,
-  COND_LE = 8,
-  COND_NE = 9,
-  COND_NO = 10,
-  COND_NP = 11,
-  COND_NS = 12,
-  COND_O = 13,
-  COND_P = 14,
-  COND_S = 15,
-  LAST_VALID_COND = COND_S,
-
-  // Artificial condition codes. These are used by AnalyzeBranch
-  // to indicate a block terminated with two conditional branches that together
-  // form a compound condition. They occur in code using FCMP_OEQ or FCMP_UNE,
-  // which can't be represented on x86 with a single condition. These
-  // are never used in MachineInstrs and are inverses of one another.
-  COND_NE_OR_P,
-  COND_E_AND_NP,
-
-  COND_INVALID
-};
-
 // Turn condition code into conditional branch opcode.
 unsigned GetCondBranchFromCond(CondCode CC);
 
 /// a memory operand.
 unsigned getSETFromCond(CondCode CC, bool HasMemoryOperand = false);
 
-/// Return a cmov opcode for the given condition, register size in
-/// bytes, and operand type.
-unsigned getCMovFromCond(CondCode CC, unsigned RegBytes,
-                         bool HasMemoryOperand = false);
+/// Return a cmov opcode for the given register size in bytes, and operand type.
+unsigned getCMovOpcode(unsigned RegBytes, bool HasMemoryOperand = false);
 
 // Turn jCC opcode into condition code.
 CondCode getCondFromBranchOpc(unsigned Opc);
 CondCode getCondFromSETOpc(unsigned Opc);
 
 // Turn CMov opcode into condition code.
-CondCode getCondFromCMovOpc(unsigned Opc);
+CondCode getCondFromCMov(const MachineInstr &MI);
 
 /// GetOppositeBranchCondition - Return the inverse of the specified cond,
 /// e.g. turning COND_E to COND_NE.
                                      X86MemOffs64_32AsmOperand>;
 def offset64_64 : X86MemOffsOperand<i64imm, "printMemOffs64",
                                     X86MemOffs64_64AsmOperand>;
+
+def ccode : Operand<i8> {
+  let PrintMethod = "printCondCode";
+  let OperandNamespace = "X86";
+  let OperandType = "OPERAND_COND_CODE";
+}
 
 class ImmSExtAsmOperandClass : AsmOperandClass {
   let SuperClasses = [ImmAsmOperand];
 
 // X86 specific condition code. These correspond to CondCode in
 // X86InstrInfo.h. They must be kept in synch.
-def X86_COND_A   : PatLeaf<(i8 0)>;  // alt. COND_NBE
-def X86_COND_AE  : PatLeaf<(i8 1)>;  // alt. COND_NC
+def X86_COND_O   : PatLeaf<(i8 0)>;
+def X86_COND_NO  : PatLeaf<(i8 1)>;
 def X86_COND_B   : PatLeaf<(i8 2)>;  // alt. COND_C
-def X86_COND_BE  : PatLeaf<(i8 3)>;  // alt. COND_NA
+def X86_COND_AE  : PatLeaf<(i8 3)>;  // alt. COND_NC
 def X86_COND_E   : PatLeaf<(i8 4)>;  // alt. COND_Z
-def X86_COND_G   : PatLeaf<(i8 5)>;  // alt. COND_NLE
-def X86_COND_GE  : PatLeaf<(i8 6)>;  // alt. COND_NL
-def X86_COND_L   : PatLeaf<(i8 7)>;  // alt. COND_NGE
-def X86_COND_LE  : PatLeaf<(i8 8)>;  // alt. COND_NG
-def X86_COND_NE  : PatLeaf<(i8 9)>;  // alt. COND_NZ
-def X86_COND_NO  : PatLeaf<(i8 10)>;
+def X86_COND_NE  : PatLeaf<(i8 5)>;  // alt. COND_NZ
+def X86_COND_BE  : PatLeaf<(i8 6)>;  // alt. COND_NA
+def X86_COND_A   : PatLeaf<(i8 7)>;  // alt. COND_NBE
+def X86_COND_S   : PatLeaf<(i8 8)>;
+def X86_COND_NS  : PatLeaf<(i8 9)>;
+def X86_COND_P   : PatLeaf<(i8 10)>; // alt. COND_PE
 def X86_COND_NP  : PatLeaf<(i8 11)>; // alt. COND_PO
-def X86_COND_NS  : PatLeaf<(i8 12)>;
-def X86_COND_O   : PatLeaf<(i8 13)>;
-def X86_COND_P   : PatLeaf<(i8 14)>; // alt. COND_PE
-def X86_COND_S   : PatLeaf<(i8 15)>;
+def X86_COND_L   : PatLeaf<(i8 12)>; // alt. COND_NGE
+def X86_COND_GE  : PatLeaf<(i8 13)>; // alt. COND_NL
+def X86_COND_LE  : PatLeaf<(i8 14)>; // alt. COND_NG
+def X86_COND_G   : PatLeaf<(i8 15)>; // alt. COND_NLE
 
 def i16immSExt8 : ImmLeaf<i16, [{ return isInt<8>(Imm); }]>;
 def i32immSExt8 : ImmLeaf<i32, [{ return isInt<8>(Imm); }]>;
 def : WriteRes; // LEA instructions can't fold loads.
 
 defm : BWWriteResPair; // Conditional move.
-defm : BWWriteResPair; // Conditional (CF + ZF flag) move.
 defm : X86WriteRes; // x87 conditional move.
 
 def : WriteRes; // Setcc.
 
 def: InstRW<[WriteZero], (instrs CLC)>;
 
+// CMOVs that use both Z and C flag require an extra uop.
+def BWWriteCMOVA_CMOVBErr : SchedWriteRes<[BWPort06,BWPort0156]> {
+  let Latency = 2;
+  let ResourceCycles = [1,1];
+  let NumMicroOps = 2;
+}
+
+def BWWriteCMOVA_CMOVBErm : SchedWriteRes<[BWPort23,BWPort06,BWPort0156]> {
+  let Latency = 7;
+  let ResourceCycles = [1,1,1];
+  let NumMicroOps = 3;
+}
+
+def BWCMOVA_CMOVBErr : SchedWriteVariant<[
+  SchedVar<MCSchedPredicate<IsCMOVArr_Or_CMOVBErr>, [BWWriteCMOVA_CMOVBErr]>,
+  SchedVar<NoSchedPred,                             [WriteCMOV]>
+]>;
+
+def BWCMOVA_CMOVBErm : SchedWriteVariant<[
+  SchedVar<MCSchedPredicate<IsCMOVArm_Or_CMOVBErm>, [BWWriteCMOVA_CMOVBErm]>,
+  SchedVar<NoSchedPred,                             [WriteCMOV.Folded]>
+]>;
+
+def : InstRW<[BWCMOVA_CMOVBErr], (instrs CMOV16rr, CMOV32rr, CMOV64rr)>;
+def : InstRW<[BWCMOVA_CMOVBErm], (instrs CMOV16rm, CMOV32rm, CMOV64rm)>;
+
 } // SchedModel
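The Broadwell block above is the pattern every Intel model in this patch follows: a SchedWriteVariant keyed on the new condition-code operand routes COND_A/COND_BE to the two-uop class and everything else to the default class. Roughly (hypothetical instances):

    cmovne %ebx, %eax   ; -> WriteCMOV (default single-uop class)
    cmova  %ebx, %eax   ; -> BWWriteCMOVA_CMOVBErr (2 uops: BWPort06 + BWPort0156)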
 defm : HWWriteResPair;
 
 defm : HWWriteResPair; // Conditional move.
-defm : HWWriteResPair; // Conditional (CF + ZF flag) move.
 defm : X86WriteRes; // x87 conditional move.
 def : WriteRes; // Setcc.
 def : WriteRes {
 def : InstRW<[HWWriteADC], (instrs ADC16ri8, ADC32ri8, ADC64ri8,
                                    SBB16ri8, SBB32ri8, SBB64ri8)>;
 
+// CMOVs that use both Z and C flag require an extra uop.
+def HWWriteCMOVA_CMOVBErr : SchedWriteRes<[HWPort06,HWPort0156]> {
+  let Latency = 3;
+  let ResourceCycles = [1,2];
+  let NumMicroOps = 3;
+}
+
+def HWWriteCMOVA_CMOVBErm : SchedWriteRes<[HWPort23,HWPort06,HWPort0156]> {
+  let Latency = 8;
+  let ResourceCycles = [1,1,2];
+  let NumMicroOps = 4;
+}
+
+def HWCMOVA_CMOVBErr : SchedWriteVariant<[
+  SchedVar<MCSchedPredicate<IsCMOVArr_Or_CMOVBErr>, [HWWriteCMOVA_CMOVBErr]>,
+  SchedVar<NoSchedPred,                             [WriteCMOV]>
+]>;
+
+def HWCMOVA_CMOVBErm : SchedWriteVariant<[
+  SchedVar<MCSchedPredicate<IsCMOVArm_Or_CMOVBErm>, [HWWriteCMOVA_CMOVBErm]>,
+  SchedVar<NoSchedPred,                             [WriteCMOV.Folded]>
+]>;
+
+def : InstRW<[HWCMOVA_CMOVBErr], (instrs CMOV16rr, CMOV32rr, CMOV64rr)>;
+def : InstRW<[HWCMOVA_CMOVBErm], (instrs CMOV16rm, CMOV32rm, CMOV64rm)>;
+
 } // SchedModel
 // X86GenInstrInfo.
 def IsThreeOperandsLEAFn :
     TIIPredicate<"isThreeOperandsLEA", IsThreeOperandsLEABody>;
+
+// A predicate to check for COND_A and COND_BE CMOVs which have an extra uop
+// on recent Intel CPUs.
+def IsCMOVArr_Or_CMOVBErr : CheckAny<[
+  CheckImmOperand_s<3, "X86::COND_A">,
+  CheckImmOperand_s<3, "X86::COND_BE">
+]>;
+
+def IsCMOVArm_Or_CMOVBErm : CheckAny<[
+  CheckImmOperand_s<7, "X86::COND_A">,
+  CheckImmOperand_s<7, "X86::COND_BE">
+]>;
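The operand indices in these predicates fall directly out of the new instruction layouts; the memory form's index is larger because the memory reference expands to X86::AddrNumOperands (5) suboperands:

    // Operand layouts assumed by the predicates above:
    //   CMOV32rr: $dst, $src1, $src2, $cond                             -> index 3
    //   CMOV32rm: $dst, $src1, base, scale, index, disp, segment, $cond -> index 7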
 defm : SBWriteResPair;
 
 defm : SBWriteResPair; // Conditional move.
-defm : SBWriteResPair; // Conditional (CF + ZF flag) move.
 defm : X86WriteRes; // x87 conditional move.
 def : WriteRes; // Setcc.
 def : WriteRes {
 ]>;
 def : InstRW<[SBWriteVZeroIdiomPCMPGTQ], (instrs PCMPGTQrr, VPCMPGTQrr)>;
 
+// CMOVs that use both Z and C flag require an extra uop.
+def SBWriteCMOVA_CMOVBErr : SchedWriteRes<[SBPort05,SBPort015]> {
+  let Latency = 3;
+  let ResourceCycles = [2,1];
+  let NumMicroOps = 3;
+}
+
+def SBWriteCMOVA_CMOVBErm : SchedWriteRes<[SBPort23,SBPort05,SBPort015]> {
+  let Latency = 8;
+  let ResourceCycles = [1,2,1];
+  let NumMicroOps = 4;
+}
+
+def SBCMOVA_CMOVBErr : SchedWriteVariant<[
+  SchedVar<MCSchedPredicate<IsCMOVArr_Or_CMOVBErr>, [SBWriteCMOVA_CMOVBErr]>,
+  SchedVar<NoSchedPred,                             [WriteCMOV]>
+]>;
+
+def SBCMOVA_CMOVBErm : SchedWriteVariant<[
+  SchedVar<MCSchedPredicate<IsCMOVArm_Or_CMOVBErm>, [SBWriteCMOVA_CMOVBErm]>,
+  SchedVar<NoSchedPred,                             [WriteCMOV.Folded]>
+]>;
+
+def : InstRW<[SBCMOVA_CMOVBErr], (instrs CMOV16rr, CMOV32rr, CMOV64rr)>;
+def : InstRW<[SBCMOVA_CMOVBErm], (instrs CMOV16rm, CMOV32rm, CMOV64rm)>;
+
 } // SchedModel
 def : WriteRes; // LEA instructions can't fold loads.
 
 defm : SKLWriteResPair; // Conditional move.
-defm : SKLWriteResPair; // Conditional (CF + ZF flag) move.
 defm : X86WriteRes; // x87 conditional move.
 def : WriteRes; // Setcc.
 def : WriteRes {
 
 def: InstRW<[WriteZero], (instrs CLC)>;
 
+// CMOVs that use both Z and C flag require an extra uop.
+def SKLWriteCMOVA_CMOVBErr : SchedWriteRes<[SKLPort06]> {
+  let Latency = 2;
+  let ResourceCycles = [2];
+  let NumMicroOps = 2;
+}
+
+def SKLWriteCMOVA_CMOVBErm : SchedWriteRes<[SKLPort23,SKLPort06]> {
+  let Latency = 7;
+  let ResourceCycles = [1,2];
+  let NumMicroOps = 3;
+}
+
+def SKLCMOVA_CMOVBErr : SchedWriteVariant<[
+  SchedVar<MCSchedPredicate<IsCMOVArr_Or_CMOVBErr>, [SKLWriteCMOVA_CMOVBErr]>,
+  SchedVar<NoSchedPred,                             [WriteCMOV]>
+]>;
+
+def SKLCMOVA_CMOVBErm : SchedWriteVariant<[
+  SchedVar<MCSchedPredicate<IsCMOVArm_Or_CMOVBErm>, [SKLWriteCMOVA_CMOVBErm]>,
+  SchedVar<NoSchedPred,                             [WriteCMOV.Folded]>
+]>;
+
+def : InstRW<[SKLCMOVA_CMOVBErr], (instrs CMOV16rr, CMOV32rr, CMOV64rr)>;
+def : InstRW<[SKLCMOVA_CMOVBErm], (instrs CMOV16rm, CMOV32rm, CMOV64rm)>;
+
 } // SchedModel
 def : WriteRes; // LEA instructions can't fold loads.
 
 defm : SKXWriteResPair; // Conditional move.
-defm : SKXWriteResPair; // Conditional (CF + ZF flag) move.
 defm : X86WriteRes; // x87 conditional move.
 def : WriteRes; // Setcc.
 def : WriteRes {
 
 def: InstRW<[WriteZero], (instrs CLC)>;
 
+// CMOVs that use both Z and C flag require an extra uop.
+def SKXWriteCMOVA_CMOVBErr : SchedWriteRes<[SKXPort06]> {
+  let Latency = 2;
+  let ResourceCycles = [2];
+  let NumMicroOps = 2;
+}
+
+def SKXWriteCMOVA_CMOVBErm : SchedWriteRes<[SKXPort23,SKXPort06]> {
+  let Latency = 7;
+  let ResourceCycles = [1,2];
+  let NumMicroOps = 3;
+}
+
+def SKXCMOVA_CMOVBErr : SchedWriteVariant<[
+  SchedVar<MCSchedPredicate<IsCMOVArr_Or_CMOVBErr>, [SKXWriteCMOVA_CMOVBErr]>,
+  SchedVar<NoSchedPred,                             [WriteCMOV]>
+]>;
+
+def SKXCMOVA_CMOVBErm : SchedWriteVariant<[
+  SchedVar<MCSchedPredicate<IsCMOVArm_Or_CMOVBErm>, [SKXWriteCMOVA_CMOVBErm]>,
+  SchedVar<NoSchedPred,                             [WriteCMOV.Folded]>
+]>;
+
+def : InstRW<[SKXCMOVA_CMOVBErr], (instrs CMOV16rr, CMOV32rr, CMOV64rr)>;
+def : InstRW<[SKXCMOVA_CMOVBErm], (instrs CMOV16rm, CMOV32rm, CMOV64rm)>;
+
 } // SchedModel
 defm WriteLZCNT : X86SchedWritePair; // Leading zero count.
 defm WriteTZCNT : X86SchedWritePair; // Trailing zero count.
 defm WriteCMOV  : X86SchedWritePair; // Conditional move.
-defm WriteCMOV2 : X86SchedWritePair; // Conditional (CF + ZF flag) move.
 def  WriteFCMOV : SchedWrite; // X87 conditional move.
 def  WriteSETCC : SchedWrite; // Set register based on condition code.
 def  WriteSETCCStore : SchedWrite;
 defm : X86WriteResPairUnsupported;
 
 defm : AtomWriteResPair;
-defm : AtomWriteResPair;
 defm : X86WriteRes; // x87 conditional move.
 
 def : WriteRes;
 def : InstRW<[PdWriteCRC32r64r64], (instrs CRC32r64r64)>;
 
 defm : PdWriteResExPair; // Conditional move.
-defm : PdWriteResExPair; // Conditional (CF + ZF flag) move.
-
-def : InstRW<[WriteCMOV2.Folded], (instrs CMOVG16rm, CMOVG32rm, CMOVG64rm,
-                                          CMOVGE16rm, CMOVGE32rm, CMOVGE64rm,
-                                          CMOVL16rm, CMOVL32rm, CMOVL64rm,
-                                          CMOVLE16rm, CMOVLE32rm, CMOVLE64rm)>;
+
+def PdWriteCMOVm : SchedWriteRes<[PdLoad, PdEX01]> {
+  let Latency = 5;
+  let ResourceCycles = [1, 1];
+  let NumMicroOps = 2;
+}
+
+def PdWriteCMOVmVar : SchedWriteVariant<[
+  SchedVar<MCSchedPredicate<CheckImmOperand_s<7, "X86::COND_BE">>, [PdWriteCMOVm]>,
+  SchedVar<MCSchedPredicate<CheckImmOperand_s<7, "X86::COND_A">>,  [PdWriteCMOVm]>,
+  SchedVar<MCSchedPredicate<CheckImmOperand_s<7, "X86::COND_G">>,  [PdWriteCMOVm]>,
+  SchedVar<MCSchedPredicate<CheckImmOperand_s<7, "X86::COND_GE">>, [PdWriteCMOVm]>,
+  SchedVar<MCSchedPredicate<CheckImmOperand_s<7, "X86::COND_L">>,  [PdWriteCMOVm]>,
+  SchedVar<MCSchedPredicate<CheckImmOperand_s<7, "X86::COND_LE">>, [PdWriteCMOVm]>,
+  SchedVar<NoSchedPred, [WriteCMOV.Folded]>
+]>;
+
+def : InstRW<[PdWriteCMOVmVar], (instrs CMOV16rm, CMOV32rm, CMOV64rm)>;
 
 defm : PdWriteRes; // x87 conditional move.
 
 defm : JWriteResIntPair;
 
 defm : JWriteResIntPair; // Conditional move.
-defm : JWriteResIntPair; // Conditional (CF + ZF flag) move.
 defm : X86WriteRes; // x87 conditional move.
 def : WriteRes; // Setcc.
 def : WriteRes;
 defm : SLMWriteResPair;
 
 defm : SLMWriteResPair;
-defm : SLMWriteResPair;
 defm : X86WriteRes; // x87 conditional move.
 def : WriteRes;
 def : WriteRes {
 defm : ZnWriteResFpuPair;
 
 defm : ZnWriteResPair;
-defm : ZnWriteResPair;
 def : WriteRes;
 def : WriteRes;
 defm : X86WriteRes;
 
   for (X86::CondCode Cond : Conds) {
     int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
-    auto CMovOp = X86::getCMovFromCond(Cond, PredStateSizeInBytes);
+    auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
 
     unsigned UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
     // Note that we intentionally use an empty debug location so that
     auto CMovI = BuildMI(CheckingMBB, InsertPt, DebugLoc(),
                          TII->get(CMovOp), UpdatedStateReg)
                      .addReg(CurStateReg)
-                     .addReg(PS->PoisonReg);
+                     .addReg(PS->PoisonReg)
+                     .addImm(Cond);
     // If this is the last cmov and the EFLAGS weren't originally
     // live-in, mark them as killed.
     if (!LiveEFLAGS && Cond == Conds.back())
 
   // Now cmov over the predicate if the comparison wasn't equal.
   int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
-  auto CMovOp = X86::getCMovFromCond(X86::COND_NE, PredStateSizeInBytes);
+  auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
   unsigned UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
   auto CMovI =
       BuildMI(MBB, InsertPt, DebugLoc(), TII->get(CMovOp), UpdatedStateReg)
           .addReg(PS->InitialReg)
-          .addReg(PS->PoisonReg);
+          .addReg(PS->PoisonReg)
+          .addImm(X86::COND_NE);
   CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
   ++NumInstsInserted;
   LLVM_DEBUG(dbgs() << "  Inserting cmov: "; CMovI->dump(); dbgs() << "\n");
   // Now conditionally update the predicate state we just extracted if we ended
   // up at a different return address than expected.
   int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;
-  auto CMovOp = X86::getCMovFromCond(X86::COND_NE, PredStateSizeInBytes);
+  auto CMovOp = X86::getCMovOpcode(PredStateSizeInBytes);
 
   unsigned UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
   auto CMovI = BuildMI(MBB, InsertPt, Loc, TII->get(CMovOp), UpdatedStateReg)
                    .addReg(NewStateReg, RegState::Kill)
-                   .addReg(PS->PoisonReg);
+                   .addReg(PS->PoisonReg)
+                   .addImm(X86::COND_NE);
   CMovI->findRegisterUseOperand(X86::EFLAGS)->setIsKill(true);
   ++NumInstsInserted;
   LLVM_DEBUG(dbgs() << "  Inserting cmov: "; CMovI->dump(); dbgs() << "\n");
282282 ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
283283
284284 $eflags = COPY %2
285 %3:gr64 = CMOVA64rr %0, %1, implicit $eflags
286 %4:gr64 = CMOVB64rr %0, %1, implicit $eflags
287 %5:gr64 = CMOVE64rr %0, %1, implicit $eflags
288 %6:gr64 = CMOVNE64rr %0, %1, implicit killed $eflags
285 %3:gr64 = CMOV64rr %0, %1, 7, implicit $eflags
286 %4:gr64 = CMOV64rr %0, %1, 2, implicit $eflags
287 %5:gr64 = CMOV64rr %0, %1, 4, implicit $eflags
288 %6:gr64 = CMOV64rr %0, %1, 5, implicit killed $eflags
289289 ; CHECK-NOT: $eflags =
290290 ; CHECK: TEST8rr %[[A_REG]], %[[A_REG]], implicit-def $eflags
291 ; CHECK-NEXT: %3:gr64 = CMOVNE64rr %0, %1, implicit killed $eflags
291 ; CHECK-NEXT: %3:gr64 = CMOV64rr %0, %1, 5, implicit killed $eflags
292292 ; CHECK-NEXT: TEST8rr %[[B_REG]], %[[B_REG]], implicit-def $eflags
293 ; CHECK-NEXT: %4:gr64 = CMOVNE64rr %0, %1, implicit killed $eflags
293 ; CHECK-NEXT: %4:gr64 = CMOV64rr %0, %1, 5, implicit killed $eflags
294294 ; CHECK-NEXT: TEST8rr %[[E_REG]], %[[E_REG]], implicit-def $eflags
295 ; CHECK-NEXT: %5:gr64 = CMOVNE64rr %0, %1, implicit killed $eflags
295 ; CHECK-NEXT: %5:gr64 = CMOV64rr %0, %1, 5, implicit killed $eflags
296296 ; CHECK-NEXT: TEST8rr %[[E_REG]], %[[E_REG]], implicit-def $eflags
297 ; CHECK-NEXT: %6:gr64 = CMOVE64rr %0, %1, implicit killed $eflags
297 ; CHECK-NEXT: %6:gr64 = CMOV64rr %0, %1, 4, implicit killed $eflags
298298 MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %3
299299 MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %4
300300 MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %5
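
The MIR updates read off the new encoding directly: each per-condition CMOV mnemonic becomes CMOV64rr with the hardware condition encoding as a fourth operand. The mapping visible in the hunks in this file:

    CMOVB64rr  -> CMOV64rr %0, %1, 2     (b,  below)
    CMOVE64rr  -> CMOV64rr %0, %1, 4     (e,  equal)
    CMOVNE64rr -> CMOV64rr %0, %1, 5     (ne, not equal)
    CMOVA64rr  -> CMOV64rr %0, %1, 7     (a,  above)
    CMOVS64rr  -> CMOV64rr %0, %1, 8     (s,  sign)
    CMOVP64rr  -> CMOV64rr %0, %1, 10    (p,  parity)
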
395395 ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
396396
397397 $eflags = COPY %3
398 %4:gr64 = CMOVE64rr %0, %1, implicit $eflags
398 %4:gr64 = CMOV64rr %0, %1, 4, implicit $eflags
399399 %5:gr64 = MOV64ri32 42
400400 %6:gr64 = ADCX64rr %2, %5, implicit-def $eflags, implicit $eflags
401401 ; CHECK-NOT: $eflags =
402402 ; CHECK: TEST8rr %[[E_REG]], %[[E_REG]], implicit-def $eflags
403 ; CHECK-NEXT: %4:gr64 = CMOVNE64rr %0, %1, implicit killed $eflags
403 ; CHECK-NEXT: %4:gr64 = CMOV64rr %0, %1, 5, implicit killed $eflags
404404 ; CHECK-NEXT: %5:gr64 = MOV64ri32 42
405405 ; CHECK-NEXT: dead %{{[^:]*}}:gr8 = ADD8ri %[[CF_REG]], 255, implicit-def $eflags
406406 ; CHECK-NEXT: %6:gr64 = ADCX64rr %2, %5, implicit-def{{( dead)?}} $eflags, implicit killed $eflags
434434 ADJCALLSTACKUP64 0, 0, implicit-def dead $rsp, implicit-def dead $eflags, implicit-def dead $ssp, implicit $rsp, implicit $ssp
435435
436436 $eflags = COPY %3
437 %4:gr64 = CMOVE64rr %0, %1, implicit $eflags
437 %4:gr64 = CMOV64rr %0, %1, 4, implicit $eflags
438438 %5:gr64 = MOV64ri32 42
439439 %6:gr64 = ADOX64rr %2, %5, implicit-def $eflags, implicit $eflags
440440 ; CHECK-NOT: $eflags =
441441 ; CHECK: TEST8rr %[[E_REG]], %[[E_REG]], implicit-def $eflags
442 ; CHECK-NEXT: %4:gr64 = CMOVNE64rr %0, %1, implicit killed $eflags
442 ; CHECK-NEXT: %4:gr64 = CMOV64rr %0, %1, 5, implicit killed $eflags
443443 ; CHECK-NEXT: %5:gr64 = MOV64ri32 42
444444 ; CHECK-NEXT: dead %{{[^:]*}}:gr8 = ADD8ri %[[OF_REG]], 127, implicit-def $eflags
445445 ; CHECK-NEXT: %6:gr64 = ADOX64rr %2, %5, implicit-def{{( dead)?}} $eflags, implicit killed $eflags
627627 bb.1:
628628 liveins: $eflags
629629
630 %3:gr64 = CMOVE64rr %0, %1, implicit killed $eflags
630 %3:gr64 = CMOV64rr %0, %1, 4, implicit killed $eflags
631631 ; CHECK-NOT: $eflags =
632632 ; CHECK: TEST8rr %[[NE_REG]], %[[NE_REG]], implicit-def $eflags
633 ; CHECK-NEXT: %3:gr64 = CMOVE64rr %0, %1, implicit killed $eflags
633 ; CHECK-NEXT: %3:gr64 = CMOV64rr %0, %1, 4, implicit killed $eflags
634634 $rax = COPY %3
635635 RET 0, $rax
636636
637637 bb.2:
638638 liveins: $eflags
639639
640 %4:gr64 = CMOVNE64rr %0, %1, implicit killed $eflags
640 %4:gr64 = CMOV64rr %0, %1, 5, implicit killed $eflags
641641 ; CHECK-NOT: $eflags =
642642 ; CHECK: TEST8rr %[[NE_REG]], %[[NE_REG]], implicit-def $eflags
643 ; CHECK-NEXT: %4:gr64 = CMOVNE64rr %0, %1, implicit killed $eflags
643 ; CHECK-NEXT: %4:gr64 = CMOV64rr %0, %1, 5, implicit killed $eflags
644644 $rax = COPY %4
645645 RET 0, $rax
646646
647647 bb.3:
648648 liveins: $eflags
649649
650 %5:gr64 = CMOVS64rr %0, %1, implicit killed $eflags
650 %5:gr64 = CMOV64rr %0, %1, 8, implicit killed $eflags
651651 ; CHECK-NOT: $eflags =
652652 ; CHECK: TEST8rr %[[S_REG]], %[[S_REG]], implicit-def $eflags
653 ; CHECK-NEXT: %5:gr64 = CMOVNE64rr %0, %1, implicit killed $eflags
653 ; CHECK-NEXT: %5:gr64 = CMOV64rr %0, %1, 5, implicit killed $eflags
654654 $rax = COPY %5
655655 RET 0, $rax
656656
702702 bb.1:
703703 liveins: $eflags
704704
705 %3:gr64 = CMOVE64rr %0, %1, implicit killed $eflags
705 %3:gr64 = CMOV64rr %0, %1, 4, implicit killed $eflags
706706 ; CHECK-NOT: $eflags =
707707 ; CHECK: TEST8rr %[[NE_REG]], %[[NE_REG]], implicit-def $eflags
708 ; CHECK-NEXT: %3:gr64 = CMOVE64rr %0, %1, implicit killed $eflags
708 ; CHECK-NEXT: %3:gr64 = CMOV64rr %0, %1, 4, implicit killed $eflags
709709 $rax = COPY %3
710710 RET 0, $rax
711711
727727 bb.3:
728728 liveins: $eflags
729729
730 %4:gr64 = CMOVNE64rr %0, %1, implicit $eflags
730 %4:gr64 = CMOV64rr %0, %1, 5, implicit $eflags
731731 ; CHECK-NOT: $eflags =
732732 ; CHECK: TEST8rr %[[NE_REG]], %[[NE_REG]], implicit-def $eflags
733 ; CHECK-NEXT: %4:gr64 = CMOVNE64rr %0, %1, implicit killed $eflags
733 ; CHECK-NEXT: %4:gr64 = CMOV64rr %0, %1, 5, implicit killed $eflags
734734 $rax = COPY %4
735735 RET 0, $rax
736736
737737 bb.4:
738738 liveins: $eflags
739739
740 %5:gr64 = CMOVP64rr %0, %1, implicit $eflags
740 %5:gr64 = CMOV64rr %0, %1, 10, implicit $eflags
741741 ; CHECK-NOT: $eflags =
742742 ; CHECK: TEST8rr %[[P_REG]], %[[P_REG]], implicit-def $eflags
743 ; CHECK-NEXT: %5:gr64 = CMOVNE64rr %0, %1, implicit killed $eflags
743 ; CHECK-NEXT: %5:gr64 = CMOV64rr %0, %1, 5, implicit killed $eflags
744744 $rax = COPY %5
745745 RET 0, $rax
746746
747747 bb.5:
748748 liveins: $eflags
749749
750 %6:gr64 = CMOVS64rr %0, %1, implicit killed $eflags
750 %6:gr64 = CMOV64rr %0, %1, 8, implicit killed $eflags
751751 ; CHECK-NOT: $eflags =
752752 ; CHECK: TEST8rr %[[S_REG]], %[[S_REG]], implicit-def $eflags
753 ; CHECK-NEXT: %6:gr64 = CMOVNE64rr %0, %1, implicit killed $eflags
753 ; CHECK-NEXT: %6:gr64 = CMOV64rr %0, %1, 5, implicit killed $eflags
754754 $rax = COPY %6
755755 RET 0, $rax
756756
875875 liveins: $eflags
876876
877877 ; Just use $eflags on this side of the diamond.
878 %4:gr64 = CMOVA64rr %0, %1, implicit $eflags
878 %4:gr64 = CMOV64rr %0, %1, 7, implicit $eflags
879879 ; CHECK: bb.5:
880880 ; CHECK-NOT: COPY{{( killed)?}} $eflags
881881 ; CHECK: TEST8rr %[[A_REG]], %[[A_REG]], implicit-def $eflags
882 ; CHECK-NEXT: %4:gr64 = CMOVNE64rr %0, %1, implicit killed $eflags
882 ; CHECK-NEXT: %4:gr64 = CMOV64rr %0, %1, 5, implicit killed $eflags
883883 ; CHECK-NOT: COPY{{( killed)?}} $eflags
884884 MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %4
885885 JMP_1 %bb.7
889889 liveins: $eflags
890890
891891 ; Use, copy, and then use $eflags again.
892 %5:gr64 = CMOVA64rr %0, %1, implicit $eflags
892 %5:gr64 = CMOV64rr %0, %1, 7, implicit $eflags
893893 ; CHECK: bb.6:
894894 ; CHECK-NOT: COPY{{( killed)?}} $eflags
895895 ; CHECK: TEST8rr %[[A_REG]], %[[A_REG]], implicit-def $eflags
896 ; CHECK-NEXT: %5:gr64 = CMOVNE64rr %0, %1, implicit killed $eflags
896 ; CHECK-NEXT: %5:gr64 = CMOV64rr %0, %1, 5, implicit killed $eflags
897897 ; CHECK-NOT: COPY{{( killed)?}} $eflags
898898 MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %5
899899
900900 %6:gr64 = COPY $eflags
901901 $eflags = COPY %6:gr64
902902
903 %7:gr64 = CMOVA64rr %0, %1, implicit $eflags
903 %7:gr64 = CMOV64rr %0, %1, 7, implicit $eflags
904904 ; CHECK-NOT: COPY{{( killed)?}} $eflags
905905 ; CHECK: TEST8rr %[[A_REG]], %[[A_REG]], implicit-def $eflags
906 ; CHECK-NEXT: %7:gr64 = CMOVNE64rr %0, %1, implicit killed $eflags
906 ; CHECK-NEXT: %7:gr64 = CMOV64rr %0, %1, 5, implicit killed $eflags
907907 ; CHECK-NOT: COPY{{( killed)?}} $eflags
908908 MOV64mr $rsp, 1, $noreg, -16, $noreg, killed %7
909909 JMP_1 %bb.7
939939 liveins: $eflags
940940
941941 ; And we're done.
942 %8:gr64 = CMOVE64rr %0, %1, implicit killed $eflags
942 %8:gr64 = CMOV64rr %0, %1, 4, implicit killed $eflags
943943 $rax = COPY %8
944944 RET 0, $rax
945945 ; CHECK: bb.9:
946946 ; CHECK-NOT: $eflags
947 ; CHECK: %8:gr64 = CMOVE64rr %0, %1, implicit killed $eflags
947 ; CHECK: %8:gr64 = CMOV64rr %0, %1, 4, implicit killed $eflags
948948
949949 ...
950950 ---
216216 $rax = MOV64ri @global.1
217217 $rax = MOV64rm killed $rax, 1, $noreg, 0, $noreg :: (dereferenceable load 8 from @global.1)
218218 TEST64rr $rax, $rax, implicit-def $eflags
219 $rax = CMOVE64rr undef $rax, killed $rax, implicit killed $eflags
219 $rax = CMOV64rr undef $rax, killed $rax, 4, implicit killed $eflags
220220 $ecx = MOV32rm undef $rax, 1, $noreg, 0, $noreg :: (load 4 from `i32* undef`)
221221 $rdx = MOV64rm $r12, 8, $r14, 0, $noreg :: (load 8 from %ir.tmp3)
222222 $r15 = LEA64r $rdx, 1, $noreg, 1, _
282282 $edx = XOR32rr undef $edx, undef $edx, implicit-def dead $eflags, implicit-def $rdx
283283 TEST64rr $rcx, $rcx, implicit-def $eflags
284284 $esi = MOV32ri @o, implicit-def $rsi
285 $rsi = CMOVNE64rr killed $rsi, $rdx, implicit killed $eflags
285 $rsi = CMOV64rr killed $rsi, $rdx, 5, implicit killed $eflags
286286 $rsi = OR64rr killed $rsi, killed $rcx, implicit-def $eflags
287287 $rcx = LEA64r $rbp, 1, $noreg, -20, $noreg
288288 DBG_VALUE $rcx, $noreg, !46, !17, debug-location !48
289289 DBG_VALUE $rcx, $noreg, !39, !17, debug-location !44
290290 DBG_VALUE $rbp, -20, !29, !17, debug-location !36
291 $rcx = CMOVNE64rr killed $rcx, killed $rdx, implicit killed $eflags
291 $rcx = CMOV64rr killed $rcx, killed $rdx, 5, implicit killed $eflags
292292 $rcx = OR64rr killed $rcx, killed $rsi, implicit-def dead $eflags
293293 $rdx = MOVSX64rm32 $rbx, 1, $noreg, 0, $noreg :: (load 4, align 8)
294294 TEST32mr killed $rcx, 4, killed $rdx, 0, $noreg, killed $eax, implicit-def $eflags :: (load 4)
4747 ; CHECK-NEXT: $rdi = COPY $rsi
4848 ; CHECK-NEXT: $rsi = COPY $rax
4949 ; CHECK-NEXT: CMP64ri8 $rax, 9, implicit-def $eflags
50 ; CHECK-NEXT: TCRETURNdi64cc @f1, 0, 3, csr_64, implicit $rsp, implicit $eflags, implicit $ssp, implicit $rsp, implicit $rdi, implicit $rsi, implicit $rdi, implicit-def $rdi, implicit $hsi, implicit-def $hsi, implicit $sih, implicit-def $sih, implicit $sil, implicit-def $sil, implicit $si, implicit-def $si, implicit $esi, implicit-def $esi, implicit $rsi, implicit-def $rsi, implicit $hdi, implicit-def $hdi, implicit $dih, implicit-def $dih, implicit $dil, implicit-def $dil, implicit $di, implicit-def $di, implicit $edi, implicit-def $edi
50 ; CHECK-NEXT: TCRETURNdi64cc @f1, 0, 6, csr_64, implicit $rsp, implicit $eflags, implicit $ssp, implicit $rsp, implicit $rdi, implicit $rsi, implicit $rdi, implicit-def $rdi, implicit $hsi, implicit-def $hsi, implicit $sih, implicit-def $sih, implicit $sil, implicit-def $sil, implicit $si, implicit-def $si, implicit $esi, implicit-def $esi, implicit $rsi, implicit-def $rsi, implicit $hdi, implicit-def $hdi, implicit $dih, implicit-def $dih, implicit $dil, implicit-def $dil, implicit $di, implicit-def $di, implicit $edi, implicit-def $edi
5151
5252 bb.1:
5353 successors: %bb.2, %bb.3
3131 case X86II::MRMSrcReg:
3232 case X86II::MRMSrcReg4VOp3:
3333 case X86II::MRMSrcRegOp4:
34 case X86II::MRMSrcRegCC:
3435 case X86II::MRMXr:
3536 case X86II::MRM0r:
3637 case X86II::MRM1r:
117118 case X86II::MRMSrcMem:
118119 case X86II::MRMSrcMem4VOp3:
119120 case X86II::MRMSrcMemOp4:
121 case X86II::MRMSrcMemCC:
120122 case X86II::MRMXm:
121123 case X86II::MRM0m:
122124 case X86II::MRM1m:
226226 }
227227
228228 TEST_F(UopsSnippetGeneratorTest, StaticRenaming) {
229 // CMOVA32rr has tied variables, we enumerate the possible values to execute
229 // CMOV32rr has tied variables, we enumerate the possible values to execute
230230 // as many in parallel as possible.
231231
232 // - CMOVA32rr
232 // - CMOV32rr
233233 // - Op0 Explicit Def RegClass(GR32)
234234 // - Op1 Explicit Use RegClass(GR32) TiedToOp0
235235 // - Op2 Explicit Use RegClass(GR32)
236 // - Op3 Explicit Use Immediate
236237 // - Op3 Implicit Use Reg(EFLAGS)
237238 // - Var0 [Op0,Op1]
238239 // - Var1 [Op2]
239240 // - hasTiedRegisters (execution is always serial)
240241 // - hasAliasingRegisters
241 const unsigned Opcode = llvm::X86::CMOVA32rr;
242 const unsigned Opcode = llvm::X86::CMOV32rr;
242243 const auto CodeTemplates = checkAndGetCodeTemplates(Opcode);
243244 ASSERT_THAT(CodeTemplates, SizeIs(1));
244245 const auto &CT = CodeTemplates[0];
248249 ASSERT_THAT(CT.Instructions, SizeIs(kInstructionCount));
249250 std::unordered_set<unsigned> AllDefRegisters;
250251 for (const auto &IT : CT.Instructions) {
251 ASSERT_THAT(IT.VariableValues, SizeIs(2));
252 ASSERT_THAT(IT.VariableValues, SizeIs(3));
252253 AllDefRegisters.insert(IT.VariableValues[0].getReg());
253254 }
254255 EXPECT_THAT(AllDefRegisters, SizeIs(kInstructionCount))
579579 HANDLE_OPERAND(rmRegister)
580580 HANDLE_OPTIONAL(immediate)
581581 break;
582 case X86Local::MRMSrcRegCC:
583 assert(numPhysicalOperands == 3 &&
584 "Unexpected number of operands for MRMSrcRegCC");
585 HANDLE_OPERAND(roRegister)
586 HANDLE_OPERAND(rmRegister)
587 HANDLE_OPERAND(opcodeModifier)
588 break;
582589 case X86Local::MRMSrcMem:
583590 // Operand 1 is a register operand in the Reg/Opcode field.
584591 // Operand 2 is a memory operand (possibly SIB-extended)
618625 HANDLE_OPERAND(immediate) // Register in imm[7:4]
619626 HANDLE_OPERAND(memory)
620627 HANDLE_OPTIONAL(immediate)
628 break;
629 case X86Local::MRMSrcMemCC:
630 assert(numPhysicalOperands == 3 &&
631 "Unexpected number of operands for MRMSrcMemCC");
632 HANDLE_OPERAND(roRegister)
633 HANDLE_OPERAND(memory)
634 HANDLE_OPERAND(opcodeModifier)
621635 break;
622636 case X86Local::MRMXr:
623637 case X86Local::MRM0r:
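
Handling the condition operand as an opcodeModifier means it consumes no encoding bytes of its own; like the Rv forms, it lives in the opcode byte and is surfaced to the decoder as an immediate. A sketch of the encode-side composition this implies (an assumption about the emitter, not the patch's literal code):

    #include <cstdint>

    // Assumed composition: the condition is OR'd into a 16-aligned
    // base opcode, matching the alignment assert added further down.
    static uint8_t encodeCCOpcode(uint8_t BaseOpcode, uint8_t CondCode) {
      return BaseOpcode | (CondCode & 0xf);
    }
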
728742 case X86Local::MRMSrcReg:
729743 case X86Local::MRMSrcReg4VOp3:
730744 case X86Local::MRMSrcRegOp4:
745 case X86Local::MRMSrcRegCC:
731746 case X86Local::MRMXr:
732747 filter = llvm::make_unique<ModFilter>(true);
733748 break;
735750 case X86Local::MRMSrcMem:
736751 case X86Local::MRMSrcMem4VOp3:
737752 case X86Local::MRMSrcMemOp4:
753 case X86Local::MRMSrcMemCC:
738754 case X86Local::MRMXm:
739755 filter = llvm::make_unique<ModFilter>(false);
740756 break;
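
The CC forms slot into the existing register/memory buckets: MRMSrcRegCC takes the register filter (true) and MRMSrcMemCC the memory filter (false). A standalone sketch of the predicate that boolean plausibly selects, assuming ModFilter keys on ModRM.mod (register forms require mod == 0b11):

    #include <cstdint>

    // Register forms: ModRM.mod == 0b11; memory forms: anything else.
    static bool modFilterAccepts(bool IsRegisterForm, uint8_t ModRM) {
      const bool ModIsReg = (ModRM & 0xC0) == 0xC0;
      return IsRegisterForm ? ModIsReg : !ModIsReg;
    }
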
767783 assert(opcodeType && "Opcode type not set");
768784 assert(filter && "Filter not set");
769785
770 if (Form == X86Local::AddRegFrm) {
771 assert(((opcodeToSet & 7) == 0) &&
772 "ADDREG_FRM opcode not aligned");
786 if (Form == X86Local::AddRegFrm || Form == X86Local::MRMSrcRegCC ||
787 Form == X86Local::MRMSrcMemCC) {
788 unsigned Count = Form == X86Local::AddRegFrm ? 8 : 16;
789 assert(((opcodeToSet % Count) == 0) && "ADDREG_FRM opcode not aligned");
773790
774791 uint8_t currentOpcode;
775792
776 for (currentOpcode = opcodeToSet;
777 currentOpcode < opcodeToSet + 8;
793 for (currentOpcode = opcodeToSet; currentOpcode < opcodeToSet + Count;
778794 ++currentOpcode)
779795 tables.setTableFields(*opcodeType, insnContext(), currentOpcode, *filter,
780796 UID, Is32Bit, OpPrefix == 0,
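
For the new CC forms this loop registers the same UID under 16 consecutive opcodes (versus 8 for AddRegFrm), leaving the disassembler to recover the condition from the low nibble of the opcode. A standalone runnable sketch of the fan-out, assuming CMOVcc's 0F 40..0F 4F opcode range:

    #include <cassert>
    #include <cstdio>

    int main() {
      const unsigned Base = 0x40, Count = 16; // CMOVcc occupies 0F 40..0F 4F
      assert(Base % Count == 0 && "CC-form base opcode not aligned");
      for (unsigned Opc = Base; Opc < Base + Count; ++Opc)
        std::printf("opcode 0x0f%02x -> condition code %u\n", Opc, Opc & 0xf);
      return 0;
    }
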
849865 TYPE("i64i32imm_pcrel", TYPE_REL)
850866 TYPE("i16imm_pcrel", TYPE_REL)
851867 TYPE("i32imm_pcrel", TYPE_REL)
868 TYPE("ccode", TYPE_IMM)
852869 TYPE("AVX512RC", TYPE_IMM)
853870 TYPE("brtarget32", TYPE_REL)
854871 TYPE("brtarget16", TYPE_REL)
11641181 ENCODING("GR64", ENCODING_RO)
11651182 ENCODING("GR16", ENCODING_Rv)
11661183 ENCODING("GR8", ENCODING_RB)
1184 ENCODING("ccode", ENCODING_CC)
11671185 errs() << "Unhandled opcode modifier encoding " << s << "\n";
11681186 llvm_unreachable("Unhandled opcode modifier encoding");
11691187 }
104104 MRMSrcMem = 33,
105105 MRMSrcMem4VOp3 = 34,
106106 MRMSrcMemOp4 = 35,
107 MRMSrcMemCC = 36,
107108 MRMXm = 39,
108109 MRM0m = 40, MRM1m = 41, MRM2m = 42, MRM3m = 43,
109110 MRM4m = 44, MRM5m = 45, MRM6m = 46, MRM7m = 47,
111112 MRMSrcReg = 49,
112113 MRMSrcReg4VOp3 = 50,
113114 MRMSrcRegOp4 = 51,
115 MRMSrcRegCC = 52,
114116 MRMXr = 55,
115117 MRM0r = 56, MRM1r = 57, MRM2r = 58, MRM3r = 59,
116118 MRM4r = 60, MRM5r = 61, MRM6r = 62, MRM7r = 63,