llvm.org GIT mirror llvm / 35649fc
split conditional moves and setcc's out to their own file. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@115601 91177308-0d34-0410-b5e6-96231b3b80d8 Chris Lattner 10 years ago
4 changed file(s) with 838 addition(s) and 814 deletion(s). Raw diff Collapse all Expand all
11881188 "bts{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
11891189 } // Defs = [EFLAGS]
11901190
1191 // Conditional moves
1192 let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
1193 let isCommutable = 1 in {
1194 def CMOVB64rr : RI<0x42, MRMSrcReg, // if <u, GR64 = GR64
1195 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1196 "cmovb{q}\t{$src2, $dst|$dst, $src2}",
1197 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1198 X86_COND_B, EFLAGS))]>, TB;
1199 def CMOVAE64rr: RI<0x43, MRMSrcReg, // if >=u, GR64 = GR64
1200 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1201 "cmovae{q}\t{$src2, $dst|$dst, $src2}",
1202 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1203 X86_COND_AE, EFLAGS))]>, TB;
1204 def CMOVE64rr : RI<0x44, MRMSrcReg, // if ==, GR64 = GR64
1205 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1206 "cmove{q}\t{$src2, $dst|$dst, $src2}",
1207 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1208 X86_COND_E, EFLAGS))]>, TB;
1209 def CMOVNE64rr: RI<0x45, MRMSrcReg, // if !=, GR64 = GR64
1210 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1211 "cmovne{q}\t{$src2, $dst|$dst, $src2}",
1212 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1213 X86_COND_NE, EFLAGS))]>, TB;
1214 def CMOVBE64rr: RI<0x46, MRMSrcReg, // if <=u, GR64 = GR64
1215 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1216 "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
1217 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1218 X86_COND_BE, EFLAGS))]>, TB;
1219 def CMOVA64rr : RI<0x47, MRMSrcReg, // if >u, GR64 = GR64
1220 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1221 "cmova{q}\t{$src2, $dst|$dst, $src2}",
1222 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1223 X86_COND_A, EFLAGS))]>, TB;
1224 def CMOVL64rr : RI<0x4C, MRMSrcReg, // if <s, GR64 = GR64
1225 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1226 "cmovl{q}\t{$src2, $dst|$dst, $src2}",
1227 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1228 X86_COND_L, EFLAGS))]>, TB;
1229 def CMOVGE64rr: RI<0x4D, MRMSrcReg, // if >=s, GR64 = GR64
1230 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1231 "cmovge{q}\t{$src2, $dst|$dst, $src2}",
1232 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1233 X86_COND_GE, EFLAGS))]>, TB;
1234 def CMOVLE64rr: RI<0x4E, MRMSrcReg, // if <=s, GR64 = GR64
1235 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1236 "cmovle{q}\t{$src2, $dst|$dst, $src2}",
1237 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1238 X86_COND_LE, EFLAGS))]>, TB;
1239 def CMOVG64rr : RI<0x4F, MRMSrcReg, // if >s, GR64 = GR64
1240 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1241 "cmovg{q}\t{$src2, $dst|$dst, $src2}",
1242 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1243 X86_COND_G, EFLAGS))]>, TB;
1244 def CMOVS64rr : RI<0x48, MRMSrcReg, // if signed, GR64 = GR64
1245 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1246 "cmovs{q}\t{$src2, $dst|$dst, $src2}",
1247 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1248 X86_COND_S, EFLAGS))]>, TB;
1249 def CMOVNS64rr: RI<0x49, MRMSrcReg, // if !signed, GR64 = GR64
1250 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1251 "cmovns{q}\t{$src2, $dst|$dst, $src2}",
1252 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1253 X86_COND_NS, EFLAGS))]>, TB;
1254 def CMOVP64rr : RI<0x4A, MRMSrcReg, // if parity, GR64 = GR64
1255 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1256 "cmovp{q}\t{$src2, $dst|$dst, $src2}",
1257 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1258 X86_COND_P, EFLAGS))]>, TB;
1259 def CMOVNP64rr : RI<0x4B, MRMSrcReg, // if !parity, GR64 = GR64
1260 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1261 "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
1262 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1263 X86_COND_NP, EFLAGS))]>, TB;
1264 def CMOVO64rr : RI<0x40, MRMSrcReg, // if overflow, GR64 = GR64
1265 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1266 "cmovo{q}\t{$src2, $dst|$dst, $src2}",
1267 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1268 X86_COND_O, EFLAGS))]>, TB;
1269 def CMOVNO64rr : RI<0x41, MRMSrcReg, // if !overflow, GR64 = GR64
1270 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
1271 "cmovno{q}\t{$src2, $dst|$dst, $src2}",
1272 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
1273 X86_COND_NO, EFLAGS))]>, TB;
1274 } // isCommutable = 1
1275
1276 def CMOVB64rm : RI<0x42, MRMSrcMem, // if <u, GR64 = [mem64]
1277 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1278 "cmovb{q}\t{$src2, $dst|$dst, $src2}",
1279 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1280 X86_COND_B, EFLAGS))]>, TB;
1281 def CMOVAE64rm: RI<0x43, MRMSrcMem, // if >=u, GR64 = [mem64]
1282 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1283 "cmovae{q}\t{$src2, $dst|$dst, $src2}",
1284 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1285 X86_COND_AE, EFLAGS))]>, TB;
1286 def CMOVE64rm : RI<0x44, MRMSrcMem, // if ==, GR64 = [mem64]
1287 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1288 "cmove{q}\t{$src2, $dst|$dst, $src2}",
1289 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1290 X86_COND_E, EFLAGS))]>, TB;
1291 def CMOVNE64rm: RI<0x45, MRMSrcMem, // if !=, GR64 = [mem64]
1292 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1293 "cmovne{q}\t{$src2, $dst|$dst, $src2}",
1294 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1295 X86_COND_NE, EFLAGS))]>, TB;
1296 def CMOVBE64rm: RI<0x46, MRMSrcMem, // if <=u, GR64 = [mem64]
1297 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1298 "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
1299 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1300 X86_COND_BE, EFLAGS))]>, TB;
1301 def CMOVA64rm : RI<0x47, MRMSrcMem, // if >u, GR64 = [mem64]
1302 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1303 "cmova{q}\t{$src2, $dst|$dst, $src2}",
1304 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1305 X86_COND_A, EFLAGS))]>, TB;
1306 def CMOVL64rm : RI<0x4C, MRMSrcMem, // if <s, GR64 = [mem64]
1307 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1308 "cmovl{q}\t{$src2, $dst|$dst, $src2}",
1309 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1310 X86_COND_L, EFLAGS))]>, TB;
1311 def CMOVGE64rm: RI<0x4D, MRMSrcMem, // if >=s, GR64 = [mem64]
1312 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1313 "cmovge{q}\t{$src2, $dst|$dst, $src2}",
1314 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1315 X86_COND_GE, EFLAGS))]>, TB;
1316 def CMOVLE64rm: RI<0x4E, MRMSrcMem, // if <=s, GR64 = [mem64]
1317 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1318 "cmovle{q}\t{$src2, $dst|$dst, $src2}",
1319 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1320 X86_COND_LE, EFLAGS))]>, TB;
1321 def CMOVG64rm : RI<0x4F, MRMSrcMem, // if >s, GR64 = [mem64]
1322 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1323 "cmovg{q}\t{$src2, $dst|$dst, $src2}",
1324 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1325 X86_COND_G, EFLAGS))]>, TB;
1326 def CMOVS64rm : RI<0x48, MRMSrcMem, // if signed, GR64 = [mem64]
1327 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1328 "cmovs{q}\t{$src2, $dst|$dst, $src2}",
1329 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1330 X86_COND_S, EFLAGS))]>, TB;
1331 def CMOVNS64rm: RI<0x49, MRMSrcMem, // if !signed, GR64 = [mem64]
1332 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1333 "cmovns{q}\t{$src2, $dst|$dst, $src2}",
1334 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1335 X86_COND_NS, EFLAGS))]>, TB;
1336 def CMOVP64rm : RI<0x4A, MRMSrcMem, // if parity, GR64 = [mem64]
1337 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1338 "cmovp{q}\t{$src2, $dst|$dst, $src2}",
1339 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1340 X86_COND_P, EFLAGS))]>, TB;
1341 def CMOVNP64rm : RI<0x4B, MRMSrcMem, // if !parity, GR64 = [mem64]
1342 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1343 "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
1344 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1345 X86_COND_NP, EFLAGS))]>, TB;
1346 def CMOVO64rm : RI<0x40, MRMSrcMem, // if overflow, GR64 = [mem64]
1347 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1348 "cmovo{q}\t{$src2, $dst|$dst, $src2}",
1349 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1350 X86_COND_O, EFLAGS))]>, TB;
1351 def CMOVNO64rm : RI<0x41, MRMSrcMem, // if !overflow, GR64 = [mem64]
1352 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
1353 "cmovno{q}\t{$src2, $dst|$dst, $src2}",
1354 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
1355 X86_COND_NO, EFLAGS))]>, TB;
1356 } // Constraints = "$src1 = $dst"
1357
1358 // Use sbb to materialize carry flag into a GPR.
1359 // FIXME: This are pseudo ops that should be replaced with Pat<> patterns.
1360 // However, Pat<> can't replicate the destination reg into the inputs of the
1361 // result.
1362 // FIXME: Change this to have encoding Pseudo when X86MCCodeEmitter replaces
1363 // X86CodeEmitter.
1364 let Defs = [EFLAGS], Uses = [EFLAGS], isCodeGenOnly = 1 in
1365 def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "",
1366 [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
1367
1368 def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
1369 (SETB_C64r)>;
13701191
13711192
13721193 //===----------------------------------------------------------------------===//
0 //===- X86InstrCMovSetCC.td - Conditional Move and SetCC ---*- tablegen -*-===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file describes the X86 conditional move and set on condition
10 // instructions.
11 //
12 //===----------------------------------------------------------------------===//
13
14 // FIXME: Someone please sprinkle some defm's in here!
15
16 let Constraints = "$src1 = $dst" in {
17
18 // Conditional moves
19 let Uses = [EFLAGS] in {
20
21 let Predicates = [HasCMov] in {
22 let isCommutable = 1 in {
23 def CMOVB16rr : I<0x42, MRMSrcReg, // if <u, GR16 = GR16
24 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
25 "cmovb{w}\t{$src2, $dst|$dst, $src2}",
26 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
27 X86_COND_B, EFLAGS))]>,
28 TB, OpSize;
29 def CMOVB32rr : I<0x42, MRMSrcReg, // if <u, GR32 = GR32
30 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
31 "cmovb{l}\t{$src2, $dst|$dst, $src2}",
32 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
33 X86_COND_B, EFLAGS))]>,
34 TB;
35 def CMOVAE16rr: I<0x43, MRMSrcReg, // if >=u, GR16 = GR16
36 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
37 "cmovae{w}\t{$src2, $dst|$dst, $src2}",
38 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
39 X86_COND_AE, EFLAGS))]>,
40 TB, OpSize;
41 def CMOVAE32rr: I<0x43, MRMSrcReg, // if >=u, GR32 = GR32
42 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
43 "cmovae{l}\t{$src2, $dst|$dst, $src2}",
44 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
45 X86_COND_AE, EFLAGS))]>,
46 TB;
47 def CMOVE16rr : I<0x44, MRMSrcReg, // if ==, GR16 = GR16
48 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
49 "cmove{w}\t{$src2, $dst|$dst, $src2}",
50 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
51 X86_COND_E, EFLAGS))]>,
52 TB, OpSize;
53 def CMOVE32rr : I<0x44, MRMSrcReg, // if ==, GR32 = GR32
54 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
55 "cmove{l}\t{$src2, $dst|$dst, $src2}",
56 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
57 X86_COND_E, EFLAGS))]>,
58 TB;
59 def CMOVNE16rr: I<0x45, MRMSrcReg, // if !=, GR16 = GR16
60 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
61 "cmovne{w}\t{$src2, $dst|$dst, $src2}",
62 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
63 X86_COND_NE, EFLAGS))]>,
64 TB, OpSize;
65 def CMOVNE32rr: I<0x45, MRMSrcReg, // if !=, GR32 = GR32
66 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
67 "cmovne{l}\t{$src2, $dst|$dst, $src2}",
68 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
69 X86_COND_NE, EFLAGS))]>,
70 TB;
71 def CMOVBE16rr: I<0x46, MRMSrcReg, // if <=u, GR16 = GR16
72 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
73 "cmovbe{w}\t{$src2, $dst|$dst, $src2}",
74 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
75 X86_COND_BE, EFLAGS))]>,
76 TB, OpSize;
77 def CMOVBE32rr: I<0x46, MRMSrcReg, // if <=u, GR32 = GR32
78 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
79 "cmovbe{l}\t{$src2, $dst|$dst, $src2}",
80 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
81 X86_COND_BE, EFLAGS))]>,
82 TB;
83 def CMOVA16rr : I<0x47, MRMSrcReg, // if >u, GR16 = GR16
84 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
85 "cmova{w}\t{$src2, $dst|$dst, $src2}",
86 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
87 X86_COND_A, EFLAGS))]>,
88 TB, OpSize;
89 def CMOVA32rr : I<0x47, MRMSrcReg, // if >u, GR32 = GR32
90 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
91 "cmova{l}\t{$src2, $dst|$dst, $src2}",
92 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
93 X86_COND_A, EFLAGS))]>,
94 TB;
95 def CMOVL16rr : I<0x4C, MRMSrcReg, // if <s, GR16 = GR16
96 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
97 "cmovl{w}\t{$src2, $dst|$dst, $src2}",
98 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
99 X86_COND_L, EFLAGS))]>,
100 TB, OpSize;
101 def CMOVL32rr : I<0x4C, MRMSrcReg, // if <s, GR32 = GR32
102 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
103 "cmovl{l}\t{$src2, $dst|$dst, $src2}",
104 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
105 X86_COND_L, EFLAGS))]>,
106 TB;
107 def CMOVGE16rr: I<0x4D, MRMSrcReg, // if >=s, GR16 = GR16
108 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
109 "cmovge{w}\t{$src2, $dst|$dst, $src2}",
110 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
111 X86_COND_GE, EFLAGS))]>,
112 TB, OpSize;
113 def CMOVGE32rr: I<0x4D, MRMSrcReg, // if >=s, GR32 = GR32
114 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
115 "cmovge{l}\t{$src2, $dst|$dst, $src2}",
116 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
117 X86_COND_GE, EFLAGS))]>,
118 TB;
119 def CMOVLE16rr: I<0x4E, MRMSrcReg, // if <=s, GR16 = GR16
120 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
121 "cmovle{w}\t{$src2, $dst|$dst, $src2}",
122 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
123 X86_COND_LE, EFLAGS))]>,
124 TB, OpSize;
125 def CMOVLE32rr: I<0x4E, MRMSrcReg, // if <=s, GR32 = GR32
126 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
127 "cmovle{l}\t{$src2, $dst|$dst, $src2}",
128 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
129 X86_COND_LE, EFLAGS))]>,
130 TB;
131 def CMOVG16rr : I<0x4F, MRMSrcReg, // if >s, GR16 = GR16
132 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
133 "cmovg{w}\t{$src2, $dst|$dst, $src2}",
134 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
135 X86_COND_G, EFLAGS))]>,
136 TB, OpSize;
137 def CMOVG32rr : I<0x4F, MRMSrcReg, // if >s, GR32 = GR32
138 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
139 "cmovg{l}\t{$src2, $dst|$dst, $src2}",
140 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
141 X86_COND_G, EFLAGS))]>,
142 TB;
143 def CMOVS16rr : I<0x48, MRMSrcReg, // if signed, GR16 = GR16
144 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
145 "cmovs{w}\t{$src2, $dst|$dst, $src2}",
146 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
147 X86_COND_S, EFLAGS))]>,
148 TB, OpSize;
149 def CMOVS32rr : I<0x48, MRMSrcReg, // if signed, GR32 = GR32
150 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
151 "cmovs{l}\t{$src2, $dst|$dst, $src2}",
152 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
153 X86_COND_S, EFLAGS))]>,
154 TB;
155 def CMOVNS16rr: I<0x49, MRMSrcReg, // if !signed, GR16 = GR16
156 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
157 "cmovns{w}\t{$src2, $dst|$dst, $src2}",
158 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
159 X86_COND_NS, EFLAGS))]>,
160 TB, OpSize;
161 def CMOVNS32rr: I<0x49, MRMSrcReg, // if !signed, GR32 = GR32
162 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
163 "cmovns{l}\t{$src2, $dst|$dst, $src2}",
164 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
165 X86_COND_NS, EFLAGS))]>,
166 TB;
167 def CMOVP16rr : I<0x4A, MRMSrcReg, // if parity, GR16 = GR16
168 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
169 "cmovp{w}\t{$src2, $dst|$dst, $src2}",
170 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
171 X86_COND_P, EFLAGS))]>,
172 TB, OpSize;
173 def CMOVP32rr : I<0x4A, MRMSrcReg, // if parity, GR32 = GR32
174 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
175 "cmovp{l}\t{$src2, $dst|$dst, $src2}",
176 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
177 X86_COND_P, EFLAGS))]>,
178 TB;
179 def CMOVNP16rr : I<0x4B, MRMSrcReg, // if !parity, GR16 = GR16
180 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
181 "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
182 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
183 X86_COND_NP, EFLAGS))]>,
184 TB, OpSize;
185 def CMOVNP32rr : I<0x4B, MRMSrcReg, // if !parity, GR32 = GR32
186 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
187 "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
188 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
189 X86_COND_NP, EFLAGS))]>,
190 TB;
191 def CMOVO16rr : I<0x40, MRMSrcReg, // if overflow, GR16 = GR16
192 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
193 "cmovo{w}\t{$src2, $dst|$dst, $src2}",
194 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
195 X86_COND_O, EFLAGS))]>,
196 TB, OpSize;
197 def CMOVO32rr : I<0x40, MRMSrcReg, // if overflow, GR32 = GR32
198 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
199 "cmovo{l}\t{$src2, $dst|$dst, $src2}",
200 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
201 X86_COND_O, EFLAGS))]>,
202 TB;
203 def CMOVNO16rr : I<0x41, MRMSrcReg, // if !overflow, GR16 = GR16
204 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
205 "cmovno{w}\t{$src2, $dst|$dst, $src2}",
206 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
207 X86_COND_NO, EFLAGS))]>,
208 TB, OpSize;
209 def CMOVNO32rr : I<0x41, MRMSrcReg, // if !overflow, GR32 = GR32
210 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
211 "cmovno{l}\t{$src2, $dst|$dst, $src2}",
212 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
213 X86_COND_NO, EFLAGS))]>,
214 TB;
215 } // isCommutable = 1
216
217 def CMOVB16rm : I<0x42, MRMSrcMem, // if <u, GR16 = [mem16]
218 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
219 "cmovb{w}\t{$src2, $dst|$dst, $src2}",
220 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
221 X86_COND_B, EFLAGS))]>,
222 TB, OpSize;
223 def CMOVB32rm : I<0x42, MRMSrcMem, // if <u, GR32 = [mem32]
224 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
225 "cmovb{l}\t{$src2, $dst|$dst, $src2}",
226 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
227 X86_COND_B, EFLAGS))]>,
228 TB;
229 def CMOVAE16rm: I<0x43, MRMSrcMem, // if >=u, GR16 = [mem16]
230 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
231 "cmovae{w}\t{$src2, $dst|$dst, $src2}",
232 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
233 X86_COND_AE, EFLAGS))]>,
234 TB, OpSize;
235 def CMOVAE32rm: I<0x43, MRMSrcMem, // if >=u, GR32 = [mem32]
236 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
237 "cmovae{l}\t{$src2, $dst|$dst, $src2}",
238 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
239 X86_COND_AE, EFLAGS))]>,
240 TB;
241 def CMOVE16rm : I<0x44, MRMSrcMem, // if ==, GR16 = [mem16]
242 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
243 "cmove{w}\t{$src2, $dst|$dst, $src2}",
244 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
245 X86_COND_E, EFLAGS))]>,
246 TB, OpSize;
247 def CMOVE32rm : I<0x44, MRMSrcMem, // if ==, GR32 = [mem32]
248 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
249 "cmove{l}\t{$src2, $dst|$dst, $src2}",
250 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
251 X86_COND_E, EFLAGS))]>,
252 TB;
253 def CMOVNE16rm: I<0x45, MRMSrcMem, // if !=, GR16 = [mem16]
254 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
255 "cmovne{w}\t{$src2, $dst|$dst, $src2}",
256 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
257 X86_COND_NE, EFLAGS))]>,
258 TB, OpSize;
259 def CMOVNE32rm: I<0x45, MRMSrcMem, // if !=, GR32 = [mem32]
260 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
261 "cmovne{l}\t{$src2, $dst|$dst, $src2}",
262 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
263 X86_COND_NE, EFLAGS))]>,
264 TB;
265 def CMOVBE16rm: I<0x46, MRMSrcMem, // if <=u, GR16 = [mem16]
266 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
267 "cmovbe{w}\t{$src2, $dst|$dst, $src2}",
268 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
269 X86_COND_BE, EFLAGS))]>,
270 TB, OpSize;
271 def CMOVBE32rm: I<0x46, MRMSrcMem, // if <=u, GR32 = [mem32]
272 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
273 "cmovbe{l}\t{$src2, $dst|$dst, $src2}",
274 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
275 X86_COND_BE, EFLAGS))]>,
276 TB;
277 def CMOVA16rm : I<0x47, MRMSrcMem, // if >u, GR16 = [mem16]
278 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
279 "cmova{w}\t{$src2, $dst|$dst, $src2}",
280 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
281 X86_COND_A, EFLAGS))]>,
282 TB, OpSize;
283 def CMOVA32rm : I<0x47, MRMSrcMem, // if >u, GR32 = [mem32]
284 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
285 "cmova{l}\t{$src2, $dst|$dst, $src2}",
286 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
287 X86_COND_A, EFLAGS))]>,
288 TB;
289 def CMOVL16rm : I<0x4C, MRMSrcMem, // if <s, GR16 = [mem16]
290 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
291 "cmovl{w}\t{$src2, $dst|$dst, $src2}",
292 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
293 X86_COND_L, EFLAGS))]>,
294 TB, OpSize;
295 def CMOVL32rm : I<0x4C, MRMSrcMem, // if <s, GR32 = [mem32]
296 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
297 "cmovl{l}\t{$src2, $dst|$dst, $src2}",
298 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
299 X86_COND_L, EFLAGS))]>,
300 TB;
301 def CMOVGE16rm: I<0x4D, MRMSrcMem, // if >=s, GR16 = [mem16]
302 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
303 "cmovge{w}\t{$src2, $dst|$dst, $src2}",
304 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
305 X86_COND_GE, EFLAGS))]>,
306 TB, OpSize;
307 def CMOVGE32rm: I<0x4D, MRMSrcMem, // if >=s, GR32 = [mem32]
308 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
309 "cmovge{l}\t{$src2, $dst|$dst, $src2}",
310 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
311 X86_COND_GE, EFLAGS))]>,
312 TB;
313 def CMOVLE16rm: I<0x4E, MRMSrcMem, // if <=s, GR16 = [mem16]
314 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
315 "cmovle{w}\t{$src2, $dst|$dst, $src2}",
316 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
317 X86_COND_LE, EFLAGS))]>,
318 TB, OpSize;
319 def CMOVLE32rm: I<0x4E, MRMSrcMem, // if <=s, GR32 = [mem32]
320 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
321 "cmovle{l}\t{$src2, $dst|$dst, $src2}",
322 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
323 X86_COND_LE, EFLAGS))]>,
324 TB;
325 def CMOVG16rm : I<0x4F, MRMSrcMem, // if >s, GR16 = [mem16]
326 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
327 "cmovg{w}\t{$src2, $dst|$dst, $src2}",
328 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
329 X86_COND_G, EFLAGS))]>,
330 TB, OpSize;
331 def CMOVG32rm : I<0x4F, MRMSrcMem, // if >s, GR32 = [mem32]
332 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
333 "cmovg{l}\t{$src2, $dst|$dst, $src2}",
334 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
335 X86_COND_G, EFLAGS))]>,
336 TB;
337 def CMOVS16rm : I<0x48, MRMSrcMem, // if signed, GR16 = [mem16]
338 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
339 "cmovs{w}\t{$src2, $dst|$dst, $src2}",
340 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
341 X86_COND_S, EFLAGS))]>,
342 TB, OpSize;
343 def CMOVS32rm : I<0x48, MRMSrcMem, // if signed, GR32 = [mem32]
344 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
345 "cmovs{l}\t{$src2, $dst|$dst, $src2}",
346 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
347 X86_COND_S, EFLAGS))]>,
348 TB;
349 def CMOVNS16rm: I<0x49, MRMSrcMem, // if !signed, GR16 = [mem16]
350 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
351 "cmovns{w}\t{$src2, $dst|$dst, $src2}",
352 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
353 X86_COND_NS, EFLAGS))]>,
354 TB, OpSize;
355 def CMOVNS32rm: I<0x49, MRMSrcMem, // if !signed, GR32 = [mem32]
356 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
357 "cmovns{l}\t{$src2, $dst|$dst, $src2}",
358 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
359 X86_COND_NS, EFLAGS))]>,
360 TB;
361 def CMOVP16rm : I<0x4A, MRMSrcMem, // if parity, GR16 = [mem16]
362 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
363 "cmovp{w}\t{$src2, $dst|$dst, $src2}",
364 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
365 X86_COND_P, EFLAGS))]>,
366 TB, OpSize;
367 def CMOVP32rm : I<0x4A, MRMSrcMem, // if parity, GR32 = [mem32]
368 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
369 "cmovp{l}\t{$src2, $dst|$dst, $src2}",
370 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
371 X86_COND_P, EFLAGS))]>,
372 TB;
373 def CMOVNP16rm : I<0x4B, MRMSrcMem, // if !parity, GR16 = [mem16]
374 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
375 "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
376 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
377 X86_COND_NP, EFLAGS))]>,
378 TB, OpSize;
379 def CMOVNP32rm : I<0x4B, MRMSrcMem, // if !parity, GR32 = [mem32]
380 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
381 "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
382 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
383 X86_COND_NP, EFLAGS))]>,
384 TB;
385 def CMOVO16rm : I<0x40, MRMSrcMem, // if overflow, GR16 = [mem16]
386 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
387 "cmovo{w}\t{$src2, $dst|$dst, $src2}",
388 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
389 X86_COND_O, EFLAGS))]>,
390 TB, OpSize;
391 def CMOVO32rm : I<0x40, MRMSrcMem, // if overflow, GR32 = [mem32]
392 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
393 "cmovo{l}\t{$src2, $dst|$dst, $src2}",
394 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
395 X86_COND_O, EFLAGS))]>,
396 TB;
397 def CMOVNO16rm : I<0x41, MRMSrcMem, // if !overflow, GR16 = [mem16]
398 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
399 "cmovno{w}\t{$src2, $dst|$dst, $src2}",
400 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
401 X86_COND_NO, EFLAGS))]>,
402 TB, OpSize;
403 def CMOVNO32rm : I<0x41, MRMSrcMem, // if !overflow, GR32 = [mem32]
404 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
405 "cmovno{l}\t{$src2, $dst|$dst, $src2}",
406 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
407 X86_COND_NO, EFLAGS))]>,
408 TB;
409 } // Predicates = [HasCMov]
410
411 // X86 doesn't have 8-bit conditional moves. Use a customInserter to
412 // emit control flow. An alternative to this is to mark i8 SELECT as Promote,
413 // however that requires promoting the operands, and can induce additional
414 // i8 register pressure. Note that CMOV_GR8 is conservatively considered to
415 // clobber EFLAGS, because if one of the operands is zero, the expansion
416 // could involve an xor.
417 let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
418 def CMOV_GR8 : I<0, Pseudo,
419 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
420 "#CMOV_GR8 PSEUDO!",
421 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
422 imm:$cond, EFLAGS))]>;
423
424 let Predicates = [NoCMov] in {
425 def CMOV_GR32 : I<0, Pseudo,
426 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
427 "#CMOV_GR32* PSEUDO!",
428 [(set GR32:$dst,
429 (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
430 def CMOV_GR16 : I<0, Pseudo,
431 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
432 "#CMOV_GR16* PSEUDO!",
433 [(set GR16:$dst,
434 (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
435 def CMOV_RFP32 : I<0, Pseudo,
436 (outs RFP32:$dst),
437 (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
438 "#CMOV_RFP32 PSEUDO!",
439 [(set RFP32:$dst,
440 (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
441 EFLAGS))]>;
442 def CMOV_RFP64 : I<0, Pseudo,
443 (outs RFP64:$dst),
444 (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
445 "#CMOV_RFP64 PSEUDO!",
446 [(set RFP64:$dst,
447 (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
448 EFLAGS))]>;
449 def CMOV_RFP80 : I<0, Pseudo,
450 (outs RFP80:$dst),
451 (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
452 "#CMOV_RFP80 PSEUDO!",
453 [(set RFP80:$dst,
454 (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
455 EFLAGS))]>;
456 } // Predicates = [NoCMov]
457 } // UsesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
458 } // Uses = [EFLAGS]
459
460 } // Constraints = "$src1 = $dst" in
461
462
463 // Conditional moves
464 let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
465 let isCommutable = 1 in {
466 def CMOVB64rr : RI<0x42, MRMSrcReg, // if <u, GR64 = GR64
467 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
468 "cmovb{q}\t{$src2, $dst|$dst, $src2}",
469 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
470 X86_COND_B, EFLAGS))]>, TB;
471 def CMOVAE64rr: RI<0x43, MRMSrcReg, // if >=u, GR64 = GR64
472 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
473 "cmovae{q}\t{$src2, $dst|$dst, $src2}",
474 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
475 X86_COND_AE, EFLAGS))]>, TB;
476 def CMOVE64rr : RI<0x44, MRMSrcReg, // if ==, GR64 = GR64
477 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
478 "cmove{q}\t{$src2, $dst|$dst, $src2}",
479 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
480 X86_COND_E, EFLAGS))]>, TB;
481 def CMOVNE64rr: RI<0x45, MRMSrcReg, // if !=, GR64 = GR64
482 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
483 "cmovne{q}\t{$src2, $dst|$dst, $src2}",
484 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
485 X86_COND_NE, EFLAGS))]>, TB;
486 def CMOVBE64rr: RI<0x46, MRMSrcReg, // if <=u, GR64 = GR64
487 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
488 "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
489 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
490 X86_COND_BE, EFLAGS))]>, TB;
491 def CMOVA64rr : RI<0x47, MRMSrcReg, // if >u, GR64 = GR64
492 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
493 "cmova{q}\t{$src2, $dst|$dst, $src2}",
494 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
495 X86_COND_A, EFLAGS))]>, TB;
496 def CMOVL64rr : RI<0x4C, MRMSrcReg, // if <s, GR64 = GR64
497 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
498 "cmovl{q}\t{$src2, $dst|$dst, $src2}",
499 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
500 X86_COND_L, EFLAGS))]>, TB;
501 def CMOVGE64rr: RI<0x4D, MRMSrcReg, // if >=s, GR64 = GR64
502 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
503 "cmovge{q}\t{$src2, $dst|$dst, $src2}",
504 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
505 X86_COND_GE, EFLAGS))]>, TB;
506 def CMOVLE64rr: RI<0x4E, MRMSrcReg, // if <=s, GR64 = GR64
507 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
508 "cmovle{q}\t{$src2, $dst|$dst, $src2}",
509 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
510 X86_COND_LE, EFLAGS))]>, TB;
511 def CMOVG64rr : RI<0x4F, MRMSrcReg, // if >s, GR64 = GR64
512 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
513 "cmovg{q}\t{$src2, $dst|$dst, $src2}",
514 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
515 X86_COND_G, EFLAGS))]>, TB;
516 def CMOVS64rr : RI<0x48, MRMSrcReg, // if signed, GR64 = GR64
517 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
518 "cmovs{q}\t{$src2, $dst|$dst, $src2}",
519 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
520 X86_COND_S, EFLAGS))]>, TB;
521 def CMOVNS64rr: RI<0x49, MRMSrcReg, // if !signed, GR64 = GR64
522 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
523 "cmovns{q}\t{$src2, $dst|$dst, $src2}",
524 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
525 X86_COND_NS, EFLAGS))]>, TB;
526 def CMOVP64rr : RI<0x4A, MRMSrcReg, // if parity, GR64 = GR64
527 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
528 "cmovp{q}\t{$src2, $dst|$dst, $src2}",
529 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
530 X86_COND_P, EFLAGS))]>, TB;
531 def CMOVNP64rr : RI<0x4B, MRMSrcReg, // if !parity, GR64 = GR64
532 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
533 "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
534 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
535 X86_COND_NP, EFLAGS))]>, TB;
536 def CMOVO64rr : RI<0x40, MRMSrcReg, // if overflow, GR64 = GR64
537 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
538 "cmovo{q}\t{$src2, $dst|$dst, $src2}",
539 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
540 X86_COND_O, EFLAGS))]>, TB;
541 def CMOVNO64rr : RI<0x41, MRMSrcReg, // if !overflow, GR64 = GR64
542 (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
543 "cmovno{q}\t{$src2, $dst|$dst, $src2}",
544 [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
545 X86_COND_NO, EFLAGS))]>, TB;
546 } // isCommutable = 1
547
548 def CMOVB64rm : RI<0x42, MRMSrcMem, // if
549 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
550 "cmovb{q}\t{$src2, $dst|$dst, $src2}",
551 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
552 X86_COND_B, EFLAGS))]>, TB;
553 def CMOVAE64rm: RI<0x43, MRMSrcMem, // if >=u, GR64 = [mem64]
554 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
555 "cmovae{q}\t{$src2, $dst|$dst, $src2}",
556 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
557 X86_COND_AE, EFLAGS))]>, TB;
558 def CMOVE64rm : RI<0x44, MRMSrcMem, // if ==, GR64 = [mem64]
559 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
560 "cmove{q}\t{$src2, $dst|$dst, $src2}",
561 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
562 X86_COND_E, EFLAGS))]>, TB;
563 def CMOVNE64rm: RI<0x45, MRMSrcMem, // if !=, GR64 = [mem64]
564 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
565 "cmovne{q}\t{$src2, $dst|$dst, $src2}",
566 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
567 X86_COND_NE, EFLAGS))]>, TB;
568 def CMOVBE64rm: RI<0x46, MRMSrcMem, // if <=u, GR64 = [mem64]
569 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
570 "cmovbe{q}\t{$src2, $dst|$dst, $src2}",
571 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
572 X86_COND_BE, EFLAGS))]>, TB;
573 def CMOVA64rm : RI<0x47, MRMSrcMem, // if >u, GR64 = [mem64]
574 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
575 "cmova{q}\t{$src2, $dst|$dst, $src2}",
576 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
577 X86_COND_A, EFLAGS))]>, TB;
578 def CMOVL64rm : RI<0x4C, MRMSrcMem, // if
579 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
580 "cmovl{q}\t{$src2, $dst|$dst, $src2}",
581 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
582 X86_COND_L, EFLAGS))]>, TB;
583 def CMOVGE64rm: RI<0x4D, MRMSrcMem, // if >=s, GR64 = [mem64]
584 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
585 "cmovge{q}\t{$src2, $dst|$dst, $src2}",
586 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
587 X86_COND_GE, EFLAGS))]>, TB;
588 def CMOVLE64rm: RI<0x4E, MRMSrcMem, // if <=s, GR64 = [mem64]
589 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
590 "cmovle{q}\t{$src2, $dst|$dst, $src2}",
591 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
592 X86_COND_LE, EFLAGS))]>, TB;
593 def CMOVG64rm : RI<0x4F, MRMSrcMem, // if >s, GR64 = [mem64]
594 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
595 "cmovg{q}\t{$src2, $dst|$dst, $src2}",
596 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
597 X86_COND_G, EFLAGS))]>, TB;
598 def CMOVS64rm : RI<0x48, MRMSrcMem, // if signed, GR64 = [mem64]
599 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
600 "cmovs{q}\t{$src2, $dst|$dst, $src2}",
601 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
602 X86_COND_S, EFLAGS))]>, TB;
603 def CMOVNS64rm: RI<0x49, MRMSrcMem, // if !signed, GR64 = [mem64]
604 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
605 "cmovns{q}\t{$src2, $dst|$dst, $src2}",
606 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
607 X86_COND_NS, EFLAGS))]>, TB;
608 def CMOVP64rm : RI<0x4A, MRMSrcMem, // if parity, GR64 = [mem64]
609 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
610 "cmovp{q}\t{$src2, $dst|$dst, $src2}",
611 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
612 X86_COND_P, EFLAGS))]>, TB;
613 def CMOVNP64rm : RI<0x4B, MRMSrcMem, // if !parity, GR64 = [mem64]
614 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
615 "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
616 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
617 X86_COND_NP, EFLAGS))]>, TB;
618 def CMOVO64rm : RI<0x40, MRMSrcMem, // if overflow, GR64 = [mem64]
619 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
620 "cmovo{q}\t{$src2, $dst|$dst, $src2}",
621 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
622 X86_COND_O, EFLAGS))]>, TB;
623 def CMOVNO64rm : RI<0x41, MRMSrcMem, // if !overflow, GR64 = [mem64]
624 (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
625 "cmovno{q}\t{$src2, $dst|$dst, $src2}",
626 [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
627 X86_COND_NO, EFLAGS))]>, TB;
628 } // Constraints = "$src1 = $dst"
629
630
631
632 let Uses = [EFLAGS] in {
633 // Use sbb to materialize carry bit.
634 let Defs = [EFLAGS], isCodeGenOnly = 1 in {
635 // FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
636 // However, Pat<> can't replicate the destination reg into the inputs of the
637 // result.
638 // FIXME: Change these to have encoding Pseudo when X86MCCodeEmitter replaces
639 // X86CodeEmitter.
640 def SETB_C8r : I<0x18, MRMInitReg, (outs GR8:$dst), (ins), "",
641 [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
642 def SETB_C16r : I<0x19, MRMInitReg, (outs GR16:$dst), (ins), "",
643 [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>,
644 OpSize;
645 def SETB_C32r : I<0x19, MRMInitReg, (outs GR32:$dst), (ins), "",
646 [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
647 } // isCodeGenOnly
648
649 def SETEr : I<0x94, MRM0r,
650 (outs GR8 :$dst), (ins),
651 "sete\t$dst",
652 [(set GR8:$dst, (X86setcc X86_COND_E, EFLAGS))]>,
653 TB; // GR8 = ==
654 def SETEm : I<0x94, MRM0m,
655 (outs), (ins i8mem:$dst),
656 "sete\t$dst",
657 [(store (X86setcc X86_COND_E, EFLAGS), addr:$dst)]>,
658 TB; // [mem8] = ==
659
660 def SETNEr : I<0x95, MRM0r,
661 (outs GR8 :$dst), (ins),
662 "setne\t$dst",
663 [(set GR8:$dst, (X86setcc X86_COND_NE, EFLAGS))]>,
664 TB; // GR8 = !=
665 def SETNEm : I<0x95, MRM0m,
666 (outs), (ins i8mem:$dst),
667 "setne\t$dst",
668 [(store (X86setcc X86_COND_NE, EFLAGS), addr:$dst)]>,
669 TB; // [mem8] = !=
670
671 def SETLr : I<0x9C, MRM0r,
672 (outs GR8 :$dst), (ins),
673 "setl\t$dst",
674 [(set GR8:$dst, (X86setcc X86_COND_L, EFLAGS))]>,
675 TB; // GR8 = < signed
676 def SETLm : I<0x9C, MRM0m,
677 (outs), (ins i8mem:$dst),
678 "setl\t$dst",
679 [(store (X86setcc X86_COND_L, EFLAGS), addr:$dst)]>,
680 TB; // [mem8] = < signed
681
682 def SETGEr : I<0x9D, MRM0r,
683 (outs GR8 :$dst), (ins),
684 "setge\t$dst",
685 [(set GR8:$dst, (X86setcc X86_COND_GE, EFLAGS))]>,
686 TB; // GR8 = >= signed
687 def SETGEm : I<0x9D, MRM0m,
688 (outs), (ins i8mem:$dst),
689 "setge\t$dst",
690 [(store (X86setcc X86_COND_GE, EFLAGS), addr:$dst)]>,
691 TB; // [mem8] = >= signed
692
693 def SETLEr : I<0x9E, MRM0r,
694 (outs GR8 :$dst), (ins),
695 "setle\t$dst",
696 [(set GR8:$dst, (X86setcc X86_COND_LE, EFLAGS))]>,
697 TB; // GR8 = <= signed
698 def SETLEm : I<0x9E, MRM0m,
699 (outs), (ins i8mem:$dst),
700 "setle\t$dst",
701 [(store (X86setcc X86_COND_LE, EFLAGS), addr:$dst)]>,
702 TB; // [mem8] = <= signed
703
704 def SETGr : I<0x9F, MRM0r,
705 (outs GR8 :$dst), (ins),
706 "setg\t$dst",
707 [(set GR8:$dst, (X86setcc X86_COND_G, EFLAGS))]>,
708 TB; // GR8 = > signed
709 def SETGm : I<0x9F, MRM0m,
710 (outs), (ins i8mem:$dst),
711 "setg\t$dst",
712 [(store (X86setcc X86_COND_G, EFLAGS), addr:$dst)]>,
713 TB; // [mem8] = > signed
714
715 def SETBr : I<0x92, MRM0r,
716 (outs GR8 :$dst), (ins),
717 "setb\t$dst",
718 [(set GR8:$dst, (X86setcc X86_COND_B, EFLAGS))]>,
719 TB; // GR8 = < unsign
720 def SETBm : I<0x92, MRM0m,
721 (outs), (ins i8mem:$dst),
722 "setb\t$dst",
723 [(store (X86setcc X86_COND_B, EFLAGS), addr:$dst)]>,
724 TB; // [mem8] = < unsign
725
726 def SETAEr : I<0x93, MRM0r,
727 (outs GR8 :$dst), (ins),
728 "setae\t$dst",
729 [(set GR8:$dst, (X86setcc X86_COND_AE, EFLAGS))]>,
730 TB; // GR8 = >= unsign
731 def SETAEm : I<0x93, MRM0m,
732 (outs), (ins i8mem:$dst),
733 "setae\t$dst",
734 [(store (X86setcc X86_COND_AE, EFLAGS), addr:$dst)]>,
735 TB; // [mem8] = >= unsign
736
737 def SETBEr : I<0x96, MRM0r,
738 (outs GR8 :$dst), (ins),
739 "setbe\t$dst",
740 [(set GR8:$dst, (X86setcc X86_COND_BE, EFLAGS))]>,
741 TB; // GR8 = <= unsign
742 def SETBEm : I<0x96, MRM0m,
743 (outs), (ins i8mem:$dst),
744 "setbe\t$dst",
745 [(store (X86setcc X86_COND_BE, EFLAGS), addr:$dst)]>,
746 TB; // [mem8] = <= unsign
747
748 def SETAr : I<0x97, MRM0r,
749 (outs GR8 :$dst), (ins),
750 "seta\t$dst",
751 [(set GR8:$dst, (X86setcc X86_COND_A, EFLAGS))]>,
752 TB; // GR8 = > signed
753 def SETAm : I<0x97, MRM0m,
754 (outs), (ins i8mem:$dst),
755 "seta\t$dst",
756 [(store (X86setcc X86_COND_A, EFLAGS), addr:$dst)]>,
757 TB; // [mem8] = > signed
758
759 def SETSr : I<0x98, MRM0r,
760 (outs GR8 :$dst), (ins),
761 "sets\t$dst",
762 [(set GR8:$dst, (X86setcc X86_COND_S, EFLAGS))]>,
763 TB; // GR8 =
764 def SETSm : I<0x98, MRM0m,
765 (outs), (ins i8mem:$dst),
766 "sets\t$dst",
767 [(store (X86setcc X86_COND_S, EFLAGS), addr:$dst)]>,
768 TB; // [mem8] =
769 def SETNSr : I<0x99, MRM0r,
770 (outs GR8 :$dst), (ins),
771 "setns\t$dst",
772 [(set GR8:$dst, (X86setcc X86_COND_NS, EFLAGS))]>,
773 TB; // GR8 = !
774 def SETNSm : I<0x99, MRM0m,
775 (outs), (ins i8mem:$dst),
776 "setns\t$dst",
777 [(store (X86setcc X86_COND_NS, EFLAGS), addr:$dst)]>,
778 TB; // [mem8] = !
779
780 def SETPr : I<0x9A, MRM0r,
781 (outs GR8 :$dst), (ins),
782 "setp\t$dst",
783 [(set GR8:$dst, (X86setcc X86_COND_P, EFLAGS))]>,
784 TB; // GR8 = parity
785 def SETPm : I<0x9A, MRM0m,
786 (outs), (ins i8mem:$dst),
787 "setp\t$dst",
788 [(store (X86setcc X86_COND_P, EFLAGS), addr:$dst)]>,
789 TB; // [mem8] = parity
790 def SETNPr : I<0x9B, MRM0r,
791 (outs GR8 :$dst), (ins),
792 "setnp\t$dst",
793 [(set GR8:$dst, (X86setcc X86_COND_NP, EFLAGS))]>,
794 TB; // GR8 = not parity
795 def SETNPm : I<0x9B, MRM0m,
796 (outs), (ins i8mem:$dst),
797 "setnp\t$dst",
798 [(store (X86setcc X86_COND_NP, EFLAGS), addr:$dst)]>,
799 TB; // [mem8] = not parity
800
801 def SETOr : I<0x90, MRM0r,
802 (outs GR8 :$dst), (ins),
803 "seto\t$dst",
804 [(set GR8:$dst, (X86setcc X86_COND_O, EFLAGS))]>,
805 TB; // GR8 = overflow
806 def SETOm : I<0x90, MRM0m,
807 (outs), (ins i8mem:$dst),
808 "seto\t$dst",
809 [(store (X86setcc X86_COND_O, EFLAGS), addr:$dst)]>,
810 TB; // [mem8] = overflow
811 def SETNOr : I<0x91, MRM0r,
812 (outs GR8 :$dst), (ins),
813 "setno\t$dst",
814 [(set GR8:$dst, (X86setcc X86_COND_NO, EFLAGS))]>,
815 TB; // GR8 = not overflow
816 def SETNOm : I<0x91, MRM0m,
817 (outs), (ins i8mem:$dst),
818 "setno\t$dst",
819 [(store (X86setcc X86_COND_NO, EFLAGS), addr:$dst)]>,
820 TB; // [mem8] = not overflow
821 } // Uses = [EFLAGS]
822
143143 let AddedComplexity = 1, isReMaterializable = 1, isAsCheapAsAMove = 1 in
144144 def MOV64ri64i32 : Ii32<0xB8, AddRegFrm, (outs GR64:$dst), (ins i64i32imm:$src),
145145 "", [(set GR64:$dst, i64immZExt32:$src)]>;
146
147
148 // Use sbb to materialize carry flag into a GPR.
149 // FIXME: This are pseudo ops that should be replaced with Pat<> patterns.
150 // However, Pat<> can't replicate the destination reg into the inputs of the
151 // result.
152 // FIXME: Change this to have encoding Pseudo when X86MCCodeEmitter replaces
153 // X86CodeEmitter.
154 let Defs = [EFLAGS], Uses = [EFLAGS], isCodeGenOnly = 1 in
155 def SETB_C64r : RI<0x19, MRMInitReg, (outs GR64:$dst), (ins), "",
156 [(set GR64:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
157
158 def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
159 (SETB_C64r)>;
146160
147161
148162 //===----------------------------------------------------------------------===//
913913 // Two address Instructions.
914914 //
915915 let Constraints = "$src1 = $dst" in {
916
917 // Conditional moves
918 let Uses = [EFLAGS] in {
919
920 let Predicates = [HasCMov] in {
921 let isCommutable = 1 in {
922 def CMOVB16rr : I<0x42, MRMSrcReg, // if
923 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
924 "cmovb{w}\t{$src2, $dst|$dst, $src2}",
925 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
926 X86_COND_B, EFLAGS))]>,
927 TB, OpSize;
928 def CMOVB32rr : I<0x42, MRMSrcReg, // if
929 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
930 "cmovb{l}\t{$src2, $dst|$dst, $src2}",
931 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
932 X86_COND_B, EFLAGS))]>,
933 TB;
934 def CMOVAE16rr: I<0x43, MRMSrcReg, // if >=u, GR16 = GR16
935 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
936 "cmovae{w}\t{$src2, $dst|$dst, $src2}",
937 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
938 X86_COND_AE, EFLAGS))]>,
939 TB, OpSize;
940 def CMOVAE32rr: I<0x43, MRMSrcReg, // if >=u, GR32 = GR32
941 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
942 "cmovae{l}\t{$src2, $dst|$dst, $src2}",
943 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
944 X86_COND_AE, EFLAGS))]>,
945 TB;
946 def CMOVE16rr : I<0x44, MRMSrcReg, // if ==, GR16 = GR16
947 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
948 "cmove{w}\t{$src2, $dst|$dst, $src2}",
949 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
950 X86_COND_E, EFLAGS))]>,
951 TB, OpSize;
952 def CMOVE32rr : I<0x44, MRMSrcReg, // if ==, GR32 = GR32
953 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
954 "cmove{l}\t{$src2, $dst|$dst, $src2}",
955 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
956 X86_COND_E, EFLAGS))]>,
957 TB;
958 def CMOVNE16rr: I<0x45, MRMSrcReg, // if !=, GR16 = GR16
959 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
960 "cmovne{w}\t{$src2, $dst|$dst, $src2}",
961 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
962 X86_COND_NE, EFLAGS))]>,
963 TB, OpSize;
964 def CMOVNE32rr: I<0x45, MRMSrcReg, // if !=, GR32 = GR32
965 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
966 "cmovne{l}\t{$src2, $dst|$dst, $src2}",
967 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
968 X86_COND_NE, EFLAGS))]>,
969 TB;
970 def CMOVBE16rr: I<0x46, MRMSrcReg, // if <=u, GR16 = GR16
971 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
972 "cmovbe{w}\t{$src2, $dst|$dst, $src2}",
973 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
974 X86_COND_BE, EFLAGS))]>,
975 TB, OpSize;
976 def CMOVBE32rr: I<0x46, MRMSrcReg, // if <=u, GR32 = GR32
977 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
978 "cmovbe{l}\t{$src2, $dst|$dst, $src2}",
979 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
980 X86_COND_BE, EFLAGS))]>,
981 TB;
982 def CMOVA16rr : I<0x47, MRMSrcReg, // if >u, GR16 = GR16
983 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
984 "cmova{w}\t{$src2, $dst|$dst, $src2}",
985 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
986 X86_COND_A, EFLAGS))]>,
987 TB, OpSize;
988 def CMOVA32rr : I<0x47, MRMSrcReg, // if >u, GR32 = GR32
989 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
990 "cmova{l}\t{$src2, $dst|$dst, $src2}",
991 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
992 X86_COND_A, EFLAGS))]>,
993 TB;
994 def CMOVL16rr : I<0x4C, MRMSrcReg, // if
995 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
996 "cmovl{w}\t{$src2, $dst|$dst, $src2}",
997 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
998 X86_COND_L, EFLAGS))]>,
999 TB, OpSize;
1000 def CMOVL32rr : I<0x4C, MRMSrcReg, // if
1001 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1002 "cmovl{l}\t{$src2, $dst|$dst, $src2}",
1003 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
1004 X86_COND_L, EFLAGS))]>,
1005 TB;
1006 def CMOVGE16rr: I<0x4D, MRMSrcReg, // if >=s, GR16 = GR16
1007 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1008 "cmovge{w}\t{$src2, $dst|$dst, $src2}",
1009 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
1010 X86_COND_GE, EFLAGS))]>,
1011 TB, OpSize;
1012 def CMOVGE32rr: I<0x4D, MRMSrcReg, // if >=s, GR32 = GR32
1013 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1014 "cmovge{l}\t{$src2, $dst|$dst, $src2}",
1015 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
1016 X86_COND_GE, EFLAGS))]>,
1017 TB;
1018 def CMOVLE16rr: I<0x4E, MRMSrcReg, // if <=s, GR16 = GR16
1019 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1020 "cmovle{w}\t{$src2, $dst|$dst, $src2}",
1021 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
1022 X86_COND_LE, EFLAGS))]>,
1023 TB, OpSize;
1024 def CMOVLE32rr: I<0x4E, MRMSrcReg, // if <=s, GR32 = GR32
1025 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1026 "cmovle{l}\t{$src2, $dst|$dst, $src2}",
1027 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
1028 X86_COND_LE, EFLAGS))]>,
1029 TB;
1030 def CMOVG16rr : I<0x4F, MRMSrcReg, // if >s, GR16 = GR16
1031 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1032 "cmovg{w}\t{$src2, $dst|$dst, $src2}",
1033 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
1034 X86_COND_G, EFLAGS))]>,
1035 TB, OpSize;
1036 def CMOVG32rr : I<0x4F, MRMSrcReg, // if >s, GR32 = GR32
1037 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1038 "cmovg{l}\t{$src2, $dst|$dst, $src2}",
1039 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
1040 X86_COND_G, EFLAGS))]>,
1041 TB;
1042 def CMOVS16rr : I<0x48, MRMSrcReg, // if signed, GR16 = GR16
1043 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1044 "cmovs{w}\t{$src2, $dst|$dst, $src2}",
1045 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
1046 X86_COND_S, EFLAGS))]>,
1047 TB, OpSize;
1048 def CMOVS32rr : I<0x48, MRMSrcReg, // if signed, GR32 = GR32
1049 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1050 "cmovs{l}\t{$src2, $dst|$dst, $src2}",
1051 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
1052 X86_COND_S, EFLAGS))]>,
1053 TB;
1054 def CMOVNS16rr: I<0x49, MRMSrcReg, // if !signed, GR16 = GR16
1055 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1056 "cmovns{w}\t{$src2, $dst|$dst, $src2}",
1057 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
1058 X86_COND_NS, EFLAGS))]>,
1059 TB, OpSize;
1060 def CMOVNS32rr: I<0x49, MRMSrcReg, // if !signed, GR32 = GR32
1061 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1062 "cmovns{l}\t{$src2, $dst|$dst, $src2}",
1063 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
1064 X86_COND_NS, EFLAGS))]>,
1065 TB;
1066 def CMOVP16rr : I<0x4A, MRMSrcReg, // if parity, GR16 = GR16
1067 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1068 "cmovp{w}\t{$src2, $dst|$dst, $src2}",
1069 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
1070 X86_COND_P, EFLAGS))]>,
1071 TB, OpSize;
1072 def CMOVP32rr : I<0x4A, MRMSrcReg, // if parity, GR32 = GR32
1073 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1074 "cmovp{l}\t{$src2, $dst|$dst, $src2}",
1075 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
1076 X86_COND_P, EFLAGS))]>,
1077 TB;
1078 def CMOVNP16rr : I<0x4B, MRMSrcReg, // if !parity, GR16 = GR16
1079 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1080 "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
1081 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
1082 X86_COND_NP, EFLAGS))]>,
1083 TB, OpSize;
1084 def CMOVNP32rr : I<0x4B, MRMSrcReg, // if !parity, GR32 = GR32
1085 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1086 "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
1087 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
1088 X86_COND_NP, EFLAGS))]>,
1089 TB;
1090 def CMOVO16rr : I<0x40, MRMSrcReg, // if overflow, GR16 = GR16
1091 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1092 "cmovo{w}\t{$src2, $dst|$dst, $src2}",
1093 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
1094 X86_COND_O, EFLAGS))]>,
1095 TB, OpSize;
1096 def CMOVO32rr : I<0x40, MRMSrcReg, // if overflow, GR32 = GR32
1097 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1098 "cmovo{l}\t{$src2, $dst|$dst, $src2}",
1099 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
1100 X86_COND_O, EFLAGS))]>,
1101 TB;
1102 def CMOVNO16rr : I<0x41, MRMSrcReg, // if !overflow, GR16 = GR16
1103 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
1104 "cmovno{w}\t{$src2, $dst|$dst, $src2}",
1105 [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
1106 X86_COND_NO, EFLAGS))]>,
1107 TB, OpSize;
1108 def CMOVNO32rr : I<0x41, MRMSrcReg, // if !overflow, GR32 = GR32
1109 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
1110 "cmovno{l}\t{$src2, $dst|$dst, $src2}",
1111 [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
1112 X86_COND_NO, EFLAGS))]>,
1113 TB;
1114 } // isCommutable = 1
1115
1116 def CMOVB16rm : I<0x42, MRMSrcMem, // if
1117 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1118 "cmovb{w}\t{$src2, $dst|$dst, $src2}",
1119 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1120 X86_COND_B, EFLAGS))]>,
1121 TB, OpSize;
1122 def CMOVB32rm : I<0x42, MRMSrcMem, // if
1123 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1124 "cmovb{l}\t{$src2, $dst|$dst, $src2}",
1125 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1126 X86_COND_B, EFLAGS))]>,
1127 TB;
1128 def CMOVAE16rm: I<0x43, MRMSrcMem, // if >=u, GR16 = [mem16]
1129 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1130 "cmovae{w}\t{$src2, $dst|$dst, $src2}",
1131 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1132 X86_COND_AE, EFLAGS))]>,
1133 TB, OpSize;
1134 def CMOVAE32rm: I<0x43, MRMSrcMem, // if >=u, GR32 = [mem32]
1135 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1136 "cmovae{l}\t{$src2, $dst|$dst, $src2}",
1137 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1138 X86_COND_AE, EFLAGS))]>,
1139 TB;
1140 def CMOVE16rm : I<0x44, MRMSrcMem, // if ==, GR16 = [mem16]
1141 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1142 "cmove{w}\t{$src2, $dst|$dst, $src2}",
1143 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1144 X86_COND_E, EFLAGS))]>,
1145 TB, OpSize;
1146 def CMOVE32rm : I<0x44, MRMSrcMem, // if ==, GR32 = [mem32]
1147 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1148 "cmove{l}\t{$src2, $dst|$dst, $src2}",
1149 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1150 X86_COND_E, EFLAGS))]>,
1151 TB;
1152 def CMOVNE16rm: I<0x45, MRMSrcMem, // if !=, GR16 = [mem16]
1153 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1154 "cmovne{w}\t{$src2, $dst|$dst, $src2}",
1155 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1156 X86_COND_NE, EFLAGS))]>,
1157 TB, OpSize;
1158 def CMOVNE32rm: I<0x45, MRMSrcMem, // if !=, GR32 = [mem32]
1159 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1160 "cmovne{l}\t{$src2, $dst|$dst, $src2}",
1161 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1162 X86_COND_NE, EFLAGS))]>,
1163 TB;
1164 def CMOVBE16rm: I<0x46, MRMSrcMem, // if <=u, GR16 = [mem16]
1165 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1166 "cmovbe{w}\t{$src2, $dst|$dst, $src2}",
1167 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1168 X86_COND_BE, EFLAGS))]>,
1169 TB, OpSize;
1170 def CMOVBE32rm: I<0x46, MRMSrcMem, // if <=u, GR32 = [mem32]
1171 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1172 "cmovbe{l}\t{$src2, $dst|$dst, $src2}",
1173 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1174 X86_COND_BE, EFLAGS))]>,
1175 TB;
1176 def CMOVA16rm : I<0x47, MRMSrcMem, // if >u, GR16 = [mem16]
1177 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1178 "cmova{w}\t{$src2, $dst|$dst, $src2}",
1179 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1180 X86_COND_A, EFLAGS))]>,
1181 TB, OpSize;
1182 def CMOVA32rm : I<0x47, MRMSrcMem, // if >u, GR32 = [mem32]
1183 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1184 "cmova{l}\t{$src2, $dst|$dst, $src2}",
1185 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1186 X86_COND_A, EFLAGS))]>,
1187 TB;
1188 def CMOVL16rm : I<0x4C, MRMSrcMem, // if
1189 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1190 "cmovl{w}\t{$src2, $dst|$dst, $src2}",
1191 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1192 X86_COND_L, EFLAGS))]>,
1193 TB, OpSize;
1194 def CMOVL32rm : I<0x4C, MRMSrcMem, // if
1195 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1196 "cmovl{l}\t{$src2, $dst|$dst, $src2}",
1197 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1198 X86_COND_L, EFLAGS))]>,
1199 TB;
1200 def CMOVGE16rm: I<0x4D, MRMSrcMem, // if >=s, GR16 = [mem16]
1201 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1202 "cmovge{w}\t{$src2, $dst|$dst, $src2}",
1203 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1204 X86_COND_GE, EFLAGS))]>,
1205 TB, OpSize;
1206 def CMOVGE32rm: I<0x4D, MRMSrcMem, // if >=s, GR32 = [mem32]
1207 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1208 "cmovge{l}\t{$src2, $dst|$dst, $src2}",
1209 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1210 X86_COND_GE, EFLAGS))]>,
1211 TB;
1212 def CMOVLE16rm: I<0x4E, MRMSrcMem, // if <=s, GR16 = [mem16]
1213 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1214 "cmovle{w}\t{$src2, $dst|$dst, $src2}",
1215 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1216 X86_COND_LE, EFLAGS))]>,
1217 TB, OpSize;
1218 def CMOVLE32rm: I<0x4E, MRMSrcMem, // if <=s, GR32 = [mem32]
1219 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1220 "cmovle{l}\t{$src2, $dst|$dst, $src2}",
1221 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1222 X86_COND_LE, EFLAGS))]>,
1223 TB;
1224 def CMOVG16rm : I<0x4F, MRMSrcMem, // if >s, GR16 = [mem16]
1225 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1226 "cmovg{w}\t{$src2, $dst|$dst, $src2}",
1227 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1228 X86_COND_G, EFLAGS))]>,
1229 TB, OpSize;
1230 def CMOVG32rm : I<0x4F, MRMSrcMem, // if >s, GR32 = [mem32]
1231 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1232 "cmovg{l}\t{$src2, $dst|$dst, $src2}",
1233 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1234 X86_COND_G, EFLAGS))]>,
1235 TB;
1236 def CMOVS16rm : I<0x48, MRMSrcMem, // if signed, GR16 = [mem16]
1237 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1238 "cmovs{w}\t{$src2, $dst|$dst, $src2}",
1239 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1240 X86_COND_S, EFLAGS))]>,
1241 TB, OpSize;
1242 def CMOVS32rm : I<0x48, MRMSrcMem, // if signed, GR32 = [mem32]
1243 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1244 "cmovs{l}\t{$src2, $dst|$dst, $src2}",
1245 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1246 X86_COND_S, EFLAGS))]>,
1247 TB;
1248 def CMOVNS16rm: I<0x49, MRMSrcMem, // if !signed, GR16 = [mem16]
1249 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1250 "cmovns{w}\t{$src2, $dst|$dst, $src2}",
1251 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1252 X86_COND_NS, EFLAGS))]>,
1253 TB, OpSize;
1254 def CMOVNS32rm: I<0x49, MRMSrcMem, // if !signed, GR32 = [mem32]
1255 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1256 "cmovns{l}\t{$src2, $dst|$dst, $src2}",
1257 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1258 X86_COND_NS, EFLAGS))]>,
1259 TB;
1260 def CMOVP16rm : I<0x4A, MRMSrcMem, // if parity, GR16 = [mem16]
1261 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1262 "cmovp{w}\t{$src2, $dst|$dst, $src2}",
1263 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1264 X86_COND_P, EFLAGS))]>,
1265 TB, OpSize;
1266 def CMOVP32rm : I<0x4A, MRMSrcMem, // if parity, GR32 = [mem32]
1267 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1268 "cmovp{l}\t{$src2, $dst|$dst, $src2}",
1269 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1270 X86_COND_P, EFLAGS))]>,
1271 TB;
1272 def CMOVNP16rm : I<0x4B, MRMSrcMem, // if !parity, GR16 = [mem16]
1273 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1274 "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
1275 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1276 X86_COND_NP, EFLAGS))]>,
1277 TB, OpSize;
1278 def CMOVNP32rm : I<0x4B, MRMSrcMem, // if !parity, GR32 = [mem32]
1279 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1280 "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
1281 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1282 X86_COND_NP, EFLAGS))]>,
1283 TB;
1284 def CMOVO16rm : I<0x40, MRMSrcMem, // if overflow, GR16 = [mem16]
1285 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1286 "cmovo{w}\t{$src2, $dst|$dst, $src2}",
1287 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1288 X86_COND_O, EFLAGS))]>,
1289 TB, OpSize;
1290 def CMOVO32rm : I<0x40, MRMSrcMem, // if overflow, GR32 = [mem32]
1291 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1292 "cmovo{l}\t{$src2, $dst|$dst, $src2}",
1293 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1294 X86_COND_O, EFLAGS))]>,
1295 TB;
1296 def CMOVNO16rm : I<0x41, MRMSrcMem, // if !overflow, GR16 = [mem16]
1297 (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
1298 "cmovno{w}\t{$src2, $dst|$dst, $src2}",
1299 [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
1300 X86_COND_NO, EFLAGS))]>,
1301 TB, OpSize;
1302 def CMOVNO32rm : I<0x41, MRMSrcMem, // if !overflow, GR32 = [mem32]
1303 (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
1304 "cmovno{l}\t{$src2, $dst|$dst, $src2}",
1305 [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
1306 X86_COND_NO, EFLAGS))]>,
1307 TB;
1308 } // Predicates = [HasCMov]
1309
1310 // X86 doesn't have 8-bit conditional moves. Use a customInserter to
1311 // emit control flow. An alternative to this is to mark i8 SELECT as Promote,
1312 // however that requires promoting the operands, and can induce additional
1313 // i8 register pressure. Note that CMOV_GR8 is conservatively considered to
1314 // clobber EFLAGS, because if one of the operands is zero, the expansion
1315 // could involve an xor.
1316 let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
1317 def CMOV_GR8 : I<0, Pseudo,
1318 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
1319 "#CMOV_GR8 PSEUDO!",
1320 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
1321 imm:$cond, EFLAGS))]>;
1322
1323 let Predicates = [NoCMov] in {
1324 def CMOV_GR32 : I<0, Pseudo,
1325 (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
1326 "#CMOV_GR32* PSEUDO!",
1327 [(set GR32:$dst,
1328 (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
1329 def CMOV_GR16 : I<0, Pseudo,
1330 (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
1331 "#CMOV_GR16* PSEUDO!",
1332 [(set GR16:$dst,
1333 (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
1334 def CMOV_RFP32 : I<0, Pseudo,
1335 (outs RFP32:$dst),
1336 (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
1337 "#CMOV_RFP32 PSEUDO!",
1338 [(set RFP32:$dst,
1339 (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
1340 EFLAGS))]>;
1341 def CMOV_RFP64 : I<0, Pseudo,
1342 (outs RFP64:$dst),
1343 (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
1344 "#CMOV_RFP64 PSEUDO!",
1345 [(set RFP64:$dst,
1346 (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
1347 EFLAGS))]>;
1348 def CMOV_RFP80 : I<0, Pseudo,
1349 (outs RFP80:$dst),
1350 (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
1351 "#CMOV_RFP80 PSEUDO!",
1352 [(set RFP80:$dst,
1353 (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
1354 EFLAGS))]>;
1355 } // Predicates = [NoCMov]
1356 } // UsesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
1357 } // Uses = [EFLAGS]
1358
1359916
1360917 // unary instructions
1361918 let CodeSize = 2 in {
30012558 let Defs = [AH], Uses = [EFLAGS], neverHasSideEffects = 1 in
30022559 def LAHF : I<0x9F, RawFrm, (outs), (ins), "lahf", []>; // AH = flags
30032560
// SETcc family: materialize a single EFLAGS condition into a GR8 register
// (r form) or an i8 memory location (m form). All instructions here only
// read EFLAGS except the SETB_C* pseudos, which also redefine it.
3004 let Uses = [EFLAGS] in {
3005 // Use sbb to materialize carry bit.
3006 let Defs = [EFLAGS], isCodeGenOnly = 1 in {
3007 // FIXME: These are pseudo ops that should be replaced with Pat<> patterns.
3008 // However, Pat<> can't replicate the destination reg into the inputs of the
3009 // result.
3010 // FIXME: Change these to have encoding Pseudo when X86MCCodeEmitter replaces
3011 // X86CodeEmitter.
3012 def SETB_C8r : I<0x18, MRMInitReg, (outs GR8:$dst), (ins), "",
3013 [(set GR8:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
3014 def SETB_C16r : I<0x19, MRMInitReg, (outs GR16:$dst), (ins), "",
3015 [(set GR16:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>,
3016 OpSize;
3017 def SETB_C32r : I<0x19, MRMInitReg, (outs GR32:$dst), (ins), "",
3018 [(set GR32:$dst, (X86setcc_c X86_COND_B, EFLAGS))]>;
3019 } // isCodeGenOnly
3020 
3021 def SETEr : I<0x94, MRM0r,
3022 (outs GR8 :$dst), (ins),
3023 "sete\t$dst",
3024 [(set GR8:$dst, (X86setcc X86_COND_E, EFLAGS))]>,
3025 TB; // GR8 = ==
3026 def SETEm : I<0x94, MRM0m,
3027 (outs), (ins i8mem:$dst),
3028 "sete\t$dst",
3029 [(store (X86setcc X86_COND_E, EFLAGS), addr:$dst)]>,
3030 TB; // [mem8] = ==
3031
3032 def SETNEr : I<0x95, MRM0r,
3033 (outs GR8 :$dst), (ins),
3034 "setne\t$dst",
3035 [(set GR8:$dst, (X86setcc X86_COND_NE, EFLAGS))]>,
3036 TB; // GR8 = !=
3037 def SETNEm : I<0x95, MRM0m,
3038 (outs), (ins i8mem:$dst),
3039 "setne\t$dst",
3040 [(store (X86setcc X86_COND_NE, EFLAGS), addr:$dst)]>,
3041 TB; // [mem8] = !=
3042
3043 def SETLr : I<0x9C, MRM0r,
3044 (outs GR8 :$dst), (ins),
3045 "setl\t$dst",
3046 [(set GR8:$dst, (X86setcc X86_COND_L, EFLAGS))]>,
3047 TB; // GR8 = < signed
3048 def SETLm : I<0x9C, MRM0m,
3049 (outs), (ins i8mem:$dst),
3050 "setl\t$dst",
3051 [(store (X86setcc X86_COND_L, EFLAGS), addr:$dst)]>,
3052 TB; // [mem8] = < signed
3053
3054 def SETGEr : I<0x9D, MRM0r,
3055 (outs GR8 :$dst), (ins),
3056 "setge\t$dst",
3057 [(set GR8:$dst, (X86setcc X86_COND_GE, EFLAGS))]>,
3058 TB; // GR8 = >= signed
3059 def SETGEm : I<0x9D, MRM0m,
3060 (outs), (ins i8mem:$dst),
3061 "setge\t$dst",
3062 [(store (X86setcc X86_COND_GE, EFLAGS), addr:$dst)]>,
3063 TB; // [mem8] = >= signed
3064
3065 def SETLEr : I<0x9E, MRM0r,
3066 (outs GR8 :$dst), (ins),
3067 "setle\t$dst",
3068 [(set GR8:$dst, (X86setcc X86_COND_LE, EFLAGS))]>,
3069 TB; // GR8 = <= signed
3070 def SETLEm : I<0x9E, MRM0m,
3071 (outs), (ins i8mem:$dst),
3072 "setle\t$dst",
3073 [(store (X86setcc X86_COND_LE, EFLAGS), addr:$dst)]>,
3074 TB; // [mem8] = <= signed
3075
3076 def SETGr : I<0x9F, MRM0r,
3077 (outs GR8 :$dst), (ins),
3078 "setg\t$dst",
3079 [(set GR8:$dst, (X86setcc X86_COND_G, EFLAGS))]>,
3080 TB; // GR8 = > signed
3081 def SETGm : I<0x9F, MRM0m,
3082 (outs), (ins i8mem:$dst),
3083 "setg\t$dst",
3084 [(store (X86setcc X86_COND_G, EFLAGS), addr:$dst)]>,
3085 TB; // [mem8] = > signed
3086
3087 def SETBr : I<0x92, MRM0r,
3088 (outs GR8 :$dst), (ins),
3089 "setb\t$dst",
3090 [(set GR8:$dst, (X86setcc X86_COND_B, EFLAGS))]>,
3091 TB; // GR8 = < unsign
3092 def SETBm : I<0x92, MRM0m,
3093 (outs), (ins i8mem:$dst),
3094 "setb\t$dst",
3095 [(store (X86setcc X86_COND_B, EFLAGS), addr:$dst)]>,
3096 TB; // [mem8] = < unsign
3097
3098 def SETAEr : I<0x93, MRM0r,
3099 (outs GR8 :$dst), (ins),
3100 "setae\t$dst",
3101 [(set GR8:$dst, (X86setcc X86_COND_AE, EFLAGS))]>,
3102 TB; // GR8 = >= unsign
3103 def SETAEm : I<0x93, MRM0m,
3104 (outs), (ins i8mem:$dst),
3105 "setae\t$dst",
3106 [(store (X86setcc X86_COND_AE, EFLAGS), addr:$dst)]>,
3107 TB; // [mem8] = >= unsign
3108
3109 def SETBEr : I<0x96, MRM0r,
3110 (outs GR8 :$dst), (ins),
3111 "setbe\t$dst",
3112 [(set GR8:$dst, (X86setcc X86_COND_BE, EFLAGS))]>,
3113 TB; // GR8 = <= unsign
3114 def SETBEm : I<0x96, MRM0m,
3115 (outs), (ins i8mem:$dst),
3116 "setbe\t$dst",
3117 [(store (X86setcc X86_COND_BE, EFLAGS), addr:$dst)]>,
3118 TB; // [mem8] = <= unsign
3119
3120 def SETAr : I<0x97, MRM0r,
3121 (outs GR8 :$dst), (ins),
3122 "seta\t$dst",
3123 [(set GR8:$dst, (X86setcc X86_COND_A, EFLAGS))]>,
3124 TB; // GR8 = > unsign
3125 def SETAm : I<0x97, MRM0m,
3126 (outs), (ins i8mem:$dst),
3127 "seta\t$dst",
3128 [(store (X86setcc X86_COND_A, EFLAGS), addr:$dst)]>,
3129 TB; // [mem8] = > unsign
3130
3131 def SETSr : I<0x98, MRM0r,
3132 (outs GR8 :$dst), (ins),
3133 "sets\t$dst",
3134 [(set GR8:$dst, (X86setcc X86_COND_S, EFLAGS))]>,
3135 TB; // GR8 = sign
3136 def SETSm : I<0x98, MRM0m,
3137 (outs), (ins i8mem:$dst),
3138 "sets\t$dst",
3139 [(store (X86setcc X86_COND_S, EFLAGS), addr:$dst)]>,
3140 TB; // [mem8] = sign
3141 def SETNSr : I<0x99, MRM0r,
3142 (outs GR8 :$dst), (ins),
3143 "setns\t$dst",
3144 [(set GR8:$dst, (X86setcc X86_COND_NS, EFLAGS))]>,
3145 TB; // GR8 = !sign
3146 def SETNSm : I<0x99, MRM0m,
3147 (outs), (ins i8mem:$dst),
3148 "setns\t$dst",
3149 [(store (X86setcc X86_COND_NS, EFLAGS), addr:$dst)]>,
3150 TB; // [mem8] = !sign
3151
3152 def SETPr : I<0x9A, MRM0r,
3153 (outs GR8 :$dst), (ins),
3154 "setp\t$dst",
3155 [(set GR8:$dst, (X86setcc X86_COND_P, EFLAGS))]>,
3156 TB; // GR8 = parity
3157 def SETPm : I<0x9A, MRM0m,
3158 (outs), (ins i8mem:$dst),
3159 "setp\t$dst",
3160 [(store (X86setcc X86_COND_P, EFLAGS), addr:$dst)]>,
3161 TB; // [mem8] = parity
3162 def SETNPr : I<0x9B, MRM0r,
3163 (outs GR8 :$dst), (ins),
3164 "setnp\t$dst",
3165 [(set GR8:$dst, (X86setcc X86_COND_NP, EFLAGS))]>,
3166 TB; // GR8 = not parity
3167 def SETNPm : I<0x9B, MRM0m,
3168 (outs), (ins i8mem:$dst),
3169 "setnp\t$dst",
3170 [(store (X86setcc X86_COND_NP, EFLAGS), addr:$dst)]>,
3171 TB; // [mem8] = not parity
3172
3173 def SETOr : I<0x90, MRM0r,
3174 (outs GR8 :$dst), (ins),
3175 "seto\t$dst",
3176 [(set GR8:$dst, (X86setcc X86_COND_O, EFLAGS))]>,
3177 TB; // GR8 = overflow
3178 def SETOm : I<0x90, MRM0m,
3179 (outs), (ins i8mem:$dst),
3180 "seto\t$dst",
3181 [(store (X86setcc X86_COND_O, EFLAGS), addr:$dst)]>,
3182 TB; // [mem8] = overflow
3183 def SETNOr : I<0x91, MRM0r,
3184 (outs GR8 :$dst), (ins),
3185 "setno\t$dst",
3186 [(set GR8:$dst, (X86setcc X86_COND_NO, EFLAGS))]>,
3187 TB; // GR8 = not overflow
3188 def SETNOm : I<0x91, MRM0m,
3189 (outs), (ins i8mem:$dst),
3190 "setno\t$dst",
3191 [(store (X86setcc X86_COND_NO, EFLAGS), addr:$dst)]>,
3192 TB; // [mem8] = not overflow
3193 } // Uses = [EFLAGS]
3194
3195
31962561 // Integer comparisons
31972562 let Defs = [EFLAGS] in {
31982563 def CMP8i8 : Ii8<0x3C, RawFrm, (outs), (ins i8imm:$src),
36232988 // X86-64 Support
36242989 include "X86Instr64bit.td"
36252990
2991 include "X86InstrCMovSetCC.td"
36262992 include "X86InstrControl.td"
36272993
36282994 // SIMD support (SSE, MMX and AVX)