llvm.org GIT mirror — llvm / 6dbbff9

Move cmov pseudo instructions to InstrCompiler, convert all the rest of the
cmovs to the multiclass, with good results:

 X86InstrCMovSetCC.td | 598 +--------------------------------------------------
 X86InstrCompiler.td  |  61 +++++
 2 files changed, 77 insertions(+), 582 deletions(-)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@115707 91177308-0d34-0410-b5e6-96231b3b80d8

Chris Lattner, 10 years ago
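The CMOV multiclass that the new defm lines instantiate is only partially visible in the hunks below (the page viewer collapsed and truncated parts of it). A minimal sketch of its likely shape, reconstructed from the visible #NAME#16rm fragment and the hand-written definitions being deleted; the parameter name opc and the completion of the truncated ": I<..." line are assumptions, not text preserved on this page:

multiclass CMOV<bits<8> opc, string Mnemonic, PatLeaf CondNode> {
  // Register-register form, modeled on the deleted CMOV*16rr defs.
  let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst",
      isCommutable = 1 in
    def #NAME#16rr
      : I<opc, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
          !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
          [(set GR16:$dst,
                (X86cmov GR16:$src1, GR16:$src2, CondNode, EFLAGS))]>,
        TB, OpSize;
  // ... the 32rr/64rr register forms and the 16rm/32rm/64rm memory forms
  // follow the same pattern; the hunk's context lines show the 64rr tail
  // and the start of the 16rm definition.
}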
==== X86InstrCMovSetCC.td ====

                  (X86cmov GR64:$src1, GR64:$src2, CondNode, EFLAGS))]>, TB;
 }
 
-let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst"in {
+let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst" in {
   def #NAME#16rm
     : I<opc, MRMSrcMem, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
         !strconcat(Mnemonic, "{w}\t{$src2, $dst|$dst, $src2}"),
  [lines 40-53 unchanged, collapsed in the viewer]
 
 
 // Conditional Moves.
+defm CMOVO  : CMOV<0x40, "cmovo" , X86_COND_O>;
+defm CMOVNO : CMOV<0x41, "cmovno", X86_COND_NO>;
+defm CMOVB  : CMOV<0x42, "cmovb" , X86_COND_B>;
+defm CMOVAE : CMOV<0x43, "cmovae", X86_COND_AE>;
+defm CMOVE  : CMOV<0x44, "cmove" , X86_COND_E>;
+defm CMOVNE : CMOV<0x45, "cmovne", X86_COND_NE>;
 defm CMOVBE : CMOV<0x46, "cmovbe", X86_COND_BE>;
-
-
-let Constraints = "$src1 = $dst" in {
-
-// Conditional moves
-let Uses = [EFLAGS] in {
-
-let Predicates = [HasCMov] in {
-let isCommutable = 1 in {
-def CMOVB16rr : I<0x42, MRMSrcReg,       // if <u, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovb{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                    X86_COND_B, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVB32rr : I<0x42, MRMSrcReg,       // if <u, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovb{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                    X86_COND_B, EFLAGS))]>,
-                  TB;
-def CMOVAE16rr: I<0x43, MRMSrcReg,       // if >=u, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovae{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                    X86_COND_AE, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVAE32rr: I<0x43, MRMSrcReg,       // if >=u, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovae{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                    X86_COND_AE, EFLAGS))]>,
-                  TB;
-def CMOVE16rr : I<0x44, MRMSrcReg,       // if ==, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmove{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                    X86_COND_E, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVE32rr : I<0x44, MRMSrcReg,       // if ==, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmove{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                    X86_COND_E, EFLAGS))]>,
-                  TB;
-def CMOVNE16rr: I<0x45, MRMSrcReg,       // if !=, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovne{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                    X86_COND_NE, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVNE32rr: I<0x45, MRMSrcReg,       // if !=, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovne{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                    X86_COND_NE, EFLAGS))]>,
-                  TB;
-def CMOVA16rr : I<0x47, MRMSrcReg,       // if >u, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmova{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                    X86_COND_A, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVA32rr : I<0x47, MRMSrcReg,       // if >u, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmova{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                    X86_COND_A, EFLAGS))]>,
-                  TB;
-def CMOVL16rr : I<0x4C, MRMSrcReg,       // if <s, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovl{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                    X86_COND_L, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVL32rr : I<0x4C, MRMSrcReg,       // if <s, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovl{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                    X86_COND_L, EFLAGS))]>,
-                  TB;
-def CMOVGE16rr: I<0x4D, MRMSrcReg,       // if >=s, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovge{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                    X86_COND_GE, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVGE32rr: I<0x4D, MRMSrcReg,       // if >=s, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovge{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                    X86_COND_GE, EFLAGS))]>,
-                  TB;
-def CMOVLE16rr: I<0x4E, MRMSrcReg,       // if <=s, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovle{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                    X86_COND_LE, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVLE32rr: I<0x4E, MRMSrcReg,       // if <=s, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovle{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                    X86_COND_LE, EFLAGS))]>,
-                  TB;
-def CMOVG16rr : I<0x4F, MRMSrcReg,       // if >s, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovg{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                    X86_COND_G, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVG32rr : I<0x4F, MRMSrcReg,       // if >s, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovg{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                    X86_COND_G, EFLAGS))]>,
-                  TB;
-def CMOVS16rr : I<0x48, MRMSrcReg,       // if signed, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovs{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                    X86_COND_S, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVS32rr : I<0x48, MRMSrcReg,       // if signed, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovs{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                    X86_COND_S, EFLAGS))]>,
-                  TB;
-def CMOVNS16rr: I<0x49, MRMSrcReg,       // if !signed, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovns{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                    X86_COND_NS, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVNS32rr: I<0x49, MRMSrcReg,       // if !signed, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovns{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                    X86_COND_NS, EFLAGS))]>,
-                  TB;
-def CMOVP16rr : I<0x4A, MRMSrcReg,       // if parity, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovp{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                    X86_COND_P, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVP32rr : I<0x4A, MRMSrcReg,       // if parity, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovp{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                    X86_COND_P, EFLAGS))]>,
-                  TB;
-def CMOVNP16rr : I<0x4B, MRMSrcReg,      // if !parity, GR16 = GR16
-                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                   "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                     X86_COND_NP, EFLAGS))]>,
-                   TB, OpSize;
-def CMOVNP32rr : I<0x4B, MRMSrcReg,      // if !parity, GR32 = GR32
-                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                   "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                     X86_COND_NP, EFLAGS))]>,
-                   TB;
-def CMOVO16rr : I<0x40, MRMSrcReg,       // if overflow, GR16 = GR16
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                  "cmovo{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                    X86_COND_O, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVO32rr : I<0x40, MRMSrcReg,       // if overflow, GR32 = GR32
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                  "cmovo{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                    X86_COND_O, EFLAGS))]>,
-                  TB;
-def CMOVNO16rr : I<0x41, MRMSrcReg,      // if !overflow, GR16 = GR16
-                   (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
-                   "cmovno{w}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR16:$dst, (X86cmov GR16:$src1, GR16:$src2,
-                                     X86_COND_NO, EFLAGS))]>,
-                   TB, OpSize;
-def CMOVNO32rr : I<0x41, MRMSrcReg,      // if !overflow, GR32 = GR32
-                   (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
-                   "cmovno{l}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR32:$dst, (X86cmov GR32:$src1, GR32:$src2,
-                                     X86_COND_NO, EFLAGS))]>,
-                   TB;
-} // isCommutable = 1
-
-def CMOVB16rm : I<0x42, MRMSrcMem,       // if <u, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovb{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                    X86_COND_B, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVB32rm : I<0x42, MRMSrcMem,       // if <u, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovb{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                    X86_COND_B, EFLAGS))]>,
-                  TB;
-def CMOVAE16rm: I<0x43, MRMSrcMem,       // if >=u, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovae{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                    X86_COND_AE, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVAE32rm: I<0x43, MRMSrcMem,       // if >=u, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovae{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                    X86_COND_AE, EFLAGS))]>,
-                  TB;
-def CMOVE16rm : I<0x44, MRMSrcMem,       // if ==, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmove{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                    X86_COND_E, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVE32rm : I<0x44, MRMSrcMem,       // if ==, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmove{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                    X86_COND_E, EFLAGS))]>,
-                  TB;
-def CMOVNE16rm: I<0x45, MRMSrcMem,       // if !=, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovne{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                    X86_COND_NE, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVNE32rm: I<0x45, MRMSrcMem,       // if !=, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovne{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                    X86_COND_NE, EFLAGS))]>,
-                  TB;
-def CMOVA16rm : I<0x47, MRMSrcMem,       // if >u, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmova{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                    X86_COND_A, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVA32rm : I<0x47, MRMSrcMem,       // if >u, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmova{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                    X86_COND_A, EFLAGS))]>,
-                  TB;
-def CMOVL16rm : I<0x4C, MRMSrcMem,       // if <s, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovl{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                    X86_COND_L, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVL32rm : I<0x4C, MRMSrcMem,       // if <s, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovl{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                    X86_COND_L, EFLAGS))]>,
-                  TB;
-def CMOVGE16rm: I<0x4D, MRMSrcMem,       // if >=s, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovge{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                    X86_COND_GE, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVGE32rm: I<0x4D, MRMSrcMem,       // if >=s, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovge{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                    X86_COND_GE, EFLAGS))]>,
-                  TB;
-def CMOVLE16rm: I<0x4E, MRMSrcMem,       // if <=s, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovle{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                    X86_COND_LE, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVLE32rm: I<0x4E, MRMSrcMem,       // if <=s, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovle{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                    X86_COND_LE, EFLAGS))]>,
-                  TB;
-def CMOVG16rm : I<0x4F, MRMSrcMem,       // if >s, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovg{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                    X86_COND_G, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVG32rm : I<0x4F, MRMSrcMem,       // if >s, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovg{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                    X86_COND_G, EFLAGS))]>,
-                  TB;
-def CMOVS16rm : I<0x48, MRMSrcMem,       // if signed, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovs{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                    X86_COND_S, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVS32rm : I<0x48, MRMSrcMem,       // if signed, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovs{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                    X86_COND_S, EFLAGS))]>,
-                  TB;
-def CMOVNS16rm: I<0x49, MRMSrcMem,       // if !signed, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovns{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                    X86_COND_NS, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVNS32rm: I<0x49, MRMSrcMem,       // if !signed, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovns{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                    X86_COND_NS, EFLAGS))]>,
-                  TB;
-def CMOVP16rm : I<0x4A, MRMSrcMem,       // if parity, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovp{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                    X86_COND_P, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVP32rm : I<0x4A, MRMSrcMem,       // if parity, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovp{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                    X86_COND_P, EFLAGS))]>,
-                  TB;
-def CMOVNP16rm : I<0x4B, MRMSrcMem,      // if !parity, GR16 = [mem16]
-                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                   "cmovnp{w}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                     X86_COND_NP, EFLAGS))]>,
-                   TB, OpSize;
-def CMOVNP32rm : I<0x4B, MRMSrcMem,      // if !parity, GR32 = [mem32]
-                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                   "cmovnp{l}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                     X86_COND_NP, EFLAGS))]>,
-                   TB;
-def CMOVO16rm : I<0x40, MRMSrcMem,       // if overflow, GR16 = [mem16]
-                  (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                  "cmovo{w}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                    X86_COND_O, EFLAGS))]>,
-                  TB, OpSize;
-def CMOVO32rm : I<0x40, MRMSrcMem,       // if overflow, GR32 = [mem32]
-                  (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                  "cmovo{l}\t{$src2, $dst|$dst, $src2}",
-                  [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                    X86_COND_O, EFLAGS))]>,
-                  TB;
-def CMOVNO16rm : I<0x41, MRMSrcMem,      // if !overflow, GR16 = [mem16]
-                   (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2),
-                   "cmovno{w}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
-                                     X86_COND_NO, EFLAGS))]>,
-                   TB, OpSize;
-def CMOVNO32rm : I<0x41, MRMSrcMem,      // if !overflow, GR32 = [mem32]
-                   (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2),
-                   "cmovno{l}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
-                                     X86_COND_NO, EFLAGS))]>,
-                   TB;
-} // Predicates = [HasCMov]
-
-// X86 doesn't have 8-bit conditional moves. Use a customInserter to
-// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
-// however that requires promoting the operands, and can induce additional
-// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
-// clobber EFLAGS, because if one of the operands is zero, the expansion
-// could involve an xor.
-let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
-def CMOV_GR8 : I<0, Pseudo,
-                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
-                 "#CMOV_GR8 PSEUDO!",
-                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
-                                  imm:$cond, EFLAGS))]>;
-
-let Predicates = [NoCMov] in {
-def CMOV_GR32 : I<0, Pseudo,
-                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
-                  "#CMOV_GR32* PSEUDO!",
-                  [(set GR32:$dst,
-                    (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
-def CMOV_GR16 : I<0, Pseudo,
-                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
-                  "#CMOV_GR16* PSEUDO!",
-                  [(set GR16:$dst,
-                    (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
-def CMOV_RFP32 : I<0, Pseudo,
-                   (outs RFP32:$dst),
-                   (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
-                   "#CMOV_RFP32 PSEUDO!",
-                   [(set RFP32:$dst,
-                      (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
-                               EFLAGS))]>;
-def CMOV_RFP64 : I<0, Pseudo,
-                   (outs RFP64:$dst),
-                   (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
-                   "#CMOV_RFP64 PSEUDO!",
-                   [(set RFP64:$dst,
-                      (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
-                               EFLAGS))]>;
-def CMOV_RFP80 : I<0, Pseudo,
-                   (outs RFP80:$dst),
-                   (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
-                   "#CMOV_RFP80 PSEUDO!",
-                   [(set RFP80:$dst,
-                      (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
-                               EFLAGS))]>;
-} // Predicates = [NoCMov]
-} // UsesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
-} // Uses = [EFLAGS]
-
-} // Constraints = "$src1 = $dst" in
-
-
-// Conditional moves
-let Uses = [EFLAGS], Constraints = "$src1 = $dst" in {
-let isCommutable = 1 in {
-def CMOVB64rr : RI<0x42, MRMSrcReg,      // if <u, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_B, EFLAGS))]>, TB;
-def CMOVAE64rr: RI<0x43, MRMSrcReg,      // if >=u, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_AE, EFLAGS))]>, TB;
-def CMOVE64rr : RI<0x44, MRMSrcReg,      // if ==, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_E, EFLAGS))]>, TB;
-def CMOVNE64rr: RI<0x45, MRMSrcReg,      // if !=, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_NE, EFLAGS))]>, TB;
-def CMOVA64rr : RI<0x47, MRMSrcReg,      // if >u, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_A, EFLAGS))]>, TB;
-def CMOVL64rr : RI<0x4C, MRMSrcReg,      // if <s, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_L, EFLAGS))]>, TB;
-def CMOVGE64rr: RI<0x4D, MRMSrcReg,      // if >=s, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_GE, EFLAGS))]>, TB;
-def CMOVLE64rr: RI<0x4E, MRMSrcReg,      // if <=s, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_LE, EFLAGS))]>, TB;
-def CMOVG64rr : RI<0x4F, MRMSrcReg,      // if >s, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_G, EFLAGS))]>, TB;
-def CMOVS64rr : RI<0x48, MRMSrcReg,      // if signed, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_S, EFLAGS))]>, TB;
-def CMOVNS64rr: RI<0x49, MRMSrcReg,      // if !signed, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_NS, EFLAGS))]>, TB;
-def CMOVP64rr : RI<0x4A, MRMSrcReg,      // if parity, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_P, EFLAGS))]>, TB;
-def CMOVNP64rr : RI<0x4B, MRMSrcReg,     // if !parity, GR64 = GR64
-                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                    "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
-                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                      X86_COND_NP, EFLAGS))]>, TB;
-def CMOVO64rr : RI<0x40, MRMSrcReg,      // if overflow, GR64 = GR64
-                   (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                     X86_COND_O, EFLAGS))]>, TB;
-def CMOVNO64rr : RI<0x41, MRMSrcReg,     // if !overflow, GR64 = GR64
-                    (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
-                    "cmovno{q}\t{$src2, $dst|$dst, $src2}",
-                    [(set GR64:$dst, (X86cmov GR64:$src1, GR64:$src2,
-                                      X86_COND_NO, EFLAGS))]>, TB;
-} // isCommutable = 1
-
-def CMOVB64rm : RI<0x42, MRMSrcMem,      // if <u, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovb{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_B, EFLAGS))]>, TB;
-def CMOVAE64rm: RI<0x43, MRMSrcMem,      // if >=u, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovae{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_AE, EFLAGS))]>, TB;
-def CMOVE64rm : RI<0x44, MRMSrcMem,      // if ==, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmove{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_E, EFLAGS))]>, TB;
-def CMOVNE64rm: RI<0x45, MRMSrcMem,      // if !=, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovne{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_NE, EFLAGS))]>, TB;
-def CMOVA64rm : RI<0x47, MRMSrcMem,      // if >u, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmova{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_A, EFLAGS))]>, TB;
-def CMOVL64rm : RI<0x4C, MRMSrcMem,      // if <s, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovl{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_L, EFLAGS))]>, TB;
-def CMOVGE64rm: RI<0x4D, MRMSrcMem,      // if >=s, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovge{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_GE, EFLAGS))]>, TB;
-def CMOVLE64rm: RI<0x4E, MRMSrcMem,      // if <=s, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovle{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_LE, EFLAGS))]>, TB;
-def CMOVG64rm : RI<0x4F, MRMSrcMem,      // if >s, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovg{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_G, EFLAGS))]>, TB;
-def CMOVS64rm : RI<0x48, MRMSrcMem,      // if signed, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovs{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_S, EFLAGS))]>, TB;
-def CMOVNS64rm: RI<0x49, MRMSrcMem,      // if !signed, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovns{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_NS, EFLAGS))]>, TB;
-def CMOVP64rm : RI<0x4A, MRMSrcMem,      // if parity, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovp{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_P, EFLAGS))]>, TB;
-def CMOVNP64rm : RI<0x4B, MRMSrcMem,     // if !parity, GR64 = [mem64]
-                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                    "cmovnp{q}\t{$src2, $dst|$dst, $src2}",
-                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                      X86_COND_NP, EFLAGS))]>, TB;
-def CMOVO64rm : RI<0x40, MRMSrcMem,      // if overflow, GR64 = [mem64]
-                   (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                   "cmovo{q}\t{$src2, $dst|$dst, $src2}",
-                   [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                     X86_COND_O, EFLAGS))]>, TB;
-def CMOVNO64rm : RI<0x41, MRMSrcMem,     // if !overflow, GR64 = [mem64]
-                    (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2),
-                    "cmovno{q}\t{$src2, $dst|$dst, $src2}",
-                    [(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
-                                      X86_COND_NO, EFLAGS))]>, TB;
-} // Constraints = "$src1 = $dst"
+defm CMOVA  : CMOV<0x47, "cmova" , X86_COND_A>;
+defm CMOVS  : CMOV<0x48, "cmovs" , X86_COND_S>;
+defm CMOVNS : CMOV<0x49, "cmovns", X86_COND_NS>;
+defm CMOVP  : CMOV<0x4A, "cmovp" , X86_COND_P>;
+defm CMOVNP : CMOV<0x4B, "cmovnp", X86_COND_NP>;
+defm CMOVL  : CMOV<0x4C, "cmovl" , X86_COND_L>;
+defm CMOVGE : CMOV<0x4D, "cmovge", X86_COND_GE>;
+defm CMOVLE : CMOV<0x4E, "cmovle", X86_COND_LE>;
+defm CMOVG  : CMOV<0x4F, "cmovg" , X86_COND_G>;
 
 
 // SetCC instructions.
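Each defm line above stands in for six hand-written records from the deleted block: the multiclass splices the defm name in via #NAME#, as the surviving fragment at the top of the file shows. For example (record names taken from the deleted definitions):

defm CMOVO : CMOV<0x40, "cmovo" , X86_COND_O>;
// instantiates the register forms CMOVO16rr, CMOVO32rr, CMOVO64rr
// and the memory forms CMOVO16rm, CMOVO32rm, CMOVO64rm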
==== X86InstrCompiler.td ====

                    "# TLSCall_64",
                    [(X86TLSCall addr:$sym)]>,
                  Requires<[In64BitMode]>;
+
+
+//===----------------------------------------------------------------------===//
+// Conditional Move Pseudo Instructions
+
+let Constraints = "$src1 = $dst" in {
+
+// Conditional moves
+let Uses = [EFLAGS] in {
+
+// X86 doesn't have 8-bit conditional moves. Use a customInserter to
+// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
+// however that requires promoting the operands, and can induce additional
+// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
+// clobber EFLAGS, because if one of the operands is zero, the expansion
+// could involve an xor.
+let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
+def CMOV_GR8 : I<0, Pseudo,
+                 (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
+                 "#CMOV_GR8 PSEUDO!",
+                 [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
+                                  imm:$cond, EFLAGS))]>;
+
+let Predicates = [NoCMov] in {
+def CMOV_GR32 : I<0, Pseudo,
+                  (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
+                  "#CMOV_GR32* PSEUDO!",
+                  [(set GR32:$dst,
+                    (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
+def CMOV_GR16 : I<0, Pseudo,
+                  (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
+                  "#CMOV_GR16* PSEUDO!",
+                  [(set GR16:$dst,
+                    (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
+def CMOV_RFP32 : I<0, Pseudo,
+                   (outs RFP32:$dst),
+                   (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
+                   "#CMOV_RFP32 PSEUDO!",
+                   [(set RFP32:$dst,
+                      (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
+                               EFLAGS))]>;
+def CMOV_RFP64 : I<0, Pseudo,
+                   (outs RFP64:$dst),
+                   (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
+                   "#CMOV_RFP64 PSEUDO!",
+                   [(set RFP64:$dst,
+                      (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
+                               EFLAGS))]>;
+def CMOV_RFP80 : I<0, Pseudo,
+                   (outs RFP80:$dst),
+                   (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
+                   "#CMOV_RFP80 PSEUDO!",
+                   [(set RFP80:$dst,
+                      (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
+                               EFLAGS))]>;
+} // Predicates = [NoCMov]
+} // UsesCustomInserter = 1, Constraints = "", Defs = [EFLAGS]
+} // Uses = [EFLAGS]
+
+} // Constraints = "$src1 = $dst" in
+
 
 //===----------------------------------------------------------------------===//
 // Atomic Instruction Pseudo Instructions
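A structural note on the moved pseudo block (my reading of the code above, not something the commit message states): the outer let ties $src1 to $dst, the two-address form that real cmov instructions require, while the pseudo block overrides it with Constraints = "" because the custom inserter rewrites each pseudo into branch-based control flow after instruction selection, where no register tie is needed. The nesting skeleton:

let Constraints = "$src1 = $dst" in {
let Uses = [EFLAGS] in {
  // Pseudos drop the two-address tie: the custom inserter expands them
  // into a branch diamond, so $dst need not start out equal to $src1.
  let usesCustomInserter = 1, Constraints = "", Defs = [EFLAGS] in {
    // CMOV_GR8, CMOV_GR32, CMOV_GR16, CMOV_RFP32, CMOV_RFP64, CMOV_RFP80
  }
}
}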