//===- TargetLoweringBase.cpp - Implement the TargetLoweringBase class ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLoweringBase class.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <string>
#include <tuple>
#include <utility>

using namespace llvm;

static cl::opt<bool> JumpIsExpensiveOverride(
    "jump-is-expensive", cl::init(false),
    cl::desc("Do not create extra branches to split comparison logic."),
    cl::Hidden);

static cl::opt<int> MinimumJumpTableEntries
  ("min-jump-table-entries", cl::init(4), cl::Hidden,
   cl::desc("Set minimum number of entries to use a jump table."));

static cl::opt<unsigned> MaximumJumpTableSize
  ("max-jump-table-size", cl::init(UINT_MAX), cl::Hidden,
   cl::desc("Set maximum size of jump tables."));

/// Minimum jump table density for normal functions.
static cl::opt<unsigned>
    JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
                     cl::desc("Minimum density for building a jump table in "
                              "a normal function"));

/// Minimum jump table density for -Os or -Oz functions.
static cl::opt<unsigned> OptsizeJumpTableDensity(
    "optsize-jump-table-density", cl::init(40), cl::Hidden,
    cl::desc("Minimum density for building a jump table in "
             "an optsize function"));

// FIXME: This option is only used to test whether strict FP operations are
// processed correctly, by preventing strict FP operations from being mutated
// into normal FP operations during development. Once the backends support
// strict float operations, this option will become meaningless.
static cl::opt<bool> DisableStrictNodeMutation("disable-strictnode-mutation",
       cl::desc("Don't mutate strict-float node to a legalize node"),
       cl::init(false), cl::Hidden);
static bool darwinHasSinCos(const Triple &TT) {
  assert(TT.isOSDarwin() && "should be called with darwin triple");
  // Don't bother with 32 bit x86.
  if (TT.getArch() == Triple::x86)
    return false;
  // macOS < 10.9 has no sincos_stret.
  if (TT.isMacOSX())
    return !TT.isMacOSXVersionLT(10, 9) && TT.isArch64Bit();
  // iOS < 7.0 has no sincos_stret.
  if (TT.isiOS())
    return !TT.isOSVersionLT(7, 0);
  // Any other Darwin OS, such as WatchOS or TvOS, is new enough.
  return true;
}

void TargetLoweringBase::InitLibcalls(const Triple &TT) {
#define HANDLE_LIBCALL(code, name) \
  setLibcallName(RTLIB::code, name);
#include "llvm/IR/RuntimeLibcalls.def"
#undef HANDLE_LIBCALL
  // Initialize calling conventions to their default.
  for (int LC = 0; LC < RTLIB::UNKNOWN_LIBCALL; ++LC)
    setLibcallCallingConv((RTLIB::Libcall)LC, CallingConv::C);

  // Use the f128 variants of math functions on x86_64
  if (TT.getArch() == Triple::ArchType::x86_64 && TT.isGNUEnvironment()) {
    setLibcallName(RTLIB::REM_F128, "fmodf128");
    setLibcallName(RTLIB::FMA_F128, "fmaf128");
    setLibcallName(RTLIB::SQRT_F128, "sqrtf128");
    setLibcallName(RTLIB::CBRT_F128, "cbrtf128");
    setLibcallName(RTLIB::LOG_F128, "logf128");
    setLibcallName(RTLIB::LOG_FINITE_F128, "__logf128_finite");
    setLibcallName(RTLIB::LOG2_F128, "log2f128");
    setLibcallName(RTLIB::LOG2_FINITE_F128, "__log2f128_finite");
    setLibcallName(RTLIB::LOG10_F128, "log10f128");
    setLibcallName(RTLIB::LOG10_FINITE_F128, "__log10f128_finite");
    setLibcallName(RTLIB::EXP_F128, "expf128");
    setLibcallName(RTLIB::EXP_FINITE_F128, "__expf128_finite");
    setLibcallName(RTLIB::EXP2_F128, "exp2f128");
    setLibcallName(RTLIB::EXP2_FINITE_F128, "__exp2f128_finite");
    setLibcallName(RTLIB::EXP10_F128, "exp10f128");
    setLibcallName(RTLIB::SIN_F128, "sinf128");
    setLibcallName(RTLIB::COS_F128, "cosf128");
    setLibcallName(RTLIB::SINCOS_F128, "sincosf128");
    setLibcallName(RTLIB::POW_F128, "powf128");
    setLibcallName(RTLIB::POW_FINITE_F128, "__powf128_finite");
    setLibcallName(RTLIB::CEIL_F128, "ceilf128");
    setLibcallName(RTLIB::TRUNC_F128, "truncf128");
    setLibcallName(RTLIB::RINT_F128, "rintf128");
    setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintf128");
    setLibcallName(RTLIB::ROUND_F128, "roundf128");
    setLibcallName(RTLIB::ROUNDEVEN_F128, "roundevenf128");
    setLibcallName(RTLIB::FLOOR_F128, "floorf128");
    setLibcallName(RTLIB::COPYSIGN_F128, "copysignf128");
    setLibcallName(RTLIB::FMIN_F128, "fminf128");
    setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
    setLibcallName(RTLIB::LROUND_F128, "lroundf128");
    setLibcallName(RTLIB::LLROUND_F128, "llroundf128");
    setLibcallName(RTLIB::LRINT_F128, "lrintf128");
    setLibcallName(RTLIB::LLRINT_F128, "llrintf128");
    setLibcallName(RTLIB::LDEXP_F128, "ldexpf128");
    setLibcallName(RTLIB::FREXP_F128, "frexpf128");
  }

  // For IEEE quad-precision libcall names, PPC uses "kf" instead of "tf".
  if (TT.isPPC()) {
    setLibcallName(RTLIB::ADD_F128, "__addkf3");
    setLibcallName(RTLIB::SUB_F128, "__subkf3");
    setLibcallName(RTLIB::MUL_F128, "__mulkf3");
    setLibcallName(RTLIB::DIV_F128, "__divkf3");
    setLibcallName(RTLIB::POWI_F128, "__powikf2");
    setLibcallName(RTLIB::FPEXT_F32_F128, "__extendsfkf2");
    setLibcallName(RTLIB::FPEXT_F64_F128, "__extenddfkf2");
    setLibcallName(RTLIB::FPROUND_F128_F32, "__trunckfsf2");
    setLibcallName(RTLIB::FPROUND_F128_F64, "__trunckfdf2");
    setLibcallName(RTLIB::FPTOSINT_F128_I32, "__fixkfsi");
    setLibcallName(RTLIB::FPTOSINT_F128_I64, "__fixkfdi");
    setLibcallName(RTLIB::FPTOSINT_F128_I128, "__fixkfti");
    setLibcallName(RTLIB::FPTOUINT_F128_I32, "__fixunskfsi");
    setLibcallName(RTLIB::FPTOUINT_F128_I64, "__fixunskfdi");
    setLibcallName(RTLIB::FPTOUINT_F128_I128, "__fixunskfti");
    setLibcallName(RTLIB::SINTTOFP_I32_F128, "__floatsikf");
    setLibcallName(RTLIB::SINTTOFP_I64_F128, "__floatdikf");
    setLibcallName(RTLIB::SINTTOFP_I128_F128, "__floattikf");
    setLibcallName(RTLIB::UINTTOFP_I32_F128, "__floatunsikf");
    setLibcallName(RTLIB::UINTTOFP_I64_F128, "__floatundikf");
    setLibcallName(RTLIB::UINTTOFP_I128_F128, "__floatuntikf");
    setLibcallName(RTLIB::OEQ_F128, "__eqkf2");
    setLibcallName(RTLIB::UNE_F128, "__nekf2");
    setLibcallName(RTLIB::OGE_F128, "__gekf2");
    setLibcallName(RTLIB::OLT_F128, "__ltkf2");
    setLibcallName(RTLIB::OLE_F128, "__lekf2");
    setLibcallName(RTLIB::OGT_F128, "__gtkf2");
    setLibcallName(RTLIB::UO_F128, "__unordkf2");
  }

  // A few names are different on particular architectures or environments.
  if (TT.isOSDarwin()) {
    // For f16/f32 conversions, Darwin uses the standard naming scheme, instead
    // of the gnueabi-style __gnu_*_ieee.
    // FIXME: What about other targets?
    setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

    // Some darwins have an optimized __bzero/bzero function.
    switch (TT.getArch()) {
    case Triple::x86:
    case Triple::x86_64:
      if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
        setLibcallName(RTLIB::BZERO, "__bzero");
      break;
    case Triple::aarch64:
    case Triple::aarch64_32:
      setLibcallName(RTLIB::BZERO, "bzero");
      break;
    default:
      break;
    }

    if (darwinHasSinCos(TT)) {
      setLibcallName(RTLIB::SINCOS_STRET_F32, "__sincosf_stret");
      setLibcallName(RTLIB::SINCOS_STRET_F64, "__sincos_stret");
      if (TT.isWatchABI()) {
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F32,
                              CallingConv::ARM_AAPCS_VFP);
        setLibcallCallingConv(RTLIB::SINCOS_STRET_F64,
                              CallingConv::ARM_AAPCS_VFP);
      }
    }
  } else {
    setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
    setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
  }

  if (TT.isGNUEnvironment() || TT.isOSFuchsia() ||
      (TT.isAndroid() && !TT.isAndroidVersionLT(9))) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
    setLibcallName(RTLIB::SINCOS_F80, "sincosl");
    setLibcallName(RTLIB::SINCOS_F128, "sincosl");
    setLibcallName(RTLIB::SINCOS_PPCF128, "sincosl");
  }

  if (TT.isPS()) {
    setLibcallName(RTLIB::SINCOS_F32, "sincosf");
    setLibcallName(RTLIB::SINCOS_F64, "sincos");
  }

  if (TT.isOSOpenBSD()) {
    setLibcallName(RTLIB::STACKPROTECTOR_CHECK_FAIL, nullptr);
  }

  if (TT.isOSWindows() && !TT.isOSCygMing()) {
    setLibcallName(RTLIB::LDEXP_F32, nullptr);
    setLibcallName(RTLIB::LDEXP_F80, nullptr);
    setLibcallName(RTLIB::LDEXP_F128, nullptr);
    setLibcallName(RTLIB::LDEXP_PPCF128, nullptr);

    setLibcallName(RTLIB::FREXP_F32, nullptr);
    setLibcallName(RTLIB::FREXP_F80, nullptr);
    setLibcallName(RTLIB::FREXP_F128, nullptr);
    setLibcallName(RTLIB::FREXP_PPCF128, nullptr);
  }
}

/// GetFPLibCall - Helper to return the right libcall for the given floating
/// point type, or UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPLibCall(EVT VT,
                                   RTLIB::Libcall Call_F32,
                                   RTLIB::Libcall Call_F64,
                                   RTLIB::Libcall Call_F80,
                                   RTLIB::Libcall Call_F128,
                                   RTLIB::Libcall Call_PPCF128) {
  return
    VT == MVT::f32 ? Call_F32 :
    VT == MVT::f64 ? Call_F64 :
    VT == MVT::f80 ? Call_F80 :
    VT == MVT::f128 ? Call_F128 :
    VT == MVT::ppcf128 ? Call_PPCF128 :
    RTLIB::UNKNOWN_LIBCALL;
}
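
// Illustrative usage (a sketch, not taken from the original source): selecting
// the sine libcall for a double, using the RTLIB::SIN_* enumerators declared
// in RuntimeLibcalls.h:
//   RTLIB::Libcall LC = RTLIB::getFPLibCall(MVT::f64, RTLIB::SIN_F32,
//                                           RTLIB::SIN_F64, RTLIB::SIN_F80,
//                                           RTLIB::SIN_F128,
//                                           RTLIB::SIN_PPCF128);
//   // LC == RTLIB::SIN_F64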

/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::f32)
      return FPEXT_F16_F32;
    if (RetVT == MVT::f64)
      return FPEXT_F16_F64;
    if (RetVT == MVT::f80)
      return FPEXT_F16_F80;
    if (RetVT == MVT::f128)
      return FPEXT_F16_F128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::f64)
      return FPEXT_F32_F64;
    if (RetVT == MVT::f128)
      return FPEXT_F32_F128;
    if (RetVT == MVT::ppcf128)
      return FPEXT_F32_PPCF128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::f128)
      return FPEXT_F64_F128;
    else if (RetVT == MVT::ppcf128)
      return FPEXT_F64_PPCF128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::f128)
      return FPEXT_F80_F128;
  } else if (OpVT == MVT::bf16) {
    if (RetVT == MVT::f32)
      return FPEXT_BF16_F32;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
  if (RetVT == MVT::f16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_F16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_F16;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F16;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F16;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F16;
  } else if (RetVT == MVT::bf16) {
    if (OpVT == MVT::f32)
      return FPROUND_F32_BF16;
    if (OpVT == MVT::f64)
      return FPROUND_F64_BF16;
  } else if (RetVT == MVT::f32) {
    if (OpVT == MVT::f64)
      return FPROUND_F64_F32;
    if (OpVT == MVT::f80)
      return FPROUND_F80_F32;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F32;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F32;
  } else if (RetVT == MVT::f64) {
    if (OpVT == MVT::f80)
      return FPROUND_F80_F64;
    if (OpVT == MVT::f128)
      return FPROUND_F128_F64;
    if (OpVT == MVT::ppcf128)
      return FPROUND_PPCF128_F64;
  } else if (RetVT == MVT::f80) {
    if (OpVT == MVT::f128)
      return FPROUND_F128_F80;
  }

  return UNKNOWN_LIBCALL;
}

/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOSINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOSINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOSINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::f16) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F16_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F16_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F16_I128;
  } else if (OpVT == MVT::f32) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F32_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F32_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F32_I128;
  } else if (OpVT == MVT::f64) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F64_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F64_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F64_I128;
  } else if (OpVT == MVT::f80) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F80_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F80_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F80_I128;
  } else if (OpVT == MVT::f128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_F128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_F128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_F128_I128;
  } else if (OpVT == MVT::ppcf128) {
    if (RetVT == MVT::i32)
      return FPTOUINT_PPCF128_I32;
    if (RetVT == MVT::i64)
      return FPTOUINT_PPCF128_I64;
    if (RetVT == MVT::i128)
      return FPTOUINT_PPCF128_I128;
  }
  return UNKNOWN_LIBCALL;
}

/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return SINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return SINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return SINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return SINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return SINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return SINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
  if (OpVT == MVT::i32) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I32_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I32_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I32_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I32_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I32_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I32_PPCF128;
  } else if (OpVT == MVT::i64) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I64_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I64_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I64_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I64_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I64_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I64_PPCF128;
  } else if (OpVT == MVT::i128) {
    if (RetVT == MVT::f16)
      return UINTTOFP_I128_F16;
    if (RetVT == MVT::f32)
      return UINTTOFP_I128_F32;
    if (RetVT == MVT::f64)
      return UINTTOFP_I128_F64;
    if (RetVT == MVT::f80)
      return UINTTOFP_I128_F80;
    if (RetVT == MVT::f128)
      return UINTTOFP_I128_F128;
    if (RetVT == MVT::ppcf128)
      return UINTTOFP_I128_PPCF128;
  }
  return UNKNOWN_LIBCALL;
}

RTLIB::Libcall RTLIB::getPOWI(EVT RetVT) {
  return getFPLibCall(RetVT, POWI_F32, POWI_F64, POWI_F80, POWI_F128,
                      POWI_PPCF128);
}

RTLIB::Libcall RTLIB::getLDEXP(EVT RetVT) {
  return getFPLibCall(RetVT, LDEXP_F32, LDEXP_F64, LDEXP_F80, LDEXP_F128,
                      LDEXP_PPCF128);
}

RTLIB::Libcall RTLIB::getFREXP(EVT RetVT) {
  return getFPLibCall(RetVT, FREXP_F32, FREXP_F64, FREXP_F80, FREXP_F128,
                      FREXP_PPCF128);
}

RTLIB::Libcall RTLIB::getOutlineAtomicHelper(const Libcall (&LC)[5][4],
                                             AtomicOrdering Order,
                                             uint64_t MemSize) {
  unsigned ModeN, ModelN;
  switch (MemSize) {
  case 1:
    ModeN = 0;
    break;
  case 2:
    ModeN = 1;
    break;
  case 4:
    ModeN = 2;
    break;
  case 8:
    ModeN = 3;
    break;
  case 16:
    ModeN = 4;
    break;
  default:
    return RTLIB::UNKNOWN_LIBCALL;
  }

  switch (Order) {
  case AtomicOrdering::Monotonic:
    ModelN = 0;
    break;
  case AtomicOrdering::Acquire:
    ModelN = 1;
    break;
  case AtomicOrdering::Release:
    ModelN = 2;
    break;
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    ModelN = 3;
    break;
  default:
    return UNKNOWN_LIBCALL;
  }

  return LC[ModeN][ModelN];
}
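
// Worked example of the table indexing above (illustrative, not from the
// original source): a 4-byte access gives MemSize == 4 -> ModeN == 2, and
// acquire ordering gives ModelN == 1, so the helper returns LC[2][1]. With
// the LCALL5/LCALLS tables built in getOUTLINE_ATOMIC below, that entry is
// OUTLINE_ATOMIC_CAS4_ACQ when the compare-and-swap table is passed in.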

RTLIB::Libcall RTLIB::getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order,
                                        MVT VT) {
  if (!VT.isScalarInteger())
    return UNKNOWN_LIBCALL;
  uint64_t MemSize = VT.getScalarSizeInBits() / 8;

#define LCALLS(A, B) \
  { A##B##_RELAX, A##B##_ACQ, A##B##_REL, A##B##_ACQ_REL }
#define LCALL5(A) \
  LCALLS(A, 1), LCALLS(A, 2), LCALLS(A, 4), LCALLS(A, 8), LCALLS(A, 16)
  switch (Opc) {
  case ISD::ATOMIC_CMP_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_CAS)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_SWAP: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_SWP)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_ADD: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDADD)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_OR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDSET)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_CLR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDCLR)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  case ISD::ATOMIC_LOAD_XOR: {
    const Libcall LC[5][4] = {LCALL5(OUTLINE_ATOMIC_LDEOR)};
    return getOutlineAtomicHelper(LC, Order, MemSize);
  }
  default:
    return UNKNOWN_LIBCALL;
  }
#undef LCALLS
#undef LCALL5
}
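
// For reference, the macros above expand one table row per access size, with
// the four memory orderings as columns. E.g. LCALLS(OUTLINE_ATOMIC_CAS, 4)
// expands to
//   { OUTLINE_ATOMIC_CAS4_RELAX, OUTLINE_ATOMIC_CAS4_ACQ,
//     OUTLINE_ATOMIC_CAS4_REL, OUTLINE_ATOMIC_CAS4_ACQ_REL }
// and LCALL5(OUTLINE_ATOMIC_CAS) stacks the 1/2/4/8/16-byte rows into the
// 5x4 table consumed by getOutlineAtomicHelper. (Illustrative expansion,
// derived from the macro definitions above.)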

RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
#define OP_TO_LIBCALL(Name, Enum) \
  case Name: \
    switch (VT.SimpleTy) { \
    default: \
      return UNKNOWN_LIBCALL; \
    case MVT::i8: \
      return Enum##_1; \
    case MVT::i16: \
      return Enum##_2; \
    case MVT::i32: \
      return Enum##_4; \
    case MVT::i64: \
      return Enum##_8; \
    case MVT::i128: \
      return Enum##_16; \
    }

  switch (Opc) {
  OP_TO_LIBCALL(ISD::ATOMIC_SWAP, SYNC_LOCK_TEST_AND_SET)
  OP_TO_LIBCALL(ISD::ATOMIC_CMP_SWAP, SYNC_VAL_COMPARE_AND_SWAP)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_ADD, SYNC_FETCH_AND_ADD)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_SUB, SYNC_FETCH_AND_SUB)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_AND, SYNC_FETCH_AND_AND)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_OR, SYNC_FETCH_AND_OR)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_XOR, SYNC_FETCH_AND_XOR)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_NAND, SYNC_FETCH_AND_NAND)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MAX, SYNC_FETCH_AND_MAX)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMAX, SYNC_FETCH_AND_UMAX)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_MIN, SYNC_FETCH_AND_MIN)
  OP_TO_LIBCALL(ISD::ATOMIC_LOAD_UMIN, SYNC_FETCH_AND_UMIN)
  }

#undef OP_TO_LIBCALL

  return UNKNOWN_LIBCALL;
}
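
// Illustrative expansion (derived from the OP_TO_LIBCALL macro above):
// getSYNC(ISD::ATOMIC_LOAD_ADD, MVT::i32) selects the MVT::i32 arm of the
// generated inner switch and returns SYNC_FETCH_AND_ADD_4, i.e. the libcall
// entry whose default name is "__sync_fetch_and_add_4".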

RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
  switch (ElementSize) {
  case 1:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
  case 2:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
  case 4:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
  case 8:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
  case 16:
    return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
  default:
    return UNKNOWN_LIBCALL;
  }
}

/// InitCmpLibcallCCs - Set default comparison libcall CC.
static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
  std::fill(CCs, CCs + RTLIB::UNKNOWN_LIBCALL, ISD::SETCC_INVALID);
  CCs[RTLIB::OEQ_F32] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F64] = ISD::SETEQ;
  CCs[RTLIB::OEQ_F128] = ISD::SETEQ;
  CCs[RTLIB::OEQ_PPCF128] = ISD::SETEQ;
  CCs[RTLIB::UNE_F32] = ISD::SETNE;
  CCs[RTLIB::UNE_F64] = ISD::SETNE;
  CCs[RTLIB::UNE_F128] = ISD::SETNE;
  CCs[RTLIB::UNE_PPCF128] = ISD::SETNE;
  CCs[RTLIB::OGE_F32] = ISD::SETGE;
  CCs[RTLIB::OGE_F64] = ISD::SETGE;
  CCs[RTLIB::OGE_F128] = ISD::SETGE;
  CCs[RTLIB::OGE_PPCF128] = ISD::SETGE;
  CCs[RTLIB::OLT_F32] = ISD::SETLT;
  CCs[RTLIB::OLT_F64] = ISD::SETLT;
  CCs[RTLIB::OLT_F128] = ISD::SETLT;
  CCs[RTLIB::OLT_PPCF128] = ISD::SETLT;
  CCs[RTLIB::OLE_F32] = ISD::SETLE;
  CCs[RTLIB::OLE_F64] = ISD::SETLE;
  CCs[RTLIB::OLE_F128] = ISD::SETLE;
  CCs[RTLIB::OLE_PPCF128] = ISD::SETLE;
  CCs[RTLIB::OGT_F32] = ISD::SETGT;
  CCs[RTLIB::OGT_F64] = ISD::SETGT;
  CCs[RTLIB::OGT_F128] = ISD::SETGT;
  CCs[RTLIB::OGT_PPCF128] = ISD::SETGT;
  CCs[RTLIB::UO_F32] = ISD::SETNE;
  CCs[RTLIB::UO_F64] = ISD::SETNE;
  CCs[RTLIB::UO_F128] = ISD::SETNE;
  CCs[RTLIB::UO_PPCF128] = ISD::SETNE;
}

/// NOTE: The TargetMachine owns TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
  initActions();

  // Perform these initializations only once.
  MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
      MaxLoadsPerMemcmp = 8;
  MaxGluedStoresPerMemcpy = 0;
  MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
      MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
  HasMultipleConditionRegisters = false;
  HasExtractBitsInsn = false;
  JumpIsExpensive = JumpIsExpensiveOverride;
  PredictableSelectIsExpensive = false;
  EnableExtLdPromotion = false;
  StackPointerRegisterToSaveRestore = 0;
  BooleanContents = UndefinedBooleanContent;
  BooleanFloatContents = UndefinedBooleanContent;
  BooleanVectorContents = UndefinedBooleanContent;
  SchedPreferenceInfo = Sched::ILP;
  GatherAllAliasesMaxDepth = 18;
  IsStrictFPEnabled = DisableStrictNodeMutation;
  MaxBytesForAlignment = 0;
  MaxAtomicSizeInBitsSupported = 0;

  // Assume that even with libcalls, no target supports wider than 128 bit
  // division.
  MaxDivRemBitWidthSupported = 128;

  MaxLargeFPConvertBitWidthSupported = llvm::IntegerType::MAX_INT_BITS;

  MinCmpXchgSizeInBits = 0;
  SupportsUnalignedAtomics = false;

  std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames), nullptr);

  InitLibcalls(TM.getTargetTriple());
  InitCmpLibcallCCs(CmpLibcallCCs);
}

void TargetLoweringBase::initActions() {
  // All operations default to being supported.
  memset(OpActions, 0, sizeof(OpActions));
  memset(LoadExtActions, 0, sizeof(LoadExtActions));
  memset(TruncStoreActions, 0, sizeof(TruncStoreActions));
  memset(IndexedModeActions, 0, sizeof(IndexedModeActions));
  memset(CondCodeActions, 0, sizeof(CondCodeActions));
  std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);
  std::fill(std::begin(TargetDAGCombineArray),
            std::end(TargetDAGCombineArray), 0);

  // Let extending atomic loads be unsupported by default.
  for (MVT ValVT : MVT::all_valuetypes())
    for (MVT MemVT : MVT::all_valuetypes())
      setAtomicLoadExtAction({ISD::SEXTLOAD, ISD::ZEXTLOAD}, ValVT, MemVT,
                             Expand);

  // We're somewhat special casing MVT::i2 and MVT::i4. Ideally we want to
  // remove this and targets should individually set these types if not legal.
  for (ISD::NodeType NT : enum_seq(ISD::DELETED_NODE, ISD::BUILTIN_OP_END,
                                   force_iteration_on_noniterable_enum)) {
    for (MVT VT : {MVT::i2, MVT::i4})
      OpActions[(unsigned)VT.SimpleTy][NT] = Expand;
  }
  for (MVT AVT : MVT::all_valuetypes()) {
    for (MVT VT : {MVT::i2, MVT::i4, MVT::v128i2, MVT::v64i4}) {
      setTruncStoreAction(AVT, VT, Expand);
      setLoadExtAction(ISD::EXTLOAD, AVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, AVT, VT, Expand);
    }
  }
  for (unsigned IM = (unsigned)ISD::PRE_INC;
       IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
    for (MVT VT : {MVT::i2, MVT::i4}) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }
  }

  for (MVT VT : MVT::fp_valuetypes()) {
    MVT IntVT = MVT::getIntegerVT(VT.getFixedSizeInBits());
    if (IntVT.isValid()) {
      setOperationAction(ISD::ATOMIC_SWAP, VT, Promote);
      AddPromotedToType(ISD::ATOMIC_SWAP, VT, IntVT);
    }
  }

  // Set default actions for various operations.
  for (MVT VT : MVT::all_valuetypes()) {
    // Default all indexed load / store to expand.
    for (unsigned IM = (unsigned)ISD::PRE_INC;
         IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
      setIndexedLoadAction(IM, VT, Expand);
      setIndexedStoreAction(IM, VT, Expand);
      setIndexedMaskedLoadAction(IM, VT, Expand);
      setIndexedMaskedStoreAction(IM, VT, Expand);
    }

    // Most backends expect to see the node which just returns the value loaded.
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Expand);

    // These operations default to expand.
    setOperationAction({ISD::FGETSIGN,       ISD::CONCAT_VECTORS,
                        ISD::FMINNUM,        ISD::FMAXNUM,
                        ISD::FMINNUM_IEEE,   ISD::FMAXNUM_IEEE,
                        ISD::FMINIMUM,       ISD::FMAXIMUM,
                        ISD::FMAD,           ISD::SMIN,
                        ISD::SMAX,           ISD::UMIN,
                        ISD::UMAX,           ISD::ABS,
                        ISD::FSHL,           ISD::FSHR,
                        ISD::SADDSAT,        ISD::UADDSAT,
                        ISD::SSUBSAT,        ISD::USUBSAT,
                        ISD::SSHLSAT,        ISD::USHLSAT,
                        ISD::SMULFIX,        ISD::SMULFIXSAT,
                        ISD::UMULFIX,        ISD::UMULFIXSAT,
                        ISD::SDIVFIX,        ISD::SDIVFIXSAT,
                        ISD::UDIVFIX,        ISD::UDIVFIXSAT,
                        ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT,
                        ISD::IS_FPCLASS},
                       VT, Expand);

    // Overflow operations default to expand
    setOperationAction({ISD::SADDO, ISD::SSUBO, ISD::UADDO, ISD::USUBO,
                        ISD::SMULO, ISD::UMULO},
                       VT, Expand);

    // Carry-using overflow operations default to expand.
    setOperationAction({ISD::UADDO_CARRY, ISD::USUBO_CARRY, ISD::SETCCCARRY,
                        ISD::SADDO_CARRY, ISD::SSUBO_CARRY},
                       VT, Expand);

    // ADDC/ADDE/SUBC/SUBE default to expand.
    setOperationAction({ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}, VT,
                       Expand);

    // Halving adds
    setOperationAction({ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS,
                        ISD::AVGCEILU}, VT,
                       Expand);

    // Absolute difference
    setOperationAction({ISD::ABDS, ISD::ABDU}, VT, Expand);

    // These default to Expand so they will be expanded to CTLZ/CTTZ by default.
    setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
                       Expand);

    setOperationAction({ISD::BITREVERSE, ISD::PARITY}, VT, Expand);

    // These library functions default to expand.
    setOperationAction({ISD::FROUND, ISD::FPOWI, ISD::FLDEXP, ISD::FFREXP}, VT,
                       Expand);

    // These operations default to expand for vector types.
    if (VT.isVector())
      setOperationAction(
          {ISD::FCOPYSIGN, ISD::SIGN_EXTEND_INREG, ISD::ANY_EXTEND_VECTOR_INREG,
           ISD::SIGN_EXTEND_VECTOR_INREG, ISD::ZERO_EXTEND_VECTOR_INREG,
           ISD::SPLAT_VECTOR},
          VT, Expand);

    // Constrained floating-point operations default to expand.
#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
    setOperationAction(ISD::STRICT_##DAGN, VT, Expand);
#include "llvm/IR/ConstrainedOps.def"

    // For most targets @llvm.get.dynamic.area.offset just returns 0.
    setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, VT, Expand);

    // Vector reduction default to expand.
    setOperationAction(
        {ISD::VECREDUCE_FADD, ISD::VECREDUCE_FMUL, ISD::VECREDUCE_ADD,
         ISD::VECREDUCE_MUL, ISD::VECREDUCE_AND, ISD::VECREDUCE_OR,
         ISD::VECREDUCE_XOR, ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
         ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN, ISD::VECREDUCE_FMAX,
         ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAXIMUM, ISD::VECREDUCE_FMINIMUM,
         ISD::VECREDUCE_SEQ_FADD, ISD::VECREDUCE_SEQ_FMUL},
        VT, Expand);

    // Named vector shuffles default to expand.
    setOperationAction(ISD::VECTOR_SPLICE, VT, Expand);

    // VP operations default to expand.
#define BEGIN_REGISTER_VP_SDNODE(SDOPC, ...) \
    setOperationAction(ISD::SDOPC, VT, Expand);
#include "llvm/IR/VPIntrinsics.def"

    // FP environment operations default to expand.
    setOperationAction(ISD::GET_FPENV, VT, Expand);
    setOperationAction(ISD::SET_FPENV, VT, Expand);
    setOperationAction(ISD::RESET_FPENV, VT, Expand);
  }

  // Most targets ignore the @llvm.prefetch intrinsic.
  setOperationAction(ISD::PREFETCH, MVT::Other, Expand);

  // Most targets also ignore the @llvm.readcyclecounter intrinsic.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Expand);

  // Most targets also ignore the @llvm.readsteadycounter intrinsic.
  setOperationAction(ISD::READSTEADYCOUNTER, MVT::i64, Expand);

  // ConstantFP nodes default to expand. Targets can either change this to
  // Legal, in which case all fp constants are legal, or use isFPImmLegal()
  // to optimize expansions for certain constants.
  setOperationAction(ISD::ConstantFP,
                     {MVT::bf16, MVT::f16, MVT::f32, MVT::f64, MVT::f80, MVT::f128},
                     Expand);

  // These library functions default to expand.
  setOperationAction({ISD::FCBRT, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
                      ISD::FEXP, ISD::FEXP2, ISD::FEXP10, ISD::FFLOOR,
                      ISD::FNEARBYINT, ISD::FCEIL, ISD::FRINT, ISD::FTRUNC,
                      ISD::FROUNDEVEN},
                     {MVT::f32, MVT::f64, MVT::f128}, Expand);

  // Default ISD::TRAP to expand (which turns it into abort).
  setOperationAction(ISD::TRAP, MVT::Other, Expand);

  // On most systems, DEBUGTRAP and TRAP have no difference. The "Expand"
  // here is to inform DAG Legalizer to replace DEBUGTRAP with TRAP.
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Expand);

  setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);

  setOperationAction(ISD::GET_FPENV_MEM, MVT::Other, Expand);
  setOperationAction(ISD::SET_FPENV_MEM, MVT::Other, Expand);

  for (MVT VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
    setOperationAction(ISD::GET_FPMODE, VT, Expand);
    setOperationAction(ISD::SET_FPMODE, VT, Expand);
  }
  setOperationAction(ISD::RESET_FPMODE, MVT::Other, Expand);
}
1011
1013 EVT) const {
1014 return MVT::getIntegerVT(DL.getPointerSizeInBits(0));
1015}
1016
1018 bool LegalTypes) const {
1019 assert(LHSTy.isInteger() && "Shift amount is not an integer type!");
1020 if (LHSTy.isVector())
1021 return LHSTy;
1022 MVT ShiftVT =
1023 LegalTypes ? getScalarShiftAmountTy(DL, LHSTy) : getPointerTy(DL);
1024 // If any possible shift value won't fit in the prefered type, just use
1025 // something safe. Assume it will be legalized when the shift is expanded.
1026 if (ShiftVT.getSizeInBits() < Log2_32_Ceil(LHSTy.getSizeInBits()))
1027 ShiftVT = MVT::i32;
1028 assert(ShiftVT.getSizeInBits() >= Log2_32_Ceil(LHSTy.getSizeInBits()) &&
1029 "ShiftVT is still too small!");
1030 return ShiftVT;
1031}

bool TargetLoweringBase::canOpTrap(unsigned Op, EVT VT) const {
  assert(isTypeLegal(VT));
  switch (Op) {
  default:
    return false;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
    return true;
  }
}

bool TargetLoweringBase::isNoopAddrSpaceCast(unsigned SrcAS,
                                             unsigned DestAS) const {
  return TM.isNoopAddrSpaceCast(SrcAS, DestAS);
}

unsigned TargetLoweringBase::getBitWidthForCttzElements(
    Type *RetTy, ElementCount EC, bool ZeroIsPoison,
    const ConstantRange *VScaleRange) const {
  // Find the smallest "sensible" element type to use for the expansion.
  ConstantRange CR(APInt(64, EC.getKnownMinValue()));
  if (EC.isScalable())
    CR = CR.umul_sat(*VScaleRange);

  if (ZeroIsPoison)
    CR = CR.subtract(APInt(64, 1));

  unsigned EltWidth = RetTy->getScalarSizeInBits();
  EltWidth = std::min(EltWidth, (unsigned)CR.getActiveBits());
  EltWidth = std::max(llvm::bit_ceil(EltWidth), (unsigned)8);

  return EltWidth;
}
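
// Worked example (illustrative, not from the original source): for a fixed
// <8 x ...> vector with an i64 result and ZeroIsPoison == true, CR starts as
// the single value 8 and becomes 7 after the subtraction, so getActiveBits()
// is 3; std::min(64, 3) == 3, and bit_ceil(3) == 4 is then clamped up to the
// minimum of 8, so the expansion uses 8-bit elements.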

void TargetLoweringBase::setJumpIsExpensive(bool isExpensive) {
  // If the command-line option was specified, ignore this request.
  if (!JumpIsExpensiveOverride.getNumOccurrences())
    JumpIsExpensive = isExpensive;
}

TargetLoweringBase::LegalizeKind
TargetLoweringBase::getTypeConversion(LLVMContext &Context, EVT VT) const {
  // If this is a simple type, use the ComputeRegisterProp mechanism.
  if (VT.isSimple()) {
    MVT SVT = VT.getSimpleVT();
    assert((unsigned)SVT.SimpleTy < std::size(TransformToType));
    MVT NVT = TransformToType[SVT.SimpleTy];
    LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

    assert((LA == TypeLegal || LA == TypeSoftenFloat ||
            LA == TypeSoftPromoteHalf ||
            (NVT.isVector() ||
             ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)) &&
           "Promote may not follow Expand or Promote");

    if (LA == TypeSplitVector)
      return LegalizeKind(LA, EVT(SVT).getHalfNumVectorElementsVT(Context));
    if (LA == TypeScalarizeVector)
      return LegalizeKind(LA, SVT.getVectorElementType());
    return LegalizeKind(LA, NVT);
  }

  // Handle Extended Scalar Types.
  if (!VT.isVector()) {
    assert(VT.isInteger() && "Float types must be simple");
    unsigned BitSize = VT.getSizeInBits();
    // First promote to a power-of-two size, then expand if necessary.
    if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
      EVT NVT = VT.getRoundIntegerType(Context);
      assert(NVT != VT && "Unable to round integer VT");
      LegalizeKind NextStep = getTypeConversion(Context, NVT);
      // Avoid multi-step promotion.
      if (NextStep.first == TypePromoteInteger)
        return NextStep;
      // Return rounded integer type.
      return LegalizeKind(TypePromoteInteger, NVT);
    }

    return LegalizeKind(TypeExpandInteger,
                        EVT::getIntegerVT(Context, VT.getSizeInBits() / 2));
  }

  // Handle vector types.
  ElementCount NumElts = VT.getVectorElementCount();
  EVT EltVT = VT.getVectorElementType();

  // Vectors with only one element are always scalarized.
  if (NumElts.isScalar())
    return LegalizeKind(TypeScalarizeVector, EltVT);

  // Try to widen vector elements until the element type is a power of two and
  // promote it to a legal type later on, for example:
  // <3 x i8> -> <4 x i8> -> <4 x i32>
  if (EltVT.isInteger()) {
    // Vectors with a number of elements that is not a power of two are always
    // widened, for example <3 x i8> -> <4 x i8>.
    if (!VT.isPow2VectorType()) {
      NumElts = NumElts.coefficientNextPowerOf2();
      EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Examine the element type.
    LegalizeKind LK = getTypeConversion(Context, EltVT);

    // If type is to be expanded, split the vector.
    // <4 x i140> -> <2 x i140>
    if (LK.first == TypeExpandInteger) {
      if (VT.getVectorElementCount().isScalable())
        return LegalizeKind(TypeScalarizeScalableVector, EltVT);
      return LegalizeKind(TypeSplitVector,
                          VT.getHalfNumVectorElementsVT(Context));
    }

    // Promote the integer element types until a legal vector type is found
    // or until the element integer type is too big. If a legal type was not
    // found, fallback to the usual mechanism of widening/splitting the
    // vector.
    EVT OldEltVT = EltVT;
    while (true) {
      // Increase the bitwidth of the element to the next pow-of-two
      // (which is greater than 8 bits).
      EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
                  .getRoundIntegerType(Context);

      // Stop trying when getting a non-simple element type.
      // Note that vector elements may be greater than legal vector element
      // types. Example: X86 XMM registers hold 64bit element on 32bit
      // systems.
      if (!EltVT.isSimple())
        break;

      // Build a new vector type and check if it is legal.
      MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      // Found a legal promoted vector type.
      if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
        return LegalizeKind(TypePromoteInteger,
                            EVT::getVectorVT(Context, EltVT, NumElts));
    }

    // Reset the type to the unexpanded type if we did not find a legal vector
    // type with a promoted vector element type.
    EltVT = OldEltVT;
  }

  // Try to widen the vector until a legal type is found.
  // If there is no wider legal type, split the vector.
  while (true) {
    // Round up to the next power of 2.
    NumElts = NumElts.coefficientNextPowerOf2();

    // If there is no simple vector type with this many elements then there
    // cannot be a larger legal vector type. Note that this assumes that
    // there are no skipped intermediate vector types in the simple types.
    if (!EltVT.isSimple())
      break;
    MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
    if (LargerVector == MVT())
      break;

    // If this type is legal then widen the vector.
    if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
      return LegalizeKind(TypeWidenVector, LargerVector);
  }

  // Widen odd vectors to next power of two.
  if (!VT.isPow2VectorType()) {
    EVT NVT = VT.getPow2VectorType(Context);
    return LegalizeKind(TypeWidenVector, NVT);
  }

  if (VT.getVectorElementCount() == ElementCount::getScalable(1))
    return LegalizeKind(TypeScalarizeScalableVector, EltVT);

  // Vectors with illegal element types are expanded.
  EVT NVT = EVT::getVectorVT(Context, EltVT,
                             VT.getVectorElementCount().divideCoefficientBy(2));
  return LegalizeKind(TypeSplitVector, NVT);
}

static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
                                          unsigned &NumIntermediates,
                                          MVT &RegisterVT,
                                          TargetLoweringBase *TLI) {
  // Figure out the right, legal destination reg to copy into.
  ElementCount EC = VT.getVectorElementCount();
  MVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so splitting or widening is
  // required.
  if (VT.isScalableVector() && !isPowerOf2_32(EC.getKnownMinValue()))
    report_fatal_error(
        "Splitting or widening of non-power-of-2 MVTs is not implemented.");

  // FIXME: We don't support non-power-of-2-sized vectors for now.
  // Ideally we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EC.getKnownMinValue())) {
    // Split EC to unit size (scalable property is preserved).
    NumVectorRegs = EC.getKnownMinValue();
    EC = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will
  // always end up with an EC that represents a scalar or a scalable
  // scalar.
  while (EC.getKnownMinValue() > 1 &&
         !TLI->isTypeLegal(MVT::getVectorVT(EltTy, EC))) {
    EC = EC.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  MVT NewVT = MVT::getVectorVT(EltTy, EC);
  if (!TLI->isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  unsigned LaneSizeInBits = NewVT.getScalarSizeInBits();

  // Convert sizes such as i33 to i64.
  LaneSizeInBits = llvm::bit_ceil(LaneSizeInBits);

  MVT DestVT = TLI->getRegisterType(NewVT);
  RegisterVT = DestVT;
  if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16.
    return NumVectorRegs * (LaneSizeInBits / DestVT.getScalarSizeInBits());

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}
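
// Worked example (illustrative, not from the original source): breaking down
// MVT::v3i64 on a hypothetical target with no legal vector types and i32 as
// the widest legal integer. The element count 3 is not a power of two, so the
// vector is first split into NumVectorRegs == 3 scalar pieces (EC == 1);
// NewVT then degrades to i64, whose register type is i32, so the function
// reports 3 * (64 / 32) == 6 registers with IntermediateVT == MVT::i64 and
// RegisterVT == MVT::i32.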

/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
                                   const TargetRegisterClass &RC) const {
  for (const auto *I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
    if (isTypeLegal(*I))
      return true;
  return false;
}

/// Replace/modify any TargetFrameIndex operands with a target-dependent
/// sequence of memory operands that is recognized by PrologEpilogInserter.
MachineBasicBlock *
TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
                                   MachineBasicBlock *MBB) const {
  MachineInstr *MI = &InitialMI;
  MachineFunction &MF = *MI->getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  // We're handling multiple types of operands here:
  // PATCHPOINT MetaArgs - live-in, read only, direct
  // STATEPOINT Deopt Spill - live-through, read only, indirect
  // STATEPOINT Deopt Alloca - live-through, read only, direct
  // (We're currently conservative and mark the deopt slots read/write in
  // practice.)
  // STATEPOINT GC Spill - live-through, read/write, indirect
  // STATEPOINT GC Alloca - live-through, read/write, direct
  // The live-in vs live-through is handled already (the live through ones are
  // all stack slots), but we need to handle the different type of stackmap
  // operands and memory effects here.

  if (llvm::none_of(MI->operands(),
                    [](MachineOperand &Operand) { return Operand.isFI(); }))
    return MBB;

  MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), MI->getDesc());

  // Inherit previous memory operands.
  MIB.cloneMemRefs(*MI);

  for (unsigned i = 0; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isFI()) {
      // Index of the Def operand this Use is tied to.
      // Since Defs come before Uses, if a Use is tied, the index of the Def
      // must be smaller than the index of that Use.
      // Also, Defs preserve their position in the new MI.
      unsigned TiedTo = i;
      if (MO.isReg() && MO.isTied())
        TiedTo = MI->findTiedOperandIdx(i);
      MIB.add(MO);
      if (TiedTo < i)
        MIB->tieOperands(TiedTo, MIB->getNumOperands() - 1);
      continue;
    }

    // foldMemoryOperand builds a new MI after replacing a single FI operand
    // with the canonical set of five x86 addressing-mode operands.
    int FI = MO.getIndex();

    // Add frame index operands recognized by stackmaps.cpp
    if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
      // indirect-mem-ref tag, size, #FI, offset.
      // Used for spills inserted by StatepointLowering. This codepath is not
      // used for patchpoints/stackmaps at all, for these spilling is done via
      // foldMemoryOperand callback only.
      assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(MFI.getObjectSize(FI));
      MIB.add(MO);
      MIB.addImm(0);
    } else {
      // direct-mem-ref tag, #FI, offset.
      // Used by patchpoint, and direct alloca arguments to statepoints
      MIB.addImm(StackMaps::DirectMemRefOp);
      MIB.add(MO);
      MIB.addImm(0);
    }

    assert(MIB->mayLoad() && "Folded a stackmap use to a non-load!");

    // Add a new memory operand for this FI.
    assert(MFI.getObjectOffset(FI) != -1);

    // Note: STATEPOINT MMOs are added during SelectionDAG. STACKMAP, and
    // PATCHPOINT should be updated to do the same. (TODO)
    if (MI->getOpcode() != TargetOpcode::STATEPOINT) {
      auto Flags = MachineMemOperand::MOLoad;
      MachineMemOperand *MMO = MF.getMachineMemOperand(
          MachinePointerInfo::getFixedStack(MF, FI), Flags,
          MF.getDataLayout().getPointerSize(), MFI.getObjectAlign(FI));
      MIB->addMemOperand(MF, MMO);
    }
  }
  MBB->insert(MachineBasicBlock::iterator(MI), MIB);
  MI->eraseFromParent();
  return MBB;
}

/// findRepresentativeClass - Return the largest legal super-reg register class
/// of the register class for the specified type and its associated "cost".
// This function is in TargetLowering because it uses RegClassForVT which would
// need to be moved to TargetRegisterInfo and would necessitate moving
// isTypeLegal over as well - a massive change that would just require
// TargetLowering having a TargetRegisterInfo class member that it would use.
std::pair<const TargetRegisterClass *, uint8_t>
TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
                                            MVT VT) const {
  const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
  if (!RC)
    return std::make_pair(RC, 0);

  // Compute the set of all super-register classes.
  BitVector SuperRegRC(TRI->getNumRegClasses());
  for (SuperRegClassIterator RCI(RC, TRI); RCI.isValid(); ++RCI)
    SuperRegRC.setBitsInMask(RCI.getMask());

  // Find the first legal register class with the largest spill size.
  const TargetRegisterClass *BestRC = RC;
  for (unsigned i : SuperRegRC.set_bits()) {
    const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
    // We want the largest possible spill size.
    if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
      continue;
    if (!isLegalRC(*TRI, *SuperRC))
      continue;
    BestRC = SuperRC;
  }
  return std::make_pair(BestRC, 1);
}

/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLoweringBase::computeRegisterProperties(
    const TargetRegisterInfo *TRI) {
  static_assert(MVT::VALUETYPE_SIZE <= MVT::MAX_ALLOWED_VALUETYPE,
                "Too many value types for ValueTypeActions to hold!");

  // Everything defaults to needing one register.
  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
    NumRegistersForVT[i] = 1;
    RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
  }
  // ...except isVoid, which doesn't need any registers.
  NumRegistersForVT[MVT::isVoid] = 0;

  // Find the largest integer register class.
  unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
  for (; RegClassForVT[LargestIntReg] == nullptr; --LargestIntReg)
    assert(LargestIntReg != MVT::i1 && "No integer registers defined!");

  // Every integer value type larger than this largest register takes twice as
  // many registers to represent as the previous ValueType.
  for (unsigned ExpandedReg = LargestIntReg + 1;
       ExpandedReg <= MVT::LAST_INTEGER_VALUETYPE; ++ExpandedReg) {
    NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
    RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
    TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
    ValueTypeActions.setTypeAction((MVT::SimpleValueType)ExpandedReg,
                                   TypeExpandInteger);
  }

  // Inspect all of the ValueTypes smaller than the largest integer
  // register to see which ones need promotion.
  unsigned LegalIntReg = LargestIntReg;
  for (unsigned IntReg = LargestIntReg - 1;
       IntReg >= (unsigned)MVT::i1; --IntReg) {
    MVT IVT = (MVT::SimpleValueType)IntReg;
    if (isTypeLegal(IVT)) {
      LegalIntReg = IntReg;
    } else {
      RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
          (MVT::SimpleValueType)LegalIntReg;
      ValueTypeActions.setTypeAction(IVT, TypePromoteInteger);
    }
  }

  // ppcf128 type is really two f64's.
  if (!isTypeLegal(MVT::ppcf128)) {
    if (isTypeLegal(MVT::f64)) {
      NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
      RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
      TransformToType[MVT::ppcf128] = MVT::f64;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeExpandFloat);
    } else {
      NumRegistersForVT[MVT::ppcf128] = NumRegistersForVT[MVT::i128];
      RegisterTypeForVT[MVT::ppcf128] = RegisterTypeForVT[MVT::i128];
      TransformToType[MVT::ppcf128] = MVT::i128;
      ValueTypeActions.setTypeAction(MVT::ppcf128, TypeSoftenFloat);
    }
  }

  // Decide how to handle f128. If the target does not have native f128 support,
  // expand it to i128 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f128)) {
    NumRegistersForVT[MVT::f128] = NumRegistersForVT[MVT::i128];
    RegisterTypeForVT[MVT::f128] = RegisterTypeForVT[MVT::i128];
    TransformToType[MVT::f128] = MVT::i128;
    ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
  }

  // Decide how to handle f80. If the target does not have native f80 support,
  // expand it to i96 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f80)) {
    NumRegistersForVT[MVT::f80] = 3*NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f80] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f80] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f80, TypeSoftenFloat);
  }

  // Decide how to handle f64. If the target does not have native f64 support,
  // expand it to i64 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f64)) {
    NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
    RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
    TransformToType[MVT::f64] = MVT::i64;
    ValueTypeActions.setTypeAction(MVT::f64, TypeSoftenFloat);
  }

  // Decide how to handle f32. If the target does not have native f32 support,
  // expand it to i32 and we will be generating soft float library calls.
  if (!isTypeLegal(MVT::f32)) {
    NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
    RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
    TransformToType[MVT::f32] = MVT::i32;
    ValueTypeActions.setTypeAction(MVT::f32, TypeSoftenFloat);
  }

  // Decide how to handle f16. If the target does not have native f16 support,
  // promote it to f32, because there are no f16 library calls (except for
  // conversions).
  if (!isTypeLegal(MVT::f16)) {
    // Allow targets to control how we legalize half.
    bool SoftPromoteHalfType = softPromoteHalfType();
    bool UseFPRegsForHalfType = !SoftPromoteHalfType || useFPRegsForHalfType();

    if (!UseFPRegsForHalfType) {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::i16];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::i16];
    } else {
      NumRegistersForVT[MVT::f16] = NumRegistersForVT[MVT::f32];
      RegisterTypeForVT[MVT::f16] = RegisterTypeForVT[MVT::f32];
    }
    TransformToType[MVT::f16] = MVT::f32;
    if (SoftPromoteHalfType) {
      ValueTypeActions.setTypeAction(MVT::f16, TypeSoftPromoteHalf);
    } else {
      ValueTypeActions.setTypeAction(MVT::f16, TypePromoteFloat);
    }
  }

  // Decide how to handle bf16. If the target does not have native bf16 support,
  // promote it to f32, because there are no bf16 library calls (except for
  // converting from f32 to bf16).
  if (!isTypeLegal(MVT::bf16)) {
    NumRegistersForVT[MVT::bf16] = NumRegistersForVT[MVT::f32];
    RegisterTypeForVT[MVT::bf16] = RegisterTypeForVT[MVT::f32];
    TransformToType[MVT::bf16] = MVT::f32;
    ValueTypeActions.setTypeAction(MVT::bf16, TypeSoftPromoteHalf);
  }

  // Loop over all of the vector value types to see which need transformations.
  for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
       i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
    MVT VT = (MVT::SimpleValueType) i;
    if (isTypeLegal(VT))
      continue;

    MVT EltVT = VT.getVectorElementType();
    ElementCount EC = VT.getVectorElementCount();
    bool IsLegalWiderType = false;
    bool IsScalable = VT.isScalableVector();
    LegalizeTypeAction PreferredAction = getPreferredVectorAction(VT);
    switch (PreferredAction) {
    case TypePromoteInteger: {
      MVT::SimpleValueType EndVT = IsScalable ?
                                   MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE :
                                   MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE;
      // Try to promote the elements of integer vectors. If no legal
      // promotion was found, fall through to the widen-vector method.
      for (unsigned nVT = i + 1;
           (MVT::SimpleValueType)nVT <= EndVT; ++nVT) {
        MVT SVT = (MVT::SimpleValueType) nVT;
        // Promote vectors of integers to vectors with the same number
        // of elements, with a wider element type.
        if (SVT.getScalarSizeInBits() > EltVT.getFixedSizeInBits() &&
            SVT.getVectorElementCount() == EC && isTypeLegal(SVT)) {
          TransformToType[i] = SVT;
          RegisterTypeForVT[i] = SVT;
          NumRegistersForVT[i] = 1;
          ValueTypeActions.setTypeAction(VT, TypePromoteInteger);
          IsLegalWiderType = true;
          break;
        }
      }
      if (IsLegalWiderType)
        break;
      [[fallthrough]];
    }

    case TypeWidenVector:
      if (isPowerOf2_32(EC.getKnownMinValue())) {
        // Try to widen the vector.
        for (unsigned nVT = i + 1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
          MVT SVT = (MVT::SimpleValueType) nVT;
          if (SVT.getVectorElementType() == EltVT &&
              SVT.isScalableVector() == IsScalable &&
              SVT.getVectorElementCount().getKnownMinValue() >
                  EC.getKnownMinValue() &&
              isTypeLegal(SVT)) {
            TransformToType[i] = SVT;
            RegisterTypeForVT[i] = SVT;
            NumRegistersForVT[i] = 1;
            ValueTypeActions.setTypeAction(VT, TypeWidenVector);
            IsLegalWiderType = true;
            break;
          }
        }
        if (IsLegalWiderType)
          break;
      } else {
        // Only widen to the next power of 2 to keep consistency with EVT.
        MVT NVT = VT.getPow2VectorType();
        if (isTypeLegal(NVT)) {
          TransformToType[i] = NVT;
          ValueTypeActions.setTypeAction(VT, TypeWidenVector);
          RegisterTypeForVT[i] = NVT;
          NumRegistersForVT[i] = 1;
          break;
        }
      }
      [[fallthrough]];

    case TypeSplitVector:
    case TypeScalarizeVector: {
      MVT IntermediateVT;
      MVT RegisterVT;
      unsigned NumIntermediates;
      unsigned NumRegisters = getVectorTypeBreakdownMVT(VT, IntermediateVT,
          NumIntermediates, RegisterVT, this);
      NumRegistersForVT[i] = NumRegisters;
      assert(NumRegistersForVT[i] == NumRegisters &&
             "NumRegistersForVT size cannot represent NumRegisters!");
      RegisterTypeForVT[i] = RegisterVT;

      MVT NVT = VT.getPow2VectorType();
      if (NVT == VT) {
        // Type is already a power of 2. The default action is to split.
        TransformToType[i] = MVT::Other;
        if (PreferredAction == TypeScalarizeVector)
          ValueTypeActions.setTypeAction(VT, TypeScalarizeVector);
        else if (PreferredAction == TypeSplitVector)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else if (EC.getKnownMinValue() > 1)
          ValueTypeActions.setTypeAction(VT, TypeSplitVector);
        else
          ValueTypeActions.setTypeAction(VT, EC.isScalable()
                                                 ? TypeScalarizeScalableVector
                                                 : TypeScalarizeVector);
      } else {
        TransformToType[i] = NVT;
        ValueTypeActions.setTypeAction(VT, TypeWidenVector);
      }
      break;
    }
    default:
      llvm_unreachable("Unknown vector legalization action!");
    }
  }

  // Determine the 'representative' register class for each value type.
  // A representative register class is the largest (meaning one which is
  // not a sub-register class / subreg register class) legal register class for
  // a group of value types. For example, on i386, i8, i16, and i32
  // representative would be GR32; while on x86_64 it's GR64.
  for (unsigned i = 0; i != MVT::VALUETYPE_SIZE; ++i) {
    const TargetRegisterClass* RRC;
    uint8_t Cost;
    std::tie(RRC, Cost) = findRepresentativeClass(TRI, (MVT::SimpleValueType)i);
    RepRegClassForVT[i] = RRC;
    RepRegClassCostForVT[i] = Cost;
  }
}
1653
1655 EVT VT) const {
1656 assert(!VT.isVector() && "No default SetCC type for vectors!");
1657 return getPointerTy(DL).SimpleTy;
1658}
1659
1661 return MVT::i32; // return the default value
1662}
1663
1664/// getVectorTypeBreakdown - Vector types are broken down into some number of
1665/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
1666/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
1667/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
1668///
1669/// This method returns the number of registers needed, and the VT for each
1670/// register. It also returns the VT and quantity of the intermediate values
1671/// before they are promoted/expanded.
unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context,
                                                    EVT VT, EVT &IntermediateVT,
                                                    unsigned &NumIntermediates,
                                                    MVT &RegisterVT) const {
  ElementCount EltCnt = VT.getVectorElementCount();

  // If there is a wider vector type with the same element type as this one,
  // or a promoted vector type that has the same number of elements which
  // are wider, then we should convert to that legal vector type.
  // This handles things like <2 x float> -> <4 x float> and
  // <4 x i1> -> <4 x i32>.
  LegalizeTypeAction TA = getTypeAction(Context, VT);
  if (!EltCnt.isScalar() &&
      (TA == TypeWidenVector || TA == TypePromoteInteger)) {
    EVT RegisterEVT = getTypeToTransformTo(Context, VT);
    if (isTypeLegal(RegisterEVT)) {
      IntermediateVT = RegisterEVT;
      RegisterVT = RegisterEVT.getSimpleVT();
      NumIntermediates = 1;
      return 1;
    }
  }

  // Figure out the right, legal destination reg to copy into.
  EVT EltTy = VT.getVectorElementType();

  unsigned NumVectorRegs = 1;

  // Scalable vectors cannot be scalarized, so handle the legalisation of the
  // types like done elsewhere in SelectionDAG.
  if (EltCnt.isScalable()) {
    LegalizeKind LK;
    EVT PartVT = VT;
    do {
      // Iterate until we've found a legal (part) type to hold VT.
      LK = getTypeConversion(Context, PartVT);
      PartVT = LK.second;
    } while (LK.first != TypeLegal);

    if (!PartVT.isVector()) {
      report_fatal_error(
          "Don't know how to legalize this scalable vector type");
    }

    NumIntermediates =
        divideCeil(VT.getVectorElementCount().getKnownMinValue(),
                   PartVT.getVectorElementCount().getKnownMinValue());
    IntermediateVT = PartVT;
    RegisterVT = getRegisterType(Context, IntermediateVT);
    return NumIntermediates;
  }

  // FIXME: We don't support non-power-of-2-sized vectors for now. Ideally
  // we could break down into LHS/RHS like LegalizeDAG does.
  if (!isPowerOf2_32(EltCnt.getKnownMinValue())) {
    NumVectorRegs = EltCnt.getKnownMinValue();
    EltCnt = ElementCount::getFixed(1);
  }

  // Divide the input until we get to a supported size. This will always
  // end with a scalar if the target doesn't support vectors.
  while (EltCnt.getKnownMinValue() > 1 &&
         !isTypeLegal(EVT::getVectorVT(Context, EltTy, EltCnt))) {
    EltCnt = EltCnt.divideCoefficientBy(2);
    NumVectorRegs <<= 1;
  }

  NumIntermediates = NumVectorRegs;

  EVT NewVT = EVT::getVectorVT(Context, EltTy, EltCnt);
  if (!isTypeLegal(NewVT))
    NewVT = EltTy;
  IntermediateVT = NewVT;

  MVT DestVT = getRegisterType(Context, NewVT);
  RegisterVT = DestVT;

  if (EVT(DestVT).bitsLT(NewVT)) { // Value is expanded, e.g. i64 -> i16.
    TypeSize NewVTSize = NewVT.getSizeInBits();
    // Convert sizes such as i33 to i64.
    if (!llvm::has_single_bit<uint32_t>(NewVTSize.getKnownMinValue()))
      NewVTSize = NewVTSize.coefficientNextPowerOf2();
    return NumVectorRegs*(NewVTSize/DestVT.getSizeInBits());
  }

  // Otherwise, promotion or legal types use the same number of registers as
  // the vector decimated to the appropriate level.
  return NumVectorRegs;
}

bool TargetLoweringBase::isSuitableForJumpTable(const SwitchInst *SI,
                                                uint64_t NumCases,
                                                uint64_t Range,
                                                ProfileSummaryInfo *PSI,
                                                BlockFrequencyInfo *BFI) const {
  // FIXME: This function checks the maximum table size and density, but the
  // minimum size is not checked. It would be nice if the minimum size check
  // were also combined within this function. Currently, the minimum size check
  // is performed in findJumpTable() in SelectionDAGBuilder and
  // getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
  const bool OptForSize =
      SI->getParent()->getParent()->hasOptSize() ||
      llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI);
  const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
  const unsigned MaxJumpTableSize = getMaximumJumpTableSize();

  // Check whether the number of cases is small enough and
  // the range is dense enough for a jump table.
  return (OptForSize || Range <= MaxJumpTableSize) &&
         (NumCases * 100 >= Range * MinDensity);
}
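
// Worked example (illustrative, not from the original source): with the
// default density of 10, a switch with NumCases == 6 distinct cases spanning
// a Range of 20 values passes the density check (6 * 100 >= 20 * 10), so a
// jump table is considered, provided Range also fits under the maximum table
// size (or we are optimizing for size).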

MVT TargetLoweringBase::getPreferredSwitchConditionType(LLVMContext &Context,
                                                        EVT ConditionVT) const {
  return getRegisterType(Context, ConditionVT);
}
1789/// Get the EVTs and ArgFlags collections that represent the legalized return
1790/// type of the given function. This does not require a DAG or a return value,
1791/// and is suitable for use before any DAGs for the function are constructed.
1792/// TODO: Move this out of TargetLowering.cpp.
1793void llvm::GetReturnInfo(CallingConv::ID CC, Type *ReturnType,
1794 AttributeList attr,
1795 SmallVectorImpl<ISD::OutputArg> &Outs,
1796 const TargetLowering &TLI, const DataLayout &DL) {
1797 SmallVector<EVT, 4> ValueVTs;
1798 ComputeValueVTs(TLI, DL, ReturnType, ValueVTs);
1799 unsigned NumValues = ValueVTs.size();
1800 if (NumValues == 0) return;
1801
1802 for (unsigned j = 0, f = NumValues; j != f; ++j) {
1803 EVT VT = ValueVTs[j];
1804 ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
1805
1806 if (attr.hasRetAttr(Attribute::SExt))
1807 ExtendKind = ISD::SIGN_EXTEND;
1808 else if (attr.hasRetAttr(Attribute::ZExt))
1809 ExtendKind = ISD::ZERO_EXTEND;
1810
1811 if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
1812 VT = TLI.getTypeForExtReturn(ReturnType->getContext(), VT, ExtendKind);
1813
1814 unsigned NumParts =
1815 TLI.getNumRegistersForCallingConv(ReturnType->getContext(), CC, VT);
1816 MVT PartVT =
1817 TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), CC, VT);
1818
1819 // 'inreg' on function refers to return value
1820 ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
1821 if (attr.hasRetAttr(Attribute::InReg))
1822 Flags.setInReg();
1823
1824 // Propagate extension type if any
1825 if (attr.hasRetAttr(Attribute::SExt))
1826 Flags.setSExt();
1827 else if (attr.hasRetAttr(Attribute::ZExt))
1828 Flags.setZExt();
1829
1830 for (unsigned i = 0; i < NumParts; ++i) {
1831 ISD::ArgFlagsTy OutFlags = Flags;
1832 if (NumParts > 1 && i == 0)
1833 OutFlags.setSplit();
1834 else if (i == NumParts - 1 && i != 0)
1835 OutFlags.setSplitEnd();
1836
1837 Outs.push_back(
1838 ISD::OutputArg(OutFlags, PartVT, VT, /*isfixed=*/true, 0, 0));
1839 }
1840 }
1841}
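The split flags set in the inner loop above mark the first and last register parts of a multi-part value. A standalone sketch of that flag assignment, assuming a return value legalized into two parts (e.g. an i64 returned in two i32 registers on a hypothetical 32-bit target):

#include <cstdio>

int main() {
  const unsigned NumParts = 2; // e.g. i64 split into two i32 parts
  for (unsigned i = 0; i < NumParts; ++i) {
    bool Split = NumParts > 1 && i == 0;         // first part of a split value
    bool SplitEnd = i == NumParts - 1 && i != 0; // last part of a split value
    std::printf("part %u: split=%d splitEnd=%d\n", i, Split, SplitEnd);
  }
  return 0;
}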
1842
1843/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1844/// function arguments in the caller parameter area. This is the actual
1845/// alignment, not its logarithm.
1846uint64_t TargetLoweringBase::getByValTypeAlignment(Type *Ty,
1847 const DataLayout &DL) const {
1848 return DL.getABITypeAlign(Ty).value();
1849}
1850
1851bool TargetLoweringBase::allowsMemoryAccessForAlignment(
1852 LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
1853 Align Alignment, MachineMemOperand::Flags Flags, unsigned *Fast) const {
1854 // Check if the specified alignment is sufficient based on the data layout.
1855 // TODO: While using the data layout works in practice, a better solution
1856 // would be to implement this check directly (make this a virtual function).
1857 // For example, the ABI alignment may change based on software platform while
1858 // this function should only be affected by hardware implementation.
1859 Type *Ty = VT.getTypeForEVT(Context);
1860 if (VT.isZeroSized() || Alignment >= DL.getABITypeAlign(Ty)) {
1861 // Assume that an access that meets the ABI-specified alignment is fast.
1862 if (Fast != nullptr)
1863 *Fast = 1;
1864 return true;
1865 }
1866
1867 // This is a misaligned access.
1868 return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
1869}
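The policy above is a two-step check: an access that meets the ABI alignment from the data layout is assumed fast, and anything else is delegated to allowsMisalignedMemoryAccesses. A standalone sketch of that shape, with the target hook stubbed to the conservative default:

#include <cstdio>

// Stub for the target hook; the base-class default rejects misaligned access.
static bool targetAllowsMisaligned(unsigned /*Align*/, unsigned *Fast) {
  if (Fast)
    *Fast = 0;
  return false;
}

static bool allows(unsigned AccessAlign, unsigned ABIAlign, unsigned *Fast) {
  if (AccessAlign >= ABIAlign) {
    if (Fast)
      *Fast = 1; // ABI-aligned accesses are assumed fast
    return true;
  }
  return targetAllowsMisaligned(AccessAlign, Fast);
}

int main() {
  unsigned Fast = 0;
  std::printf("%d %u\n", allows(4, 4, &Fast), Fast); // 1 1
  std::printf("%d %u\n", allows(1, 4, &Fast), Fast); // 0 0
  return 0;
}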
1870
1871bool TargetLoweringBase::allowsMemoryAccessForAlignment(
1872 LLVMContext &Context, const DataLayout &DL, EVT VT,
1873 const MachineMemOperand &MMO, unsigned *Fast) const {
1874 return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
1875 MMO.getAlign(), MMO.getFlags(), Fast);
1876}
1877
1878bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1879 const DataLayout &DL, EVT VT,
1880 unsigned AddrSpace, Align Alignment,
1881 MachineMemOperand::Flags Flags,
1882 unsigned *Fast) const {
1883 return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
1884 Flags, Fast);
1885}
1886
1887bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1888 const DataLayout &DL, EVT VT,
1889 const MachineMemOperand &MMO,
1890 unsigned *Fast) const {
1891 return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
1892 MMO.getFlags(), Fast);
1893}
1894
1895bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
1896 const DataLayout &DL, LLT Ty,
1897 const MachineMemOperand &MMO,
1898 unsigned *Fast) const {
1899 EVT VT = getApproximateEVTForLLT(Ty, DL, Context);
1900 return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(), MMO.getAlign(),
1901 MMO.getFlags(), Fast);
1902}
1903
1904//===----------------------------------------------------------------------===//
1905// TargetTransformInfo Helpers
1906//===----------------------------------------------------------------------===//
1907
1908int TargetLoweringBase::InstructionOpcodeToISD(unsigned Opcode) const {
1909 enum InstructionOpcodes {
1910#define HANDLE_INST(NUM, OPCODE, CLASS) OPCODE = NUM,
1911#define LAST_OTHER_INST(NUM) InstructionOpcodesCount = NUM
1912#include "llvm/IR/Instruction.def"
1913 };
1914 switch (static_cast<InstructionOpcodes>(Opcode)) {
1915 case Ret: return 0;
1916 case Br: return 0;
1917 case Switch: return 0;
1918 case IndirectBr: return 0;
1919 case Invoke: return 0;
1920 case CallBr: return 0;
1921 case Resume: return 0;
1922 case Unreachable: return 0;
1923 case CleanupRet: return 0;
1924 case CatchRet: return 0;
1925 case CatchPad: return 0;
1926 case CatchSwitch: return 0;
1927 case CleanupPad: return 0;
1928 case FNeg: return ISD::FNEG;
1929 case Add: return ISD::ADD;
1930 case FAdd: return ISD::FADD;
1931 case Sub: return ISD::SUB;
1932 case FSub: return ISD::FSUB;
1933 case Mul: return ISD::MUL;
1934 case FMul: return ISD::FMUL;
1935 case UDiv: return ISD::UDIV;
1936 case SDiv: return ISD::SDIV;
1937 case FDiv: return ISD::FDIV;
1938 case URem: return ISD::UREM;
1939 case SRem: return ISD::SREM;
1940 case FRem: return ISD::FREM;
1941 case Shl: return ISD::SHL;
1942 case LShr: return ISD::SRL;
1943 case AShr: return ISD::SRA;
1944 case And: return ISD::AND;
1945 case Or: return ISD::OR;
1946 case Xor: return ISD::XOR;
1947 case Alloca: return 0;
1948 case Load: return ISD::LOAD;
1949 case Store: return ISD::STORE;
1950 case GetElementPtr: return 0;
1951 case Fence: return 0;
1952 case AtomicCmpXchg: return 0;
1953 case AtomicRMW: return 0;
1954 case Trunc: return ISD::TRUNCATE;
1955 case ZExt: return ISD::ZERO_EXTEND;
1956 case SExt: return ISD::SIGN_EXTEND;
1957 case FPToUI: return ISD::FP_TO_UINT;
1958 case FPToSI: return ISD::FP_TO_SINT;
1959 case UIToFP: return ISD::UINT_TO_FP;
1960 case SIToFP: return ISD::SINT_TO_FP;
1961 case FPTrunc: return ISD::FP_ROUND;
1962 case FPExt: return ISD::FP_EXTEND;
1963 case PtrToInt: return ISD::BITCAST;
1964 case IntToPtr: return ISD::BITCAST;
1965 case BitCast: return ISD::BITCAST;
1966 case AddrSpaceCast: return ISD::ADDRSPACECAST;
1967 case ICmp: return ISD::SETCC;
1968 case FCmp: return ISD::SETCC;
1969 case PHI: return 0;
1970 case Call: return 0;
1971 case Select: return ISD::SELECT;
1972 case UserOp1: return 0;
1973 case UserOp2: return 0;
1974 case VAArg: return 0;
1975 case ExtractElement: return ISD::EXTRACT_VECTOR_ELT;
1976 case InsertElement: return ISD::INSERT_VECTOR_ELT;
1977 case ShuffleVector: return ISD::VECTOR_SHUFFLE;
1978 case ExtractValue: return ISD::MERGE_VALUES;
1979 case InsertValue: return ISD::MERGE_VALUES;
1980 case LandingPad: return 0;
1981 case Freeze: return ISD::FREEZE;
1982 }
1983
1984 llvm_unreachable("Unknown instruction type encountered!");
1985}
1986
1987Value *
1988TargetLoweringBase::getDefaultSafeStackPointerLocation(IRBuilderBase &IRB,
1989 bool UseTLS) const {
1990 // compiler-rt provides a variable with a magic name. Targets that do not
1991 // link with compiler-rt may also provide such a variable.
1992 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
1993 const char *UnsafeStackPtrVar = "__safestack_unsafe_stack_ptr";
1994 auto UnsafeStackPtr =
1995 dyn_cast_or_null<GlobalVariable>(M->getNamedValue(UnsafeStackPtrVar));
1996
1997 Type *StackPtrTy = PointerType::getUnqual(M->getContext());
1998
1999 if (!UnsafeStackPtr) {
2000 auto TLSModel = UseTLS ?
2001 GlobalValue::InitialExecTLSModel :
2002 GlobalValue::NotThreadLocal;
2003 // The global variable is not defined yet, define it ourselves.
2004 // We use the initial-exec TLS model because we do not support the
2005 // variable living anywhere other than in the main executable.
2006 UnsafeStackPtr = new GlobalVariable(
2007 *M, StackPtrTy, false, GlobalValue::ExternalLinkage, nullptr,
2008 UnsafeStackPtrVar, nullptr, TLSModel);
2009 } else {
2010 // The variable exists, check its type and attributes.
2011 if (UnsafeStackPtr->getValueType() != StackPtrTy)
2012 report_fatal_error(Twine(UnsafeStackPtrVar) + " must have void* type");
2013 if (UseTLS != UnsafeStackPtr->isThreadLocal())
2014 report_fatal_error(Twine(UnsafeStackPtrVar) + " must " +
2015 (UseTLS ? "" : "not ") + "be thread-local");
2016 }
2017 return UnsafeStackPtr;
2018}
2019
2020Value *
2021TargetLoweringBase::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
2022 if (!TM.getTargetTriple().isAndroid())
2023 return getDefaultSafeStackPointerLocation(IRB, true);
2024
2025 // Android provides a libc function to retrieve the address of the current
2026 // thread's unsafe stack pointer.
2027 Module *M = IRB.GetInsertBlock()->getParent()->getParent();
2028 auto *PtrTy = PointerType::getUnqual(M->getContext());
2029 FunctionCallee Fn =
2030 M->getOrInsertFunction("__safestack_pointer_address", PtrTy);
2031 return IRB.CreateCall(Fn);
2032}
2033
2034//===----------------------------------------------------------------------===//
2035// Loop Strength Reduction hooks
2036//===----------------------------------------------------------------------===//
2037
2038/// isLegalAddressingMode - Return true if the addressing mode represented
2039/// by AM is legal for this target, for a load/store of the specified type.
2040bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL,
2041 const AddrMode &AM, Type *Ty,
2042 unsigned AS, Instruction *I) const {
2043 // The default implementation supports a conservative RISC-style r+r and
2044 // r+i addressing mode.
2045
2046 // Scalable offsets not supported
2047 if (AM.ScalableOffset)
2048 return false;
2049
2050 // Allows a sign-extended 16-bit immediate field.
2051 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
2052 return false;
2053
2054 // No global is ever allowed as a base.
2055 if (AM.BaseGV)
2056 return false;
2057
2058 // Only support r+r,
2059 switch (AM.Scale) {
2060 case 0: // "r+i" or just "i", depending on HasBaseReg.
2061 break;
2062 case 1:
2063 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
2064 return false;
2065 // Otherwise we have r+r or r+i.
2066 break;
2067 case 2:
2068 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
2069 return false;
2070 // Allow 2*r as r+r.
2071 break;
2072 default: // Don't allow n * r
2073 return false;
2074 }
2075
2076 return true;
2077}
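A standalone mirror of the default predicate above; the struct is a simplified stand-in for TargetLoweringBase::AddrMode, and the sample modes are illustrative:

#include <cassert>
#include <cstdint>

struct Mode {
  int64_t BaseOffs = 0;
  bool HasBaseReg = false;
  int64_t Scale = 0;
  bool HasBaseGV = false;
};

static bool defaultIsLegal(const Mode &AM) {
  if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16) - 1)
    return false;                                 // immediate must fit 16 bits
  if (AM.HasBaseGV)
    return false;                                 // no global bases
  switch (AM.Scale) {
  case 0: return true;                            // r+i or just i
  case 1: return !(AM.HasBaseReg && AM.BaseOffs); // r+r, but not r+r+i
  case 2: return !AM.HasBaseReg && !AM.BaseOffs;  // 2*r folded as r+r
  default: return false;                          // no general n*r
  }
}

int main() {
  assert(defaultIsLegal({32000, true, 0, false}));  // r + imm16: accepted
  assert(!defaultIsLegal({100, true, 1, false}));   // r+r+i: rejected
  assert(defaultIsLegal({0, false, 2, false}));     // 2*r treated as r+r
  return 0;
}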
2078
2079//===----------------------------------------------------------------------===//
2080// Stack Protector
2081//===----------------------------------------------------------------------===//
2082
2083// For OpenBSD return its special guard variable. Otherwise return nullptr,
2084// so that SelectionDAG handles SSP.
2085Value *TargetLoweringBase::getIRStackGuard(IRBuilderBase &IRB) const {
2086 if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
2087 Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
2088 PointerType *PtrTy = PointerType::getUnqual(M.getContext());
2089 Constant *C = M.getOrInsertGlobal("__guard_local", PtrTy);
2090 if (GlobalVariable *G = dyn_cast_or_null<GlobalVariable>(C))
2091 G->setVisibility(GlobalValue::HiddenVisibility);
2092 return C;
2093 }
2094 return nullptr;
2095}
2096
2097// Currently only support "standard" __stack_chk_guard.
2098// TODO: add LOAD_STACK_GUARD support.
2099void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
2100 if (!M.getNamedValue("__stack_chk_guard")) {
2101 auto *GV = new GlobalVariable(M, PointerType::getUnqual(M.getContext()),
2102 false, GlobalVariable::ExternalLinkage,
2103 nullptr, "__stack_chk_guard");
2104
2105 // FreeBSD has "__stack_chk_guard" defined externally on libc.so
2106 if (M.getDirectAccessExternalData() &&
2107 !TM.getTargetTriple().isWindowsGNUEnvironment() &&
2108 !(TM.getTargetTriple().isPPC64() &&
2109 TM.getTargetTriple().isOSFreeBSD()) &&
2110 (!TM.getTargetTriple().isOSDarwin() ||
2111 TM.getRelocationModel() == Reloc::Static))
2112 GV->setDSOLocal(true);
2113 }
2114}
2115
2116// Currently only support "standard" __stack_chk_guard.
2117// TODO: add LOAD_STACK_GUARD support.
2118Value *TargetLoweringBase::getSDagStackGuard(const Module &M) const {
2119 return M.getNamedValue("__stack_chk_guard");
2120}
2121
2122Function *TargetLoweringBase::getSSPStackGuardCheck(const Module &M) const {
2123 return nullptr;
2124}
2125
2126unsigned TargetLoweringBase::getMinimumJumpTableEntries() const {
2127 return MinimumJumpTableEntries;
2128}
2129
2130void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
2131 MinimumJumpTableEntries = Val;
2132}
2133
2134unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
2135 return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
2136}
2137
2138unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
2139 return MaximumJumpTableSize;
2140}
2141
2142void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
2143 MaximumJumpTableSize = Val;
2144}
2145
2146bool TargetLoweringBase::isJumpTableRelative() const {
2147 return getTargetMachine().isPositionIndependent();
2148}
2149
2150Align TargetLoweringBase::getPrefLoopAlignment(MachineLoop *ML) const {
2151 if (TM.Options.LoopAlignment)
2152 return Align(TM.Options.LoopAlignment);
2153 return PrefLoopAlignment;
2154}
2155
2156unsigned TargetLoweringBase::getMaxPermittedBytesForAlignment(
2157 MachineBasicBlock *MBB) const {
2158 return MaxBytesForAlignment;
2159}
2160
2161//===----------------------------------------------------------------------===//
2162// Reciprocal Estimates
2163//===----------------------------------------------------------------------===//
2164
2165/// Get the reciprocal estimate attribute string for a function that will
2166/// override the target defaults.
2167static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
2168 const Function &F = MF.getFunction();
2169 return F.getFnAttribute("reciprocal-estimates").getValueAsString();
2170}
2171
2172/// Construct a string for the given reciprocal operation of the given type.
2173/// This string should match the corresponding option to the front-end's
2174/// "-mrecip" flag assuming those strings have been passed through in an
2175/// attribute string. For example, "vec-divf" for a division of a vXf32.
2176static std::string getReciprocalOpName(bool IsSqrt, EVT VT) {
2177 std::string Name = VT.isVector() ? "vec-" : "";
2178
2179 Name += IsSqrt ? "sqrt" : "div";
2180
2181 // TODO: Handle other float types?
2182 if (VT.getScalarType() == MVT::f64) {
2183 Name += "d";
2184 } else if (VT.getScalarType() == MVT::f16) {
2185 Name += "h";
2186 } else {
2187 assert(VT.getScalarType() == MVT::f32 &&
2188 "Unexpected FP type for reciprocal estimate");
2189 Name += "f";
2190 }
2191
2192 return Name;
2193}
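For example, the names produced above are "divf" (f32 division), "sqrtd" (f64 square root), and "vec-divf" (vXf32 division), matching the front-end's -mrecip tokens. A standalone mirror, where the size suffix stands in for the scalar-type check:

#include <cassert>
#include <string>

// Standalone mirror of getReciprocalOpName; SizeSuffix stands in for the
// scalar-type dispatch above ('f' = f32, 'd' = f64, 'h' = f16).
static std::string recipName(bool IsSqrt, bool IsVector, char SizeSuffix) {
  std::string Name = IsVector ? "vec-" : "";
  Name += IsSqrt ? "sqrt" : "div";
  Name += SizeSuffix;
  return Name;
}

int main() {
  assert(recipName(false, true, 'f') == "vec-divf"); // division of vXf32
  assert(recipName(true, false, 'd') == "sqrtd");    // square root of f64
  return 0;
}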
2194
2195/// Return the character position and value (a single numeric character) of a
2196/// customized refinement operation in the input string if it exists. Return
2197/// false if there is no customized refinement step count.
2198static bool parseRefinementStep(StringRef In, size_t &Position,
2199 uint8_t &Value) {
2200 const char RefStepToken = ':';
2201 Position = In.find(RefStepToken);
2202 if (Position == StringRef::npos)
2203 return false;
2204
2205 StringRef RefStepString = In.substr(Position + 1);
2206 // Allow exactly one numeric character for the additional refinement
2207 // step parameter.
2208 if (RefStepString.size() == 1) {
2209 char RefStepChar = RefStepString[0];
2210 if (isDigit(RefStepChar)) {
2211 Value = RefStepChar - '0';
2212 return true;
2213 }
2214 }
2215 report_fatal_error("Invalid refinement step for -recip.");
2216}
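A standalone sketch of the "name:digit" syntax accepted above: "vec-divf:3" yields the position of the ':' and a step count of 3. Note the real code calls report_fatal_error on a malformed step, where this sketch merely returns false:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>

static bool parseStep(const std::string &In, size_t &Pos, uint8_t &Val) {
  Pos = In.find(':');
  if (Pos == std::string::npos)
    return false; // no customized refinement step
  std::string Step = In.substr(Pos + 1);
  if (Step.size() == 1 && Step[0] >= '0' && Step[0] <= '9') {
    Val = Step[0] - '0';
    return true;
  }
  return false; // the real code reports a fatal error for this case
}

int main() {
  size_t Pos;
  uint8_t Val;
  assert(parseStep("vec-divf:3", Pos, Val) && Pos == 8 && Val == 3);
  assert(!parseStep("sqrtd", Pos, Val)); // no custom step count
  return 0;
}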
2217
2218/// For the input attribute string, return one of the ReciprocalEstimate enum
2219/// status values (enabled, disabled, or not specified) for this operation on
2220/// the specified data type.
2221static int getOpEnabled(bool IsSqrt, EVT VT, StringRef Override) {
2222 if (Override.empty())
2223 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2224
2225 SmallVector<StringRef, 4> OverrideVector;
2226 Override.split(OverrideVector, ',');
2227 unsigned NumArgs = OverrideVector.size();
2228
2229 // Check if "all", "none", or "default" was specified.
2230 if (NumArgs == 1) {
2231 // Look for an optional setting of the number of refinement steps needed
2232 // for this type of reciprocal operation.
2233 size_t RefPos;
2234 uint8_t RefSteps;
2235 if (parseRefinementStep(Override, RefPos, RefSteps)) {
2236 // Split the string for further processing.
2237 Override = Override.substr(0, RefPos);
2238 }
2239
2240 // All reciprocal types are enabled.
2241 if (Override == "all")
2242 return TargetLoweringBase::ReciprocalEstimate::Enabled;
2243
2244 // All reciprocal types are disabled.
2245 if (Override == "none")
2246 return TargetLoweringBase::ReciprocalEstimate::Disabled;
2247
2248 // Target defaults for enablement are used.
2249 if (Override == "default")
2250 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2251 }
2252
2253 // The attribute string may omit the size suffix ('f'/'d').
2254 std::string VTName = getReciprocalOpName(IsSqrt, VT);
2255 std::string VTNameNoSize = VTName;
2256 VTNameNoSize.pop_back();
2257 static const char DisabledPrefix = '!';
2258
2259 for (StringRef RecipType : OverrideVector) {
2260 size_t RefPos;
2261 uint8_t RefSteps;
2262 if (parseRefinementStep(RecipType, RefPos, RefSteps))
2263 RecipType = RecipType.substr(0, RefPos);
2264
2265 // Ignore the disablement token for string matching.
2266 bool IsDisabled = RecipType[0] == DisabledPrefix;
2267 if (IsDisabled)
2268 RecipType = RecipType.substr(1);
2269
2270 if (RecipType == VTName || RecipType == VTNameNoSize)
2271 return IsDisabled ? TargetLoweringBase::ReciprocalEstimate::Disabled
2272 : TargetLoweringBase::ReciprocalEstimate::Enabled;
2273 }
2274
2275 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2276}
2277
2278/// For the input attribute string, return the customized refinement step count
2279/// for this operation on the specified data type. If the step count does not
2280/// exist, return the ReciprocalEstimate enum value for unspecified.
2281static int getOpRefinementSteps(bool IsSqrt, EVT VT, StringRef Override) {
2282 if (Override.empty())
2283 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2284
2285 SmallVector<StringRef, 4> OverrideVector;
2286 Override.split(OverrideVector, ',');
2287 unsigned NumArgs = OverrideVector.size();
2288
2289 // Check if "all", "default", or "none" was specified.
2290 if (NumArgs == 1) {
2291 // Look for an optional setting of the number of refinement steps needed
2292 // for this type of reciprocal operation.
2293 size_t RefPos;
2294 uint8_t RefSteps;
2295 if (!parseRefinementStep(Override, RefPos, RefSteps))
2296 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2297
2298 // Split the string for further processing.
2299 Override = Override.substr(0, RefPos);
2300 assert(Override != "none" &&
2301 "Disabled reciprocals, but specified refinement steps?");
2302
2303 // If this is a general override, return the specified number of steps.
2304 if (Override == "all" || Override == "default")
2305 return RefSteps;
2306 }
2307
2308 // The attribute string may omit the size suffix ('f'/'d').
2309 std::string VTName = getReciprocalOpName(IsSqrt, VT);
2310 std::string VTNameNoSize = VTName;
2311 VTNameNoSize.pop_back();
2312
2313 for (StringRef RecipType : OverrideVector) {
2314 size_t RefPos;
2315 uint8_t RefSteps;
2316 if (!parseRefinementStep(RecipType, RefPos, RefSteps))
2317 continue;
2318
2319 RecipType = RecipType.substr(0, RefPos);
2320 if (RecipType == VTName || RecipType == VTNameNoSize)
2321 return RefSteps;
2322 }
2323
2324 return TargetLoweringBase::ReciprocalEstimate::Unspecified;
2325}
2326
2327int TargetLoweringBase::getRecipEstimateSqrtEnabled(EVT VT,
2328 MachineFunction &MF) const {
2329 return getOpEnabled(true, VT, getRecipEstimateForFunc(MF));
2330}
2331
2332int TargetLoweringBase::getRecipEstimateDivEnabled(EVT VT,
2333 MachineFunction &MF) const {
2334 return getOpEnabled(false, VT, getRecipEstimateForFunc(MF));
2335}
2336
2337int TargetLoweringBase::getSqrtRefinementSteps(EVT VT,
2338 MachineFunction &MF) const {
2339 return getOpRefinementSteps(true, VT, getRecipEstimateForFunc(MF));
2340}
2341
2342int TargetLoweringBase::getDivRefinementSteps(EVT VT,
2343 MachineFunction &MF) const {
2344 return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
2345}
2346
2347bool TargetLoweringBase::isLoadBitCastBeneficial(
2348 EVT LoadVT, EVT BitcastVT, const SelectionDAG &DAG,
2349 const MachineMemOperand &MMO) const {
2350 // Single-element vectors are scalarized, so we should generally avoid having
2351 // any memory operations on such types, as they would get scalarized too.
2352 if (LoadVT.isFixedLengthVector() && BitcastVT.isFixedLengthVector() &&
2353 BitcastVT.getVectorNumElements() == 1)
2354 return false;
2355
2356 // Don't do this if we could do an indexed load on the original type, but
2357 // not on the new one.
2358 if (!LoadVT.isSimple() || !BitcastVT.isSimple())
2359 return true;
2360
2361 MVT LoadMVT = LoadVT.getSimpleVT();
2362
2363 // Don't bother doing this if it's just going to be promoted again later, as
2364 // doing so might interfere with other combines.
2365 if (getOperationAction(ISD::LOAD, LoadMVT) == Promote &&
2366 getTypeToPromoteTo(ISD::LOAD, LoadMVT) == BitcastVT.getSimpleVT())
2367 return false;
2368
2369 unsigned Fast = 0;
2370 return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), BitcastVT,
2371 MMO, &Fast) &&
2372 Fast;
2373}
2374
2375MVT::SimpleValueType TargetLoweringBase::getCmpLibcallReturnType() const {
2376 return MVT::i32;
2377}
2378
2379MachineMemOperand::Flags TargetLoweringBase::getLoadMemOperandFlags(
2380 const LoadInst &LI, const DataLayout &DL, AssumptionCache *AC,
2381 const TargetLibraryInfo *LibInfo) const {
2382 MachineMemOperand::Flags Flags = MachineMemOperand::MOLoad;
2383 if (LI.isVolatile())
2384 Flags |= MachineMemOperand::MOVolatile;
2385
2386 if (LI.hasMetadata(LLVMContext::MD_nontemporal))
2387 Flags |= MachineMemOperand::MONonTemporal;
2388
2389 if (LI.hasMetadata(LLVMContext::MD_invariant_load))
2390 Flags |= MachineMemOperand::MOInvariant;
2391
2392 if (isDereferenceableAndAlignedPointer(LI.getPointerOperand(), LI.getType(),
2393 LI.getAlign(), DL, &LI, AC,
2394 /*DT=*/nullptr, LibInfo))
2395 Flags |= MachineMemOperand::MODereferenceable;
2396
2397 Flags |= getTargetMMOFlags(LI);
2398 return Flags;
2399}
2400
2401MachineMemOperand::Flags
2402TargetLoweringBase::getStoreMemOperandFlags(const StoreInst &SI,
2403 const DataLayout &DL) const {
2404 MachineMemOperand::Flags Flags = MachineMemOperand::MOStore;
2405
2406 if (SI.isVolatile())
2407 Flags |= MachineMemOperand::MOVolatile;
2408
2409 if (SI.hasMetadata(LLVMContext::MD_nontemporal))
2410 Flags |= MachineMemOperand::MONonTemporal;
2411
2412 // FIXME: Not preserving dereferenceable
2413 Flags |= getTargetMMOFlags(SI);
2414 return Flags;
2415}
2416
2417MachineMemOperand::Flags
2418TargetLoweringBase::getAtomicMemOperandFlags(const Instruction &AI,
2419 const DataLayout &DL) const {
2420 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
2421
2422 if (const AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(&AI)) {
2423 if (RMW->isVolatile())
2424 Flags |= MachineMemOperand::MOVolatile;
2425 } else if (const AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(&AI)) {
2426 if (CmpX->isVolatile())
2427 Flags |= MachineMemOperand::MOVolatile;
2428 } else
2429 llvm_unreachable("not an atomic instruction");
2430
2431 // FIXME: Not preserving dereferenceable
2432 Flags |= getTargetMMOFlags(AI);
2433 return Flags;
2434}
2435
2436Instruction *TargetLoweringBase::emitLeadingFence(IRBuilderBase &Builder,
2437 Instruction *Inst,
2438 AtomicOrdering Ord) const {
2439 if (isReleaseOrStronger(Ord) && Inst->hasAtomicStore())
2440 return Builder.CreateFence(Ord);
2441 else
2442 return nullptr;
2443}
2444
2445Instruction *TargetLoweringBase::emitTrailingFence(IRBuilderBase &Builder,
2446 Instruction *Inst,
2447 AtomicOrdering Ord) const {
2448 if (isAcquireOrStronger(Ord))
2449 return Builder.CreateFence(Ord);
2450 else
2451 return nullptr;
2452}
2453
2454//===----------------------------------------------------------------------===//
2455// GlobalISel Hooks
2456//===----------------------------------------------------------------------===//
2457
2458bool TargetLoweringBase::shouldLocalize(const MachineInstr &MI,
2459 const TargetTransformInfo *TTI) const {
2460 auto &MF = *MI.getMF();
2461 auto &MRI = MF.getRegInfo();
2462 // Assuming a spill and reload of a value has a cost of 1 instruction each,
2463 // this helper function computes the maximum number of uses we should consider
2464 // for remat. E.g. on arm64 global addresses take 2 insts to materialize. We
2465 // break even in terms of code size when the original MI has 2 users vs
2466 // choosing to potentially spill. Any more than 2 users and we have a net code
2467 // size increase. This doesn't take into account register pressure though.
2468 auto maxUses = [](unsigned RematCost) {
2469 // A cost of 1 means remats are basically free.
2470 if (RematCost == 1)
2471 return std::numeric_limits<unsigned>::max();
2472 if (RematCost == 2)
2473 return 2U;
2474
2475 // Remat is too expensive, only sink if there's one user.
2476 if (RematCost > 2)
2477 return 1U;
2478 llvm_unreachable("Unexpected remat cost");
2479 };
2480
2481 switch (MI.getOpcode()) {
2482 default:
2483 return false;
2484 // Constants-like instructions should be close to their users.
2485 // We don't want long live-ranges for them.
2486 case TargetOpcode::G_CONSTANT:
2487 case TargetOpcode::G_FCONSTANT:
2488 case TargetOpcode::G_FRAME_INDEX:
2489 case TargetOpcode::G_INTTOPTR:
2490 return true;
2491 case TargetOpcode::G_GLOBAL_VALUE: {
2492 unsigned RematCost = TTI->getGISelRematGlobalCost();
2493 Register Reg = MI.getOperand(0).getReg();
2494 unsigned MaxUses = maxUses(RematCost);
2495 if (MaxUses == UINT_MAX)
2496 return true; // Remats are "free" so always localize.
2497 return MRI.hasAtMostUserInstrs(Reg, MaxUses);
2498 }
2499 }
2500}
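The break-even reasoning in the comment above reduces to the maxUses lambda. A standalone mirror with the three cost regimes spelled out (the unreachable case for a cost of 0 is folded into the final return here):

#include <cassert>
#include <limits>

// Standalone mirror of the maxUses lambda above.
static unsigned maxUses(unsigned RematCost) {
  if (RematCost == 1)
    return std::numeric_limits<unsigned>::max(); // remat is basically free
  if (RematCost == 2)
    return 2U; // breaks even in code size at two users
  return 1U;   // too expensive: only localize single-use values
}

int main() {
  assert(maxUses(1) == std::numeric_limits<unsigned>::max());
  assert(maxUses(2) == 2U);
  assert(maxUses(3) == 1U);
  return 0;
}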
Definition: ISDOpcodes.h:280
@ INSERT_VECTOR_ELT
INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element at IDX replaced with VAL.
Definition: ISDOpcodes.h:525
@ VECTOR_SPLICE
VECTOR_SPLICE(VEC1, VEC2, IMM) - Returns a subvector of the same type as VEC1/VEC2 from CONCAT_VECTOR...
Definition: ISDOpcodes.h:613
@ ATOMIC_SWAP
Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt) Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN,...
Definition: ISDOpcodes.h:1269
@ FFREXP
FFREXP - frexp, extract fractional and exponent component of a floating-point value.
Definition: ISDOpcodes.h:945
@ FP_ROUND
X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type down to the precision of the ...
Definition: ISDOpcodes.h:870
@ ZERO_EXTEND_VECTOR_INREG
ZERO_EXTEND_VECTOR_INREG(Vector) - This operator represents an in-register zero-extension of the low ...
Definition: ISDOpcodes.h:832
@ ADDRSPACECAST
ADDRSPACECAST - This operator converts between pointers of different address spaces.
Definition: ISDOpcodes.h:908
@ FP_TO_SINT_SAT
FP_TO_[US]INT_SAT - Convert floating point value in operand 0 to a signed or unsigned scalar integer ...
Definition: ISDOpcodes.h:856
@ VECREDUCE_FMINIMUM
Definition: ISDOpcodes.h:1367
@ TRUNCATE
TRUNCATE - Completely drop the high bits.
Definition: ISDOpcodes.h:787
@ VECREDUCE_SEQ_FMUL
Definition: ISDOpcodes.h:1347
@ FCOPYSIGN
FCOPYSIGN(X, Y) - Return the value of X with the sign of Y.
Definition: ISDOpcodes.h:494
@ SADDSAT
RESULT = [US]ADDSAT(LHS, RHS) - Perform saturation addition on 2 integers with the same bit width (W)...
Definition: ISDOpcodes.h:341
@ GET_DYNAMIC_AREA_OFFSET
GET_DYNAMIC_AREA_OFFSET - Get the offset from the native SP to the address of the most recent dynamic alloca.
Definition: ISDOpcodes.h:1327
@ SET_FPENV_MEM
Sets the current floating-point environment.
Definition: ISDOpcodes.h:1019
@ SADDO_CARRY
Carry-using overflow-aware nodes for multiple precision addition and subtraction.
Definition: ISDOpcodes.h:314
CondCode
ISD::CondCode enum - These are ordered carefully to make the bitfields below work out,...
Definition: ISDOpcodes.h:1530
static const int LAST_INDEXED_MODE
Definition: ISDOpcodes.h:1481
Libcall getPOWI(EVT RetVT)
getPOWI - Return the POWI_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getSINTTOFP(EVT OpVT, EVT RetVT)
getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getSYNC(unsigned Opc, MVT VT)
Return the SYNC_FETCH_AND_* value for the given opcode and type, or UNKNOWN_LIBCALL if there is none.
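The brief above is the whole contract; a minimal sketch of the lookup for a 32-bit atomic add that must become a __sync_* call (syncCallForAtomicAdd is a hypothetical helper):

#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
using namespace llvm;

// Map a 32-bit ISD::ATOMIC_LOAD_ADD to its __sync_* runtime call;
// getSYNC returns UNKNOWN_LIBCALL when no helper exists for Opc/VT.
static RTLIB::Libcall syncCallForAtomicAdd() {
  return RTLIB::getSYNC(ISD::ATOMIC_LOAD_ADD, MVT::i32);
  // expected: RTLIB::SYNC_FETCH_AND_ADD_4
}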
Libcall getLDEXP(EVT RetVT)
getLDEXP - Return the LDEXP_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getUINTTOFP(EVT OpVT, EVT RetVT)
getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFREXP(EVT RetVT)
getFREXP - Return the FREXP_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall
RTLIB::Libcall enum - This enum defines all of the runtime library calls the backend can emit.
Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
Libcall getFPTOUINT(EVT OpVT, EVT RetVT)
getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPTOSINT(EVT OpVT, EVT RetVT)
getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
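As with the other RTLIB getters, the result must be checked against UNKNOWN_LIBCALL. A sketch (fpToInt128Call is a hypothetical helper; f64 -> i128 is just an illustrative illegal conversion):

#include "llvm/CodeGen/RuntimeLibcalls.h"
using namespace llvm;

// Find the runtime call that lowers a signed f64 -> i128 conversion.
static RTLIB::Libcall fpToInt128Call() {
  return RTLIB::getFPTOSINT(MVT::f64, MVT::i128);
  // expected: RTLIB::FPTOSINT_F64_I128; UNKNOWN_LIBCALL if unsupported
}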
Libcall getOUTLINE_ATOMIC(unsigned Opc, AtomicOrdering Order, MVT VT)
Return the outline atomics value for the given opcode, atomic ordering and type, or UNKNOWN_LIBCALL i...
Libcall getFPEXT(EVT OpVT, EVT RetVT)
getFPEXT - Return the FPEXT_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getFPROUND(EVT OpVT, EVT RetVT)
getFPROUND - Return the FPROUND_*_* value for the given types, or UNKNOWN_LIBCALL if there is none.
Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given ele...
Libcall getOutlineAtomicHelper(const Libcall(&LC)[5][4], AtomicOrdering Order, uint64_t MemSize)
Return the outline atomics value for the given atomic ordering, access size and set of libcalls for a...
Libcall getFPLibCall(EVT VT, Libcall Call_F32, Libcall Call_F64, Libcall Call_F80, Libcall Call_F128, Libcall Call_PPCF128)
GetFPLibCall - Helper to return the right libcall for the given floating point type,...
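getFPLibCall dispatches purely on the floating-point width of VT. A hedged sketch selecting the fmod-family call (getRemLibcall is a hypothetical wrapper; the REM_* enumerators come from RuntimeLibcalls.def):

#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/ValueTypes.h"
using namespace llvm;

// Pick the remainder libcall matching VT: f32 -> REM_F32, f64 -> REM_F64, ...
static RTLIB::Libcall getRemLibcall(EVT VT) {
  return RTLIB::getFPLibCall(VT, RTLIB::REM_F32, RTLIB::REM_F64,
                             RTLIB::REM_F80, RTLIB::REM_F128,
                             RTLIB::REM_PPCF128);
}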
Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize)
getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given e...
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceiling of the base-2 logarithm of the specified value; returns 32 if the value is zero.
Definition: MathExtras.h:337
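A sketch of the zero convention and typical use (bitsToIndex is a hypothetical helper):

#include "llvm/Support/MathExtras.h"
#include <cstdint>
using namespace llvm;

// Bits needed to index a table with NumEntries slots.
// Log2_32_Ceil(5) == 3, Log2_32_Ceil(8) == 3, Log2_32_Ceil(0) == 32.
static unsigned bitsToIndex(uint32_t NumEntries) {
  return Log2_32_Ceil(NumEntries);
}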
EVT getApproximateEVTForLLT(LLT Ty, const DataLayout &DL, LLVMContext &Ctx)
void GetReturnInfo(CallingConv::ID CC, Type *ReturnType, AttributeList attr, SmallVectorImpl< ISD::OutputArg > &Outs, const TargetLowering &TLI, const DataLayout &DL)
Given an LLVM IR type and return type attributes, compute the return value EVTs and flags,...
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator)
Returns the integer ceil(Numerator / Denominator).
Definition: MathExtras.h:428
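A sketch of the usual register-count arithmetic (numRegsFor is a hypothetical helper):

#include "llvm/Support/MathExtras.h"
#include <cstdint>
using namespace llvm;

// Number of 32-bit registers needed to hold Bits bits.
// divideCeil(70, 32) == 3, divideCeil(64, 32) == 2.
static uint64_t numRegsFor(uint64_t Bits) {
  return divideCeil(Bits, 32);
}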
auto enum_seq(EnumT Begin, EnumT End)
Iterate over an enum type from Begin up to - but not including - End.
Definition: Sequence.h:337
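A sketch with a hypothetical enum; because the enum is not marked as iterable, the call must pass the force tag (see force_iteration_on_noniterable_enum below):

#include "llvm/ADT/Sequence.h"
using namespace llvm;

enum class Stage { Parse, Optimize, Emit, End };

static void visitStages() {
  // Visits Parse, Optimize, Emit; the End bound itself is excluded.
  for (Stage S : enum_seq(Stage::Parse, Stage::End,
                          force_iteration_on_noniterable_enum))
    (void)S;
}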
bool isDereferenceableAndAlignedPointer(const Value *V, Type *Ty, Align Alignment, const DataLayout &DL, const Instruction *CtxI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr, const TargetLibraryInfo *TLI=nullptr)
Returns true if V is always a dereferenceable pointer with alignment greater than or equal to the requested alignment.
Definition: Loads.cpp:201
bool shouldOptimizeForSize(const MachineFunction *MF, ProfileSummaryInfo *PSI, const MachineBlockFrequencyInfo *BFI, PGSOQueryType QueryType=PGSOQueryType::Other)
Returns true if machine function MF is suggested to be size-optimized based on the profile.
constexpr force_iteration_on_noniterable_enum_t force_iteration_on_noniterable_enum
Definition: Sequence.h:108
T bit_ceil(T Value)
Returns the smallest integral power of two no smaller than Value if Value is nonzero.
Definition: bit.h:342
bool isReleaseOrStronger(AtomicOrdering AO)
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:275
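A sketch combining the two power-of-two helpers above (widenNumElts is a hypothetical helper; the isPowerOf2_32 test is shown only for illustration, since bit_ceil already maps powers of two to themselves):

#include "llvm/ADT/bit.h"
#include "llvm/Support/MathExtras.h"
#include <cstdint>
using namespace llvm;

// Round a vector length up to a power of two: 5 -> 8, 8 -> 8, 1 -> 1.
static uint32_t widenNumElts(uint32_t NumElts) {
  return isPowerOf2_32(NumElts) ? NumElts : bit_ceil(NumElts);
}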
bool none_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::none_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1736
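A sketch of the range form (hasNoZeroSizes is a hypothetical helper):

#include "llvm/ADT/STLExtras.h"
#include <vector>
using namespace llvm;

// True when no element size is zero; no begin()/end() boilerplate.
static bool hasNoZeroSizes(const std::vector<int> &Sizes) {
  return none_of(Sizes, [](int S) { return S == 0; });
}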
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
AtomicOrdering
Atomic ordering for LLVM's memory model.
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ And
Bitwise or logical AND of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
void ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL, Type *Ty, SmallVectorImpl< EVT > &ValueVTs, SmallVectorImpl< EVT > *MemVTs, SmallVectorImpl< TypeSize > *Offsets=nullptr, TypeSize StartingOffset=TypeSize::getZero())
ComputeValueVTs - Given an LLVM IR type, compute a sequence of EVTs that represent all the individual...
Definition: Analysis.cpp:79
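A hedged sketch of the usual call pattern (expandReturnType is a hypothetical helper; TLI, DL, and RetTy stand in for values already in scope in real lowering code):

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
using namespace llvm;

// Expand an IR return type into its leaf EVTs; for the struct type
// {i32, float} this yields {MVT::i32, MVT::f32}.
static void expandReturnType(const TargetLowering &TLI, const DataLayout &DL,
                             Type *RetTy) {
  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DL, RetTy, ValueVTs);
}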
bool isAcquireOrStronger(AtomicOrdering AO)
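A sketch of how these ordering predicates typically drive fence placement (needsTrailingFence is a hypothetical helper):

#include "llvm/Support/AtomicOrdering.h"
using namespace llvm;

// Acquire, AcquireRelease and SequentiallyConsistent all count as
// "acquire or stronger"; Monotonic and Release do not.
static bool needsTrailingFence(AtomicOrdering AO) {
  return isAcquireOrStronger(AO);
}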
InstructionCost Cost
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Extended Value Type.
Definition: ValueTypes.h:34
EVT getPow2VectorType(LLVMContext &Context) const
Widens the length of the given vector EVT up to the nearest power of 2 and returns that type.
Definition: ValueTypes.h:462
bool isSimple() const
Test if the given EVT is simple (as opposed to being extended).
Definition: ValueTypes.h:136
static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements, bool IsScalable=false)
Returns the EVT that represents a vector NumElements in length, where each element is of type VT.
Definition: ValueTypes.h:73
ElementCount getVectorElementCount() const
Definition: ValueTypes.h:340
TypeSize getSizeInBits() const
Return the size of the specified value type in bits.
Definition: ValueTypes.h:358
bool isPow2VectorType() const
Returns true if the given vector type's element count is a power of 2.
Definition: ValueTypes.h:455
MVT getSimpleVT() const
Return the SimpleValueType held in the specified simple EVT.
Definition: ValueTypes.h:306
static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth)
Returns the EVT that represents an integer with the given number of bits.
Definition: ValueTypes.h:64
bool isFixedLengthVector() const
Definition: ValueTypes.h:177
EVT getRoundIntegerType(LLVMContext &Context) const
Rounds the bit-width of the given integer EVT up to the nearest power of two (and at least to eight),...
Definition: ValueTypes.h:404
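These EVT factories and rounding helpers compose when legalizing odd types. A sketch, assuming an LLVMContext Ctx is in scope and using v3i20 purely as an illustrative odd type:

#include "llvm/CodeGen/ValueTypes.h"
using namespace llvm;

// Build an odd type, then round each axis up to a power of two.
static void roundOddType(LLVMContext &Ctx) {
  EVT EltVT = EVT::getIntegerVT(Ctx, 20);       // i20
  EVT VecVT = EVT::getVectorVT(Ctx, EltVT, 3);  // v3i20
  EVT WideVec = VecVT.getPow2VectorType(Ctx);   // v4i20
  EVT WideElt = EltVT.getRoundIntegerType(Ctx); // i32
  (void)WideVec; (void)WideElt;
}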
bool isVector() const
Return true if this is a vector value type.
Definition: ValueTypes.h:167
EVT getScalarType() const
If this is a vector type, return the element type, otherwise return this.
Definition: ValueTypes.h:313
Type * getTypeForEVT(LLVMContext &Context) const
This method returns an LLVM type corresponding to the specified EVT.
Definition: ValueTypes.cpp:202
EVT getVectorElementType() const
Given a vector type, return the type of each element.
Definition: ValueTypes.h:318
unsigned getVectorNumElements() const
Given a vector type, return the number of elements it contains.
Definition: ValueTypes.h:326
bool isZeroSized() const
Test if the given EVT has zero size; this will fail if called on a scalable type.
Definition: ValueTypes.h:131
EVT getHalfNumVectorElementsVT(LLVMContext &Context) const
Definition: ValueTypes.h:438
bool isInteger() const
Return true if this is an integer or a vector integer type.
Definition: ValueTypes.h:151
OutputArg - This struct carries flags and a value for a single outgoing (actual) argument or outgoing...
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
This represents an addressing mode of: BaseGV + BaseOffs + BaseReg + Scale*ScaleReg + ScalableOffset*...