// llvm/lib/IR/Instructions.cpp — extracted from the LLVM 19.0.0git doxygen
// "documentation of this file" page; line-number prefixes and hyperlinked
// lines are extraction artifacts.
1//===- Instructions.cpp - Implement the LLVM instructions -----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements all of the non-inline methods for the LLVM instruction
10// classes.
11//
12//===----------------------------------------------------------------------===//
13
15#include "LLVMContextImpl.h"
18#include "llvm/ADT/Twine.h"
19#include "llvm/IR/Attributes.h"
20#include "llvm/IR/BasicBlock.h"
21#include "llvm/IR/Constant.h"
23#include "llvm/IR/Constants.h"
24#include "llvm/IR/DataLayout.h"
26#include "llvm/IR/Function.h"
27#include "llvm/IR/InstrTypes.h"
28#include "llvm/IR/Instruction.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/IR/LLVMContext.h"
31#include "llvm/IR/MDBuilder.h"
32#include "llvm/IR/Metadata.h"
33#include "llvm/IR/Module.h"
34#include "llvm/IR/Operator.h"
36#include "llvm/IR/Type.h"
37#include "llvm/IR/Value.h"
42#include "llvm/Support/ModRef.h"
44#include <algorithm>
45#include <cassert>
46#include <cstdint>
47#include <optional>
48#include <vector>
49
50using namespace llvm;
51
53 "disable-i2p-p2i-opt", cl::init(false),
54 cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"));
55
56//===----------------------------------------------------------------------===//
57// AllocaInst Class
58//===----------------------------------------------------------------------===//
59
60std::optional<TypeSize>
62 TypeSize Size = DL.getTypeAllocSize(getAllocatedType());
63 if (isArrayAllocation()) {
64 auto *C = dyn_cast<ConstantInt>(getArraySize());
65 if (!C)
66 return std::nullopt;
67 assert(!Size.isScalable() && "Array elements cannot have a scalable size");
68 Size *= C->getZExtValue();
69 }
70 return Size;
71}
72
73std::optional<TypeSize>
75 std::optional<TypeSize> Size = getAllocationSize(DL);
76 if (Size)
77 return *Size * 8;
78 return std::nullopt;
79}
80
81//===----------------------------------------------------------------------===//
82// SelectInst Class
83//===----------------------------------------------------------------------===//
84
85/// areInvalidOperands - Return a string if the specified operands are invalid
86/// for a select operation, otherwise return null.
87const char *SelectInst::areInvalidOperands(Value *Op0, Value *Op1, Value *Op2) {
88 if (Op1->getType() != Op2->getType())
89 return "both values to select must have same type";
90
91 if (Op1->getType()->isTokenTy())
92 return "select values cannot have token type";
93
94 if (VectorType *VT = dyn_cast<VectorType>(Op0->getType())) {
95 // Vector select.
96 if (VT->getElementType() != Type::getInt1Ty(Op0->getContext()))
97 return "vector select condition element type must be i1";
98 VectorType *ET = dyn_cast<VectorType>(Op1->getType());
99 if (!ET)
100 return "selected values for vector select must be vectors";
101 if (ET->getElementCount() != VT->getElementCount())
102 return "vector select requires selected vectors to have "
103 "the same vector length as select condition";
104 } else if (Op0->getType() != Type::getInt1Ty(Op0->getContext())) {
105 return "select condition must be i1 or <n x i1>";
106 }
107 return nullptr;
108}
109
110//===----------------------------------------------------------------------===//
111// PHINode Class
112//===----------------------------------------------------------------------===//
113
114PHINode::PHINode(const PHINode &PN)
115 : Instruction(PN.getType(), Instruction::PHI, nullptr, PN.getNumOperands()),
116 ReservedSpace(PN.getNumOperands()) {
118 std::copy(PN.op_begin(), PN.op_end(), op_begin());
119 copyIncomingBlocks(make_range(PN.block_begin(), PN.block_end()));
121}
122
123// removeIncomingValue - Remove an incoming value. This is useful if a
124// predecessor basic block is deleted.
125Value *PHINode::removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty) {
126 Value *Removed = getIncomingValue(Idx);
127
128 // Move everything after this operand down.
129 //
130 // FIXME: we could just swap with the end of the list, then erase. However,
131 // clients might not expect this to happen. The code as it is thrashes the
132 // use/def lists, which is kinda lame.
133 std::copy(op_begin() + Idx + 1, op_end(), op_begin() + Idx);
135
136 // Nuke the last value.
137 Op<-1>().set(nullptr);
139
140 // If the PHI node is dead, because it has zero entries, nuke it now.
141 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
142 // If anyone is using this PHI, make them use a dummy value instead...
145 }
146 return Removed;
147}
148
149void PHINode::removeIncomingValueIf(function_ref<bool(unsigned)> Predicate,
150 bool DeletePHIIfEmpty) {
151 SmallDenseSet<unsigned> RemoveIndices;
152 for (unsigned Idx = 0; Idx < getNumIncomingValues(); ++Idx)
153 if (Predicate(Idx))
154 RemoveIndices.insert(Idx);
155
156 if (RemoveIndices.empty())
157 return;
158
159 // Remove operands.
160 auto NewOpEnd = remove_if(operands(), [&](Use &U) {
161 return RemoveIndices.contains(U.getOperandNo());
162 });
163 for (Use &U : make_range(NewOpEnd, op_end()))
164 U.set(nullptr);
165
166 // Remove incoming blocks.
167 (void)std::remove_if(const_cast<block_iterator>(block_begin()),
168 const_cast<block_iterator>(block_end()), [&](BasicBlock *&BB) {
169 return RemoveIndices.contains(&BB - block_begin());
170 });
171
172 setNumHungOffUseOperands(getNumOperands() - RemoveIndices.size());
173
174 // If the PHI node is dead, because it has zero entries, nuke it now.
175 if (getNumOperands() == 0 && DeletePHIIfEmpty) {
176 // If anyone is using this PHI, make them use a dummy value instead...
179 }
180}
181
182/// growOperands - grow operands - This grows the operand list in response
183/// to a push_back style of operation. This grows the number of ops by 1.5
184/// times.
185///
186void PHINode::growOperands() {
187 unsigned e = getNumOperands();
188 unsigned NumOps = e + e / 2;
189 if (NumOps < 2) NumOps = 2; // 2 op PHI nodes are VERY common.
190
191 ReservedSpace = NumOps;
192 growHungoffUses(ReservedSpace, /* IsPhi */ true);
193}
194
195/// hasConstantValue - If the specified PHI node always merges together the same
196/// value, return the value, otherwise return null.
198 // Exploit the fact that phi nodes always have at least one entry.
199 Value *ConstantValue = getIncomingValue(0);
200 for (unsigned i = 1, e = getNumIncomingValues(); i != e; ++i)
201 if (getIncomingValue(i) != ConstantValue && getIncomingValue(i) != this) {
202 if (ConstantValue != this)
203 return nullptr; // Incoming values not all the same.
204 // The case where the first value is this PHI.
205 ConstantValue = getIncomingValue(i);
206 }
207 if (ConstantValue == this)
208 return UndefValue::get(getType());
209 return ConstantValue;
210}
211
212/// hasConstantOrUndefValue - Whether the specified PHI node always merges
213/// together the same value, assuming that undefs result in the same value as
214/// non-undefs.
215/// Unlike \ref hasConstantValue, this does not return a value because the
216/// unique non-undef incoming value need not dominate the PHI node.
218 Value *ConstantValue = nullptr;
219 for (unsigned i = 0, e = getNumIncomingValues(); i != e; ++i) {
221 if (Incoming != this && !isa<UndefValue>(Incoming)) {
222 if (ConstantValue && ConstantValue != Incoming)
223 return false;
224 ConstantValue = Incoming;
225 }
226 }
227 return true;
228}
229
230//===----------------------------------------------------------------------===//
231// LandingPadInst Implementation
232//===----------------------------------------------------------------------===//
233
// Construct a landingpad with room for NumReservedValues clauses, inserted
// before the given iterator position.
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr,
                               BasicBlock::iterator InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}
240
// Construct a landingpad inserted before an existing instruction.
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertBefore) {
  init(NumReservedValues, NameStr);
}
246
// Construct a landingpad appended to the end of a basic block.
LandingPadInst::LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(RetTy, Instruction::LandingPad, nullptr, 0, InsertAtEnd) {
  init(NumReservedValues, NameStr);
}
252
253LandingPadInst::LandingPadInst(const LandingPadInst &LP)
254 : Instruction(LP.getType(), Instruction::LandingPad, nullptr,
255 LP.getNumOperands()),
256 ReservedSpace(LP.getNumOperands()) {
258 Use *OL = getOperandList();
259 const Use *InOL = LP.getOperandList();
260 for (unsigned I = 0, E = ReservedSpace; I != E; ++I)
261 OL[I] = InOL[I];
262
263 setCleanup(LP.isCleanup());
264}
265
// Factory: landingpad inserted before an existing instruction.
LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       Instruction *InsertBefore) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertBefore);
}
271
// Factory: landingpad appended at the end of a basic block.
LandingPadInst *LandingPadInst::Create(Type *RetTy, unsigned NumReservedClauses,
                                       const Twine &NameStr,
                                       BasicBlock *InsertAtEnd) {
  return new LandingPadInst(RetTy, NumReservedClauses, NameStr, InsertAtEnd);
}
277
278void LandingPadInst::init(unsigned NumReservedValues, const Twine &NameStr) {
279 ReservedSpace = NumReservedValues;
281 allocHungoffUses(ReservedSpace);
282 setName(NameStr);
283 setCleanup(false);
284}
285
286/// growOperands - grow operands - This grows the operand list in response to a
287/// push_back style of operation. This grows the number of ops by 2 times.
288void LandingPadInst::growOperands(unsigned Size) {
289 unsigned e = getNumOperands();
290 if (ReservedSpace >= e + Size) return;
291 ReservedSpace = (std::max(e, 1U) + Size / 2) * 2;
292 growHungoffUses(ReservedSpace);
293}
294
296 unsigned OpNo = getNumOperands();
297 growOperands(1);
298 assert(OpNo < ReservedSpace && "Growing didn't work!");
300 getOperandList()[OpNo] = Val;
301}
302
303//===----------------------------------------------------------------------===//
304// CallBase Implementation
305//===----------------------------------------------------------------------===//
306
308 BasicBlock::iterator InsertPt) {
309 switch (CB->getOpcode()) {
310 case Instruction::Call:
311 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
312 case Instruction::Invoke:
313 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
314 case Instruction::CallBr:
315 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
316 default:
317 llvm_unreachable("Unknown CallBase sub-class!");
318 }
319}
320
322 Instruction *InsertPt) {
323 switch (CB->getOpcode()) {
324 case Instruction::Call:
325 return CallInst::Create(cast<CallInst>(CB), Bundles, InsertPt);
326 case Instruction::Invoke:
327 return InvokeInst::Create(cast<InvokeInst>(CB), Bundles, InsertPt);
328 case Instruction::CallBr:
329 return CallBrInst::Create(cast<CallBrInst>(CB), Bundles, InsertPt);
330 default:
331 llvm_unreachable("Unknown CallBase sub-class!");
332 }
333}
334
336 Instruction *InsertPt) {
338 for (unsigned i = 0, e = CI->getNumOperandBundles(); i < e; ++i) {
339 auto ChildOB = CI->getOperandBundleAt(i);
340 if (ChildOB.getTagName() != OpB.getTag())
341 OpDefs.emplace_back(ChildOB);
342 }
343 OpDefs.emplace_back(OpB);
344 return CallBase::Create(CI, OpDefs, InsertPt);
345}
346
347
349
351 assert(getOpcode() == Instruction::CallBr && "Unexpected opcode!");
352 return cast<CallBrInst>(this)->getNumIndirectDests() + 1;
353}
354
356 const Value *V = getCalledOperand();
357 if (isa<Function>(V) || isa<Constant>(V))
358 return false;
359 return !isInlineAsm();
360}
361
362/// Tests if this call site must be tail call optimized. Only a CallInst can
363/// be tail call optimized.
365 if (auto *CI = dyn_cast<CallInst>(this))
366 return CI->isMustTailCall();
367 return false;
368}
369
370/// Tests if this call site is marked as a tail call.
372 if (auto *CI = dyn_cast<CallInst>(this))
373 return CI->isTailCall();
374 return false;
375}
376
378 if (auto *F = getCalledFunction())
379 return F->getIntrinsicID();
381}
382
385
386 if (const Function *F = getCalledFunction())
387 Mask |= F->getAttributes().getRetNoFPClass();
388 return Mask;
389}
390
393
394 if (const Function *F = getCalledFunction())
395 Mask |= F->getAttributes().getParamNoFPClass(i);
396 return Mask;
397}
398
399std::optional<ConstantRange> CallBase::getRange() const {
400 const Attribute RangeAttr = getRetAttr(llvm::Attribute::Range);
401 if (RangeAttr.isValid())
402 return RangeAttr.getRange();
403 return std::nullopt;
404}
405
407 if (hasRetAttr(Attribute::NonNull))
408 return true;
409
410 if (getRetDereferenceableBytes() > 0 &&
412 return true;
413
414 return false;
415}
416
418 unsigned Index;
419
420 if (Attrs.hasAttrSomewhere(Kind, &Index))
422 if (const Function *F = getCalledFunction())
423 if (F->getAttributes().hasAttrSomewhere(Kind, &Index))
425
426 return nullptr;
427}
428
429/// Determine whether the argument or parameter has the given attribute.
430bool CallBase::paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const {
431 assert(ArgNo < arg_size() && "Param index out of bounds!");
432
433 if (Attrs.hasParamAttr(ArgNo, Kind))
434 return true;
435
436 const Function *F = getCalledFunction();
437 if (!F)
438 return false;
439
440 if (!F->getAttributes().hasParamAttr(ArgNo, Kind))
441 return false;
442
443 // Take into account mod/ref by operand bundles.
444 switch (Kind) {
445 case Attribute::ReadNone:
447 case Attribute::ReadOnly:
449 case Attribute::WriteOnly:
450 return !hasReadingOperandBundles();
451 default:
452 return true;
453 }
454}
455
456bool CallBase::hasFnAttrOnCalledFunction(Attribute::AttrKind Kind) const {
457 if (auto *F = dyn_cast<Function>(getCalledOperand()))
458 return F->getAttributes().hasFnAttr(Kind);
459
460 return false;
461}
462
463bool CallBase::hasFnAttrOnCalledFunction(StringRef Kind) const {
464 if (auto *F = dyn_cast<Function>(getCalledOperand()))
465 return F->getAttributes().hasFnAttr(Kind);
466
467 return false;
468}
469
// Fetch a function attribute (enum or string kind) from the directly-called
// function; returns an invalid Attribute for indirect calls or when absent.
template <typename AK>
Attribute CallBase::getFnAttrOnCalledFunction(AK Kind) const {
  if constexpr (std::is_same_v<AK, Attribute::AttrKind>) {
    // getMemoryEffects() correctly combines memory effects from the call-site,
    // operand bundles and function.
    assert(Kind != Attribute::Memory && "Use getMemoryEffects() instead");
  }

  if (auto *F = dyn_cast<Function>(getCalledOperand()))
    return F->getAttributes().getFnAttr(Kind);

  return Attribute();
}
483
484template Attribute
485CallBase::getFnAttrOnCalledFunction(Attribute::AttrKind Kind) const;
486template Attribute CallBase::getFnAttrOnCalledFunction(StringRef Kind) const;
487
488template <typename AK>
489Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
490 AK Kind) const {
492
493 if (auto *F = dyn_cast<Function>(V))
494 return F->getAttributes().getParamAttr(ArgNo, Kind);
495
496 return Attribute();
497}
498template Attribute
499CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
500 Attribute::AttrKind Kind) const;
501template Attribute CallBase::getParamAttrOnCalledFunction(unsigned ArgNo,
502 StringRef Kind) const;
503
506 for (unsigned i = 0, e = getNumOperandBundles(); i != e; ++i)
508}
509
512 const unsigned BeginIndex) {
513 auto It = op_begin() + BeginIndex;
514 for (auto &B : Bundles)
515 It = std::copy(B.input_begin(), B.input_end(), It);
516
517 auto *ContextImpl = getContext().pImpl;
518 auto BI = Bundles.begin();
519 unsigned CurrentIndex = BeginIndex;
520
521 for (auto &BOI : bundle_op_infos()) {
522 assert(BI != Bundles.end() && "Incorrect allocation?");
523
524 BOI.Tag = ContextImpl->getOrInsertBundleTag(BI->getTag());
525 BOI.Begin = CurrentIndex;
526 BOI.End = CurrentIndex + BI->input_size();
527 CurrentIndex = BOI.End;
528 BI++;
529 }
530
531 assert(BI == Bundles.end() && "Incorrect allocation?");
532
533 return It;
534}
535
537 /// When there isn't many bundles, we do a simple linear search.
538 /// Else fallback to a binary-search that use the fact that bundles usually
539 /// have similar number of argument to get faster convergence.
541 for (auto &BOI : bundle_op_infos())
542 if (BOI.Begin <= OpIdx && OpIdx < BOI.End)
543 return BOI;
544
545 llvm_unreachable("Did not find operand bundle for operand!");
546 }
547
548 assert(OpIdx >= arg_size() && "the Idx is not in the operand bundles");
550 OpIdx < std::prev(bundle_op_info_end())->End &&
551 "The Idx isn't in the operand bundle");
552
553 /// We need a decimal number below and to prevent using floating point numbers
554 /// we use an intergal value multiplied by this constant.
555 constexpr unsigned NumberScaling = 1024;
556
559 bundle_op_iterator Current = Begin;
560
561 while (Begin != End) {
562 unsigned ScaledOperandPerBundle =
563 NumberScaling * (std::prev(End)->End - Begin->Begin) / (End - Begin);
564 Current = Begin + (((OpIdx - Begin->Begin) * NumberScaling) /
565 ScaledOperandPerBundle);
566 if (Current >= End)
567 Current = std::prev(End);
568 assert(Current < End && Current >= Begin &&
569 "the operand bundle doesn't cover every value in the range");
570 if (OpIdx >= Current->Begin && OpIdx < Current->End)
571 break;
572 if (OpIdx >= Current->End)
573 Begin = Current + 1;
574 else
575 End = Current;
576 }
577
578 assert(OpIdx >= Current->Begin && OpIdx < Current->End &&
579 "the operand bundle doesn't cover every value in the range");
580 return *Current;
581}
582
585 BasicBlock::iterator InsertPt) {
586 if (CB->getOperandBundle(ID))
587 return CB;
588
590 CB->getOperandBundlesAsDefs(Bundles);
591 Bundles.push_back(OB);
592 return Create(CB, Bundles, InsertPt);
593}
594
597 Instruction *InsertPt) {
598 if (CB->getOperandBundle(ID))
599 return CB;
600
602 CB->getOperandBundlesAsDefs(Bundles);
603 Bundles.push_back(OB);
604 return Create(CB, Bundles, InsertPt);
605}
606
608 BasicBlock::iterator InsertPt) {
610 bool CreateNew = false;
611
612 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
613 auto Bundle = CB->getOperandBundleAt(I);
614 if (Bundle.getTagID() == ID) {
615 CreateNew = true;
616 continue;
617 }
618 Bundles.emplace_back(Bundle);
619 }
620
621 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
622}
623
625 Instruction *InsertPt) {
627 bool CreateNew = false;
628
629 for (unsigned I = 0, E = CB->getNumOperandBundles(); I != E; ++I) {
630 auto Bundle = CB->getOperandBundleAt(I);
631 if (Bundle.getTagID() == ID) {
632 CreateNew = true;
633 continue;
634 }
635 Bundles.emplace_back(Bundle);
636 }
637
638 return CreateNew ? Create(CB, Bundles, InsertPt) : CB;
639}
640
642 // Implementation note: this is a conservative implementation of operand
643 // bundle semantics, where *any* non-assume operand bundle (other than
644 // ptrauth) forces a callsite to be at least readonly.
647 getIntrinsicID() != Intrinsic::assume;
648}
649
654 getIntrinsicID() != Intrinsic::assume;
655}
656
659 if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
660 MemoryEffects FnME = Fn->getMemoryEffects();
661 if (hasOperandBundles()) {
662 // TODO: Add a method to get memory effects for operand bundles instead.
664 FnME |= MemoryEffects::readOnly();
666 FnME |= MemoryEffects::writeOnly();
667 }
668 ME &= FnME;
669 }
670 return ME;
671}
674}
675
676/// Determine if the function does not access memory.
679}
682}
683
684/// Determine if the function does not access or only reads memory.
687}
690}
691
692/// Determine if the function does not access or only writes memory.
695}
698}
699
700/// Determine if the call can access memmory only using pointers based
701/// on its arguments.
704}
707}
708
709/// Determine if the function may only access memory that is
710/// inaccessible from the IR.
713}
716}
717
718/// Determine if the function may only access memory that is
719/// either inaccessible from the IR or pointed to by its arguments.
722}
726}
727
728//===----------------------------------------------------------------------===//
729// CallInst Implementation
730//===----------------------------------------------------------------------===//
731
// Fill in a freshly-allocated CallInst: function type, arguments, operand
// bundles, callee, and name. Operand storage must already be sized for
// args + bundle inputs + callee (checked by the first assert).
void CallInst::init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
                    ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr) {
  this->FTy = FTy;
  assert(getNumOperands() == Args.size() + CountBundleInputs(Bundles) + 1 &&
         "NumOperands not set up?");

#ifndef NDEBUG
  // Argument count and per-argument types must match the signature (extra
  // arguments are only allowed for varargs functions).
  assert((Args.size() == FTy->getNumParams() ||
          (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
         "Calling a function with bad signature!");

  for (unsigned i = 0; i != Args.size(); ++i)
    assert((i >= FTy->getNumParams() ||
            FTy->getParamType(i) == Args[i]->getType()) &&
           "Calling a function with a bad signature!");
#endif

  // Set operands in order of their index to match use-list-order
  // prediction.
  llvm::copy(Args, op_begin());
  setCalledOperand(Func);

  auto It = populateBundleOperandInfos(Bundles, Args.size());
  (void)It;
  assert(It + 1 == op_end() && "Should add up!");

  setName(NameStr);
}
760
761void CallInst::init(FunctionType *FTy, Value *Func, const Twine &NameStr) {
762 this->FTy = FTy;
763 assert(getNumOperands() == 1 && "NumOperands not set up?");
764 setCalledOperand(Func);
765
766 assert(FTy->getNumParams() == 0 && "Calling a function with bad signature");
767
768 setName(NameStr);
769}
770
// Construct a zero-argument call inserted before the given iterator position.
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock::iterator InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}
777
// Construct a zero-argument call inserted before an existing instruction.
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertBefore) {
  init(Ty, Func, Name);
}
784
// Construct a zero-argument call appended to the end of a basic block.
CallInst::CallInst(FunctionType *Ty, Value *Func, const Twine &Name,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) - 1, 1, InsertAtEnd) {
  init(Ty, Func, Name);
}
791
792CallInst::CallInst(const CallInst &CI)
793 : CallBase(CI.Attrs, CI.FTy, CI.getType(), Instruction::Call,
794 OperandTraits<CallBase>::op_end(this) - CI.getNumOperands(),
795 CI.getNumOperands()) {
796 setTailCallKind(CI.getTailCallKind());
798
799 std::copy(CI.op_begin(), CI.op_end(), op_begin());
800 std::copy(CI.bundle_op_info_begin(), CI.bundle_op_info_end(),
803}
804
806 BasicBlock::iterator InsertPt) {
807 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
808
809 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
810 Args, OpB, CI->getName(), InsertPt);
811 NewCI->setTailCallKind(CI->getTailCallKind());
812 NewCI->setCallingConv(CI->getCallingConv());
813 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
814 NewCI->setAttributes(CI->getAttributes());
815 NewCI->setDebugLoc(CI->getDebugLoc());
816 return NewCI;
817}
818
820 Instruction *InsertPt) {
821 std::vector<Value *> Args(CI->arg_begin(), CI->arg_end());
822
823 auto *NewCI = CallInst::Create(CI->getFunctionType(), CI->getCalledOperand(),
824 Args, OpB, CI->getName(), InsertPt);
825 NewCI->setTailCallKind(CI->getTailCallKind());
826 NewCI->setCallingConv(CI->getCallingConv());
827 NewCI->SubclassOptionalData = CI->SubclassOptionalData;
828 NewCI->setAttributes(CI->getAttributes());
829 NewCI->setDebugLoc(CI->getDebugLoc());
830 return NewCI;
831}
832
833// Update profile weight for call instruction by scaling it using the ratio
834// of S/T. The meaning of "branch_weights" meta data for call instruction is
835// transfered to represent call count.
837 if (T == 0) {
838 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
839 "div by 0. Ignoring. Likely the function "
840 << getParent()->getParent()->getName()
841 << " has 0 entry count, and contains call instructions "
842 "with non-zero prof info.");
843 return;
844 }
845 scaleProfData(*this, S, T);
846}
847
848//===----------------------------------------------------------------------===//
849// InvokeInst Implementation
850//===----------------------------------------------------------------------===//
851
852void InvokeInst::init(FunctionType *FTy, Value *Fn, BasicBlock *IfNormal,
853 BasicBlock *IfException, ArrayRef<Value *> Args,
855 const Twine &NameStr) {
856 this->FTy = FTy;
857
858 assert((int)getNumOperands() ==
859 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)) &&
860 "NumOperands not set up?");
861
862#ifndef NDEBUG
863 assert(((Args.size() == FTy->getNumParams()) ||
864 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
865 "Invoking a function with bad signature");
866
867 for (unsigned i = 0, e = Args.size(); i != e; i++)
868 assert((i >= FTy->getNumParams() ||
869 FTy->getParamType(i) == Args[i]->getType()) &&
870 "Invoking a function with a bad signature!");
871#endif
872
873 // Set operands in order of their index to match use-list-order
874 // prediction.
875 llvm::copy(Args, op_begin());
876 setNormalDest(IfNormal);
877 setUnwindDest(IfException);
879
880 auto It = populateBundleOperandInfos(Bundles, Args.size());
881 (void)It;
882 assert(It + 3 == op_end() && "Should add up!");
883
884 setName(NameStr);
885}
886
887InvokeInst::InvokeInst(const InvokeInst &II)
888 : CallBase(II.Attrs, II.FTy, II.getType(), Instruction::Invoke,
889 OperandTraits<CallBase>::op_end(this) - II.getNumOperands(),
890 II.getNumOperands()) {
892 std::copy(II.op_begin(), II.op_end(), op_begin());
893 std::copy(II.bundle_op_info_begin(), II.bundle_op_info_end(),
896}
897
899 BasicBlock::iterator InsertPt) {
900 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
901
902 auto *NewII = InvokeInst::Create(
904 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
905 NewII->setCallingConv(II->getCallingConv());
906 NewII->SubclassOptionalData = II->SubclassOptionalData;
907 NewII->setAttributes(II->getAttributes());
908 NewII->setDebugLoc(II->getDebugLoc());
909 return NewII;
910}
911
913 Instruction *InsertPt) {
914 std::vector<Value *> Args(II->arg_begin(), II->arg_end());
915
916 auto *NewII = InvokeInst::Create(
918 II->getUnwindDest(), Args, OpB, II->getName(), InsertPt);
919 NewII->setCallingConv(II->getCallingConv());
920 NewII->SubclassOptionalData = II->SubclassOptionalData;
921 NewII->setAttributes(II->getAttributes());
922 NewII->setDebugLoc(II->getDebugLoc());
923 return NewII;
924}
925
927 return cast<LandingPadInst>(getUnwindDest()->getFirstNonPHI());
928}
929
931 if (T == 0) {
932 LLVM_DEBUG(dbgs() << "Attempting to update profile weights will result in "
933 "div by 0. Ignoring. Likely the function "
934 << getParent()->getParent()->getName()
935 << " has 0 entry count, and contains call instructions "
936 "with non-zero prof info.");
937 return;
938 }
939 scaleProfData(*this, S, T);
940}
941
942//===----------------------------------------------------------------------===//
943// CallBrInst Implementation
944//===----------------------------------------------------------------------===//
945
946void CallBrInst::init(FunctionType *FTy, Value *Fn, BasicBlock *Fallthrough,
947 ArrayRef<BasicBlock *> IndirectDests,
950 const Twine &NameStr) {
951 this->FTy = FTy;
952
953 assert((int)getNumOperands() ==
954 ComputeNumOperands(Args.size(), IndirectDests.size(),
955 CountBundleInputs(Bundles)) &&
956 "NumOperands not set up?");
957
958#ifndef NDEBUG
959 assert(((Args.size() == FTy->getNumParams()) ||
960 (FTy->isVarArg() && Args.size() > FTy->getNumParams())) &&
961 "Calling a function with bad signature");
962
963 for (unsigned i = 0, e = Args.size(); i != e; i++)
964 assert((i >= FTy->getNumParams() ||
965 FTy->getParamType(i) == Args[i]->getType()) &&
966 "Calling a function with a bad signature!");
967#endif
968
969 // Set operands in order of their index to match use-list-order
970 // prediction.
971 std::copy(Args.begin(), Args.end(), op_begin());
972 NumIndirectDests = IndirectDests.size();
973 setDefaultDest(Fallthrough);
974 for (unsigned i = 0; i != NumIndirectDests; ++i)
975 setIndirectDest(i, IndirectDests[i]);
977
978 auto It = populateBundleOperandInfos(Bundles, Args.size());
979 (void)It;
980 assert(It + 2 + IndirectDests.size() == op_end() && "Should add up!");
981
982 setName(NameStr);
983}
984
985CallBrInst::CallBrInst(const CallBrInst &CBI)
986 : CallBase(CBI.Attrs, CBI.FTy, CBI.getType(), Instruction::CallBr,
987 OperandTraits<CallBase>::op_end(this) - CBI.getNumOperands(),
988 CBI.getNumOperands()) {
990 std::copy(CBI.op_begin(), CBI.op_end(), op_begin());
991 std::copy(CBI.bundle_op_info_begin(), CBI.bundle_op_info_end(),
994 NumIndirectDests = CBI.NumIndirectDests;
995}
996
998 BasicBlock::iterator InsertPt) {
999 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
1000
1001 auto *NewCBI = CallBrInst::Create(
1002 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
1003 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
1004 NewCBI->setCallingConv(CBI->getCallingConv());
1005 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
1006 NewCBI->setAttributes(CBI->getAttributes());
1007 NewCBI->setDebugLoc(CBI->getDebugLoc());
1008 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
1009 return NewCBI;
1010}
1011
1013 Instruction *InsertPt) {
1014 std::vector<Value *> Args(CBI->arg_begin(), CBI->arg_end());
1015
1016 auto *NewCBI = CallBrInst::Create(
1017 CBI->getFunctionType(), CBI->getCalledOperand(), CBI->getDefaultDest(),
1018 CBI->getIndirectDests(), Args, OpB, CBI->getName(), InsertPt);
1019 NewCBI->setCallingConv(CBI->getCallingConv());
1020 NewCBI->SubclassOptionalData = CBI->SubclassOptionalData;
1021 NewCBI->setAttributes(CBI->getAttributes());
1022 NewCBI->setDebugLoc(CBI->getDebugLoc());
1023 NewCBI->NumIndirectDests = CBI->NumIndirectDests;
1024 return NewCBI;
1025}
1026
1027//===----------------------------------------------------------------------===//
1028// ReturnInst Implementation
1029//===----------------------------------------------------------------------===//
1030
1031ReturnInst::ReturnInst(const ReturnInst &RI)
1032 : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Ret,
1033 OperandTraits<ReturnInst>::op_end(this) - RI.getNumOperands(),
1034 RI.getNumOperands()) {
1035 if (RI.getNumOperands())
1036 Op<0>() = RI.Op<0>();
1038}
1039
// Return with an optional value, inserted before the given iterator; the
// operand count is 1 when retVal is non-null, 0 otherwise.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal,
                       BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}
1048
// Return with an optional value, inserted before an existing instruction.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertBefore) {
  if (retVal)
    Op<0>() = retVal;
}
1057
// Return with an optional value, appended at the end of a basic block.
ReturnInst::ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(C), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this) - !!retVal, !!retVal,
                  InsertAtEnd) {
  if (retVal)
    Op<0>() = retVal;
}
1065
// Void return (no operands), appended at the end of a basic block.
ReturnInst::ReturnInst(LLVMContext &Context, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Context), Instruction::Ret,
                  OperandTraits<ReturnInst>::op_end(this), 0, InsertAtEnd) {}
1069
1070//===----------------------------------------------------------------------===//
1071// ResumeInst Implementation
1072//===----------------------------------------------------------------------===//
1073
// Copy constructor: clone RI's single exception-value operand. The clone is
// created detached (no insertion position supplied).
ResumeInst::ResumeInst(const ResumeInst &RI)
    : Instruction(Type::getVoidTy(RI.getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1) {
  Op<0>() = RI.Op<0>();
}
1079
// Create a 'resume' propagating exception value \p Exn, inserted before the
// position \p InsertBefore.
ResumeInst::ResumeInst(Value *Exn, BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}
1085
// Create a 'resume' propagating exception value \p Exn, inserted before
// \p InsertBefore.
ResumeInst::ResumeInst(Value *Exn, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertBefore) {
  Op<0>() = Exn;
}
1091
// Create a 'resume' propagating exception value \p Exn, appended to
// \p InsertAtEnd.
ResumeInst::ResumeInst(Value *Exn, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(Exn->getContext()), Instruction::Resume,
                  OperandTraits<ResumeInst>::op_begin(this), 1, InsertAtEnd) {
  Op<0>() = Exn;
}
1097
1098//===----------------------------------------------------------------------===//
1099// CleanupReturnInst Implementation
1100//===----------------------------------------------------------------------===//
1101
1102CleanupReturnInst::CleanupReturnInst(const CleanupReturnInst &CRI)
1103 : Instruction(CRI.getType(), Instruction::CleanupRet,
1104 OperandTraits<CleanupReturnInst>::op_end(this) -
1105 CRI.getNumOperands(),
1106 CRI.getNumOperands()) {
1107 setSubclassData<Instruction::OpaqueField>(
1109 Op<0>() = CRI.Op<0>();
1110 if (CRI.hasUnwindDest())
1111 Op<1>() = CRI.Op<1>();
1112}
1113
// Shared constructor helper. Records in subclass data whether an unwind
// destination exists (so hasUnwindDest() can answer without inspecting
// operands), then wires the operands: Op<0> = cleanuppad token,
// Op<1> = unwind destination (present only when \p UnwindBB is non-null).
void CleanupReturnInst::init(Value *CleanupPad, BasicBlock *UnwindBB) {
  if (UnwindBB)
    setSubclassData<UnwindDestField>(true);

  Op<0>() = CleanupPad;
  if (UnwindBB)
    Op<1>() = UnwindBB;
}
1122
// Create a 'cleanupret' from \p CleanupPad, optionally unwinding to
// \p UnwindBB; inserted before \p InsertBefore. \p Values is the operand
// count the caller sized (2 when an unwind destination exists, else 1).
CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values,
                                     BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}
1132
// Create a 'cleanupret' inserted before \p InsertBefore. See init() for the
// operand layout; \p Values is 2 with an unwind destination, else 1.
CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertBefore) {
  init(CleanupPad, UnwindBB);
}
1141
// Create a 'cleanupret' appended to \p InsertAtEnd. See init() for the
// operand layout; \p Values is 2 with an unwind destination, else 1.
CleanupReturnInst::CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB,
                                     unsigned Values, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(CleanupPad->getContext()),
                  Instruction::CleanupRet,
                  OperandTraits<CleanupReturnInst>::op_end(this) - Values,
                  Values, InsertAtEnd) {
  init(CleanupPad, UnwindBB);
}
1150
1151//===----------------------------------------------------------------------===//
1152// CatchReturnInst Implementation
1153//===----------------------------------------------------------------------===//
// Shared constructor helper: Op<0> = catchpad token, Op<1> = successor block.
void CatchReturnInst::init(Value *CatchPad, BasicBlock *BB) {
  Op<0>() = CatchPad;
  Op<1>() = BB;
}
1158
// Copy constructor: clone both operands (catchpad token and successor).
// The clone is created detached (no insertion position supplied).
CatchReturnInst::CatchReturnInst(const CatchReturnInst &CRI)
    : Instruction(Type::getVoidTy(CRI.getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2) {
  Op<0>() = CRI.Op<0>();
  Op<1>() = CRI.Op<1>();
}
1165
// Create a 'catchret' from \p CatchPad to successor \p BB, inserted before
// the position \p InsertBefore.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}
1173
// Create a 'catchret' from \p CatchPad to successor \p BB, inserted before
// \p InsertBefore.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertBefore) {
  init(CatchPad, BB);
}
1181
// Create a 'catchret' from \p CatchPad to successor \p BB, appended to
// \p InsertAtEnd.
CatchReturnInst::CatchReturnInst(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(BB->getContext()), Instruction::CatchRet,
                  OperandTraits<CatchReturnInst>::op_begin(this), 2,
                  InsertAtEnd) {
  init(CatchPad, BB);
}
1189
1190//===----------------------------------------------------------------------===//
1191// CatchSwitchInst Implementation
1192//===----------------------------------------------------------------------===//
1193
// Create a 'catchswitch'. \p NumReservedValues is a capacity hint for how
// many handlers will be added; one extra slot is reserved for the parent-pad
// operand (the '+ 1' below) and another for the unwind destination when one
// is present. Operands live in hung-off storage set up by init().
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 BasicBlock::iterator InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}
1205
// Create a 'catchswitch' inserted before \p InsertBefore. See the iterator
// overload above for the meaning of \p NumReservedValues and the extra
// reserved slots.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr,
                                 Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertBefore) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}
1217
// Create a 'catchswitch' appended to \p InsertAtEnd. \p NumReservedValues is
// a handler-capacity hint; slots for the parent pad (the '+ 1') and, if
// present, the unwind destination are reserved on top of it.
CatchSwitchInst::CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumReservedValues,
                                 const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Instruction::CatchSwitch, nullptr, 0,
                  InsertAtEnd) {
  if (UnwindDest)
    ++NumReservedValues;
  init(ParentPad, UnwindDest, NumReservedValues + 1);
  setName(NameStr);
}
1228
// Copy constructor. init() allocates the hung-off use list and installs the
// parent pad (operand 0) and, if present, the unwind destination; the loop
// then copies the remaining operands verbatim, starting at index 1 since
// operand 0 was already set by init().
CatchSwitchInst::CatchSwitchInst(const CatchSwitchInst &CSI)
    : Instruction(CSI.getType(), Instruction::CatchSwitch, nullptr,
                  CSI.getNumOperands()) {
  init(CSI.getParentPad(), CSI.getUnwindDest(), CSI.getNumOperands());
  setNumHungOffUseOperands(ReservedSpace);
  Use *OL = getOperandList();
  const Use *InOL = CSI.getOperandList();
  for (unsigned I = 1, E = ReservedSpace; I != E; ++I)
    OL[I] = InOL[I];
}
1239
// Shared constructor helper: allocate hung-off use storage of
// \p NumReservedValues slots and install the fixed operands.
void CatchSwitchInst::init(Value *ParentPad, BasicBlock *UnwindDest,
                           unsigned NumReservedValues) {
  assert(ParentPad && NumReservedValues);

  ReservedSpace = NumReservedValues;
  // Start with 1 live operand (the parent pad), or 2 when there is an unwind
  // destination; handler operands are appended later (capacity permitting,
  // within ReservedSpace).
  setNumHungOffUseOperands(UnwindDest ? 2 : 1);
  allocHungoffUses(ReservedSpace);

  Op<0>() = ParentPad;
  if (UnwindDest) {
    // Record the unwind destination's presence in subclass data so
    // hasUnwindDest() works, then store the block operand.
    setSubclassData<UnwindDestField>(true);
    setUnwindDest(UnwindDest);
  }
}
1254
/// growOperands - grow operands - This grows the operand list in response to a
/// push_back style of operation. This grows the number of ops by 2 times.
void CatchSwitchInst::growOperands(unsigned Size) {
  unsigned NumOperands = getNumOperands();
  assert(NumOperands >= 1);
  // Fast path: the current hung-off allocation already has room.
  if (ReservedSpace >= NumOperands + Size)
    return;
  // Roughly double capacity. Note '/' binds tighter than '+', so for the
  // common Size == 1 case this is simply NumOperands * 2.
  ReservedSpace = (NumOperands + Size / 2) * 2;
  growHungoffUses(ReservedSpace);
}
1265
1267 unsigned OpNo = getNumOperands();
1268 growOperands(1);
1269 assert(OpNo < ReservedSpace && "Growing didn't work!");
1271 getOperandList()[OpNo] = Handler;
1272}
1273
1275 // Move all subsequent handlers up one.
1276 Use *EndDst = op_end() - 1;
1277 for (Use *CurDst = HI.getCurrent(); CurDst != EndDst; ++CurDst)
1278 *CurDst = *(CurDst + 1);
1279 // Null out the last handler use.
1280 *EndDst = nullptr;
1281
1283}
1284
1285//===----------------------------------------------------------------------===//
1286// FuncletPadInst Implementation
1287//===----------------------------------------------------------------------===//
// Shared constructor helper. llvm::copy fills the leading operand slots with
// \p Args; the single remaining slot (the last, per the assert) receives the
// parent pad via setParentPad().
void FuncletPadInst::init(Value *ParentPad, ArrayRef<Value *> Args,
                          const Twine &NameStr) {
  assert(getNumOperands() == 1 + Args.size() && "NumOperands not set up?");
  llvm::copy(Args, op_begin());
  setParentPad(ParentPad);
  setName(NameStr);
}
1295
1296FuncletPadInst::FuncletPadInst(const FuncletPadInst &FPI)
1297 : Instruction(FPI.getType(), FPI.getOpcode(),
1298 OperandTraits<FuncletPadInst>::op_end(this) -
1299 FPI.getNumOperands(),
1300 FPI.getNumOperands()) {
1301 std::copy(FPI.op_begin(), FPI.op_end(), op_begin());
1303}
1304
// Create a funclet pad (the concrete opcode — catchpad or cleanuppad — is
// selected by \p Op), inserted before the position \p InsertBefore.
// \p Values is the total operand count: Args.size() plus one for the parent
// pad (see the assert in init()).
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr,
                               BasicBlock::iterator InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}
1314
// Create a funclet pad inserted before \p InsertBefore. \p Values is
// Args.size() plus one for the parent pad.
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, Instruction *InsertBefore)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertBefore) {
  init(ParentPad, Args, NameStr);
}
1323
// Create a funclet pad appended to \p InsertAtEnd. \p Values is Args.size()
// plus one for the parent pad.
FuncletPadInst::FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
                               ArrayRef<Value *> Args, unsigned Values,
                               const Twine &NameStr, BasicBlock *InsertAtEnd)
    : Instruction(ParentPad->getType(), Op,
                  OperandTraits<FuncletPadInst>::op_end(this) - Values, Values,
                  InsertAtEnd) {
  init(ParentPad, Args, NameStr);
}
1332
1333//===----------------------------------------------------------------------===//
1334// UnreachableInst Implementation
1335//===----------------------------------------------------------------------===//
1336
1338 BasicBlock::iterator InsertBefore)
1339 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
1340 0, InsertBefore) {}
1342 Instruction *InsertBefore)
1343 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
1344 0, InsertBefore) {}
1346 : Instruction(Type::getVoidTy(Context), Instruction::Unreachable, nullptr,
1347 0, InsertAtEnd) {}
1348
1349//===----------------------------------------------------------------------===//
1350// BranchInst Implementation
1351//===----------------------------------------------------------------------===//
1352
1353void BranchInst::AssertOK() {
1354 if (isConditional())
1355 assert(getCondition()->getType()->isIntegerTy(1) &&
1356 "May only branch on boolean predicates!");
1357}
1358
// Create an unconditional branch to \p IfTrue, inserted before the position
// \p InsertBefore. The single operand is addressed from the end of the
// operand list (Op<-1> is the taken destination).
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}
1366
// Create an unconditional branch to \p IfTrue, inserted before
// \p InsertBefore (Op<-1> is the taken destination).
BranchInst::BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1,
                  InsertBefore) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}
1374
// Create a conditional branch, inserted before the position \p InsertBefore.
// Operand layout from the end: Op<-3> = condition, Op<-2> = false successor,
// Op<-1> = true successor.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock::iterator InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
1388
// Create a conditional branch, inserted before \p InsertBefore. Operand
// layout from the end: Op<-3> = condition, Op<-2> = false successor,
// Op<-1> = true successor.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       Instruction *InsertBefore)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3,
                  InsertBefore) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
1402
// Create an unconditional branch to \p IfTrue, appended to \p InsertAtEnd
// (Op<-1> is the taken destination).
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 1, 1, InsertAtEnd) {
  assert(IfTrue && "Branch destination may not be null!");
  Op<-1>() = IfTrue;
}
1409
// Create a conditional branch, appended to \p InsertAtEnd. Operand layout
// from the end: Op<-3> = condition, Op<-2> = false successor,
// Op<-1> = true successor.
BranchInst::BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
                       BasicBlock *InsertAtEnd)
    : Instruction(Type::getVoidTy(IfTrue->getContext()), Instruction::Br,
                  OperandTraits<BranchInst>::op_end(this) - 3, 3, InsertAtEnd) {
  // Assign in order of operand index to make use-list order predictable.
  Op<-3>() = Cond;
  Op<-2>() = IfFalse;
  Op<-1>() = IfTrue;
#ifndef NDEBUG
  AssertOK();
#endif
}
1422
1423BranchInst::BranchInst(const BranchInst &BI)
1424 : Instruction(Type::getVoidTy(BI.getContext()), Instruction::Br,
1425 OperandTraits<BranchInst>::op_end(this) - BI.getNumOperands(),
1426 BI.getNumOperands()) {
1427 // Assign in order of operand index to make use-list order predictable.
1428 if (BI.getNumOperands() != 1) {
1429 assert(BI.getNumOperands() == 3 && "BR can have 1 or 3 operands!");
1430 Op<-3>() = BI.Op<-3>();
1431 Op<-2>() = BI.Op<-2>();
1432 }
1433 Op<-1>() = BI.Op<-1>();
1435}
1436
1439 "Cannot swap successors of an unconditional branch");
1440 Op<-1>().swap(Op<-2>());
1441
1442 // Update profile metadata if present and it matches our structural
1443 // expectations.
1445}
1446
1447//===----------------------------------------------------------------------===//
1448// AllocaInst Implementation
1449//===----------------------------------------------------------------------===//
1450
1451static Value *getAISize(LLVMContext &Context, Value *Amt) {
1452 if (!Amt)
1453 Amt = ConstantInt::get(Type::getInt32Ty(Context), 1);
1454 else {
1455 assert(!isa<BasicBlock>(Amt) &&
1456 "Passed basic block into allocation size parameter! Use other ctor");
1457 assert(Amt->getType()->isIntegerTy() &&
1458 "Allocation array size is not an integer!");
1459 }
1460 return Amt;
1461}
1462
1464 assert(BB && "Insertion BB cannot be null when alignment not provided!");
1465 assert(BB->getParent() &&
1466 "BB must be in a Function when alignment not provided!");
1467 const DataLayout &DL = BB->getModule()->getDataLayout();
1468 return DL.getPrefTypeAlign(Ty);
1469}
1470
1472 return computeAllocaDefaultAlign(Ty, It->getParent());
1473}
1474
1476 assert(I && "Insertion position cannot be null when alignment not provided!");
1477 return computeAllocaDefaultAlign(Ty, I->getParent());
1478}
1479
// Delegating: alloca of a single element (null ArraySize).
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock::iterator InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1483
// Delegating: alloca of a single element (null ArraySize).
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertBefore) {}
1487
// Delegating: alloca of a single element (null ArraySize).
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
                       BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, /*ArraySize=*/nullptr, Name, InsertAtEnd) {}
1491
// Delegating: alignment defaults to the preferred alignment for \p Ty from
// the insertion context's DataLayout (see computeAllocaDefaultAlign).
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock::iterator InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}
1497
// Delegating: alignment defaults to the preferred alignment for \p Ty from
// the insertion context's DataLayout (see computeAllocaDefaultAlign).
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, Instruction *InsertBefore)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertBefore), Name,
                 InsertBefore) {}
1503
// Delegating: alignment defaults to the preferred alignment for \p Ty from
// the insertion context's DataLayout (see computeAllocaDefaultAlign).
AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                       const Twine &Name, BasicBlock *InsertAtEnd)
    : AllocaInst(Ty, AddrSpace, ArraySize,
                 computeAllocaDefaultAlign(Ty, InsertAtEnd), Name,
                 InsertAtEnd) {}
1509
1510AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1511 Align Align, const Twine &Name,
1512 BasicBlock::iterator InsertBefore)
1513 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1514 getAISize(Ty->getContext(), ArraySize), InsertBefore),
1515 AllocatedType(Ty) {
1517 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1518 setName(Name);
1519}
1520
1521AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1522 Align Align, const Twine &Name,
1523 Instruction *InsertBefore)
1524 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1525 getAISize(Ty->getContext(), ArraySize), InsertBefore),
1526 AllocatedType(Ty) {
1528 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1529 setName(Name);
1530}
1531
1532AllocaInst::AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
1533 Align Align, const Twine &Name, BasicBlock *InsertAtEnd)
1534 : UnaryInstruction(PointerType::get(Ty, AddrSpace), Alloca,
1535 getAISize(Ty->getContext(), ArraySize), InsertAtEnd),
1536 AllocatedType(Ty) {
1538 assert(!Ty->isVoidTy() && "Cannot allocate void!");
1539 setName(Name);
1540}
1541
1542
1544 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
1545 return !CI->isOne();
1546 return true;
1547}
1548
1549/// isStaticAlloca - Return true if this alloca is in the entry block of the
1550/// function and is a constant size. If so, the code generator will fold it
1551/// into the prolog/epilog code, so it is basically free.
1553 // Must be constant size.
1554 if (!isa<ConstantInt>(getArraySize())) return false;
1555
1556 // Must be in the entry block.
1557 const BasicBlock *Parent = getParent();
1558 return Parent->isEntryBlock() && !isUsedWithInAlloca();
1559}
1560
1561//===----------------------------------------------------------------------===//
1562// LoadInst Implementation
1563//===----------------------------------------------------------------------===//
1564
1565void LoadInst::AssertOK() {
1567 "Ptr must have pointer type.");
1568}
1569
1571 assert(BB && "Insertion BB cannot be null when alignment not provided!");
1572 assert(BB->getParent() &&
1573 "BB must be in a Function when alignment not provided!");
1574 const DataLayout &DL = BB->getModule()->getDataLayout();
1575 return DL.getABITypeAlign(Ty);
1576}
1577
1579 return computeLoadStoreDefaultAlign(Ty, It->getParent());
1580}
1581
1583 assert(I && "Insertion position cannot be null when alignment not provided!");
1584 return computeLoadStoreDefaultAlign(Ty, I->getParent());
1585}
1586
1588 BasicBlock::iterator InsertBef)
1589 : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1590
1592 Instruction *InsertBef)
1593 : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertBef) {}
1594
1596 BasicBlock *InsertAE)
1597 : LoadInst(Ty, Ptr, Name, /*isVolatile=*/false, InsertAE) {}
1598
// Delegating: alignment defaults to the ABI type alignment from the
// insertion context's DataLayout (see computeLoadStoreDefaultAlign).
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock::iterator InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1603
// Delegating: alignment defaults to the ABI type alignment from the
// insertion context's DataLayout (see computeLoadStoreDefaultAlign).
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertBef), InsertBef) {}
1608
// Delegating: alignment defaults to the ABI type alignment from the
// insertion context's DataLayout (see computeLoadStoreDefaultAlign).
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile,
               computeLoadStoreDefaultAlign(Ty, InsertAE), InsertAE) {}
1613
// Delegating: a plain (non-atomic) load in the System sync scope.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock::iterator InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}
1618
// Delegating: a plain (non-atomic) load in the System sync scope.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, Instruction *InsertBef)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertBef) {}
1623
// Delegating: a plain (non-atomic) load in the System sync scope.
LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
                   Align Align, BasicBlock *InsertAE)
    : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
               SyncScope::System, InsertAE) {}
1628
1629LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1631 BasicBlock::iterator InsertBef)
1632 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1635 setAtomic(Order, SSID);
1636 AssertOK();
1637 setName(Name);
1638}
1639
1640LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1642 Instruction *InsertBef)
1643 : UnaryInstruction(Ty, Load, Ptr, InsertBef) {
1646 setAtomic(Order, SSID);
1647 AssertOK();
1648 setName(Name);
1649}
1650
1651LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
1653 BasicBlock *InsertAE)
1654 : UnaryInstruction(Ty, Load, Ptr, InsertAE) {
1657 setAtomic(Order, SSID);
1658 AssertOK();
1659 setName(Name);
1660}
1661
1662//===----------------------------------------------------------------------===//
1663// StoreInst Implementation
1664//===----------------------------------------------------------------------===//
1665
1666void StoreInst::AssertOK() {
1667 assert(getOperand(0) && getOperand(1) && "Both operands must be non-null!");
1669 "Ptr must have pointer type!");
1670}
1671
// Delegating: non-volatile store.
StoreInst::StoreInst(Value *val, Value *addr, Instruction *InsertBefore)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1674
// Delegating: non-volatile store.
StoreInst::StoreInst(Value *val, Value *addr, BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, /*isVolatile=*/false, InsertAtEnd) {}
1677
1679 : StoreInst(val, addr, /*isVolatile=*/false, InsertBefore) {}
1680
// Delegating: alignment defaults to the ABI alignment of the stored value's
// type from the insertion context's DataLayout.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertBefore),
                InsertBefore) {}
1686
// Delegating: alignment defaults to the ABI alignment of the stored value's
// type from the insertion context's DataLayout.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), InsertAtEnd),
                InsertAtEnd) {}
1692
// Delegating: alignment defaults to the ABI alignment of the stored value's
// type. NOTE(review): this overload dereferences the iterator to use the
// Instruction* form of computeLoadStoreDefaultAlign, unlike the LoadInst
// iterator overload which passes the iterator directly — behavior looks
// equivalent; confirm the asymmetry is intentional.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
                     BasicBlock::iterator InsertBefore)
    : StoreInst(val, addr, isVolatile,
                computeLoadStoreDefaultAlign(val->getType(), &*InsertBefore),
                InsertBefore) {}
1698
// Delegating: a plain (non-atomic) store in the System sync scope.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     Instruction *InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}
1703
// Delegating: a plain (non-atomic) store in the System sync scope.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock *InsertAtEnd)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertAtEnd) {}
1708
// Delegating: a plain (non-atomic) store in the System sync scope.
StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
                     BasicBlock::iterator InsertBefore)
    : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
                SyncScope::System, InsertBefore) {}
1713
1714StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1715 AtomicOrdering Order, SyncScope::ID SSID,
1716 Instruction *InsertBefore)
1717 : Instruction(Type::getVoidTy(val->getContext()), Store,
1718 OperandTraits<StoreInst>::op_begin(this),
1719 OperandTraits<StoreInst>::operands(this), InsertBefore) {
1720 Op<0>() = val;
1721 Op<1>() = addr;
1724 setAtomic(Order, SSID);
1725 AssertOK();
1726}
1727
1728StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1729 AtomicOrdering Order, SyncScope::ID SSID,
1730 BasicBlock *InsertAtEnd)
1731 : Instruction(Type::getVoidTy(val->getContext()), Store,
1732 OperandTraits<StoreInst>::op_begin(this),
1733 OperandTraits<StoreInst>::operands(this), InsertAtEnd) {
1734 Op<0>() = val;
1735 Op<1>() = addr;
1738 setAtomic(Order, SSID);
1739 AssertOK();
1740}
1741
1742StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, Align Align,
1743 AtomicOrdering Order, SyncScope::ID SSID,
1744 BasicBlock::iterator InsertBefore)
1745 : Instruction(Type::getVoidTy(val->getContext()), Store,
1746 OperandTraits<StoreInst>::op_begin(this),
1747 OperandTraits<StoreInst>::operands(this)) {
1748 Op<0>() = val;
1749 Op<1>() = addr;
1752 setAtomic(Order, SSID);
1753 insertBefore(*InsertBefore->getParent(), InsertBefore);
1754 AssertOK();
1755}
1756
1757//===----------------------------------------------------------------------===//
1758// AtomicCmpXchgInst Implementation
1759//===----------------------------------------------------------------------===//
1760
1761void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal,
1762 Align Alignment, AtomicOrdering SuccessOrdering,
1763 AtomicOrdering FailureOrdering,
1764 SyncScope::ID SSID) {
1765 Op<0>() = Ptr;
1766 Op<1>() = Cmp;
1767 Op<2>() = NewVal;
1768 setSuccessOrdering(SuccessOrdering);
1769 setFailureOrdering(FailureOrdering);
1770 setSyncScopeID(SSID);
1771 setAlignment(Alignment);
1772
1773 assert(getOperand(0) && getOperand(1) && getOperand(2) &&
1774 "All operands must be non-null!");
1776 "Ptr must have pointer type!");
1777 assert(getOperand(1)->getType() == getOperand(2)->getType() &&
1778 "Cmp type and NewVal type must be same!");
1779}
1780
1782 Align Alignment,
1783 AtomicOrdering SuccessOrdering,
1784 AtomicOrdering FailureOrdering,
1785 SyncScope::ID SSID,
1786 BasicBlock::iterator InsertBefore)
1787 : Instruction(
1788 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1789 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1790 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
1791 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1792}
1793
1795 Align Alignment,
1796 AtomicOrdering SuccessOrdering,
1797 AtomicOrdering FailureOrdering,
1798 SyncScope::ID SSID,
1799 Instruction *InsertBefore)
1800 : Instruction(
1801 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1802 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1803 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertBefore) {
1804 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1805}
1806
1808 Align Alignment,
1809 AtomicOrdering SuccessOrdering,
1810 AtomicOrdering FailureOrdering,
1811 SyncScope::ID SSID,
1812 BasicBlock *InsertAtEnd)
1813 : Instruction(
1814 StructType::get(Cmp->getType(), Type::getInt1Ty(Cmp->getContext())),
1815 AtomicCmpXchg, OperandTraits<AtomicCmpXchgInst>::op_begin(this),
1816 OperandTraits<AtomicCmpXchgInst>::operands(this), InsertAtEnd) {
1817 Init(Ptr, Cmp, NewVal, Alignment, SuccessOrdering, FailureOrdering, SSID);
1818}
1819
1820//===----------------------------------------------------------------------===//
1821// AtomicRMWInst Implementation
1822//===----------------------------------------------------------------------===//
1823
1824void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val,
1825 Align Alignment, AtomicOrdering Ordering,
1826 SyncScope::ID SSID) {
1827 assert(Ordering != AtomicOrdering::NotAtomic &&
1828 "atomicrmw instructions can only be atomic.");
1829 assert(Ordering != AtomicOrdering::Unordered &&
1830 "atomicrmw instructions cannot be unordered.");
1831 Op<0>() = Ptr;
1832 Op<1>() = Val;
1834 setOrdering(Ordering);
1835 setSyncScopeID(SSID);
1836 setAlignment(Alignment);
1837
1838 assert(getOperand(0) && getOperand(1) &&
1839 "All operands must be non-null!");
1841 "Ptr must have pointer type!");
1842 assert(Ordering != AtomicOrdering::NotAtomic &&
1843 "AtomicRMW instructions must be atomic!");
1844}
1845
1847 Align Alignment, AtomicOrdering Ordering,
1848 SyncScope::ID SSID,
1849 BasicBlock::iterator InsertBefore)
1850 : Instruction(Val->getType(), AtomicRMW,
1851 OperandTraits<AtomicRMWInst>::op_begin(this),
1852 OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
1853 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1854}
1855
1857 Align Alignment, AtomicOrdering Ordering,
1858 SyncScope::ID SSID, Instruction *InsertBefore)
1859 : Instruction(Val->getType(), AtomicRMW,
1860 OperandTraits<AtomicRMWInst>::op_begin(this),
1861 OperandTraits<AtomicRMWInst>::operands(this), InsertBefore) {
1862 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1863}
1864
1866 Align Alignment, AtomicOrdering Ordering,
1867 SyncScope::ID SSID, BasicBlock *InsertAtEnd)
1868 : Instruction(Val->getType(), AtomicRMW,
1869 OperandTraits<AtomicRMWInst>::op_begin(this),
1870 OperandTraits<AtomicRMWInst>::operands(this), InsertAtEnd) {
1871 Init(Operation, Ptr, Val, Alignment, Ordering, SSID);
1872}
1873
1875 switch (Op) {
1877 return "xchg";
1878 case AtomicRMWInst::Add:
1879 return "add";
1880 case AtomicRMWInst::Sub:
1881 return "sub";
1882 case AtomicRMWInst::And:
1883 return "and";
1885 return "nand";
1886 case AtomicRMWInst::Or:
1887 return "or";
1888 case AtomicRMWInst::Xor:
1889 return "xor";
1890 case AtomicRMWInst::Max:
1891 return "max";
1892 case AtomicRMWInst::Min:
1893 return "min";
1895 return "umax";
1897 return "umin";
1899 return "fadd";
1901 return "fsub";
1903 return "fmax";
1905 return "fmin";
1907 return "uinc_wrap";
1909 return "udec_wrap";
1911 return "<invalid operation>";
1912 }
1913
1914 llvm_unreachable("invalid atomicrmw operation");
1915}
1916
1917//===----------------------------------------------------------------------===//
1918// FenceInst Implementation
1919//===----------------------------------------------------------------------===//
1920
1922 SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
1923 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
1924 setOrdering(Ordering);
1925 setSyncScopeID(SSID);
1926}
1927
1929 SyncScope::ID SSID,
1930 Instruction *InsertBefore)
1931 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
1932 setOrdering(Ordering);
1933 setSyncScopeID(SSID);
1934}
1935
1937 SyncScope::ID SSID,
1938 BasicBlock *InsertAtEnd)
1939 : Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
1940 setOrdering(Ordering);
1941 setSyncScopeID(SSID);
1942}
1943
1944//===----------------------------------------------------------------------===//
1945// GetElementPtrInst Implementation
1946//===----------------------------------------------------------------------===//
1947
1948void GetElementPtrInst::init(Value *Ptr, ArrayRef<Value *> IdxList,
1949 const Twine &Name) {
1950 assert(getNumOperands() == 1 + IdxList.size() &&
1951 "NumOperands not initialized?");
1952 Op<0>() = Ptr;
1953 llvm::copy(IdxList, op_begin() + 1);
1954 setName(Name);
1955}
1956
1957GetElementPtrInst::GetElementPtrInst(const GetElementPtrInst &GEPI)
1958 : Instruction(GEPI.getType(), GetElementPtr,
1959 OperandTraits<GetElementPtrInst>::op_end(this) -
1960 GEPI.getNumOperands(),
1961 GEPI.getNumOperands()),
1962 SourceElementType(GEPI.SourceElementType),
1963 ResultElementType(GEPI.ResultElementType) {
1964 std::copy(GEPI.op_begin(), GEPI.op_end(), op_begin());
1966}
1967
1969 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1970 if (!Struct->indexValid(Idx))
1971 return nullptr;
1972 return Struct->getTypeAtIndex(Idx);
1973 }
1974 if (!Idx->getType()->isIntOrIntVectorTy())
1975 return nullptr;
1976 if (auto *Array = dyn_cast<ArrayType>(Ty))
1977 return Array->getElementType();
1978 if (auto *Vector = dyn_cast<VectorType>(Ty))
1979 return Vector->getElementType();
1980 return nullptr;
1981}
1982
1984 if (auto *Struct = dyn_cast<StructType>(Ty)) {
1985 if (Idx >= Struct->getNumElements())
1986 return nullptr;
1987 return Struct->getElementType(Idx);
1988 }
1989 if (auto *Array = dyn_cast<ArrayType>(Ty))
1990 return Array->getElementType();
1991 if (auto *Vector = dyn_cast<VectorType>(Ty))
1992 return Vector->getElementType();
1993 return nullptr;
1994}
1995
1996template <typename IndexTy>
1998 if (IdxList.empty())
1999 return Ty;
2000 for (IndexTy V : IdxList.slice(1)) {
2002 if (!Ty)
2003 return Ty;
2004 }
2005 return Ty;
2006}
2007
2009 return getIndexedTypeInternal(Ty, IdxList);
2010}
2011
2013 ArrayRef<Constant *> IdxList) {
2014 return getIndexedTypeInternal(Ty, IdxList);
2015}
2016
2018 return getIndexedTypeInternal(Ty, IdxList);
2019}
2020
2021/// hasAllZeroIndices - Return true if all of the indices of this GEP are
 2022/// zeros. If so, the result pointer and the first operand have the same
 2023/// value, just potentially different types.
 // Operand 0 is the base pointer; only operands 1..N are indices.
 2025 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
 2026 if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(i))) {
 2027 if (!CI->isZero()) return false;
 2028 } else {
 // A non-constant index cannot be proven zero.
 2029 return false;
 2030 }
 2031 }
 2032 return true;
 2033}
2034
2035/// hasAllConstantIndices - Return true if all of the indices of this GEP are
 2036/// constant integers. If so, the result pointer and the first operand have
 2037/// a constant offset between them.
 // Skip operand 0 (the base pointer); only operands 1..N are indices.
 2039 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
 2040 if (!isa<ConstantInt>(getOperand(i)))
 2041 return false;
 2042 }
 2043 return true;
 2044}
2045
2047 cast<GEPOperator>(this)->setIsInBounds(B);
2048}
2049
2051 return cast<GEPOperator>(this)->isInBounds();
2052}
2053
2055 APInt &Offset) const {
2056 // Delegate to the generic GEPOperator implementation.
2057 return cast<GEPOperator>(this)->accumulateConstantOffset(DL, Offset);
2058}
2059
2061 const DataLayout &DL, unsigned BitWidth,
2062 MapVector<Value *, APInt> &VariableOffsets,
2063 APInt &ConstantOffset) const {
2064 // Delegate to the generic GEPOperator implementation.
2065 return cast<GEPOperator>(this)->collectOffset(DL, BitWidth, VariableOffsets,
2066 ConstantOffset);
2067}
2068
2069//===----------------------------------------------------------------------===//
2070// ExtractElementInst Implementation
2071//===----------------------------------------------------------------------===//
2072
// Construct an extractelement inserted before the given iterator position.
// The result type is the source vector's element type.
2073ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
 2074 const Twine &Name,
 2075 BasicBlock::iterator InsertBef)
 2076 : Instruction(
 2077 cast<VectorType>(Val->getType())->getElementType(), ExtractElement,
 2078 OperandTraits<ExtractElementInst>::op_begin(this), 2, InsertBef) {
 2079 assert(isValidOperands(Val, Index) &&
 2080 "Invalid extractelement instruction operands!");
 2081 Op<0>() = Val;
 2082 Op<1>() = Index;
 2083 setName(Name);
 2084}
2085
// Construct an extractelement inserted before the given instruction.
// The result type is the source vector's element type.
2086ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
 2087 const Twine &Name,
 2088 Instruction *InsertBef)
 2089 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
 2090 ExtractElement,
 2091 OperandTraits<ExtractElementInst>::op_begin(this),
 2092 2, InsertBef) {
 2093 assert(isValidOperands(Val, Index) &&
 2094 "Invalid extractelement instruction operands!");
 2095 Op<0>() = Val;
 2096 Op<1>() = Index;
 2097 setName(Name);
 2098}
2099
// Construct an extractelement appended to the end of the given basic block.
// The result type is the source vector's element type.
2100ExtractElementInst::ExtractElementInst(Value *Val, Value *Index,
 2101 const Twine &Name,
 2102 BasicBlock *InsertAE)
 2103 : Instruction(cast<VectorType>(Val->getType())->getElementType(),
 2104 ExtractElement,
 2105 OperandTraits<ExtractElementInst>::op_begin(this),
 2106 2, InsertAE) {
 2107 assert(isValidOperands(Val, Index) &&
 2108 "Invalid extractelement instruction operands!");
 2109
 2110 Op<0>() = Val;
 2111 Op<1>() = Index;
 2112 setName(Name);
 2113}
2114
 // Any vector operand with any integer-typed index is structurally valid;
 // the index value itself is not range-checked here.
 2116 if (!Val->getType()->isVectorTy() || !Index->getType()->isIntegerTy())
 2117 return false;
 2118 return true;
 2119}
2120
2121//===----------------------------------------------------------------------===//
2122// InsertElementInst Implementation
2123//===----------------------------------------------------------------------===//
2124
// Construct an insertelement inserted before the given iterator position.
// The result type is the same vector type as the Vec operand.
2125InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
 2126 const Twine &Name,
 2127 BasicBlock::iterator InsertBef)
 2128 : Instruction(Vec->getType(), InsertElement,
 2129 OperandTraits<InsertElementInst>::op_begin(this), 3,
 2130 InsertBef) {
 2131 assert(isValidOperands(Vec, Elt, Index) &&
 2132 "Invalid insertelement instruction operands!");
 2133 Op<0>() = Vec;
 2134 Op<1>() = Elt;
 2135 Op<2>() = Index;
 2136 setName(Name);
 2137}
2138
// Construct an insertelement inserted before the given instruction.
// The result type is the same vector type as the Vec operand.
2139InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
 2140 const Twine &Name,
 2141 Instruction *InsertBef)
 2142 : Instruction(Vec->getType(), InsertElement,
 2143 OperandTraits<InsertElementInst>::op_begin(this),
 2144 3, InsertBef) {
 2145 assert(isValidOperands(Vec, Elt, Index) &&
 2146 "Invalid insertelement instruction operands!");
 2147 Op<0>() = Vec;
 2148 Op<1>() = Elt;
 2149 Op<2>() = Index;
 2150 setName(Name);
 2151}
2152
// Construct an insertelement appended to the end of the given basic block.
// The result type is the same vector type as the Vec operand.
2153InsertElementInst::InsertElementInst(Value *Vec, Value *Elt, Value *Index,
 2154 const Twine &Name,
 2155 BasicBlock *InsertAE)
 2156 : Instruction(Vec->getType(), InsertElement,
 2157 OperandTraits<InsertElementInst>::op_begin(this),
 2158 3, InsertAE) {
 2159 assert(isValidOperands(Vec, Elt, Index) &&
 2160 "Invalid insertelement instruction operands!");
 2161
 2162 Op<0>() = Vec;
 2163 Op<1>() = Elt;
 2164 Op<2>() = Index;
 2165 setName(Name);
 2166}
2167
 2169 const Value *Index) {
 2170 if (!Vec->getType()->isVectorTy())
 2171 return false; // First operand of insertelement must be vector type.
 2172
 2173 if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
 2174 return false;// Second operand of insertelement must be vector element type.
 2175
 // The check accepts any integer width, not just i32.
 2176 if (!Index->getType()->isIntegerTy())
 2177 return false; // Third operand of insertelement must be an integer type.
 2178 return true;
 2179}
2180
2181//===----------------------------------------------------------------------===//
2182// ShuffleVectorInst Implementation
2183//===----------------------------------------------------------------------===//
2184
2186 assert(V && "Cannot create placeholder of nullptr V");
2187 return PoisonValue::get(V->getType());
2188}
2189
2191 BasicBlock::iterator InsertBefore)
2193 InsertBefore) {}
2194
2196 Instruction *InsertBefore)
2198 InsertBefore) {}
2199
2201 BasicBlock *InsertAtEnd)
2203 InsertAtEnd) {}
2204
2206 const Twine &Name,
2207 BasicBlock::iterator InsertBefore)
2209 InsertBefore) {}
2210
2212 const Twine &Name,
2213 Instruction *InsertBefore)
2215 InsertBefore) {}
2216
2218 const Twine &Name, BasicBlock *InsertAtEnd)
2220 InsertAtEnd) {}
2221
2223 const Twine &Name,
2224 BasicBlock::iterator InsertBefore)
2225 : Instruction(
2226 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2227 cast<VectorType>(Mask->getType())->getElementCount()),
2228 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2229 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2230 assert(isValidOperands(V1, V2, Mask) &&
2231 "Invalid shuffle vector instruction operands!");
2232
2233 Op<0>() = V1;
2234 Op<1>() = V2;
2235 SmallVector<int, 16> MaskArr;
2236 getShuffleMask(cast<Constant>(Mask), MaskArr);
2237 setShuffleMask(MaskArr);
2238 setName(Name);
2239}
2240
2242 const Twine &Name,
2243 Instruction *InsertBefore)
2244 : Instruction(
2245 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2246 cast<VectorType>(Mask->getType())->getElementCount()),
2247 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2248 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2249 assert(isValidOperands(V1, V2, Mask) &&
2250 "Invalid shuffle vector instruction operands!");
2251
2252 Op<0>() = V1;
2253 Op<1>() = V2;
2254 SmallVector<int, 16> MaskArr;
2255 getShuffleMask(cast<Constant>(Mask), MaskArr);
2256 setShuffleMask(MaskArr);
2257 setName(Name);
2258}
2259
2261 const Twine &Name, BasicBlock *InsertAtEnd)
2262 : Instruction(
2263 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2264 cast<VectorType>(Mask->getType())->getElementCount()),
2265 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2266 OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
2267 assert(isValidOperands(V1, V2, Mask) &&
2268 "Invalid shuffle vector instruction operands!");
2269
2270 Op<0>() = V1;
2271 Op<1>() = V2;
2272 SmallVector<int, 16> MaskArr;
2273 getShuffleMask(cast<Constant>(Mask), MaskArr);
2274 setShuffleMask(MaskArr);
2275 setName(Name);
2276}
2277
2279 const Twine &Name,
2280 BasicBlock::iterator InsertBefore)
2281 : Instruction(
2282 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2283 Mask.size(), isa<ScalableVectorType>(V1->getType())),
2284 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2285 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2286 assert(isValidOperands(V1, V2, Mask) &&
2287 "Invalid shuffle vector instruction operands!");
2288 Op<0>() = V1;
2289 Op<1>() = V2;
2290 setShuffleMask(Mask);
2291 setName(Name);
2292}
2293
2295 const Twine &Name,
2296 Instruction *InsertBefore)
2297 : Instruction(
2298 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2299 Mask.size(), isa<ScalableVectorType>(V1->getType())),
2300 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2301 OperandTraits<ShuffleVectorInst>::operands(this), InsertBefore) {
2302 assert(isValidOperands(V1, V2, Mask) &&
2303 "Invalid shuffle vector instruction operands!");
2304 Op<0>() = V1;
2305 Op<1>() = V2;
2306 setShuffleMask(Mask);
2307 setName(Name);
2308}
2309
2311 const Twine &Name, BasicBlock *InsertAtEnd)
2312 : Instruction(
2313 VectorType::get(cast<VectorType>(V1->getType())->getElementType(),
2314 Mask.size(), isa<ScalableVectorType>(V1->getType())),
2315 ShuffleVector, OperandTraits<ShuffleVectorInst>::op_begin(this),
2316 OperandTraits<ShuffleVectorInst>::operands(this), InsertAtEnd) {
2317 assert(isValidOperands(V1, V2, Mask) &&
2318 "Invalid shuffle vector instruction operands!");
2319
2320 Op<0>() = V1;
2321 Op<1>() = V2;
2322 setShuffleMask(Mask);
2323 setName(Name);
2324}
2325
 // Swap the two source operands and remap each in-range mask element to the
 // other operand, so the shuffle's result is unchanged.
 2327 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
 2328 int NumMaskElts = ShuffleMask.size();
 2329 SmallVector<int, 16> NewMask(NumMaskElts);
 2330 for (int i = 0; i != NumMaskElts; ++i) {
 2331 int MaskElt = getMaskValue(i);
 2332 if (MaskElt == PoisonMaskElem) {
 2333 NewMask[i] = PoisonMaskElem;
 2334 continue;
 2335 }
 2336 assert(MaskElt >= 0 && MaskElt < 2 * NumOpElts && "Out-of-range mask");
 // Elements selecting from the first operand now select from the second,
 // and vice versa.
 2337 MaskElt = (MaskElt < NumOpElts) ? MaskElt + NumOpElts : MaskElt - NumOpElts;
 2338 NewMask[i] = MaskElt;
 2339 }
 2340 setShuffleMask(NewMask);
 2341 Op<0>().swap(Op<1>());
 2342}
2343
2345 ArrayRef<int> Mask) {
2346 // V1 and V2 must be vectors of the same type.
2347 if (!isa<VectorType>(V1->getType()) || V1->getType() != V2->getType())
2348 return false;
2349
2350 // Make sure the mask elements make sense.
2351 int V1Size =
2352 cast<VectorType>(V1->getType())->getElementCount().getKnownMinValue();
2353 for (int Elem : Mask)
2354 if (Elem != PoisonMaskElem && Elem >= V1Size * 2)
2355 return false;
2356
2357 if (isa<ScalableVectorType>(V1->getType()))
2358 if ((Mask[0] != 0 && Mask[0] != PoisonMaskElem) || !all_equal(Mask))
2359 return false;
2360
2361 return true;
2362}
2363
 2365 const Value *Mask) {
 // Accepts the mask forms the IR can represent: undef/zeroinitializer, a
 // ConstantVector of i32 immediates / undefs, or a ConstantDataSequential,
 // with every defined element indexing into the concatenation of the two
 // sources (i.e. < 2 * source length).
 2366 // V1 and V2 must be vectors of the same type.
 2367 if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
 2368 return false;
 2369
 2370 // Mask must be vector of i32, and must be the same kind of vector as the
 2371 // input vectors
 2372 auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
 2373 if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32) ||
 2374 isa<ScalableVectorType>(MaskTy) != isa<ScalableVectorType>(V1->getType()))
 2375 return false;
 2376
 2377 // Check to see if Mask is valid.
 2378 if (isa<UndefValue>(Mask) || isa<ConstantAggregateZero>(Mask))
 2379 return true;
 2380
 2381 if (const auto *MV = dyn_cast<ConstantVector>(Mask)) {
 2382 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
 2383 for (Value *Op : MV->operands()) {
 2384 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
 2385 if (CI->uge(V1Size*2))
 2386 return false;
 2387 } else if (!isa<UndefValue>(Op)) {
 2388 return false;
 2389 }
 2390 }
 2391 return true;
 2392 }
 2393
 2394 if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
 2395 unsigned V1Size = cast<FixedVectorType>(V1->getType())->getNumElements();
 2396 for (unsigned i = 0, e = cast<FixedVectorType>(MaskTy)->getNumElements();
 2397 i != e; ++i)
 2398 if (CDS->getElementAsInteger(i) >= V1Size*2)
 2399 return false;
 2400 return true;
 2401 }
 2402
 2403 return false;
 2404}
2405
 2407 SmallVectorImpl<int> &Result) {
 // Expand a constant shuffle mask into a vector of ints, using -1 for
 // undef/poison lanes.
 2408 ElementCount EC = cast<VectorType>(Mask->getType())->getElementCount();
 2409
 2410 if (isa<ConstantAggregateZero>(Mask)) {
 2411 Result.resize(EC.getKnownMinValue(), 0);
 2412 return;
 2413 }
 2414
 2415 Result.reserve(EC.getKnownMinValue());
 2416
 // Scalable masks are restricted to all-zero or all-undef splats.
 2417 if (EC.isScalable()) {
 2418 assert((isa<ConstantAggregateZero>(Mask) || isa<UndefValue>(Mask)) &&
 2419 "Scalable vector shuffle mask must be undef or zeroinitializer");
 2420 int MaskVal = isa<UndefValue>(Mask) ? -1 : 0;
 2421 for (unsigned I = 0; I < EC.getKnownMinValue(); ++I)
 2422 Result.emplace_back(MaskVal);
 2423 return;
 2424 }
 2425
 2426 unsigned NumElts = EC.getKnownMinValue();
 2427
 2428 if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
 2429 for (unsigned i = 0; i != NumElts; ++i)
 2430 Result.push_back(CDS->getElementAsInteger(i));
 2431 return;
 2432 }
 // General case: read each aggregate element individually.
 2433 for (unsigned i = 0; i != NumElts; ++i) {
 2434 Constant *C = Mask->getAggregateElement(i);
 2435 Result.push_back(isa<UndefValue>(C) ? -1 :
 2436 cast<ConstantInt>(C)->getZExtValue());
 2437 }
 2438}
2439
2441 ShuffleMask.assign(Mask.begin(), Mask.end());
2442 ShuffleMaskForBitcode = convertShuffleMaskForBitcode(Mask, getType());
2443}
2444
2446 Type *ResultTy) {
2447 Type *Int32Ty = Type::getInt32Ty(ResultTy->getContext());
2448 if (isa<ScalableVectorType>(ResultTy)) {
2449 assert(all_equal(Mask) && "Unexpected shuffle");
2450 Type *VecTy = VectorType::get(Int32Ty, Mask.size(), true);
2451 if (Mask[0] == 0)
2452 return Constant::getNullValue(VecTy);
2453 return UndefValue::get(VecTy);
2454 }
2456 for (int Elem : Mask) {
2457 if (Elem == PoisonMaskElem)
2459 else
2460 MaskConst.push_back(ConstantInt::get(Int32Ty, Elem));
2461 }
2462 return ConstantVector::get(MaskConst);
2463}
2464
2465static bool isSingleSourceMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2466 assert(!Mask.empty() && "Shuffle mask must contain elements");
2467 bool UsesLHS = false;
2468 bool UsesRHS = false;
2469 for (int I : Mask) {
2470 if (I == -1)
2471 continue;
2472 assert(I >= 0 && I < (NumOpElts * 2) &&
2473 "Out-of-bounds shuffle mask element");
2474 UsesLHS |= (I < NumOpElts);
2475 UsesRHS |= (I >= NumOpElts);
2476 if (UsesLHS && UsesRHS)
2477 return false;
2478 }
2479 // Allow for degenerate case: completely undef mask means neither source is used.
2480 return UsesLHS || UsesRHS;
2481}
2482
2484 // We don't have vector operand size information, so assume operands are the
2485 // same size as the mask.
2486 return isSingleSourceMaskImpl(Mask, NumSrcElts);
2487}
2488
2489static bool isIdentityMaskImpl(ArrayRef<int> Mask, int NumOpElts) {
2490 if (!isSingleSourceMaskImpl(Mask, NumOpElts))
2491 return false;
2492 for (int i = 0, NumMaskElts = Mask.size(); i < NumMaskElts; ++i) {
2493 if (Mask[i] == -1)
2494 continue;
2495 if (Mask[i] != i && Mask[i] != (NumOpElts + i))
2496 return false;
2497 }
2498 return true;
2499}
2500
2502 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2503 return false;
2504 // We don't have vector operand size information, so assume operands are the
2505 // same size as the mask.
2506 return isIdentityMaskImpl(Mask, NumSrcElts);
2507}
2508
2510 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2511 return false;
2512 if (!isSingleSourceMask(Mask, NumSrcElts))
2513 return false;
2514
2515 // The number of elements in the mask must be at least 2.
2516 if (NumSrcElts < 2)
2517 return false;
2518
2519 for (int I = 0, E = Mask.size(); I < E; ++I) {
2520 if (Mask[I] == -1)
2521 continue;
2522 if (Mask[I] != (NumSrcElts - 1 - I) &&
2523 Mask[I] != (NumSrcElts + NumSrcElts - 1 - I))
2524 return false;
2525 }
2526 return true;
2527}
2528
2530 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2531 return false;
2532 if (!isSingleSourceMask(Mask, NumSrcElts))
2533 return false;
2534 for (int I = 0, E = Mask.size(); I < E; ++I) {
2535 if (Mask[I] == -1)
2536 continue;
2537 if (Mask[I] != 0 && Mask[I] != NumSrcElts)
2538 return false;
2539 }
2540 return true;
2541}
2542
2544 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2545 return false;
2546 // Select is differentiated from identity. It requires using both sources.
2547 if (isSingleSourceMask(Mask, NumSrcElts))
2548 return false;
2549 for (int I = 0, E = Mask.size(); I < E; ++I) {
2550 if (Mask[I] == -1)
2551 continue;
2552 if (Mask[I] != I && Mask[I] != (NumSrcElts + I))
2553 return false;
2554 }
2555 return true;
2556}
2557
2559 // Example masks that will return true:
2560 // v1 = <a, b, c, d>
2561 // v2 = <e, f, g, h>
2562 // trn1 = shufflevector v1, v2 <0, 4, 2, 6> = <a, e, c, g>
2563 // trn2 = shufflevector v1, v2 <1, 5, 3, 7> = <b, f, d, h>
2564
2565 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2566 return false;
2567 // 1. The number of elements in the mask must be a power-of-2 and at least 2.
2568 int Sz = Mask.size();
2569 if (Sz < 2 || !isPowerOf2_32(Sz))
2570 return false;
2571
2572 // 2. The first element of the mask must be either a 0 or a 1.
2573 if (Mask[0] != 0 && Mask[0] != 1)
2574 return false;
2575
2576 // 3. The difference between the first 2 elements must be equal to the
2577 // number of elements in the mask.
2578 if ((Mask[1] - Mask[0]) != NumSrcElts)
2579 return false;
2580
2581 // 4. The difference between consecutive even-numbered and odd-numbered
2582 // elements must be equal to 2.
2583 for (int I = 2; I < Sz; ++I) {
2584 int MaskEltVal = Mask[I];
2585 if (MaskEltVal == -1)
2586 return false;
2587 int MaskEltPrevVal = Mask[I - 2];
2588 if (MaskEltVal - MaskEltPrevVal != 2)
2589 return false;
2590 }
2591 return true;
2592}
2593
2595 int &Index) {
2596 if (Mask.size() != static_cast<unsigned>(NumSrcElts))
2597 return false;
2598 // Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2599 int StartIndex = -1;
2600 for (int I = 0, E = Mask.size(); I != E; ++I) {
2601 int MaskEltVal = Mask[I];
2602 if (MaskEltVal == -1)
2603 continue;
2604
2605 if (StartIndex == -1) {
2606 // Don't support a StartIndex that begins in the second input, or if the
2607 // first non-undef index would access below the StartIndex.
2608 if (MaskEltVal < I || NumSrcElts <= (MaskEltVal - I))
2609 return false;
2610
2611 StartIndex = MaskEltVal - I;
2612 continue;
2613 }
2614
2615 // Splice is sequential starting from StartIndex.
2616 if (MaskEltVal != (StartIndex + I))
2617 return false;
2618 }
2619
2620 if (StartIndex == -1)
2621 return false;
2622
2623 // NOTE: This accepts StartIndex == 0 (COPY).
2624 Index = StartIndex;
2625 return true;
2626}
2627
2629 int NumSrcElts, int &Index) {
2630 // Must extract from a single source.
2631 if (!isSingleSourceMaskImpl(Mask, NumSrcElts))
2632 return false;
2633
2634 // Must be smaller (else this is an Identity shuffle).
2635 if (NumSrcElts <= (int)Mask.size())
2636 return false;
2637
2638 // Find start of extraction, accounting that we may start with an UNDEF.
2639 int SubIndex = -1;
2640 for (int i = 0, e = Mask.size(); i != e; ++i) {
2641 int M = Mask[i];
2642 if (M < 0)
2643 continue;
2644 int Offset = (M % NumSrcElts) - i;
2645 if (0 <= SubIndex && SubIndex != Offset)
2646 return false;
2647 SubIndex = Offset;
2648 }
2649
2650 if (0 <= SubIndex && SubIndex + (int)Mask.size() <= NumSrcElts) {
2651 Index = SubIndex;
2652 return true;
2653 }
2654 return false;
2655}
2656
2658 int NumSrcElts, int &NumSubElts,
2659 int &Index) {
2660 int NumMaskElts = Mask.size();
2661
2662 // Don't try to match if we're shuffling to a smaller size.
2663 if (NumMaskElts < NumSrcElts)
2664 return false;
2665
2666 // TODO: We don't recognize self-insertion/widening.
2667 if (isSingleSourceMaskImpl(Mask, NumSrcElts))
2668 return false;
2669
2670 // Determine which mask elements are attributed to which source.
2671 APInt UndefElts = APInt::getZero(NumMaskElts);
2672 APInt Src0Elts = APInt::getZero(NumMaskElts);
2673 APInt Src1Elts = APInt::getZero(NumMaskElts);
2674 bool Src0Identity = true;
2675 bool Src1Identity = true;
2676
2677 for (int i = 0; i != NumMaskElts; ++i) {
2678 int M = Mask[i];
2679 if (M < 0) {
2680 UndefElts.setBit(i);
2681 continue;
2682 }
2683 if (M < NumSrcElts) {
2684 Src0Elts.setBit(i);
2685 Src0Identity &= (M == i);
2686 continue;
2687 }
2688 Src1Elts.setBit(i);
2689 Src1Identity &= (M == (i + NumSrcElts));
2690 }
2691 assert((Src0Elts | Src1Elts | UndefElts).isAllOnes() &&
2692 "unknown shuffle elements");
2693 assert(!Src0Elts.isZero() && !Src1Elts.isZero() &&
2694 "2-source shuffle not found");
2695
2696 // Determine lo/hi span ranges.
2697 // TODO: How should we handle undefs at the start of subvector insertions?
2698 int Src0Lo = Src0Elts.countr_zero();
2699 int Src1Lo = Src1Elts.countr_zero();
2700 int Src0Hi = NumMaskElts - Src0Elts.countl_zero();
2701 int Src1Hi = NumMaskElts - Src1Elts.countl_zero();
2702
2703 // If src0 is in place, see if the src1 elements is inplace within its own
2704 // span.
2705 if (Src0Identity) {
2706 int NumSub1Elts = Src1Hi - Src1Lo;
2707 ArrayRef<int> Sub1Mask = Mask.slice(Src1Lo, NumSub1Elts);
2708 if (isIdentityMaskImpl(Sub1Mask, NumSrcElts)) {
2709 NumSubElts = NumSub1Elts;
2710 Index = Src1Lo;
2711 return true;
2712 }
2713 }
2714
2715 // If src1 is in place, see if the src0 elements is inplace within its own
2716 // span.
2717 if (Src1Identity) {
2718 int NumSub0Elts = Src0Hi - Src0Lo;
2719 ArrayRef<int> Sub0Mask = Mask.slice(Src0Lo, NumSub0Elts);
2720 if (isIdentityMaskImpl(Sub0Mask, NumSrcElts)) {
2721 NumSubElts = NumSub0Elts;
2722 Index = Src0Lo;
2723 return true;
2724 }
2725 }
2726
2727 return false;
2728}
2729
2731 // FIXME: Not currently possible to express a shuffle mask for a scalable
2732 // vector for this case.
2733 if (isa<ScalableVectorType>(getType()))
2734 return false;
2735
2736 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2737 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2738 if (NumMaskElts <= NumOpElts)
2739 return false;
2740
2741 // The first part of the mask must choose elements from exactly 1 source op.
2743 if (!isIdentityMaskImpl(Mask, NumOpElts))
2744 return false;
2745
2746 // All extending must be with undef elements.
2747 for (int i = NumOpElts; i < NumMaskElts; ++i)
2748 if (Mask[i] != -1)
2749 return false;
2750
2751 return true;
2752}
2753
2755 // FIXME: Not currently possible to express a shuffle mask for a scalable
2756 // vector for this case.
2757 if (isa<ScalableVectorType>(getType()))
2758 return false;
2759
2760 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2761 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2762 if (NumMaskElts >= NumOpElts)
2763 return false;
2764
2765 return isIdentityMaskImpl(getShuffleMask(), NumOpElts);
2766}
2767
2769 // Vector concatenation is differentiated from identity with padding.
2770 if (isa<UndefValue>(Op<0>()) || isa<UndefValue>(Op<1>()))
2771 return false;
2772
2773 // FIXME: Not currently possible to express a shuffle mask for a scalable
2774 // vector for this case.
2775 if (isa<ScalableVectorType>(getType()))
2776 return false;
2777
2778 int NumOpElts = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2779 int NumMaskElts = cast<FixedVectorType>(getType())->getNumElements();
2780 if (NumMaskElts != NumOpElts * 2)
2781 return false;
2782
2783 // Use the mask length rather than the operands' vector lengths here. We
2784 // already know that the shuffle returns a vector twice as long as the inputs,
2785 // and neither of the inputs are undef vectors. If the mask picks consecutive
2786 // elements from both inputs, then this is a concatenation of the inputs.
2787 return isIdentityMaskImpl(getShuffleMask(), NumMaskElts);
2788}
2789
2791 int ReplicationFactor, int VF) {
2792 assert(Mask.size() == (unsigned)ReplicationFactor * VF &&
2793 "Unexpected mask size.");
2794
2795 for (int CurrElt : seq(VF)) {
2796 ArrayRef<int> CurrSubMask = Mask.take_front(ReplicationFactor);
2797 assert(CurrSubMask.size() == (unsigned)ReplicationFactor &&
2798 "Run out of mask?");
2799 Mask = Mask.drop_front(ReplicationFactor);
2800 if (!all_of(CurrSubMask, [CurrElt](int MaskElt) {
2801 return MaskElt == PoisonMaskElem || MaskElt == CurrElt;
2802 }))
2803 return false;
2804 }
2805 assert(Mask.empty() && "Did not consume the whole mask?");
2806
2807 return true;
2808}
2809
2811 int &ReplicationFactor, int &VF) {
2812 // undef-less case is trivial.
2813 if (!llvm::is_contained(Mask, PoisonMaskElem)) {
2814 ReplicationFactor =
2815 Mask.take_while([](int MaskElt) { return MaskElt == 0; }).size();
2816 if (ReplicationFactor == 0 || Mask.size() % ReplicationFactor != 0)
2817 return false;
2818 VF = Mask.size() / ReplicationFactor;
2819 return isReplicationMaskWithParams(Mask, ReplicationFactor, VF);
2820 }
2821
2822 // However, if the mask contains undef's, we have to enumerate possible tuples
2823 // and pick one. There are bounds on replication factor: [1, mask size]
2824 // (where RF=1 is an identity shuffle, RF=mask size is a broadcast shuffle)
2825 // Additionally, mask size is a replication factor multiplied by vector size,
2826 // which further significantly reduces the search space.
2827
2828 // Before doing that, let's perform basic correctness checking first.
2829 int Largest = -1;
2830 for (int MaskElt : Mask) {
2831 if (MaskElt == PoisonMaskElem)
2832 continue;
2833 // Elements must be in non-decreasing order.
2834 if (MaskElt < Largest)
2835 return false;
2836 Largest = std::max(Largest, MaskElt);
2837 }
2838
2839 // Prefer larger replication factor if all else equal.
2840 for (int PossibleReplicationFactor :
2841 reverse(seq_inclusive<unsigned>(1, Mask.size()))) {
2842 if (Mask.size() % PossibleReplicationFactor != 0)
2843 continue;
2844 int PossibleVF = Mask.size() / PossibleReplicationFactor;
2845 if (!isReplicationMaskWithParams(Mask, PossibleReplicationFactor,
2846 PossibleVF))
2847 continue;
2848 ReplicationFactor = PossibleReplicationFactor;
2849 VF = PossibleVF;
2850 return true;
2851 }
2852
2853 return false;
2854}
2855
2856bool ShuffleVectorInst::isReplicationMask(int &ReplicationFactor,
2857 int &VF) const {
2858 // Not possible to express a shuffle mask for a scalable vector for this
2859 // case.
2860 if (isa<ScalableVectorType>(getType()))
2861 return false;
2862
2863 VF = cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2864 if (ShuffleMask.size() % VF != 0)
2865 return false;
2866 ReplicationFactor = ShuffleMask.size() / VF;
2867
2868 return isReplicationMaskWithParams(ShuffleMask, ReplicationFactor, VF);
2869}
2870
2872 if (VF <= 0 || Mask.size() < static_cast<unsigned>(VF) ||
2873 Mask.size() % VF != 0)
2874 return false;
2875 for (unsigned K = 0, Sz = Mask.size(); K < Sz; K += VF) {
2876 ArrayRef<int> SubMask = Mask.slice(K, VF);
2877 if (all_of(SubMask, [](int Idx) { return Idx == PoisonMaskElem; }))
2878 continue;
2879 SmallBitVector Used(VF, false);
2880 for (int Idx : SubMask) {
2881 if (Idx != PoisonMaskElem && Idx < VF)
2882 Used.set(Idx);
2883 }
2884 if (!Used.all())
2885 return false;
2886 }
2887 return true;
2888}
2889
2890/// Return true if this shuffle mask is a replication mask.
2892 // Not possible to express a shuffle mask for a scalable vector for this
2893 // case.
2894 if (isa<ScalableVectorType>(getType()))
2895 return false;
2896 if (!isSingleSourceMask(ShuffleMask, VF))
2897 return false;
2898
2899 return isOneUseSingleSourceMask(ShuffleMask, VF);
2900}
2901
2902bool ShuffleVectorInst::isInterleave(unsigned Factor) {
2903 FixedVectorType *OpTy = dyn_cast<FixedVectorType>(getOperand(0)->getType());
2904 // shuffle_vector can only interleave fixed length vectors - for scalable
2905 // vectors, see the @llvm.vector.interleave2 intrinsic
2906 if (!OpTy)
2907 return false;
2908 unsigned OpNumElts = OpTy->getNumElements();
2909
2910 return isInterleaveMask(ShuffleMask, Factor, OpNumElts * 2);
2911}
2912
2914 ArrayRef<int> Mask, unsigned Factor, unsigned NumInputElts,
2915 SmallVectorImpl<unsigned> &StartIndexes) {
2916 unsigned NumElts = Mask.size();
2917 if (NumElts % Factor)
2918 return false;
2919
2920 unsigned LaneLen = NumElts / Factor;
2921 if (!isPowerOf2_32(LaneLen))
2922 return false;
2923
2924 StartIndexes.resize(Factor);
2925
2926 // Check whether each element matches the general interleaved rule.
2927 // Ignore undef elements, as long as the defined elements match the rule.
2928 // Outer loop processes all factors (x, y, z in the above example)
2929 unsigned I = 0, J;
2930 for (; I < Factor; I++) {
2931 unsigned SavedLaneValue;
2932 unsigned SavedNoUndefs = 0;
2933
2934 // Inner loop processes consecutive accesses (x, x+1... in the example)
2935 for (J = 0; J < LaneLen - 1; J++) {
2936 // Lane computes x's position in the Mask
2937 unsigned Lane = J * Factor + I;
2938 unsigned NextLane = Lane + Factor;
2939 int LaneValue = Mask[Lane];
2940 int NextLaneValue = Mask[NextLane];
2941
2942 // If both are defined, values must be sequential
2943 if (LaneValue >= 0 && NextLaneValue >= 0 &&
2944 LaneValue + 1 != NextLaneValue)
2945 break;
2946
2947 // If the next value is undef, save the current one as reference
2948 if (LaneValue >= 0 && NextLaneValue < 0) {
2949 SavedLaneValue = LaneValue;
2950 SavedNoUndefs = 1;
2951 }
2952
2953 // Undefs are allowed, but defined elements must still be consecutive:
2954 // i.e.: x,..., undef,..., x + 2,..., undef,..., undef,..., x + 5, ....
2955 // Verify this by storing the last non-undef followed by an undef
2956 // Check that following non-undef masks are incremented with the
2957 // corresponding distance.
2958 if (SavedNoUndefs > 0 && LaneValue < 0) {
2959 SavedNoUndefs++;
2960 if (NextLaneValue >= 0 &&
2961 SavedLaneValue + SavedNoUndefs != (unsigned)NextLaneValue)
2962 break;
2963 }
2964 }
2965
2966 if (J < LaneLen - 1)
2967 return false;
2968
2969 int StartMask = 0;
2970 if (Mask[I] >= 0) {
2971 // Check that the start of the I range (J=0) is greater than 0
2972 StartMask = Mask[I];
2973 } else if (Mask[(LaneLen - 1) * Factor + I] >= 0) {
2974 // StartMask defined by the last value in lane
2975 StartMask = Mask[(LaneLen - 1) * Factor + I] - J;
2976 } else if (SavedNoUndefs > 0) {
2977 // StartMask defined by some non-zero value in the j loop
2978 StartMask = SavedLaneValue - (LaneLen - 1 - SavedNoUndefs);
2979 }
2980 // else StartMask remains set to 0, i.e. all elements are undefs
2981
2982 if (StartMask < 0)
2983 return false;
2984 // We must stay within the vectors; This case can happen with undefs.
2985 if (StartMask + LaneLen > NumInputElts)
2986 return false;
2987
2988 StartIndexes[I] = StartMask;
2989 }
2990
2991 return true;
2992}
2993
2994/// Check if the mask is a DE-interleave mask of the given factor
2995/// \p Factor like:
2996/// <Index, Index+Factor, ..., Index+(NumElts-1)*Factor>
2998 unsigned Factor,
2999 unsigned &Index) {
3000 // Check all potential start indices from 0 to (Factor - 1).
3001 for (unsigned Idx = 0; Idx < Factor; Idx++) {
3002 unsigned I = 0;
3003
3004 // Check that elements are in ascending order by Factor. Ignore undef
3005 // elements.
3006 for (; I < Mask.size(); I++)
3007 if (Mask[I] >= 0 && static_cast<unsigned>(Mask[I]) != Idx + I * Factor)
3008 break;
3009
3010 if (I == Mask.size()) {
3011 Index = Idx;
3012 return true;
3013 }
3014 }
3015
3016 return false;
3017}
3018
3019/// Try to lower a vector shuffle as a bit rotation.
3020///
3021/// Look for a repeated rotation pattern in each sub group.
3022/// Returns an element-wise left bit rotation amount or -1 if failed.
3023static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
3024 int NumElts = Mask.size();
3025 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
3026
3027 int RotateAmt = -1;
3028 for (int i = 0; i != NumElts; i += NumSubElts) {
3029 for (int j = 0; j != NumSubElts; ++j) {
3030 int M = Mask[i + j];
3031 if (M < 0)
3032 continue;
3033 if (M < i || M >= i + NumSubElts)
3034 return -1;
3035 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
3036 if (0 <= RotateAmt && Offset != RotateAmt)
3037 return -1;
3038 RotateAmt = Offset;
3039 }
3040 }
3041 return RotateAmt;
3042}
3043
// Returns true when the shuffle mask is implementable as an element-wise bit
// rotation; on success NumSubElts/RotateAmt receive the chosen sub-group size
// and the rotation amount in bits.
// NOTE(review): the opening declaration line was lost in extraction;
// presumably bool ShuffleVectorInst::isBitRotateMask( -- confirm.
    ArrayRef<int> Mask, unsigned EltSizeInBits, unsigned MinSubElts,
    unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt) {
  // Try progressively larger power-of-two sub-group sizes until one admits a
  // consistent per-element rotation.
  for (NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
    int EltRotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
    if (EltRotateAmt < 0)
      continue;
    // Scale the element rotation into a bit rotation.
    RotateAmt = EltRotateAmt * EltSizeInBits;
    return true;
  }

  return false;
}
3057
3058//===----------------------------------------------------------------------===//
3059// InsertValueInst Class
3060//===----------------------------------------------------------------------===//
3061
// Shared constructor body: record the aggregate, the inserted value and the
// index path, then name the instruction.
void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                           const Twine &Name) {
  assert(getNumOperands() == 2 && "NumOperands not initialized?");

  // There's no fundamental reason why we require at least one index
  // (other than weirdness with &*IdxBegin being invalid; see
  // getelementptr's init routine for example). But there's no
  // present need to support it.
  assert(!Idxs.empty() && "InsertValueInst must have at least one index");

  // NOTE(review): the first line of this assert was lost in extraction;
  // presumably it compares ExtractValueInst::getIndexedType(Agg->getType(),
  // Idxs) against the inserted value's type -- confirm against upstream.
         Val->getType() && "Inserted value must match indexed type!");
  Op<0>() = Agg;
  Op<1>() = Val;

  Indices.append(Idxs.begin(), Idxs.end());
  setName(Name);
}
3080
// Copy constructor: clones both operands and the index list.
// NOTE(review): one line was lost here in extraction (presumably copying
// SubclassOptionalData from IVI) -- confirm against upstream.
InsertValueInst::InsertValueInst(const InsertValueInst &IVI)
    : Instruction(IVI.getType(), InsertValue,
                  OperandTraits<InsertValueInst>::op_begin(this), 2),
      Indices(IVI.Indices) {
  Op<0>() = IVI.getOperand(0);
  Op<1>() = IVI.getOperand(1);
}
3089
3090//===----------------------------------------------------------------------===//
3091// ExtractValueInst Class
3092//===----------------------------------------------------------------------===//
3093
3094void ExtractValueInst::init(ArrayRef<unsigned> Idxs, const Twine &Name) {
3095 assert(getNumOperands() == 1 && "NumOperands not initialized?");
3096
3097 // There's no fundamental reason why we require at least one index.
3098 // But there's no present need to support it.
3099 assert(!Idxs.empty() && "ExtractValueInst must have at least one index");
3100
3101 Indices.append(Idxs.begin(), Idxs.end());
3102 setName(Name);
3103}
3104
// Copy constructor: clones the aggregate operand and index list.
// NOTE(review): one line was lost here in extraction (presumably copying
// SubclassOptionalData from EVI) -- confirm against upstream.
ExtractValueInst::ExtractValueInst(const ExtractValueInst &EVI)
    : UnaryInstruction(EVI.getType(), ExtractValue, EVI.getOperand(0)),
      Indices(EVI.Indices) {
}
3110
// getIndexedType - Returns the type of the element that would be extracted
// with an extractvalue instruction with the specified parameters.
//
// A null type is returned if the indices are invalid for the specified
// pointer type.
//
// NOTE(review): the opening declaration line was lost in extraction;
// presumably Type *ExtractValueInst::getIndexedType(Type *Agg, -- confirm.
                                         ArrayRef<unsigned> Idxs) {
  for (unsigned Index : Idxs) {
    // We can't use CompositeType::indexValid(Index) here.
    // indexValid() always returns true for arrays because getelementptr allows
    // out-of-bounds indices. Since we don't allow those for extractvalue and
    // insertvalue we need to check array indexing manually.
    // Since the only other types we can index into are struct types it's just
    // as easy to check those manually as well.
    if (ArrayType *AT = dyn_cast<ArrayType>(Agg)) {
      if (Index >= AT->getNumElements())
        return nullptr;
      // Arrays have a single homogeneous element type.
      Agg = AT->getElementType();
    } else if (StructType *ST = dyn_cast<StructType>(Agg)) {
      if (Index >= ST->getNumElements())
        return nullptr;
      // Struct element types vary per index.
      Agg = ST->getElementType(Index);
    } else {
      // Not a valid type to index into.
      return nullptr;
    }
  }
  return const_cast<Type*>(Agg);
}
3141
3142//===----------------------------------------------------------------------===//
3143// UnaryOperator Class
3144//===----------------------------------------------------------------------===//
3145
// UnaryOperator constructors. All three variants store the single operand,
// name the instruction and run the debug-build sanity checks; they differ
// only in where the new instruction is inserted.
// NOTE(review): each constructor's opening declaration line (presumably
// UnaryOperator::UnaryOperator(UnaryOps iType, Value *S, ...) was lost in
// extraction -- confirm against the header.
                             const Twine &Name,
                             BasicBlock::iterator InsertBefore)
    : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

// Variant inserting before an existing instruction.
                             Type *Ty, const Twine &Name,
                             Instruction *InsertBefore)
    : UnaryInstruction(Ty, iType, S, InsertBefore) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}

// Variant appending at the end of a basic block.
                             Type *Ty, const Twine &Name,
                             BasicBlock *InsertAtEnd)
    : UnaryInstruction(Ty, iType, S, InsertAtEnd) {
  Op<0>() = S;
  setName(Name);
  AssertOK();
}
3172
// Factory methods: the result type always equals the operand type.
// NOTE(review): each overload's opening declaration line (presumably
// UnaryOperator *UnaryOperator::Create(UnaryOps Op, Value *S, ...) was lost
// in extraction -- confirm against the header.
                                     BasicBlock::iterator InsertBefore) {
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}

                                     const Twine &Name,
                                     Instruction *InsertBefore) {
  return new UnaryOperator(Op, S, S->getType(), Name, InsertBefore);
}

                                     const Twine &Name,
                                     BasicBlock *InsertAtEnd) {
  // Create unattached, then append to the target block.
  UnaryOperator *Res = Create(Op, S, Name);
  Res->insertInto(InsertAtEnd, InsertAtEnd->end());
  return Res;
}
3191
3192void UnaryOperator::AssertOK() {
3193 Value *LHS = getOperand(0);
3194 (void)LHS; // Silence warnings.
3195#ifndef NDEBUG
3196 switch (getOpcode()) {
3197 case FNeg:
3198 assert(getType() == LHS->getType() &&
3199 "Unary operation should return same type as operand!");
3200 assert(getType()->isFPOrFPVectorTy() &&
3201 "Tried to create a floating-point operation on a "
3202 "non-floating-point type!");
3203 break;
3204 default: llvm_unreachable("Invalid opcode provided");
3205 }
3206#endif
3207}
3208
3209//===----------------------------------------------------------------------===//
3210// BinaryOperator Class
3211//===----------------------------------------------------------------------===//
3212
// BinaryOperator constructors. All three variants store both operands, name
// the instruction and run the debug-build sanity checks; they differ only in
// where the new instruction is inserted.
// NOTE(review): each constructor's opening declaration line (presumably
// BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
// Type *Ty, ...) was lost in extraction -- confirm against the header.
                               const Twine &Name,
                               BasicBlock::iterator InsertBefore)
  : Instruction(Ty, iType, OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this), InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

// Variant inserting before an existing instruction.
                               Type *Ty, const Twine &Name,
                               Instruction *InsertBefore)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertBefore) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}

// Variant appending at the end of a basic block.
                               Type *Ty, const Twine &Name,
                               BasicBlock *InsertAtEnd)
  : Instruction(Ty, iType,
                OperandTraits<BinaryOperator>::op_begin(this),
                OperandTraits<BinaryOperator>::operands(this),
                InsertAtEnd) {
  Op<0>() = S1;
  Op<1>() = S2;
  setName(Name);
  AssertOK();
}
3249
/// Debug-build sanity checks for a freshly constructed binary operator:
/// operand types must match each other and, per opcode family, the result
/// type and its int/FP class must be consistent with the opcode.
void BinaryOperator::AssertOK() {
  Value *LHS = getOperand(0), *RHS = getOperand(1);
  (void)LHS; (void)RHS; // Silence warnings.
  assert(LHS->getType() == RHS->getType() &&
         "Binary operator operand types must match!");
#ifndef NDEBUG
  switch (getOpcode()) {
  case Add: case Sub:
  case Mul:
    // Integer arithmetic.
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create an integer operation on a non-integer type!");
    break;
  case FAdd: case FSub:
  case FMul:
    // Floating-point arithmetic.
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Tried to create a floating-point operation on a "
           "non-floating-point type!");
    break;
  case UDiv:
  case SDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UDIV");
    break;
  case FDiv:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FDIV");
    break;
  case URem:
  case SRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Incorrect operand type (not integer) for S/UREM");
    break;
  case FRem:
    assert(getType() == LHS->getType() &&
           "Arithmetic operation should return same type as operands!");
    assert(getType()->isFPOrFPVectorTy() &&
           "Incorrect operand type (not floating point) for FREM");
    break;
  case Shl:
  case LShr:
  case AShr:
    assert(getType() == LHS->getType() &&
           "Shift operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a shift operation on a non-integral type!");
    break;
  case And: case Or:
  case Xor:
    // Bitwise logical operations.
    assert(getType() == LHS->getType() &&
           "Logical operation should return same type as operands!");
    assert(getType()->isIntOrIntVectorTy() &&
           "Tried to create a logical operation on a non-integral type!");
    break;
  default: llvm_unreachable("Invalid opcode provided");
  }
#endif
}
3317
// Factory methods: validate that both operand types match, then build the
// operator (result type = operand type) at the requested insertion point.
// NOTE(review): each overload's opening declaration line (presumably
// BinaryOperator *BinaryOperator::Create(BinaryOps Op, Value *S1, Value *S2,)
// was lost in extraction -- confirm against the header.
                                       const Twine &Name,
                                       BasicBlock::iterator InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}

                                       const Twine &Name,
                                       Instruction *InsertBefore) {
  assert(S1->getType() == S2->getType() &&
         "Cannot create binary operator with two operands of differing type!");
  return new BinaryOperator(Op, S1, S2, S1->getType(), Name, InsertBefore);
}

                                       const Twine &Name,
                                       BasicBlock *InsertAtEnd) {
  // Create unattached, then append to the target block.
  BinaryOperator *Res = Create(Op, S1, S2, Name);
  Res->insertInto(InsertAtEnd, InsertAtEnd->end());
  return Res;
}
3341
// Negation helpers: neg(x) is built as "sub 0, x", not(x) as "xor x, -1".
// NOTE(review): each helper's opening declaration line was lost in
// extraction (presumably BinaryOperator *BinaryOperator::CreateNeg(...),
// CreateNSWNeg(...), CreateNot(...)) -- confirm against the header.
                                          BasicBlock::iterator InsertBefore) {
  // neg(x) == sub(0, x).
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return new BinaryOperator(Instruction::Sub, Zero, Op, Op->getType(), Name,
                            InsertBefore);
}

                                          BasicBlock *InsertAtEnd) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return new BinaryOperator(Instruction::Sub,
                            Zero, Op,
                            Op->getType(), Name, InsertAtEnd);
}

// NSW variant: the generated sub carries the no-signed-wrap flag.
                                             Instruction *InsertBefore) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertBefore);
}

                                             BasicBlock *InsertAtEnd) {
  Value *Zero = ConstantInt::get(Op->getType(), 0);
  return BinaryOperator::CreateNSWSub(Zero, Op, Name, InsertAtEnd);
}

// not(x) == xor(x, all-ones).
                                          BasicBlock::iterator InsertBefore) {
  Constant *C = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}

                                          Instruction *InsertBefore) {
  Constant *C = Constant::getAllOnesValue(Op->getType());
  return new BinaryOperator(Instruction::Xor, Op, C,
                            Op->getType(), Name, InsertBefore);
}

// NOTE(review): the declaration and the AllOnes definition line were lost in
// extraction here (presumably Constant *AllOnes =
// Constant::getAllOnesValue(Op->getType());) -- confirm against upstream.
  return new BinaryOperator(Instruction::Xor, Op, AllOnes,
                            Op->getType(), Name, InsertAtEnd);
}
3389
// Exchange the two operands to this instruction. This instruction is safe to
// use on any binary instruction and does not modify the semantics of the
// instruction. If the instruction is order-dependent (SetLT f.e.), the opcode
// is changed.
// NOTE(review): the opening declaration line was lost in extraction;
// presumably bool BinaryOperator::swapOperands() { -- confirm.
  if (!isCommutative())
    return true; // Can't commute operands
  Op<0>().swap(Op<1>());
  // Success is reported as false in this API.
  return false;
}
3400
3401//===----------------------------------------------------------------------===//
3402// FPMathOperator Class
3403//===----------------------------------------------------------------------===//
3404
// Return the accuracy requested via !fpmath metadata, or 0.0 when no such
// metadata is attached.
// NOTE(review): the opening declaration line was lost in extraction;
// presumably float FPMathOperator::getFPAccuracy() const { -- confirm.
  const MDNode *MD =
      cast<Instruction>(this)->getMetadata(LLVMContext::MD_fpmath);
  if (!MD)
    return 0.0;
  // The metadata carries a single ConstantFP accuracy operand.
  ConstantFP *Accuracy = mdconst::extract<ConstantFP>(MD->getOperand(0));
  return Accuracy->getValueAPF().convertToFloat();
}
3413
3414//===----------------------------------------------------------------------===//
3415// CastInst Class
3416//===----------------------------------------------------------------------===//
3417
// Just determine if this cast only deals with integral->integral conversion.
// NOTE(review): the opening declaration line was lost in extraction;
// presumably bool CastInst::isIntegerCast() const { -- confirm.
  switch (getOpcode()) {
  default: return false;
  // Width changes between integer types are integer casts by definition.
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::Trunc:
    return true;
  // A bitcast counts only when both endpoints are integers.
  case Instruction::BitCast:
    return getOperand(0)->getType()->isIntegerTy() &&
           getType()->isIntegerTy();
  }
}
3431
/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
/// no code gen is necessary for the cast, hence the name no-op cast. For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32 ; on 32-bit plaforms only
/// Determine if the described cast is a no-op.
// NOTE(review): the opening declaration line was lost in extraction;
// presumably bool CastInst::isNoopCast(Instruction::CastOps Opcode,
// -- confirm against the header.
                          Type *SrcTy,
                          Type *DestTy,
                          const DataLayout &DL) {
  assert(castIsValid(Opcode, SrcTy, DestTy) && "method precondition");
  switch (Opcode) {
  default: llvm_unreachable("Invalid CastOp");
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    // TODO: Target informations may give a more accurate answer here.
    return false;
  case Instruction::BitCast:
    return true; // BitCast never modifies bits.
  case Instruction::PtrToInt:
    // No-op only when the integer type has exactly pointer width.
    return DL.getIntPtrType(SrcTy)->getScalarSizeInBits() ==
           DestTy->getScalarSizeInBits();
  case Instruction::IntToPtr:
    return DL.getIntPtrType(DestTy)->getScalarSizeInBits() ==
           SrcTy->getScalarSizeInBits();
  }
}

// Member wrapper: classify this cast using its own operand and result types.
// NOTE(review): the opening declaration line was lost in extraction;
// presumably bool CastInst::isNoopCast(const DataLayout &DL) const {
  return isNoopCast(getOpcode(), getOperand(0)->getType(), getType(), DL);
}
3473
/// This function determines if a pair of casts can be eliminated and what
/// opcode should be used in the elimination. This assumes that there are two
/// instructions like this:
/// * %F = firstOpcode SrcTy %x to MidTy
/// * %S = secondOpcode MidTy %F to DstTy
/// The function returns a resultOpcode so these two casts can be replaced with:
/// * %Replacement = resultOpcode %SrcTy %x to DstTy
/// If no such cast is permitted, the function returns 0.
// NOTE(review): the opening declaration lines were lost in extraction;
// presumably unsigned CastInst::isEliminableCastPair(
//     Instruction::CastOps firstOp, Instruction::CastOps secondOp,
// -- confirm against the header.
    Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy,
    Type *DstIntPtrTy) {
  // Define the 144 possibilities for these two cast instructions. The values
  // in this matrix determine what to do in a given situation and select the
  // case in the switch below. The rows correspond to firstOp, the columns
  // correspond to secondOp. In looking at the table below, keep in mind
  // the following cast properties:
  //
  //          Size Compare       Source               Destination
  // Operator  Src ? Size   Type       Sign         Type       Sign
  // -------- ------------ -------------------   ---------------------
  // TRUNC         >       Integer      Any        Integral     Any
  // ZEXT          <       Integral   Unsigned     Integer      Any
  // SEXT          <       Integral    Signed      Integer      Any
  // FPTOUI       n/a      FloatPt      n/a        Integral   Unsigned
  // FPTOSI       n/a      FloatPt      n/a        Integral    Signed
  // UITOFP       n/a      Integral   Unsigned     FloatPt      n/a
  // SITOFP       n/a      Integral    Signed      FloatPt      n/a
  // FPTRUNC       >       FloatPt      n/a        FloatPt      n/a
  // FPEXT         <       FloatPt      n/a        FloatPt      n/a
  // PTRTOINT     n/a      Pointer      n/a        Integral   Unsigned
  // INTTOPTR     n/a      Integral   Unsigned     Pointer      n/a
  // BITCAST       =       FirstClass   n/a        FirstClass   n/a
  // ADDRSPCST    n/a      Pointer      n/a        Pointer      n/a
  //
  // NOTE: some transforms are safe, but we consider them to be non-profitable.
  // For example, we could merge "fptoui double to i32" + "zext i32 to i64",
  // into "fptoui double to i64", but this loses information about the range
  // of the produced value (we no longer know the top-part is all zeros).
  // Further this conversion is often much more expensive for typical hardware,
  // and causes issues when building libgcc. We disallow fptosi+sext for the
  // same reason.
  const unsigned numCastOps =
    Instruction::CastOpsEnd - Instruction::CastOpsBegin;
  static const uint8_t CastResults[numCastOps][numCastOps] = {
    // T        F  F  U  S  F  F  P  I  B  A  -+
    // R  Z  S  P  P  I  I  T  P  2  N  T  S   |
    // U  E  E  2  2  2  2  R  E  I  T  C  C   +- secondOp
    // N  X  X  U  S  F  F  N  X  N  2  V  V   |
    // C  T  T  I  I  P  P  C  T  T  P  T  T  -+
    {  1, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // Trunc         -+
    {  8, 1, 9,99,99, 2,17,99,99,99, 2, 3, 0}, // ZExt           |
    {  8, 0, 1,99,99, 0, 2,99,99,99, 0, 3, 0}, // SExt           |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToUI         |
    {  0, 0, 0,99,99, 0, 0,99,99,99, 0, 3, 0}, // FPToSI         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // UIToFP         +- firstOp
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // SIToFP         |
    { 99,99,99, 0, 0,99,99, 0, 0,99,99, 4, 0}, // FPTrunc        |
    { 99,99,99, 2, 2,99,99, 8, 2,99,99, 4, 0}, // FPExt          |
    {  1, 0, 0,99,99, 0, 0,99,99,99, 7, 3, 0}, // PtrToInt       |
    { 99,99,99,99,99,99,99,99,99,11,99,15, 0}, // IntToPtr       |
    {  5, 5, 5, 0, 0, 5, 5, 0, 0,16, 5, 1,14}, // BitCast        |
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,13,12}, // AddrSpaceCast -+
  };

  // TODO: This logic could be encoded into the table above and handled in the
  // switch below.
  // If either of the casts are a bitcast from scalar to vector, disallow the
  // merging. However, any pair of bitcasts are allowed.
  bool IsFirstBitcast  = (firstOp == Instruction::BitCast);
  bool IsSecondBitcast = (secondOp == Instruction::BitCast);
  bool AreBothBitcasts = IsFirstBitcast && IsSecondBitcast;

  // Check if any of the casts convert scalars <-> vectors.
  if ((IsFirstBitcast  && isa<VectorType>(SrcTy) != isa<VectorType>(MidTy)) ||
      (IsSecondBitcast && isa<VectorType>(MidTy) != isa<VectorType>(DstTy)))
    if (!AreBothBitcasts)
      return 0;

  int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
                            [secondOp-Instruction::CastOpsBegin];
  switch (ElimCase) {
    case 0:
      // Categorically disallowed.
      return 0;
    case 1:
      // Allowed, use first cast's opcode.
      return firstOp;
    case 2:
      // Allowed, use second cast's opcode.
      return secondOp;
    case 3:
      // No-op cast in second op implies firstOp as long as the DestTy
      // is integer and we are not converting between a vector and a
      // non-vector type.
      if (!SrcTy->isVectorTy() && DstTy->isIntegerTy())
        return firstOp;
      return 0;
    case 4:
      // No-op cast in second op implies firstOp as long as the DestTy
      // matches MidTy.
      if (DstTy == MidTy)
        return firstOp;
      return 0;
    case 5:
      // No-op cast in first op implies secondOp as long as the SrcTy
      // is an integer.
      if (SrcTy->isIntegerTy())
        return secondOp;
      return 0;
    case 7: {
      // Disable inttoptr/ptrtoint optimization if enabled.
      if (DisableI2pP2iOpt)
        return 0;

      // Cannot simplify if address spaces are different!
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return 0;

      unsigned MidSize = MidTy->getScalarSizeInBits();
      // We can still fold this without knowing the actual sizes as long we
      // know that the intermediate pointer is the largest possible
      // pointer size.
      // FIXME: Is this always true?
      if (MidSize == 64)
        return Instruction::BitCast;

      // ptrtoint, inttoptr -> bitcast (ptr -> ptr) if int size is >= ptr size.
      if (!SrcIntPtrTy || DstIntPtrTy != SrcIntPtrTy)
        return 0;
      unsigned PtrSize = SrcIntPtrTy->getScalarSizeInBits();
      if (MidSize >= PtrSize)
        return Instruction::BitCast;
      return 0;
    }
    case 8: {
      // ext, trunc -> bitcast, if the SrcTy and DstTy are the same
      // ext, trunc -> ext,     if sizeof(SrcTy) < sizeof(DstTy)
      // ext, trunc -> trunc,   if sizeof(SrcTy) > sizeof(DstTy)
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcTy == DstTy)
        return Instruction::BitCast;
      if (SrcSize < DstSize)
        return firstOp;
      if (SrcSize > DstSize)
        return secondOp;
      return 0;
    }
    case 9:
      // zext, sext -> zext, because sext can't sign extend after zext
      return Instruction::ZExt;
    case 11: {
      // inttoptr, ptrtoint -> bitcast if SrcSize<=PtrSize and SrcSize==DstSize
      if (!MidIntPtrTy)
        return 0;
      unsigned PtrSize = MidIntPtrTy->getScalarSizeInBits();
      unsigned SrcSize = SrcTy->getScalarSizeInBits();
      unsigned DstSize = DstTy->getScalarSizeInBits();
      if (SrcSize <= PtrSize && SrcSize == DstSize)
        return Instruction::BitCast;
      return 0;
    }
    case 12:
      // addrspacecast, addrspacecast -> bitcast,       if SrcAS == DstAS
      // addrspacecast, addrspacecast -> addrspacecast, if SrcAS != DstAS
      if (SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace())
        return Instruction::AddrSpaceCast;
      return Instruction::BitCast;
    case 13:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        SrcTy->getPointerAddressSpace() != MidTy->getPointerAddressSpace() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal addrspacecast, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 14:
      // bitcast, addrspacecast -> addrspacecast
      return Instruction::AddrSpaceCast;
    case 15:
      // FIXME: this state can be merged with (1), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isIntOrIntVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isPtrOrPtrVectorTy() &&
        MidTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace() &&
        "Illegal inttoptr, bitcast sequence!");
      // Allowed, use first cast's opcode
      return firstOp;
    case 16:
      // FIXME: this state can be merged with (2), but the following assert
      // is useful to check the correctness of the sequence due to semantic
      // change of bitcast.
      assert(
        SrcTy->isPtrOrPtrVectorTy() &&
        MidTy->isPtrOrPtrVectorTy() &&
        DstTy->isIntOrIntVectorTy() &&
        SrcTy->getPointerAddressSpace() == MidTy->getPointerAddressSpace() &&
        "Illegal bitcast, ptrtoint sequence!");
      // Allowed, use second cast's opcode
      return secondOp;
    case 17:
      // (sitofp (zext x)) -> (uitofp x)
      return Instruction::UIToFP;
    case 99:
      // Cast combination can't happen (error in input). This is for all cases
      // where the MidTy is not the same for the two cast instructions.
      llvm_unreachable("Invalid Cast Combination");
    default:
      llvm_unreachable("Error in CastResults table!!!");
  }
}
3694
// Factory: construct the concrete CastInst subclass for the given opcode at
// the requested insertion point.
// NOTE(review): each overload's opening declaration line (presumably
// CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,)
// was lost in extraction -- confirm against the header.
                           const Twine &Name,
                           BasicBlock::iterator InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertBefore);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

// Variant inserting before an existing instruction.
                           const Twine &Name, Instruction *InsertBefore) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertBefore);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertBefore);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertBefore);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertBefore);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertBefore);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertBefore);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertBefore);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertBefore);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertBefore);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertBefore);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertBefore);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertBefore);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertBefore);
  default: llvm_unreachable("Invalid opcode provided");
  }
}

// Variant appending at the end of a basic block.
                           const Twine &Name, BasicBlock *InsertAtEnd) {
  assert(castIsValid(op, S, Ty) && "Invalid cast!");
  // Construct and return the appropriate CastInst subclass
  switch (op) {
  case Trunc:         return new TruncInst         (S, Ty, Name, InsertAtEnd);
  case ZExt:          return new ZExtInst          (S, Ty, Name, InsertAtEnd);
  case SExt:          return new SExtInst          (S, Ty, Name, InsertAtEnd);
  case FPTrunc:       return new FPTruncInst       (S, Ty, Name, InsertAtEnd);
  case FPExt:         return new FPExtInst         (S, Ty, Name, InsertAtEnd);
  case UIToFP:        return new UIToFPInst        (S, Ty, Name, InsertAtEnd);
  case SIToFP:        return new SIToFPInst        (S, Ty, Name, InsertAtEnd);
  case FPToUI:        return new FPToUIInst        (S, Ty, Name, InsertAtEnd);
  case FPToSI:        return new FPToSIInst        (S, Ty, Name, InsertAtEnd);
  case PtrToInt:      return new PtrToIntInst      (S, Ty, Name, InsertAtEnd);
  case IntToPtr:      return new IntToPtrInst      (S, Ty, Name, InsertAtEnd);
  case BitCast:       return new BitCastInst       (S, Ty, Name, InsertAtEnd);
  case AddrSpaceCast: return new AddrSpaceCastInst (S, Ty, Name, InsertAtEnd);
  default: llvm_unreachable("Invalid opcode provided");
  }
}
3761
// Create{ZExt,SExt,Trunc}OrBitCast helpers: when source and destination have
// the same scalar bit width a BitCast suffices; otherwise the named cast is
// emitted. Three insertion-point variants per flavor.
// NOTE(review): each overload's opening declaration line (e.g.
// CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,) was lost in
// extraction -- confirm against the header.
                                      BasicBlock::iterator InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
}

                                      const Twine &Name,
                                      Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
}

                                      const Twine &Name,
                                      BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
}

// SExt-or-BitCast variants.
                                      BasicBlock::iterator InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
}

                                      const Twine &Name,
                                      Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
}

                                      const Twine &Name,
                                      BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::SExt, S, Ty, Name, InsertAtEnd);
}

// Trunc-or-BitCast variants.
                                      BasicBlock::iterator InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
}

                                      const Twine &Name,
                                      Instruction *InsertBefore) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
  return Create(Instruction::Trunc, S, Ty, Name, InsertBefore);
}

                                      const Twine &Name,
                                      BasicBlock *InsertAtEnd) {
  if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
    return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
  return Create(Instruction::Trunc, S, Ty, Name, InsertAtEnd);
}
3830
// CreatePointerCast: cast a pointer (or pointer vector) to a pointer or
// integer type. Emits PtrToInt for integer destinations, otherwise a bitcast
// or addrspacecast as appropriate.
// NOTE(review): each overload's opening declaration line (presumably
// CastInst *CastInst::CreatePointerCast(Value *S, Type *Ty,) was lost in
// extraction -- confirm against the header.
                                       const Twine &Name,
                                       BasicBlock *InsertAtEnd) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  // Vector casts must preserve the element count.
  assert((!Ty->isVectorTy() ||
          cast<VectorType>(Ty)->getElementCount() ==
              cast<VectorType>(S->getType())->getElementCount()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertAtEnd);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertAtEnd);
}

/// Create a BitCast or a PtrToInt cast instruction
                                       BasicBlock::iterator InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          cast<VectorType>(Ty)->getElementCount() ==
              cast<VectorType>(S->getType())->getElementCount()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
}

/// Create a BitCast or a PtrToInt cast instruction
                                       Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert((Ty->isIntOrIntVectorTy() || Ty->isPtrOrPtrVectorTy()) &&
         "Invalid cast");
  assert(Ty->isVectorTy() == S->getType()->isVectorTy() && "Invalid cast");
  assert((!Ty->isVectorTy() ||
          cast<VectorType>(Ty)->getElementCount() ==
              cast<VectorType>(S->getType())->getElementCount()) &&
         "Invalid cast");

  if (Ty->isIntOrIntVectorTy())
    return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);

  return CreatePointerBitCastOrAddrSpaceCast(S, Ty, Name, InsertBefore);
}
3884
// Pointer-to-pointer cast: chooses between AddrSpaceCast and BitCast.
// NOTE(review): each overload's opening declaration line (presumably
// CastInst *CastInst::CreatePointerBitCastOrAddrSpaceCast() was lost in
// extraction, as was the guarding if-condition before each AddrSpaceCast
// return (presumably comparing source and destination pointer address
// spaces) -- confirm against upstream.
                                           Value *S, Type *Ty,
                                           const Twine &Name,
                                           BasicBlock *InsertAtEnd) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertAtEnd);

  return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
}

// Variant inserting before an iterator position.
    Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}

// Variant inserting before an existing instruction.
    Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore) {
  assert(S->getType()->isPtrOrPtrVectorTy() && "Invalid cast");
  assert(Ty->isPtrOrPtrVectorTy() && "Invalid cast");

    return Create(Instruction::AddrSpaceCast, S, Ty, Name, InsertBefore);

  return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
}
3919
// Creates ptrtoint for ptr->int, inttoptr for int->ptr, and bitcast for
// everything else.
// NOTE(review): original lines 3920 and 3931 (signature heads of
// CastInst::CreateBitOrPointerCast) were dropped by the extraction.
 3921 const Twine &Name,
 3922 BasicBlock::iterator InsertBefore) {
 3923 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
 3924 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
 3925 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
 3926 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
 3927
 3928 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
 3929}
 3930
// Instruction*-insertion overload of the same helper.
 3932 const Twine &Name,
 3933 Instruction *InsertBefore) {
 3934 if (S->getType()->isPointerTy() && Ty->isIntegerTy())
 3935 return Create(Instruction::PtrToInt, S, Ty, Name, InsertBefore);
 3936 if (S->getType()->isIntegerTy() && Ty->isPointerTy())
 3937 return Create(Instruction::IntToPtr, S, Ty, Name, InsertBefore);
 3938
 3939 return Create(Instruction::BitCast, S, Ty, Name, InsertBefore);
 3940}
3941
// Creates an integer-to-integer cast: Trunc when narrowing, SExt/ZExt
// (per isSigned) when widening, BitCast when widths match.
// NOTE(review): signature heads (original lines 3942, 3956, 3970,
// CastInst::CreateIntegerCast) were dropped by the extraction.
 3943 const Twine &Name,
 3944 BasicBlock::iterator InsertBefore) {
 3945 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
 3946 "Invalid integer cast");
 3947 unsigned SrcBits = C->getType()->getScalarSizeInBits();
 3948 unsigned DstBits = Ty->getScalarSizeInBits();
// Pick the opcode from the relative scalar widths.
 3949 Instruction::CastOps opcode =
 3950 (SrcBits == DstBits ? Instruction::BitCast :
 3951 (SrcBits > DstBits ? Instruction::Trunc :
 3952 (isSigned ? Instruction::SExt : Instruction::ZExt)));
 3953 return Create(opcode, C, Ty, Name, InsertBefore);
 3954}
 3955
// Instruction*-insertion overload.
 3957 bool isSigned, const Twine &Name,
 3958 Instruction *InsertBefore) {
 3959 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
 3960 "Invalid integer cast");
 3961 unsigned SrcBits = C->getType()->getScalarSizeInBits();
 3962 unsigned DstBits = Ty->getScalarSizeInBits();
 3963 Instruction::CastOps opcode =
 3964 (SrcBits == DstBits ? Instruction::BitCast :
 3965 (SrcBits > DstBits ? Instruction::Trunc :
 3966 (isSigned ? Instruction::SExt : Instruction::ZExt)));
 3967 return Create(opcode, C, Ty, Name, InsertBefore);
 3968}
 3969
// BasicBlock*-append overload.
 3971 bool isSigned, const Twine &Name,
 3972 BasicBlock *InsertAtEnd) {
 3973 assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
 3974 "Invalid cast");
 3975 unsigned SrcBits = C->getType()->getScalarSizeInBits();
 3976 unsigned DstBits = Ty->getScalarSizeInBits();
 3977 Instruction::CastOps opcode =
 3978 (SrcBits == DstBits ? Instruction::BitCast :
 3979 (SrcBits > DstBits ? Instruction::Trunc :
 3980 (isSigned ? Instruction::SExt : Instruction::ZExt)));
 3981 return Create(opcode, C, Ty, Name, InsertAtEnd);
 3982}
3983
// Creates a floating-point cast: FPTrunc when narrowing, FPExt when
// widening, BitCast when scalar widths match.
// NOTE(review): signature heads (original lines 3984, 3996, 4010,
// CastInst::CreateFPCast) were dropped by the extraction.
 3985 BasicBlock::iterator InsertBefore) {
 3986 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
 3987 "Invalid cast");
 3988 unsigned SrcBits = C->getType()->getScalarSizeInBits();
 3989 unsigned DstBits = Ty->getScalarSizeInBits();
 3990 Instruction::CastOps opcode =
 3991 (SrcBits == DstBits ? Instruction::BitCast :
 3992 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
 3993 return Create(opcode, C, Ty, Name, InsertBefore);
 3994}
 3995
// Instruction*-insertion overload. Note this variant carries an extra
// assert (line 4003) that the sibling overloads lack -- an upstream
// inconsistency worth confirming, not a local bug.
 3997 const Twine &Name,
 3998 Instruction *InsertBefore) {
 3999 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
 4000 "Invalid cast");
 4001 unsigned SrcBits = C->getType()->getScalarSizeInBits();
 4002 unsigned DstBits = Ty->getScalarSizeInBits();
 4003 assert((C->getType() == Ty || SrcBits != DstBits) && "Invalid cast");
 4004 Instruction::CastOps opcode =
 4005 (SrcBits == DstBits ? Instruction::BitCast :
 4006 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
 4007 return Create(opcode, C, Ty, Name, InsertBefore);
 4008}
 4009
// BasicBlock*-append overload.
 4011 const Twine &Name,
 4012 BasicBlock *InsertAtEnd) {
 4013 assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
 4014 "Invalid cast");
 4015 unsigned SrcBits = C->getType()->getScalarSizeInBits();
 4016 unsigned DstBits = Ty->getScalarSizeInBits();
 4017 Instruction::CastOps opcode =
 4018 (SrcBits == DstBits ? Instruction::BitCast :
 4019 (SrcBits > DstBits ? Instruction::FPTrunc : Instruction::FPExt));
 4020 return Create(opcode, C, Ty, Name, InsertAtEnd);
 4021}
4022
4023bool CastInst::isBitCastable(Type *SrcTy, Type *DestTy) {
4024 if (!SrcTy->isFirstClassType() || !DestTy->isFirstClassType())
4025 return false;
4026
4027 if (SrcTy == DestTy)
4028 return true;
4029
4030 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy)) {
4031 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy)) {
4032 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
4033 // An element by element cast. Valid if casting the elements is valid.
4034 SrcTy = SrcVecTy->getElementType();
4035 DestTy = DestVecTy->getElementType();
4036 }
4037 }
4038 }
4039
4040 if (PointerType *DestPtrTy = dyn_cast<PointerType>(DestTy)) {
4041 if (PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy)) {
4042 return SrcPtrTy->getAddressSpace() == DestPtrTy->getAddressSpace();
4043 }
4044 }
4045
4046 TypeSize SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
4047 TypeSize DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
4048
4049 // Could still have vectors of pointers if the number of elements doesn't
4050 // match
4051 if (SrcBits.getKnownMinValue() == 0 || DestBits.getKnownMinValue() == 0)
4052 return false;
4053
4054 if (SrcBits != DestBits)
4055 return false;
4056
4057 if (DestTy->isX86_MMXTy() || SrcTy->isX86_MMXTy())
4058 return false;
4059
4060 return true;
4061}
4062
// Returns true if SrcTy can be converted to DestTy by a bitcast or by a
// no-op ptr<->int round trip (integer width must equal the pointer's
// DataLayout width, and the pointer must not be non-integral).
// NOTE(review): original line 4063 (signature head
// "bool CastInst::isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy,")
// was dropped by the extraction.
 4064 const DataLayout &DL) {
 4065 // ptrtoint and inttoptr are not allowed on non-integral pointers
 4066 if (auto *PtrTy = dyn_cast<PointerType>(SrcTy))
 4067 if (auto *IntTy = dyn_cast<IntegerType>(DestTy))
 4068 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
 4069 !DL.isNonIntegralPointerType(PtrTy));
 4070 if (auto *PtrTy = dyn_cast<PointerType>(DestTy))
 4071 if (auto *IntTy = dyn_cast<IntegerType>(SrcTy))
 4072 return (IntTy->getBitWidth() == DL.getPointerTypeSizeInBits(PtrTy) &&
 4073 !DL.isNonIntegralPointerType(PtrTy));
 4074
// Neither side is a pointer: fall back to plain bitcast legality.
 4075 return isBitCastable(SrcTy, DestTy);
 4076}
4077
 4078// Provide a way to get a "cast" where the cast opcode is inferred from the
 4079// types and size of the operand. This, basically, is a parallel of the
 4080// logic in the castIsValid function below. This axiom should hold:
 4081// castIsValid( getCastOpcode(Val, Ty), Val, Ty)
 4082// should not assert in castIsValid. In other words, this produces a "correct"
 4083// casting opcode for the arguments passed to it.
// NOTE(review): original lines 4084-4085 (signature head
// "Instruction::CastOps CastInst::getCastOpcode(") were dropped by the
// extraction.
 4086 const Value *Src, bool SrcIsSigned, Type *DestTy, bool DestIsSigned) {
 4087 Type *SrcTy = Src->getType();
 4088
 4089 assert(SrcTy->isFirstClassType() && DestTy->isFirstClassType() &&
 4090 "Only first class types are castable!");
 4091
 4092 if (SrcTy == DestTy)
 4093 return BitCast;
 4094
 4095 // FIXME: Check address space sizes here
// Matching-length vectors: decide per element type.
 4096 if (VectorType *SrcVecTy = dyn_cast<VectorType>(SrcTy))
 4097 if (VectorType *DestVecTy = dyn_cast<VectorType>(DestTy))
 4098 if (SrcVecTy->getElementCount() == DestVecTy->getElementCount()) {
 4099 // An element by element cast. Find the appropriate opcode based on the
 4100 // element types.
 4101 SrcTy = SrcVecTy->getElementType();
 4102 DestTy = DestVecTy->getElementType();
 4103 }
 4104
 4105 // Get the bit sizes, we'll need these
 4106 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits(); // 0 for ptr
 4107 unsigned DestBits = DestTy->getPrimitiveSizeInBits(); // 0 for ptr
 4108
 4109 // Run through the possibilities ...
 4110 if (DestTy->isIntegerTy()) { // Casting to integral
 4111 if (SrcTy->isIntegerTy()) { // Casting from integral
 4112 if (DestBits < SrcBits)
 4113 return Trunc; // int -> smaller int
 4114 else if (DestBits > SrcBits) { // its an extension
 4115 if (SrcIsSigned)
 4116 return SExt; // signed -> SEXT
 4117 else
 4118 return ZExt; // unsigned -> ZEXT
 4119 } else {
 4120 return BitCast; // Same size, No-op cast
 4121 }
 4122 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
 4123 if (DestIsSigned)
 4124 return FPToSI; // FP -> sint
 4125 else
 4126 return FPToUI; // FP -> uint
 4127 } else if (SrcTy->isVectorTy()) {
 4128 assert(DestBits == SrcBits &&
 4129 "Casting vector to integer of different width");
 4130 return BitCast; // Same size, no-op cast
 4131 } else {
 4132 assert(SrcTy->isPointerTy() &&
 4133 "Casting from a value that is not first-class type");
 4134 return PtrToInt; // ptr -> int
 4135 }
 4136 } else if (DestTy->isFloatingPointTy()) { // Casting to floating pt
 4137 if (SrcTy->isIntegerTy()) { // Casting from integral
 4138 if (SrcIsSigned)
 4139 return SIToFP; // sint -> FP
 4140 else
 4141 return UIToFP; // uint -> FP
 4142 } else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
 4143 if (DestBits < SrcBits) {
 4144 return FPTrunc; // FP -> smaller FP
 4145 } else if (DestBits > SrcBits) {
 4146 return FPExt; // FP -> larger FP
 4147 } else {
 4148 return BitCast; // same size, no-op cast
 4149 }
 4150 } else if (SrcTy->isVectorTy()) {
 4151 assert(DestBits == SrcBits &&
 4152 "Casting vector to floating point of different width");
 4153 return BitCast; // same size, no-op cast
 4154 }
 4155 llvm_unreachable("Casting pointer or non-first class to float");
 4156 } else if (DestTy->isVectorTy()) {
 4157 assert(DestBits == SrcBits &&
 4158 "Illegal cast to vector (wrong type or size)");
 4159 return BitCast;
 4160 } else if (DestTy->isPointerTy()) {
 4161 if (SrcTy->isPointerTy()) {
// Cross-address-space pointer casts require addrspacecast.
 4162 if (DestTy->getPointerAddressSpace() != SrcTy->getPointerAddressSpace())
 4163 return AddrSpaceCast;
 4164 return BitCast; // ptr -> ptr
 4165 } else if (SrcTy->isIntegerTy()) {
 4166 return IntToPtr; // int -> ptr
 4167 }
 4168 llvm_unreachable("Casting pointer to other than pointer or int");
 4169 } else if (DestTy->isX86_MMXTy()) {
 4170 if (SrcTy->isVectorTy()) {
 4171 assert(DestBits == SrcBits && "Casting vector of wrong width to X86_MMX");
 4172 return BitCast; // 64-bit vector to MMX
 4173 }
 4174 llvm_unreachable("Illegal cast to X86_MMX");
 4175 }
 4176 llvm_unreachable("Casting to type that is not first-class");
 4177}
4178
4179//===----------------------------------------------------------------------===//
4180// CastInst SubClass Constructors
4181//===----------------------------------------------------------------------===//
4182
 4183/// Check that the construction parameters for a CastInst are correct. This
 4184/// could be broken out into the separate constructors but it is useful to have
 4185/// it in one place and to eliminate the redundant code for getting the sizes
 4186/// of the types involved.
 4187bool
// NOTE(review): original line 4188 (the hyperlinked
// "CastInst::castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy) {"
// line) was dropped by the extraction.
 4189 if (!SrcTy->isFirstClassType() || !DstTy->isFirstClassType() ||
 4190 SrcTy->isAggregateType() || DstTy->isAggregateType())
 4191 return false;
 4192
 4193 // Get the size of the types in bits, and whether we are dealing
 4194 // with vector types, we'll need this later.
 4195 bool SrcIsVec = isa<VectorType>(SrcTy);
 4196 bool DstIsVec = isa<VectorType>(DstTy);
 4197 unsigned SrcScalarBitSize = SrcTy->getScalarSizeInBits();
 4198 unsigned DstScalarBitSize = DstTy->getScalarSizeInBits();
 4199
 4200 // If these are vector types, get the lengths of the vectors (using zero for
 4201 // scalar types means that checking that vector lengths match also checks that
 4202 // scalars are not being converted to vectors or vectors to scalars).
// NOTE(review): the dropped halves of lines 4204/4206 are presumably
// "ElementCount::getFixed(0)" (the scalar case) -- verify upstream.
 4203 ElementCount SrcEC = SrcIsVec ? cast<VectorType>(SrcTy)->getElementCount()
 4205 ElementCount DstEC = DstIsVec ? cast<VectorType>(DstTy)->getElementCount()
 4207
 4208 // Switch on the opcode provided
 4209 switch (op) {
 4210 default: return false; // This is an input error
 4211 case Instruction::Trunc:
 4212 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
 4213 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
 4214 case Instruction::ZExt:
 4215 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
 4216 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
 4217 case Instruction::SExt:
 4218 return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
 4219 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
 4220 case Instruction::FPTrunc:
 4221 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
 4222 SrcEC == DstEC && SrcScalarBitSize > DstScalarBitSize;
 4223 case Instruction::FPExt:
 4224 return SrcTy->isFPOrFPVectorTy() && DstTy->isFPOrFPVectorTy() &&
 4225 SrcEC == DstEC && SrcScalarBitSize < DstScalarBitSize;
 4226 case Instruction::UIToFP:
 4227 case Instruction::SIToFP:
 4228 return SrcTy->isIntOrIntVectorTy() && DstTy->isFPOrFPVectorTy() &&
 4229 SrcEC == DstEC;
 4230 case Instruction::FPToUI:
 4231 case Instruction::FPToSI:
 4232 return SrcTy->isFPOrFPVectorTy() && DstTy->isIntOrIntVectorTy() &&
 4233 SrcEC == DstEC;
 4234 case Instruction::PtrToInt:
 4235 if (SrcEC != DstEC)
 4236 return false;
 4237 return SrcTy->isPtrOrPtrVectorTy() && DstTy->isIntOrIntVectorTy();
 4238 case Instruction::IntToPtr:
 4239 if (SrcEC != DstEC)
 4240 return false;
 4241 return SrcTy->isIntOrIntVectorTy() && DstTy->isPtrOrPtrVectorTy();
 4242 case Instruction::BitCast: {
 4243 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
 4244 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
 4245
 4246 // BitCast implies a no-op cast of type only. No bits change.
 4247 // However, you can't cast pointers to anything but pointers.
 4248 if (!SrcPtrTy != !DstPtrTy)
 4249 return false;
 4250
 4251 // For non-pointer cases, the cast is okay if the source and destination bit
 4252 // widths are identical.
 4253 if (!SrcPtrTy)
 4254 return SrcTy->getPrimitiveSizeInBits() == DstTy->getPrimitiveSizeInBits();
 4255
 4256 // If both are pointers then the address spaces must match.
 4257 if (SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace())
 4258 return false;
 4259
 4260 // A vector of pointers must have the same number of elements.
 4261 if (SrcIsVec && DstIsVec)
 4262 return SrcEC == DstEC;
 4263 if (SrcIsVec)
 4264 return SrcEC == ElementCount::getFixed(1);
 4265 if (DstIsVec)
 4266 return DstEC == ElementCount::getFixed(1);
 4267
 4268 return true;
 4269 }
 4270 case Instruction::AddrSpaceCast: {
 4271 PointerType *SrcPtrTy = dyn_cast<PointerType>(SrcTy->getScalarType());
 4272 if (!SrcPtrTy)
 4273 return false;
 4274
 4275 PointerType *DstPtrTy = dyn_cast<PointerType>(DstTy->getScalarType());
 4276 if (!DstPtrTy)
 4277 return false;
 4278
// AddrSpaceCast must actually change the address space.
 4279 if (SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
 4280 return false;
 4281
 4282 return SrcEC == DstEC;
 4283 }
 4284 }
 4285}
4286
// Constructors for every CastInst subclass (Trunc, ZExt, SExt, FPTrunc,
// FPExt, UIToFP, SIToFP, FPToUI, FPToSI, PtrToInt, IntToPtr, BitCast,
// AddrSpaceCast). Each family has three insertion-point overloads
// (BasicBlock::iterator, Instruction*, BasicBlock*); each delegates to the
// CastInst base constructor and asserts castIsValid.
// NOTE(review): the extraction dropped the first (hyperlinked) signature
// line of every constructor, e.g. original line 4287
// "TruncInst::TruncInst(Value *S, Type *Ty, const Twine &Name," -- the
// code below is otherwise verbatim.
 4288 BasicBlock::iterator InsertBefore)
 4289 : CastInst(Ty, Trunc, S, Name, InsertBefore) {
 4290 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
 4291}
 4292
 4294 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
 4295) : CastInst(Ty, Trunc, S, Name, InsertBefore) {
 4296 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
 4297}
 4298
 4300 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
 4301) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
 4302 assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
 4303}
 4304
 4306 BasicBlock::iterator InsertBefore)
 4307 : CastInst(Ty, ZExt, S, Name, InsertBefore) {
 4308 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
 4309}
 4310
 4312 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
 4313) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
 4314 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
 4315}
 4316
 4318 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
 4319) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
 4320 assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
 4321}
 4322
 4324 BasicBlock::iterator InsertBefore)
 4325 : CastInst(Ty, SExt, S, Name, InsertBefore) {
 4326 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
 4327}
 4328
 4330 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
 4331) : CastInst(Ty, SExt, S, Name, InsertBefore) {
 4332 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
 4333}
 4334
 4336 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
 4337) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
 4338 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
 4339}
 4340
 4342 BasicBlock::iterator InsertBefore)
 4343 : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
 4344 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
 4345}
 4346
 4348 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
 4349) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
 4350 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
 4351}
 4352
 4354 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
 4355) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
 4356 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
 4357}
 4358
 4360 BasicBlock::iterator InsertBefore)
 4361 : CastInst(Ty, FPExt, S, Name, InsertBefore) {
 4362 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
 4363}
 4364
 4366 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
 4367) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
 4368 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
 4369}
 4370
 4372 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
 4373) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
 4374 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
 4375}
 4376
 4378 BasicBlock::iterator InsertBefore)
 4379 : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
 4380 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
 4381}
 4382
 4384 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
 4385) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
 4386 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
 4387}
 4388
 4390 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
 4391) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
 4392 assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
 4393}
 4394
 4396 BasicBlock::iterator InsertBefore)
 4397 : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
 4398 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
 4399}
 4400
 4402 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
 4403) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
 4404 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
 4405}
 4406
 4408 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
 4409) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
 4410 assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
 4411}
 4412
 4414 BasicBlock::iterator InsertBefore)
 4415 : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
 4416 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
 4417}
 4418
 4420 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
 4421) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
 4422 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
 4423}
 4424
 4426 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
 4427) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
 4428 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
 4429}
 4430
 4432 BasicBlock::iterator InsertBefore)
 4433 : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
 4434 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
 4435}
 4436
 4438 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
 4439) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
 4440 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
 4441}
 4442
 4444 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
 4445) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
 4446 assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
 4447}
 4448
 4450 BasicBlock::iterator InsertBefore)
 4451 : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
 4452 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
 4453}
 4454
 4456 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
 4457) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
 4458 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
 4459}
 4460
 4462 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
 4463) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
 4464 assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
 4465}
 4466
 4468 BasicBlock::iterator InsertBefore)
 4469 : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
 4470 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
 4471}
 4472
 4474 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
 4475) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
 4476 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
 4477}
 4478
 4480 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
 4481) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
 4482 assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
 4483}
 4484
 4486 BasicBlock::iterator InsertBefore)
 4487 : CastInst(Ty, BitCast, S, Name, InsertBefore) {
 4488 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
 4489}
 4490
 4492 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
 4493) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
 4494 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
 4495}
 4496
 4498 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
 4499) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
 4500 assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
 4501}
 4502
 4504 BasicBlock::iterator InsertBefore)
 4505 : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
 4506 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
 4507}
 4508
 4510 Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
 4511) : CastInst(Ty, AddrSpaceCast, S, Name, InsertBefore) {
 4512 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
 4513}
 4514
 4516 Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
 4517) : CastInst(Ty, AddrSpaceCast, S, Name, InsertAtEnd) {
 4518 assert(castIsValid(getOpcode(), S, Ty) && "Illegal AddrSpaceCast");
 4519}
4520
4521//===----------------------------------------------------------------------===//
4522// CmpInst Classes
4523//===----------------------------------------------------------------------===//
4524
// CmpInst base constructors: wire up the two operands, store the predicate,
// set the name, and optionally copy fast-math/IR flags from FlagsSource.
// NOTE(review): the extraction dropped the first signature line of each
// overload (original lines 4525, 4538, 4553, "CmpInst::CmpInst(Type *ty,
// OtherOps op, Predicate predicate, Value *LHS," presumably -- verify).
 4526 Value *RHS, const Twine &Name,
 4527 BasicBlock::iterator InsertBefore, Instruction *FlagsSource)
 4528 : Instruction(ty, op, OperandTraits<CmpInst>::op_begin(this),
 4529 OperandTraits<CmpInst>::operands(this), InsertBefore) {
 4530 Op<0>() = LHS;
 4531 Op<1>() = RHS;
 4532 setPredicate((Predicate)predicate);
 4533 setName(Name);
 4534 if (FlagsSource)
 4535 copyIRFlags(FlagsSource);
 4536}
 4537
 4539 Value *RHS, const Twine &Name, Instruction *InsertBefore,
 4540 Instruction *FlagsSource)
 4541 : Instruction(ty, op,
 4542 OperandTraits<CmpInst>::op_begin(this),
 4543 OperandTraits<CmpInst>::operands(this),
 4544 InsertBefore) {
 4545 Op<0>() = LHS;
 4546 Op<1>() = RHS;
 4547 setPredicate((Predicate)predicate);
 4548 setName(Name);
 4549 if (FlagsSource)
 4550 copyIRFlags(FlagsSource);
 4551}
 4552
// BasicBlock*-append overload; note it takes no FlagsSource.
 4554 Value *RHS, const Twine &Name, BasicBlock *InsertAtEnd)
 4555 : Instruction(ty, op,
 4556 OperandTraits<CmpInst>::op_begin(this),
 4557 OperandTraits<CmpInst>::operands(this),
 4558 InsertAtEnd) {
 4559 Op<0>() = LHS;
 4560 Op<1>() = RHS;
 4561 setPredicate((Predicate)predicate);
 4562 setName(Name);
 4563}
4564
// Factory: builds an ICmpInst when Op is Instruction::ICmp, otherwise an
// FCmpInst, at the requested insertion point.
// NOTE(review): the extraction dropped the hyperlinked signature lines
// (original 4566, 4578, 4598, 4608) of the Create/CreateWithCopiedFlags
// overloads below -- verify parameter lists against upstream.
 4565CmpInst *
 4567 const Twine &Name, BasicBlock::iterator InsertBefore) {
 4568 if (Op == Instruction::ICmp) {
 4569 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
 4570 S1, S2, Name);
 4571 }
 4572
 4573 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
 4574 S1, S2, Name);
 4575}
 4576
 4577CmpInst *
// Instruction*-insertion overload; a null InsertBefore creates the compare
// detached (not inserted anywhere).
 4579 const Twine &Name, Instruction *InsertBefore) {
 4580 if (Op == Instruction::ICmp) {
 4581 if (InsertBefore)
 4582 return new ICmpInst(InsertBefore, CmpInst::Predicate(predicate),
 4583 S1, S2, Name);
 4584 else
 4585 return new ICmpInst(CmpInst::Predicate(predicate),
 4586 S1, S2, Name);
 4587 }
 4588
 4589 if (InsertBefore)
 4590 return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
 4591 S1, S2, Name);
 4592 else
 4593 return new FCmpInst(CmpInst::Predicate(predicate),
 4594 S1, S2, Name);
 4595}
 4596
 4597CmpInst *
// BasicBlock*-append overload.
 4599 const Twine &Name, BasicBlock *InsertAtEnd) {
 4600 if (Op == Instruction::ICmp) {
 4601 return new ICmpInst(InsertAtEnd, CmpInst::Predicate(predicate),
 4602 S1, S2, Name);
 4603 }
 4604 return new FCmpInst(InsertAtEnd, CmpInst::Predicate(predicate),
 4605 S1, S2, Name);
 4606}
 4607
// Like Create, but additionally copies IR flags (e.g. fast-math flags)
// from FlagsSource onto the new compare.
 4609 Value *S2,
 4610 const Instruction *FlagsSource,
 4611 const Twine &Name,
 4612 Instruction *InsertBefore) {
 4613 CmpInst *Inst = Create(Op, Pred, S1, S2, Name, InsertBefore);
 4614 Inst->copyIRFlags(FlagsSource);
 4615 return Inst;
 4616}
4617
// CmpInst::swapOperands -- dispatches to the subclass implementation, which
// also swaps the predicate so the compare's meaning is preserved.
// NOTE(review): the extraction dropped signature lines (original 4618,
// 4625, 4631) and guard lines 4632/4634 (presumably the
// isIntPredicate/isFPPredicate checks inside isEquality) -- verify upstream.
 4619 if (ICmpInst *IC = dyn_cast<ICmpInst>(this))
 4620 IC->swapOperands();
 4621 else
 4622 cast<FCmpInst>(this)->swapOperands();
 4623}
 4624
// CmpInst::isCommutative -- true only for equality-like predicates.
 4626 if (const ICmpInst *IC = dyn_cast<ICmpInst>(this))
 4627 return IC->isCommutative();
 4628 return cast<FCmpInst>(this)->isCommutative();
 4629}
 4630
// CmpInst::isEquality(P) -- routes to the ICmp or FCmp notion of equality.
 4633 return ICmpInst::isEquality(P);
 4635 return FCmpInst::isEquality(P);
 4636 llvm_unreachable("Unsupported predicate kind");
 4637}
4638
// CmpInst::getInversePredicate -- returns the logical negation of a
// predicate (eq<->ne, ordered<->unordered counterparts, etc.).
// NOTE(review): the extraction dropped the signature line (original 4639).
 4640 switch (pred) {
 4641 default: llvm_unreachable("Unknown cmp predicate!");
 4642 case ICMP_EQ: return ICMP_NE;
 4643 case ICMP_NE: return ICMP_EQ;
 4644 case ICMP_UGT: return ICMP_ULE;
 4645 case ICMP_ULT: return ICMP_UGE;
 4646 case ICMP_UGE: return ICMP_ULT;
 4647 case ICMP_ULE: return ICMP_UGT;
 4648 case ICMP_SGT: return ICMP_SLE;
 4649 case ICMP_SLT: return ICMP_SGE;
 4650 case ICMP_SGE: return ICMP_SLT;
 4651 case ICMP_SLE: return ICMP_SGT;
 4652
 4653 case FCMP_OEQ: return FCMP_UNE;
 4654 case FCMP_ONE: return FCMP_UEQ;
 4655 case FCMP_OGT: return FCMP_ULE;
 4656 case FCMP_OLT: return FCMP_UGE;
 4657 case FCMP_OGE: return FCMP_ULT;
 4658 case FCMP_OLE: return FCMP_UGT;
 4659 case FCMP_UEQ: return FCMP_ONE;
 4660 case FCMP_UNE: return FCMP_OEQ;
 4661 case FCMP_UGT: return FCMP_OLE;
 4662 case FCMP_ULT: return FCMP_OGE;
 4663 case FCMP_UGE: return FCMP_OLT;
 4664 case FCMP_ULE: return FCMP_OGT;
 4665 case FCMP_ORD: return FCMP_UNO;
 4666 case FCMP_UNO: return FCMP_ORD;
 4667 case FCMP_TRUE: return FCMP_FALSE;
 4668 case FCMP_FALSE: return FCMP_TRUE;
 4669 }
 4670}
 4671
// CmpInst::getPredicateName -- human-readable mnemonic used by the printer.
// NOTE(review): signature line (original 4672) dropped by the extraction.
 4673 switch (Pred) {
 4674 default: return "unknown";
 4675 case FCmpInst::FCMP_FALSE: return "false";
 4676 case FCmpInst::FCMP_OEQ: return "oeq";
 4677 case FCmpInst::FCMP_OGT: return "ogt";
 4678 case FCmpInst::FCMP_OGE: return "oge";
 4679 case FCmpInst::FCMP_OLT: return "olt";
 4680 case FCmpInst::FCMP_OLE: return "ole";
 4681 case FCmpInst::FCMP_ONE: return "one";
 4682 case FCmpInst::FCMP_ORD: return "ord";
 4683 case FCmpInst::FCMP_UNO: return "uno";
 4684 case FCmpInst::FCMP_UEQ: return "ueq";
 4685 case FCmpInst::FCMP_UGT: return "ugt";
 4686 case FCmpInst::FCMP_UGE: return "uge";
 4687 case FCmpInst::FCMP_ULT: return "ult";
 4688 case FCmpInst::FCMP_ULE: return "ule";
 4689 case FCmpInst::FCMP_UNE: return "une";
 4690 case FCmpInst::FCMP_TRUE: return "true";
 4691 case ICmpInst::ICMP_EQ: return "eq";
 4692 case ICmpInst::ICMP_NE: return "ne";
 4693 case ICmpInst::ICMP_SGT: return "sgt";
 4694 case ICmpInst::ICMP_SGE: return "sge";
 4695 case ICmpInst::ICMP_SLT: return "slt";
 4696 case ICmpInst::ICMP_SLE: return "sle";
 4697 case ICmpInst::ICMP_UGT: return "ugt";
 4698 case ICmpInst::ICMP_UGE: return "uge";
 4699 case ICmpInst::ICMP_ULT: return "ult";
 4700 case ICmpInst::ICMP_ULE: return "ule";
 4701 }
 4702}
 4703
// raw_ostream operator<< for predicates; body line 4705 (the streaming of
// getPredicateName) was dropped along with the signature (4704) -- verify.
 4706 return OS;
 4707}
4708
// ICmpInst::getSignedPredicate -- maps unsigned icmp predicates to their
// signed analogues; signed and equality predicates pass through.
// NOTE(review): signature lines (original 4709, 4722, 4735) of these three
// helpers were dropped by the extraction.
 4710 switch (pred) {
 4711 default: llvm_unreachable("Unknown icmp predicate!");
 4712 case ICMP_EQ: case ICMP_NE:
 4713 case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
 4714 return pred;
 4715 case ICMP_UGT: return ICMP_SGT;
 4716 case ICMP_ULT: return ICMP_SLT;
 4717 case ICMP_UGE: return ICMP_SGE;
 4718 case ICMP_ULE: return ICMP_SLE;
 4719 }
 4720}
 4721
// ICmpInst::getUnsignedPredicate -- the inverse mapping: signed -> unsigned.
 4723 switch (pred) {
 4724 default: llvm_unreachable("Unknown icmp predicate!");
 4725 case ICMP_EQ: case ICMP_NE:
 4726 case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
 4727 return pred;
 4728 case ICMP_SGT: return ICMP_UGT;
 4729 case ICMP_SLT: return ICMP_ULT;
 4730 case ICMP_SGE: return ICMP_UGE;
 4731 case ICMP_SLE: return ICMP_ULE;
 4732 }
 4733}
 4734
// CmpInst::getSwappedPredicate -- the predicate that yields the same result
// with the operands exchanged (gt<->lt, ge<->le; symmetric ones unchanged).
 4736 switch (pred) {
 4737 default: llvm_unreachable("Unknown cmp predicate!");
 4738 case ICMP_EQ: case ICMP_NE:
 4739 return pred;
 4740 case ICMP_SGT: return ICMP_SLT;
 4741 case ICMP_SLT: return ICMP_SGT;
 4742 case ICMP_SGE: return ICMP_SLE;
 4743 case ICMP_SLE: return ICMP_SGE;
 4744 case ICMP_UGT: return ICMP_ULT;
 4745 case ICMP_ULT: return ICMP_UGT;
 4746 case ICMP_UGE: return ICMP_ULE;
 4747 case ICMP_ULE: return ICMP_UGE;
 4748
 4749 case FCMP_FALSE: case FCMP_TRUE:
 4750 case FCMP_OEQ: case FCMP_ONE:
 4751 case FCMP_UEQ: case FCMP_UNE:
 4752 case FCMP_ORD: case FCMP_UNO:
 4753 return pred;
 4754 case FCMP_OGT: return FCMP_OLT;
 4755 case FCMP_OLT: return FCMP_OGT;
 4756 case FCMP_OGE: return FCMP_OLE;
 4757 case FCMP_OLE: return FCMP_OGE;
 4758 case FCMP_UGT: return FCMP_ULT;
 4759 case FCMP_ULT: return FCMP_UGT;
 4760 case FCMP_UGE: return FCMP_ULE;
 4761 case FCMP_ULE: return FCMP_UGE;
 4762 }
 4763}
4764
// CmpInst::isNonStrictPredicate -- true for the >=/<= (inclusive) forms.
// NOTE(review): signature lines (original 4765, 4781, 4797, 4820) of these
// four predicate-strictness helpers were dropped by the extraction.
 4766 switch (pred) {
 4767 case ICMP_SGE:
 4768 case ICMP_SLE:
 4769 case ICMP_UGE:
 4770 case ICMP_ULE:
 4771 case FCMP_OGE:
 4772 case FCMP_OLE:
 4773 case FCMP_UGE:
 4774 case FCMP_ULE:
 4775 return true;
 4776 default:
 4777 return false;
 4778 }
 4779}
 4780
// CmpInst::isStrictPredicate -- true for the strict >/< forms.
 4782 switch (pred) {
 4783 case ICMP_SGT:
 4784 case ICMP_SLT:
 4785 case ICMP_UGT:
 4786 case ICMP_ULT:
 4787 case FCMP_OGT:
 4788 case FCMP_OLT:
 4789 case FCMP_UGT:
 4790 case FCMP_ULT:
 4791 return true;
 4792 default:
 4793 return false;
 4794 }
 4795}
 4796
// CmpInst::getStrictPredicate -- >= becomes >, <= becomes <; anything else
// is returned unchanged.
 4798 switch (pred) {
 4799 case ICMP_SGE:
 4800 return ICMP_SGT;
 4801 case ICMP_SLE:
 4802 return ICMP_SLT;
 4803 case ICMP_UGE:
 4804 return ICMP_UGT;
 4805 case ICMP_ULE:
 4806 return ICMP_ULT;
 4807 case FCMP_OGE:
 4808 return FCMP_OGT;
 4809 case FCMP_OLE:
 4810 return FCMP_OLT;
 4811 case FCMP_UGE:
 4812 return FCMP_UGT;
 4813 case FCMP_ULE:
 4814 return FCMP_ULT;
 4815 default:
 4816 return pred;
 4817 }
 4818}
 4819
// CmpInst::getNonStrictPredicate -- > becomes >=, < becomes <=; anything
// else is returned unchanged.
 4821 switch (pred) {
 4822 case ICMP_SGT:
 4823 return ICMP_SGE;
 4824 case ICMP_SLT:
 4825 return ICMP_SLE;
 4826 case ICMP_UGT:
 4827 return ICMP_UGE;
 4828 case ICMP_ULT:
 4829 return ICMP_ULE;
 4830 case FCMP_OGT:
 4831 return FCMP_OGE;
 4832 case FCMP_OLT:
 4833 return FCMP_OLE;
 4834 case FCMP_UGT:
 4835 return FCMP_UGE;
 4836 case FCMP_ULT:
 4837 return FCMP_ULE;
 4838 default:
 4839 return pred;
 4840 }
 4841}
4842
// CmpInst::getFlippedStrictnessPredicate -- toggles strict <-> non-strict
// on a relational predicate.
// NOTE(review): the extraction dropped the signature line (original 4843)
// and lines 4846-4848 (presumably the isStrictPredicate branch returning
// getNonStrictPredicate) -- only the non-strict -> strict arm is visible.
 4844 assert(CmpInst::isRelational(pred) && "Call only with relational predicate!");
 4845
 4849 return getStrictPredicate(pred);
 4850
 4851 llvm_unreachable("Unknown predicate!");
 4852}
 4853
// Unsigned -> signed predicate flip (signature line 4854 dropped).
 4856 assert(CmpInst::isUnsigned(pred) && "Call only with unsigned predicates!");
 4857
 4857 switch (pred) {
 4858 default:
 4859 llvm_unreachable("Unknown predicate!");
 4860 case CmpInst::ICMP_ULT:
 4861 return CmpInst::ICMP_SLT;
 4862 case CmpInst::ICMP_ULE:
 4863 return CmpInst::ICMP_SLE;
 4864 case CmpInst::ICMP_UGT:
 4865 return CmpInst::ICMP_SGT;
 4866 case CmpInst::ICMP_UGE:
 4867 return CmpInst::ICMP_SGE;
 4868 }
 4869}
 4870
// Signed -> unsigned predicate flip (signature line 4871 dropped).
 4872 assert(CmpInst::isSigned(pred) && "Call only with signed predicates!");
 4873
 4874 switch (pred) {
 4875 default:
 4876 llvm_unreachable("Unknown predicate!");
 4877 case CmpInst::ICMP_SLT:
 4878 return CmpInst::ICMP_ULT;
 4879 case CmpInst::ICMP_SLE:
 4880 return CmpInst::ICMP_ULE;
 4881 case CmpInst::ICMP_SGT:
 4882 return CmpInst::ICMP_UGT;
 4883 case CmpInst::ICMP_SGE:
 4884 return CmpInst::ICMP_UGE;
 4885 }
 4886}
 4887
// CmpInst::isTrueWhenEqual -- predicates satisfied by equal operands.
// NOTE(review): signature (4888) and some case-label lines (4891) dropped.
 4889 switch (predicate) {
 4890 default: return false;
 4892 case ICmpInst::ICMP_UGE: return true;
 4893 }
 4894}
 4895
// CmpInst::isFalseWhenEqual -- predicates falsified by equal operands.
// NOTE(review): signature (4896) and some case-label lines (4899) dropped.
 4897 switch (predicate) {
 4898 default: return false;
 4900 case ICmpInst::ICMP_SGE: return true;
 4901 }
 4902}
4903
4904bool ICmpInst::compare(const APInt &LHS, const APInt &RHS,
4905 ICmpInst::Predicate Pred) {
4906 assert(ICmpInst::isIntPredicate(Pred) && "Only for integer predicates!");
4907 switch (Pred) {
4909 return LHS.eq(RHS);
4911 return LHS.ne(RHS);
4913 return LHS.ugt(RHS);
4915 return LHS.uge(RHS);
4917 return LHS.ult(RHS);
4919 return LHS.ule(RHS);
4921 return LHS.sgt(RHS);
4923 return LHS.sge(RHS);
4925 return LHS.slt(RHS);
4927 return LHS.sle(RHS);
4928 default:
4929 llvm_unreachable("Unexpected non-integer predicate.");
4930 };
4931}
4932
4933bool FCmpInst::compare(const APFloat &LHS, const APFloat &RHS,
4934 FCmpInst::Predicate Pred) {
4935 APFloat::cmpResult R = LHS.compare(RHS);
4936 switch (Pred) {
4937 default:
4938 llvm_unreachable("Invalid FCmp Predicate");
4940 return false;
4942 return true;
4943 case FCmpInst::FCMP_UNO:
4944 return R == APFloat::cmpUnordered;
4945 case FCmpInst::FCMP_ORD:
4946 return R != APFloat::cmpUnordered;
4947 case FCmpInst::FCMP_UEQ:
4948 return R == APFloat::cmpUnordered || R == APFloat::cmpEqual;
4949 case FCmpInst::FCMP_OEQ:
4950 return R == APFloat::cmpEqual;
4951 case FCmpInst::FCMP_UNE:
4952 return R != APFloat::cmpEqual;
4953 case FCmpInst::FCMP_ONE:
4955 case FCmpInst::FCMP_ULT:
4956 return R == APFloat::cmpUnordered || R == APFloat::cmpLessThan;
4957 case FCmpInst::FCMP_OLT:
4958 return R == APFloat::cmpLessThan;
4959 case FCmpInst::FCMP_UGT:
4961 case FCmpInst::FCMP_OGT:
4962 return R == APFloat::cmpGreaterThan;
4963 case FCmpInst::FCMP_ULE:
4964 return R != APFloat::cmpGreaterThan;
4965 case FCmpInst::FCMP_OLE:
4966 return R == APFloat::cmpLessThan || R == APFloat::cmpEqual;
4967 case FCmpInst::FCMP_UGE:
4968 return R != APFloat::cmpLessThan;
4969 case FCmpInst::FCMP_OGE:
4970 return R == APFloat::cmpGreaterThan || R == APFloat::cmpEqual;
4971 }
4972}
4973
4976 "Call only with non-equality predicates!");
4977
4978 if (isSigned(pred))
4979 return getUnsignedPredicate(pred);
4980 if (isUnsigned(pred))
4981 return getSignedPredicate(pred);
4982
4983 llvm_unreachable("Unknown predicate!");
4984}
4985
4987 switch (predicate) {
4988 default: return false;
4991 case FCmpInst::FCMP_ORD: return true;
4992 }
4993}
4994
4996 switch (predicate) {
4997 default: return false;
5000 case FCmpInst::FCMP_UNO: return true;
5001 }
5002}
5003
5005 switch(predicate) {
5006 default: return false;
5007 case ICMP_EQ: case ICMP_UGE: case ICMP_ULE: case ICMP_SGE: case ICMP_SLE:
5008 case FCMP_TRUE: case FCMP_UEQ: case FCMP_UGE: case FCMP_ULE: return true;
5009 }
5010}
5011
5013 switch(predicate) {
5014 case ICMP_NE: case ICMP_UGT: case ICMP_ULT: case ICMP_SGT: case ICMP_SLT:
5015 case FCMP_FALSE: case FCMP_ONE: case FCMP_OGT: case FCMP_OLT: return true;
5016 default: return false;
5017 }
5018}
5019
5021 // If the predicates match, then we know the first condition implies the
5022 // second is true.
5023 if (Pred1 == Pred2)
5024 return true;
5025
5026 switch (Pred1) {
5027 default:
5028 break;
5029 case ICMP_EQ:
5030 // A == B implies A >=u B, A <=u B, A >=s B, and A <=s B are true.
5031 return Pred2 == ICMP_UGE || Pred2 == ICMP_ULE || Pred2 == ICMP_SGE ||
5032 Pred2 == ICMP_SLE;
5033 case ICMP_UGT: // A >u B implies A != B and A >=u B are true.
5034 return Pred2 == ICMP_NE || Pred2 == ICMP_UGE;
5035 case ICMP_ULT: // A <u B implies A != B and A <=u B are true.
5036 return Pred2 == ICMP_NE || Pred2 == ICMP_ULE;
5037 case ICMP_SGT: // A >s B implies A != B and A >=s B are true.
5038 return Pred2 == ICMP_NE || Pred2 == ICMP_SGE;
5039 case ICMP_SLT: // A <s B implies A != B and A <=s B are true.
5040 return Pred2 == ICMP_NE || Pred2 == ICMP_SLE;
5041 }
5042 return false;
5043}
5044
5046 return isImpliedTrueByMatchingCmp(Pred1, getInversePredicate(Pred2));
5047}
5048
5049//===----------------------------------------------------------------------===//
5050// SwitchInst Implementation
5051//===----------------------------------------------------------------------===//
5052
5053void SwitchInst::init(Value *Value, BasicBlock *Default, unsigned NumReserved) {
5054 assert(Value && Default && NumReserved);
5055 ReservedSpace = NumReserved;
5057 allocHungoffUses(ReservedSpace);
5058
5059 Op<0>() = Value;
5060 Op<1>() = Default;
5061}
5062
5063/// SwitchInst ctor - Create a new switch instruction, specifying a value to
5064/// switch on and a default destination. The number of additional cases can
5065/// be specified here to make memory allocation more efficient. This
5066/// constructor can also autoinsert before another instruction.
5067SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
5068 BasicBlock::iterator InsertBefore)
5069 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
5070 nullptr, 0, InsertBefore) {
5071 init(Value, Default, 2 + NumCases * 2);
5072}
5073
5074/// SwitchInst ctor - Create a new switch instruction, specifying a value to
5075/// switch on and a default destination. The number of additional cases can
5076/// be specified here to make memory allocation more efficient. This
5077/// constructor can also autoinsert before another instruction.
5078SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
5079 Instruction *InsertBefore)
5080 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
5081 nullptr, 0, InsertBefore) {
5082 init(Value, Default, 2+NumCases*2);
5083}
5084
5085/// SwitchInst ctor - Create a new switch instruction, specifying a value to
5086/// switch on and a default destination. The number of additional cases can
5087/// be specified here to make memory allocation more efficient. This
5088/// constructor also autoinserts at the end of the specified BasicBlock.
5089SwitchInst::SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
5090 BasicBlock *InsertAtEnd)
5091 : Instruction(Type::getVoidTy(Value->getContext()), Instruction::Switch,
5092 nullptr, 0, InsertAtEnd) {
5093 init(Value, Default, 2+NumCases*2);
5094}
5095
5096SwitchInst::SwitchInst(const SwitchInst &SI)
5097 : Instruction(SI.getType(), Instruction::Switch, nullptr, 0) {
5098 init(SI.getCondition(), SI.getDefaultDest(), SI.getNumOperands());
5099 setNumHungOffUseOperands(SI.getNumOperands());
5100 Use *OL = getOperandList();
5101 const Use *InOL = SI.getOperandList();
5102 for (unsigned i = 2, E = SI.getNumOperands(); i != E; i += 2) {
5103 OL[i] = InOL[i];
5104 OL[i+1] = InOL[i+1];
5105 }
5106 SubclassOptionalData = SI.SubclassOptionalData;
5107}
5108
5109/// addCase - Add an entry to the switch instruction...
5110///
5112 unsigned NewCaseIdx = getNumCases();
5113 unsigned OpNo = getNumOperands();
5114 if (OpNo+2 > ReservedSpace)
5115 growOperands(); // Get more space!
5116 // Initialize some new operands.
5117 assert(OpNo+1 < ReservedSpace && "Growing didn't work!");
5119 CaseHandle Case(this, NewCaseIdx);
5120 Case.setValue(OnVal);
5121 Case.setSuccessor(Dest);
5122}
5123
5124/// removeCase - This method removes the specified case and its successor
5125/// from the switch instruction.
5127 unsigned idx = I->getCaseIndex();
5128
5129 assert(2 + idx*2 < getNumOperands() && "Case index out of range!!!");
5130
5131 unsigned NumOps = getNumOperands();
5132 Use *OL = getOperandList();
5133
5134 // Overwrite this case with the end of the list.
5135 if (2 + (idx + 1) * 2 != NumOps) {
5136 OL[2 + idx * 2] = OL[NumOps - 2];
5137 OL[2 + idx * 2 + 1] = OL[NumOps - 1];
5138 }
5139
5140 // Nuke the last value.
5141 OL[NumOps-2].set(nullptr);
5142 OL[NumOps-2+1].set(nullptr);
5143 setNumHungOffUseOperands(NumOps-2);
5144
5145 return CaseIt(this, idx);
5146}
5147
5148/// growOperands - grow operands - This grows the operand list in response
5149/// to a push_back style of operation. This grows the number of ops by 3 times.
5150///
5151void SwitchInst::growOperands() {
5152 unsigned e = getNumOperands();
5153 unsigned NumOps = e*3;
5154
5155 ReservedSpace = NumOps;
5156 growHungoffUses(ReservedSpace);
5157}
5158
5160 assert(Changed && "called only if metadata has changed");
5161
5162 if (!Weights)
5163 return nullptr;
5164
5165 assert(SI.getNumSuccessors() == Weights->size() &&
5166 "num of prof branch_weights must accord with num of successors");
5167
5168 bool AllZeroes = all_of(*Weights, [](uint32_t W) { return W == 0; });
5169
5170 if (AllZeroes || Weights->size() < 2)
5171 return nullptr;
5172
5173 return MDBuilder(SI.getParent()->getContext()).createBranchWeights(*Weights);
5174}
5175
5177 MDNode *ProfileData = getBranchWeightMDNode(SI);
5178 if (!ProfileData)
5179 return;
5180
5181 if (ProfileData->getNumOperands() != SI.getNumSuccessors() + 1) {
5182 llvm_unreachable("number of prof branch_weights metadata operands does "
5183 "not correspond to number of succesors");
5184 }
5185
5187 if (!extractBranchWeights(ProfileData, Weights))
5188 return;
5189 this->Weights = std::move(Weights);
5190}
5191
5194 if (Weights) {
5195 assert(SI.getNumSuccessors() == Weights->size() &&
5196 "num of prof branch_weights must accord with num of successors");
5197 Changed = true;
5198 // Copy the last case to the place of the removed one and shrink.
5199 // This is tightly coupled with the way SwitchInst::removeCase() removes
5200 // the cases in SwitchInst::removeCase(CaseIt).
5201 (*Weights)[I->getCaseIndex() + 1] = Weights->back();
5202 Weights->pop_back();
5203 }
5204 return SI.removeCase(I);
5205}
5206
5208 ConstantInt *OnVal, BasicBlock *Dest,
5210 SI.addCase(OnVal, Dest);
5211
5212 if (!Weights && W && *W) {
5213 Changed = true;
5214 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
5215 (*Weights)[SI.getNumSuccessors() - 1] = *W;
5216 } else if (Weights) {
5217 Changed = true;
5218 Weights->push_back(W.value_or(0));
5219 }
5220 if (Weights)
5221 assert(SI.getNumSuccessors() == Weights->size() &&
5222 "num of prof branch_weights must accord with num of successors");
5223}
5224
5227 // Instruction is erased. Mark as unchanged to not touch it in the destructor.
5228 Changed = false;
5229 if (Weights)
5230 Weights->resize(0);
5231 return SI.eraseFromParent();
5232}
5233
5236 if (!Weights)
5237 return std::nullopt;
5238 return (*Weights)[idx];
5239}
5240
5243 if (!W)
5244 return;
5245
5246 if (!Weights && *W)
5247 Weights = SmallVector<uint32_t, 8>(SI.getNumSuccessors(), 0);
5248
5249 if (Weights) {
5250 auto &OldW = (*Weights)[idx];
5251 if (*W != OldW) {
5252 Changed = true;
5253 OldW = *W;
5254 }
5255 }
5256}
5257
5260 unsigned idx) {
5261 if (MDNode *ProfileData = getBranchWeightMDNode(SI))
5262 if (ProfileData->getNumOperands() == SI.getNumSuccessors() + 1)
5263 return mdconst::extract<ConstantInt>(ProfileData->getOperand(idx + 1))
5264 ->getValue()
5265 .getZExtValue();
5266
5267 return std::nullopt;
5268}
5269
5270//===----------------------------------------------------------------------===//
5271// IndirectBrInst Implementation
5272//===----------------------------------------------------------------------===//
5273
5274void IndirectBrInst::init(Value *Address, unsigned NumDests) {
5275 assert(Address && Address->getType()->isPointerTy() &&
5276 "Address of indirectbr must be a pointer");
5277 ReservedSpace = 1+NumDests;
5279 allocHungoffUses(ReservedSpace);
5280
5281 Op<0>() = Address;
5282}
5283
5284
5285/// growOperands - grow operands - This grows the operand list in response
5286/// to a push_back style of operation. This grows the number of ops by 2 times.
5287///
5288void IndirectBrInst::growOperands() {
5289 unsigned e = getNumOperands();
5290 unsigned NumOps = e*2;
5291
5292 ReservedSpace = NumOps;
5293 growHungoffUses(ReservedSpace);
5294}
5295
5296IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
5297 BasicBlock::iterator InsertBefore)
5298 : Instruction(Type::getVoidTy(Address->getContext()),
5299 Instruction::IndirectBr, nullptr, 0, InsertBefore) {
5300 init(Address, NumCases);
5301}
5302
5303IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
5304 Instruction *InsertBefore)
5305 : Instruction(Type::getVoidTy(Address->getContext()),
5306 Instruction::IndirectBr, nullptr, 0, InsertBefore) {
5307 init(Address, NumCases);
5308}
5309
5310IndirectBrInst::IndirectBrInst(Value *Address, unsigned NumCases,
5311 BasicBlock *InsertAtEnd)
5312 : Instruction(Type::getVoidTy(Address->getContext()),
5313 Instruction::IndirectBr, nullptr, 0, InsertAtEnd) {
5314 init(Address, NumCases);
5315}
5316
5317IndirectBrInst::IndirectBrInst(const IndirectBrInst &IBI)
5318 : Instruction(Type::getVoidTy(IBI.getContext()), Instruction::IndirectBr,
5319 nullptr, IBI.getNumOperands()) {
5320 allocHungoffUses(IBI.getNumOperands());
5321 Use *OL = getOperandList();
5322 const Use *InOL = IBI.getOperandList();
5323 for (unsigned i = 0, E = IBI.getNumOperands(); i != E; ++i)
5324 OL[i] = InOL[i];
5325 SubclassOptionalData = IBI.SubclassOptionalData;
5326}
5327
5328/// addDestination - Add a destination.
5329///
5331 unsigned OpNo = getNumOperands();
5332 if (OpNo+1 > ReservedSpace)
5333 growOperands(); // Get more space!
5334 // Initialize some new operands.
5335 assert(OpNo < ReservedSpace && "Growing didn't work!");
5337 getOperandList()[OpNo] = DestBB;
5338}
5339
5340/// removeDestination - This method removes the specified successor from the
5341/// indirectbr instruction.
5343 assert(idx < getNumOperands()-1 && "Successor index out of range!");
5344
5345 unsigned NumOps = getNumOperands();
5346 Use *OL = getOperandList();
5347
5348 // Replace this value with the last one.
5349 OL[idx+1] = OL[NumOps-1];
5350
5351 // Nuke the last value.
5352 OL[NumOps-1].set(nullptr);
5353 setNumHungOffUseOperands(NumOps-1);
5354}
5355
5356//===----------------------------------------------------------------------===//
5357// FreezeInst Implementation
5358//===----------------------------------------------------------------------===//
5359
5361 BasicBlock::iterator InsertBefore)
5362 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
5363 setName(Name);
5364}
5365
5367 const Twine &Name, Instruction *InsertBefore)
5368 : UnaryInstruction(S->getType(), Freeze, S, InsertBefore) {
5369 setName(Name);
5370}
5371
5373 const Twine &Name, BasicBlock *InsertAtEnd)
5374 : UnaryInstruction(S->getType(), Freeze, S, InsertAtEnd) {
5375 setName(Name);
5376}
5377
5378//===----------------------------------------------------------------------===//
5379// cloneImpl() implementations
5380//===----------------------------------------------------------------------===//
5381
5382// Define these methods here so vtables don't get emitted into every translation
5383// unit that uses these classes.
5384
5386 return new (getNumOperands()) GetElementPtrInst(*this);
5387}
5388
5390 return Create(getOpcode(), Op<0>());
5391}
5392
5394 return Create(getOpcode(), Op<0>(), Op<1>());
5395}
5396
5398 return new FCmpInst(getPredicate(), Op<0>(), Op<1>());
5399}
5400
5402 return new ICmpInst(getPredicate(), Op<0>(), Op<1>());
5403}
5404
5406 return new ExtractValueInst(*this);
5407}
5408
5410 return new InsertValueInst(*this);
5411}
5412
5415 getOperand(0), getAlign());
5416 Result->setUsedWithInAlloca(isUsedWithInAlloca());
5417 Result->setSwiftError(isSwiftError());
5418 return Result;
5419}
5420
5422 return new LoadInst(getType(), getOperand(0), Twine(), isVolatile(),
5424}
5425
5427 return new StoreInst(getOperand(0), getOperand(1), isVolatile(), getAlign(),
5429}
5430
5435 Result->setVolatile(isVolatile());
5436 Result->setWeak(isWeak());
5437 return Result;
5438}
5439
5441 AtomicRMWInst *Result =
5444 Result->setVolatile(isVolatile());
5445 return Result;
5446}
5447
5449 return new FenceInst(getContext(), getOrdering(), getSyncScopeID());
5450}
5451
5453 return new TruncInst(getOperand(0), getType());
5454}
5455
5457 return new ZExtInst(getOperand(0), getType());
5458}
5459
5461 return new SExtInst(getOperand(0), getType());
5462}
5463
5465 return new FPTruncInst(getOperand(0), getType());
5466}
5467
5469 return new FPExtInst(getOperand(0), getType());
5470}
5471
5473 return new UIToFPInst(getOperand(0), getType());
5474}
5475
5477 return new SIToFPInst(getOperand(0), getType());
5478}
5479
5481 return new FPToUIInst(getOperand(0), getType());
5482}
5483
5485 return new FPToSIInst(getOperand(0), getType());
5486}
5487
5489 return new PtrToIntInst(getOperand(0), getType());
5490}
5491
5493 return new IntToPtrInst(getOperand(0), getType());
5494}
5495
5497 return new BitCastInst(getOperand(0), getType());
5498}
5499
5501 return new AddrSpaceCastInst(getOperand(0), getType());
5502}
5503
5505 if (hasOperandBundles()) {
5506 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
5507 return new(getNumOperands(), DescriptorBytes) CallInst(*this);
5508 }
5509 return new(getNumOperands()) CallInst(*this);
5510}
5511
5514}
5515
5517 return new VAArgInst(getOperand(0), getType());
5518}
5519
5522}
5523
5526}
5527
5530}
5531
5532PHINode *PHINode::cloneImpl() const { return new PHINode(*this); }
5533
5535 return new LandingPadInst(*this);
5536}
5537
5539 return new(getNumOperands()) ReturnInst(*this);
5540}
5541
5543 return new(getNumOperands()) BranchInst(*this);
5544}
5545
5546SwitchInst *SwitchInst::cloneImpl() const { return new SwitchInst(*this); }
5547
5549 return new IndirectBrInst(*this);
5550}
5551
5553 if (hasOperandBundles()) {
5554 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
5555 return new(getNumOperands(), DescriptorBytes) InvokeInst(*this);
5556 }
5557 return new(getNumOperands()) InvokeInst(*this);
5558}
5559
5561 if (hasOperandBundles()) {
5562 unsigned DescriptorBytes = getNumOperandBundles() * sizeof(BundleOpInfo);
5563 return new (getNumOperands(), DescriptorBytes) CallBrInst(*this);
5564 }
5565 return new (getNumOperands()) CallBrInst(*this);
5566}
5567
5568ResumeInst *ResumeInst::cloneImpl() const { return new (1) ResumeInst(*this); }
5569
5571 return new (getNumOperands()) CleanupReturnInst(*this);
5572}
5573
5575 return new (getNumOperands()) CatchReturnInst(*this);
5576}
5577
5579 return new CatchSwitchInst(*this);
5580}
5581
5583 return new (getNumOperands()) FuncletPadInst(*this);
5584}
5585
5588 return new UnreachableInst(Context);
5589}
5590
5592 return new FreezeInst(getOperand(0));
5593}
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static const LLT S1
Rewrite undef for PHI
Atomic ordering constants.
This file contains the simple types necessary to represent the attributes associated with functions a...
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
return RetTy
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
#define LLVM_DEBUG(X)
Definition: Debug.h:101
std::string Name
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
static bool isSigned(unsigned int Opcode)
#define op(i)
hexagon gen pred
static Value * createPlaceholderForShuffleVector(Value *V)
static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB)
static cl::opt< bool > DisableI2pP2iOpt("disable-i2p-p2i-opt", cl::init(false), cl::desc("Disables inttoptr/ptrtoint roundtrip optimization"))
static int matchShuffleAsBitRotate(ArrayRef< int > Mask, int NumSubElts)
Try to lower a vector shuffle as a bit rotation.
static Type * getIndexedTypeInternal(Type *Ty, ArrayRef< IndexTy > IdxList)
static bool isReplicationMaskWithParams(ArrayRef< int > Mask, int ReplicationFactor, int VF)
static bool isIdentityMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static bool isSingleSourceMaskImpl(ArrayRef< int > Mask, int NumOpElts)
static Value * getAISize(LLVMContext &Context, Value *Amt)
static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB)
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file contains the declarations for metadata subclasses.
Module.h This file contains the declarations for the Module class.
IntegerType * Int32Ty
LLVMContext & Context
#define P(N)
PowerPC Reduce CR logical Operation
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
raw_pwrite_stream & OS
This file implements the SmallBitVector class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
@ Struct
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
Definition: VPlanSLP.cpp:191
Value * RHS
Value * LHS
float convertToFloat() const
Converts this APFloat to host float value.
Definition: APFloat.cpp:5268
Class for arbitrary precision integers.
Definition: APInt.h:76
void setBit(unsigned BitPosition)
Set the given bit to 1 whose position is given as "bitPosition".
Definition: APInt.h:1308
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition: APInt.h:358
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition: APInt.h:1589
unsigned countl_zero() const
The APInt version of std::countl_zero.
Definition: APInt.h:1548
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition: APInt.h:178
This class represents a conversion between pointers from one address space to another.
AddrSpaceCastInst * cloneImpl() const
Clone an identical AddrSpaceCastInst.
AddrSpaceCastInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
an instruction to allocate memory on the stack
Definition: Instructions.h:59
std::optional< TypeSize > getAllocationSizeInBits(const DataLayout &DL) const
Get allocation size in bits.
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
Definition: Instructions.h:157
bool isStaticAlloca() const
Return true if this alloca is in the entry block of the function and is a constant size.
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:132
AllocaInst * cloneImpl() const
AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, const Twine &Name, BasicBlock::iterator InsertBefore)
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
Definition: Instructions.h:125
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
Definition: Instructions.h:147
unsigned getAddressSpace() const
Return the address space for the allocation.
Definition: Instructions.h:112
std::optional< TypeSize > getAllocationSize(const DataLayout &DL) const
Get allocation size in bytes.
bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
void setAlignment(Align Align)
Definition: Instructions.h:136
const Value * getArraySize() const
Get the number of elements allocated.
Definition: Instructions.h:103
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:154
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
iterator begin() const
Definition: ArrayRef.h:153
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition: ArrayRef.h:195
Class to represent array types.
Definition: DerivedTypes.h:371
An instruction that atomically checks whether a specified value is in a memory location,...
Definition: Instructions.h:539
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:669
bool isVolatile() const
Return true if this is a cmpxchg from a volatile memory location.
Definition: Instructions.h:599
void setFailureOrdering(AtomicOrdering Ordering)
Sets the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:643
AtomicOrdering getFailureOrdering() const
Returns the failure ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:638
void setSuccessOrdering(AtomicOrdering Ordering)
Sets the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:631
AtomicCmpXchgInst * cloneImpl() const
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:588
bool isWeak() const
Return true if this cmpxchg may spuriously fail.
Definition: Instructions.h:606
void setAlignment(Align Align)
Definition: Instructions.h:592
AtomicOrdering getSuccessOrdering() const
Returns the success ordering constraint of this cmpxchg instruction.
Definition: Instructions.h:626
AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this cmpxchg instruction.
Definition: Instructions.h:664
an instruction that atomically reads a memory location, combines it with another value,...
Definition: Instructions.h:748
Align getAlign() const
Return the alignment of the memory that is being allocated by the instruction.
Definition: Instructions.h:867
AtomicRMWInst * cloneImpl() const
bool isVolatile() const
Return true if this is a RMW on a volatile memory location.
Definition: Instructions.h:877
BinOp
This enumeration lists the possible modifications atomicrmw can make.
Definition: Instructions.h:760
@ Add
*p = old + v
Definition: Instructions.h:764
@ FAdd
*p = old + v
Definition: Instructions.h:785
@ Min
*p = old <signed v ? old : v
Definition: Instructions.h:778
@ Or
*p = old | v
Definition: Instructions.h:772
@ Sub
*p = old - v
Definition: Instructions.h:766
@ And
*p = old & v
Definition: Instructions.h:768
@ Xor
*p = old ^ v
Definition: Instructions.h:774
@ FSub
*p = old - v
Definition: Instructions.h:788
@ UIncWrap
Increment one up to a maximum value.
Definition: Instructions.h:800
@ Max
*p = old >signed v ? old : v
Definition: Instructions.h:776
@ UMin
*p = old <unsigned v ? old : v
Definition: Instructions.h:782
@ FMin
*p = minnum(old, v) minnum matches the behavior of llvm.minnum.
Definition: Instructions.h:796
@ UMax
*p = old >unsigned v ? old : v
Definition: Instructions.h:780
@ FMax
*p = maxnum(old, v) maxnum matches the behavior of llvm.maxnum.
Definition: Instructions.h:792
@ UDecWrap
Decrement one until a minimum value or zero.
Definition: Instructions.h:804
@ Nand
*p = ~(old & v)
Definition: Instructions.h:770
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:906
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this rmw instruction.
Definition: Instructions.h:892
AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, AtomicOrdering Ordering, SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
void setOperation(BinOp Operation)
Definition: Instructions.h:861
BinOp getOperation() const
Definition: Instructions.h:845
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this rmw instruction.
Definition: Instructions.h:901
void setAlignment(Align Align)
Definition: Instructions.h:871
static StringRef getOperationName(BinOp Op)
AtomicOrdering getOrdering() const
Returns the ordering constraint of this rmw instruction.
Definition: Instructions.h:887
bool hasAttrSomewhere(Attribute::AttrKind Kind, unsigned *Index=nullptr) const
Return true if the specified attribute is set for at least one parameter or for the return value.
FPClassTest getRetNoFPClass() const
Get the disallowed floating-point classes of the return value.
bool hasParamAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Return true if the attribute exists for the given argument.
Definition: Attributes.h:788
FPClassTest getParamNoFPClass(unsigned ArgNo) const
Get the disallowed floating-point classes of the argument value.
MemoryEffects getMemoryEffects() const
Returns memory effects of the function.
const ConstantRange & getRange() const
Returns the value of the range attribute.
Definition: Attributes.cpp:447
AttrKind
This enumeration lists the attributes that can be associated with parameters, function results,...
Definition: Attributes.h:85
static Attribute getWithMemoryEffects(LLVMContext &Context, MemoryEffects ME)
Definition: Attributes.cpp:241
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition: Attributes.h:193
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
iterator end()
Definition: BasicBlock.h:443
bool isEntryBlock() const
Return true if this is the entry block of the containing function.
Definition: BasicBlock.cpp:564
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:206
InstListType::iterator iterator
Instruction iterators...
Definition: BasicBlock.h:165
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:289
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
static BinaryOperator * Create(BinaryOps Op, Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a binary instruction, given the opcode and the two operands.
static BinaryOperator * CreateNSWNeg(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
BinaryOps getOpcode() const
Definition: InstrTypes.h:513
static BinaryOperator * CreateNeg(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
Helper functions to construct and inspect unary operations (NEG and NOT) via binary operators SUB and...
bool swapOperands()
Exchange the two operands to this instruction.
static BinaryOperator * CreateNot(Value *Op, const Twine &Name, BasicBlock::iterator InsertBefore)
BinaryOperator * cloneImpl() const
This class represents a no-op cast from one type to another.
BitCastInst * cloneImpl() const
Clone an identical BitCastInst.
BitCastInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
Conditional or Unconditional Branch instruction.
void swapSuccessors()
Swap the successors of this branch instruction.
BranchInst * cloneImpl() const
bool isConditional() const
Value * getCondition() const
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
Definition: InstrTypes.h:1494
FPClassTest getParamNoFPClass(unsigned i) const
Extract a test mask for disallowed floating-point value classes for the parameter.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Definition: InstrTypes.h:1809
BundleOpInfo & getBundleOpInfoForOperand(unsigned OpIdx)
Return the BundleOpInfo for the operand at index OpIdx.
Attribute getRetAttr(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind for the return value.
Definition: InstrTypes.h:1957
void setCallingConv(CallingConv::ID CC)
Definition: InstrTypes.h:1804
FPClassTest getRetNoFPClass() const
Extract a test mask for disallowed floating-point value classes for the return value.
bundle_op_iterator bundle_op_info_begin()
Return the start of the list of BundleOpInfo instances associated with this OperandBundleUser.
Definition: InstrTypes.h:2572
MemoryEffects getMemoryEffects() const
void addFnAttr(Attribute::AttrKind Kind)
Adds the attribute to the function.
Definition: InstrTypes.h:1851
bool doesNotAccessMemory() const
Determine if the call does not access memory.
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
void setOnlyAccessesArgMemory()
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
Definition: InstrTypes.h:2380
void setOnlyAccessesInaccessibleMemOrArgMem()
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Definition: InstrTypes.h:2411
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
Definition: InstrTypes.h:1742
void setDoesNotAccessMemory()
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
Definition: InstrTypes.h:1950
bool onlyAccessesInaccessibleMemory() const
Determine if the function may only access memory that is inaccessible from the IR.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
Definition: InstrTypes.h:2324
CallingConv::ID getCallingConv() const
Definition: InstrTypes.h:1800
bundle_op_iterator bundle_op_info_end()
Return the end of the list of BundleOpInfo instances associated with this OperandBundleUser.
Definition: InstrTypes.h:2589
unsigned getNumSubclassExtraOperandsDynamic() const
Get the number of extra operands for instructions that don't have a fixed number of extra operands.
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
Definition: InstrTypes.h:1662
bool isMustTailCall() const
Tests if this call site must be tail call optimized.
bool isIndirectCall() const
Return true if the callsite is an indirect call.
bool onlyReadsMemory() const
Determine if the call does not access or only reads memory.
iterator_range< bundle_op_iterator > bundle_op_infos()
Return the range [bundle_op_info_begin, bundle_op_info_end).
Definition: InstrTypes.h:2605
void setOnlyReadsMemory()
bool onlyAccessesInaccessibleMemOrArgMem() const
Determine if the function may only access memory that is either inaccessible from the IR or pointed t...
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, BasicBlock::iterator InsertPt)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
static CallBase * removeOperandBundle(CallBase *CB, uint32_t ID, Instruction *InsertPt=nullptr)
Create a clone of CB with operand bundle ID removed.
Value * getCalledOperand() const
Definition: InstrTypes.h:1735
void setOnlyWritesMemory()
op_iterator populateBundleOperandInfos(ArrayRef< OperandBundleDef > Bundles, const unsigned BeginIndex)
Populate the BundleOpInfo instances and the Use& vector from Bundles.
AttributeList Attrs
parameter attributes for callable
Definition: InstrTypes.h:1508
bool hasOperandBundlesOtherThan(ArrayRef< uint32_t > IDs) const
Return true if this operand bundle user contains operand bundles with tags other than those specified...
Definition: InstrTypes.h:2485
std::optional< ConstantRange > getRange() const
If this return value has a range attribute, return the value range of the argument.
bool isReturnNonNull() const
Return true if the return value is known to be not null.
Value * getArgOperand(unsigned i) const
Definition: InstrTypes.h:1687
FunctionType * FTy
Definition: InstrTypes.h:1509
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
Definition: InstrTypes.h:2168
User::op_iterator arg_end()
Return the iterator pointing to the end of the argument list.
Definition: InstrTypes.h:1668
FunctionType * getFunctionType() const
Definition: InstrTypes.h:1600
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
static unsigned CountBundleInputs(ArrayRef< OperandBundleDef > Bundles)
Return the total number of values used in Bundles.
Definition: InstrTypes.h:2640
Value * getArgOperandWithAttribute(Attribute::AttrKind Kind) const
If one of the arguments has the specified attribute, returns its operand value.
void setOnlyAccessesInaccessibleMemory()
bool onlyWritesMemory() const
Determine if the call does not access or only writes memory.
bool hasClobberingOperandBundles() const
Return true if this operand bundle user has operand bundles that may write to the heap.
void setCalledOperand(Value *V)
Definition: InstrTypes.h:1778
bool hasReadingOperandBundles() const
Return true if this operand bundle user has operand bundles that may read from the heap.
bool onlyAccessesArgMemory() const
Determine if the call can access memory only using pointers based on its arguments.
unsigned arg_size() const
Definition: InstrTypes.h:1685
AttributeList getAttributes() const
Return the parameter attributes for this call.
Definition: InstrTypes.h:1819
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, Instruction *InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
void setMemoryEffects(MemoryEffects ME)
bool hasOperandBundles() const
Return true if this User has any operand bundles.
Definition: InstrTypes.h:2329
bool isTailCall() const
Tests if this call site is marked as a tail call.
Function * getCaller()
Helper to get the caller (the parent function).
CallBr instruction, tracking function calls that may not return control but instead transfer it to a ...
static CallBrInst * Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, ArrayRef< BasicBlock * > IndirectDests, ArrayRef< Value * > Args, const Twine &NameStr, BasicBlock::iterator InsertBefore)
SmallVector< BasicBlock *, 16 > getIndirectDests() const
void setDefaultDest(BasicBlock *B)
void setIndirectDest(unsigned i, BasicBlock *B)
BasicBlock * getDefaultDest() const
CallBrInst * cloneImpl() const
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr, BasicBlock::iterator InsertBefore)
void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
TailCallKind getTailCallKind() const
CallInst * cloneImpl() const
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:601
static Instruction::CastOps getCastOpcode(const Value *Val, bool SrcIsSigned, Type *Ty, bool DstIsSigned)
Returns the opcode necessary to cast Val into Ty using usual casting rules.
static CastInst * CreateFPCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create an FPExt, BitCast, or FPTrunc for fp -> fp casts.
Instruction::CastOps getOpcode() const
Return the opcode of this CastInst.
Definition: InstrTypes.h:930
static CastInst * CreateZExtOrBitCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a ZExt or BitCast cast instruction.
static CastInst * Create(Instruction::CastOps, Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Provides a way to construct any of the CastInst subclasses using an opcode instead of the subclass's ...
static unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, Type *SrcIntPtrTy, Type *MidIntPtrTy, Type *DstIntPtrTy)
Determine how a pair of casts can be eliminated, if they can be at all.
static bool isBitOrNoopPointerCastable(Type *SrcTy, Type *DestTy, const DataLayout &DL)
Check whether a bitcast, inttoptr, or ptrtoint cast between these types is valid and a no-op.
static CastInst * CreatePointerBitCastOrAddrSpaceCast(Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd)
Create a BitCast or an AddrSpaceCast cast instruction.
static bool isBitCastable(Type *SrcTy, Type *DestTy)
Check whether a bitcast between these types is valid.
static CastInst * CreatePointerCast(Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd)
Create a BitCast, an AddrSpaceCast, or a PtrToInt cast instruction.
static bool isNoopCast(Instruction::CastOps Opcode, Type *SrcTy, Type *DstTy, const DataLayout &DL)
A no-op cast is one that can be effected without changing any bits.
static CastInst * CreateBitOrPointerCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a BitCast, a PtrToInt, or an IntToPtr cast instruction.
static CastInst * CreateTruncOrBitCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a Trunc or BitCast cast instruction.
static CastInst * CreateSExtOrBitCast(Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a SExt or BitCast cast instruction.
bool isIntegerCast() const
There are several places where we need to know if a cast instruction only deals with integer source a...
static CastInst * CreateIntegerCast(Value *S, Type *Ty, bool isSigned, const Twine &Name, BasicBlock::iterator InsertBefore)
Create a ZExt, BitCast, or Trunc for int -> int casts.
static bool castIsValid(Instruction::CastOps op, Type *SrcTy, Type *DstTy)
This method can be used to determine if a cast from SrcTy to DstTy using Opcode op is valid or not.
CatchReturnInst * cloneImpl() const
void setUnwindDest(BasicBlock *UnwindDest)
void addHandler(BasicBlock *Dest)
Add an entry to the switch instruction... Note: This action invalidates handler_end().
CatchSwitchInst * cloneImpl() const
Value * getParentPad() const
void setParentPad(Value *ParentPad)
BasicBlock * getUnwindDest() const
void removeHandler(handler_iterator HI)
bool hasUnwindDest() const
CleanupReturnInst * cloneImpl() const
This class is the base class for the comparison instructions.
Definition: InstrTypes.h:983
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition: InstrTypes.h:1198
bool isEquality() const
Determine if this is an equals/not equals predicate.
Definition: InstrTypes.h:1255
void setPredicate(Predicate P)
Set the predicate for this instruction to the specified value.
Definition: InstrTypes.h:1108
bool isFalseWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:1320
Predicate getSignedPredicate()
For example, ULT->SLT, ULE->SLE, UGT->SGT, UGE->SGE, SLT->Failed assert.
Definition: InstrTypes.h:1284
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:993
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition: InstrTypes.h:996
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition: InstrTypes.h:1010
@ ICMP_SLT
signed less than
Definition: InstrTypes.h:1022
@ ICMP_SLE
signed less or equal
Definition: InstrTypes.h:1023
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition: InstrTypes.h:999
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition: InstrTypes.h:1008
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition: InstrTypes.h:997
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition: InstrTypes.h:998
@ ICMP_UGE
unsigned greater or equal
Definition: InstrTypes.h:1017
@ ICMP_UGT
unsigned greater than
Definition: InstrTypes.h:1016
@ ICMP_SGT
signed greater than
Definition: InstrTypes.h:1020
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition: InstrTypes.h:1007
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition: InstrTypes.h:1001
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition: InstrTypes.h:1004
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:1018
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition: InstrTypes.h:1005
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition: InstrTypes.h:1000
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition: InstrTypes.h:1002
@ ICMP_EQ
equal
Definition: InstrTypes.h:1014
@ ICMP_NE
not equal
Definition: InstrTypes.h:1015
@ ICMP_SGE
signed greater or equal
Definition: InstrTypes.h:1021
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition: InstrTypes.h:1009
@ ICMP_ULE
unsigned less or equal
Definition: InstrTypes.h:1019
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition: InstrTypes.h:1006
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition: InstrTypes.h:995
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition: InstrTypes.h:1003
bool isSigned() const
Definition: InstrTypes.h:1265
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition: InstrTypes.h:1167
bool isTrueWhenEqual() const
This is just a convenience.
Definition: InstrTypes.h:1314
Predicate getUnsignedPredicate()
For example, SLT->ULT, SLE->ULE, SGT->UGT, SGE->UGE, ULT->Failed assert.
Definition: InstrTypes.h:1296
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition: InstrTypes.h:1211
bool isNonStrictPredicate() const
Definition: InstrTypes.h:1192
bool isFPPredicate() const
Definition: InstrTypes.h:1122
static CmpInst * Create(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a compare instruction, given the opcode, the predicate and the two operands.
void swapOperands()
This is just a convenience that dispatches to the subclasses.
CmpInst(Type *ty, Instruction::OtherOps op, Predicate pred, Value *LHS, Value *RHS, const Twine &Name, BasicBlock::iterator InsertBefore, Instruction *FlagsSource=nullptr)
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition: InstrTypes.h:1129
static StringRef getPredicateName(Predicate P)
Predicate getPredicate() const
Return the predicate for this instruction.
Definition: InstrTypes.h:1105
static CmpInst * CreateWithCopiedFlags(OtherOps Op, Predicate Pred, Value *S1, Value *S2, const Instruction *FlagsSource, const Twine &Name="", Instruction *InsertBefore=nullptr)
Construct a compare instruction, given the opcode, the predicate, the two operands and the instructio...
bool isStrictPredicate() const
Definition: InstrTypes.h:1183
static bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
Predicate getFlippedStrictnessPredicate() const
For predicate of kind "is X or equal to 0" returns the predicate "is X".
Definition: InstrTypes.h:1233
static bool isImpliedTrueByMatchingCmp(Predicate Pred1, Predicate Pred2)
Determine if Pred1 implies Pred2 is true when two compares have matching operands.
Predicate getFlippedSignednessPredicate()
For example, SLT->ULT, ULT->SLT, SLE->ULE, ULE->SLE, EQ->Failed assert.
Definition: InstrTypes.h:1308
bool isIntPredicate() const
Definition: InstrTypes.h:1123
static bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
static bool isImpliedFalseByMatchingCmp(Predicate Pred1, Predicate Pred2)
Determine if Pred1 implies Pred2 is false when two compares have matching operands.
bool isUnsigned() const
Definition: InstrTypes.h:1271
bool isCommutative() const
This is just a convenience that dispatches to the subclasses.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Definition: InstrTypes.h:1261
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:268
const APFloat & getValueAPF() const
Definition: Constants.h:311
This is the shared class of boolean and integer constants.
Definition: Constants.h:80
static Constant * get(ArrayRef< Constant * > V)
Definition: Constants.cpp:1398
This is an important base class in LLVM.
Definition: Constant.h:41
static Constant * getAllOnesValue(Type *Ty)
Definition: Constants.cpp:417
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
Definition: Constants.cpp:370
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition: TypeSize.h:308
This instruction extracts a single (scalar) element from a VectorType value.
ExtractElementInst * cloneImpl() const
static ExtractElementInst * Create(Value *Vec, Value *Idx, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static bool isValidOperands(const Value *Vec, const Value *Idx)
Return true if an extractelement instruction can be formed with the specified operands.
This instruction extracts a struct member or array element value from an aggregate value.
static Type * getIndexedType(Type *Agg, ArrayRef< unsigned > Idxs)
Returns the type of the element that would be extracted with an extractvalue instruction with the spe...
ExtractValueInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
bool isEquality() const
static bool compare(const APFloat &LHS, const APFloat &RHS, FCmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
FCmpInst * cloneImpl() const
Clone an identical FCmpInst.
This class represents an extension of floating point types.
FPExtInst * cloneImpl() const
Clone an identical FPExtInst.
FPExtInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
float getFPAccuracy() const
Get the maximum error permitted by this operation in ULPs.
This class represents a cast from floating point to signed integer.
FPToSIInst * cloneImpl() const
Clone an identical FPToSIInst.
FPToSIInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
This class represents a cast from floating point to unsigned integer.
FPToUIInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
FPToUIInst * cloneImpl() const
Clone an identical FPToUIInst.
This class represents a truncation of floating point types.
FPTruncInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
FPTruncInst * cloneImpl() const
Clone an identical FPTruncInst.
An instruction for ordering other memory operations.
Definition: Instructions.h:460
FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, BasicBlock::iterator InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this fence instruction.
Definition: Instructions.h:498
void setSyncScopeID(SyncScope::ID SSID)
Sets the synchronization scope ID of this fence instruction.
Definition: Instructions.h:503
FenceInst * cloneImpl() const
void setOrdering(AtomicOrdering Ordering)
Sets the ordering constraint of this fence instruction.
Definition: Instructions.h:493
AtomicOrdering getOrdering() const
Returns the ordering constraint of this fence instruction.
Definition: Instructions.h:487
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:539
unsigned getNumElements() const
Definition: DerivedTypes.h:582
This class represents a freeze function that returns random concrete value if an operand is either a ...
FreezeInst(Value *S, const Twine &NameStr, BasicBlock::iterator InsertBefore)
FreezeInst * cloneImpl() const
Clone an identical FreezeInst.
void setParentPad(Value *ParentPad)
Definition: InstrTypes.h:2725
Value * getParentPad() const
Convenience accessors.
Definition: InstrTypes.h:2724
FuncletPadInst * cloneImpl() const
Class to represent function types.
Definition: DerivedTypes.h:103
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Definition: DerivedTypes.h:142
Type * getParamType(unsigned i) const
Parameter type accessors.
Definition: DerivedTypes.h:135
bool isVarArg() const
Definition: DerivedTypes.h:123
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:973
bool isInBounds() const
Determine whether the GEP has the inbounds flag.
static Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
bool hasAllZeroIndices() const
Return true if all of the indices of this GEP are zeros.
bool hasAllConstantIndices() const
Return true if all of the indices of this GEP are constant integers.
bool collectOffset(const DataLayout &DL, unsigned BitWidth, MapVector< Value *, APInt > &VariableOffsets, APInt &ConstantOffset) const
void setIsInBounds(bool b=true)
Set or clear the inbounds flag on this GEP instruction.
static Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const
Accumulate the constant address offset of this GEP if possible.
GetElementPtrInst * cloneImpl() const
This instruction compares its operands according to the predicate given to the constructor.
static bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
ICmpInst * cloneImpl() const
Clone an identical ICmpInst.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
Indirect Branch Instruction.
void addDestination(BasicBlock *Dest)
Add a destination.
void removeDestination(unsigned i)
This method removes the specified successor from the indirectbr instruction.
IndirectBrInst * cloneImpl() const
This instruction inserts a single (scalar) element into a VectorType value.
InsertElementInst * cloneImpl() const
static bool isValidOperands(const Value *Vec, const Value *NewElt, const Value *Idx)
Return true if an insertelement instruction can be formed with the specified operands.
static InsertElementInst * Create(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, BasicBlock::iterator InsertBefore)
This instruction inserts a struct field or array element value into an aggregate value.
InsertValueInst * cloneImpl() const
BitfieldElement::Type getSubclassData() const
Definition: Instruction.h:1003
void copyIRFlags(const Value *V, bool IncludeWrapFlags=true)
Convenience method to copy supported exact, fast-math, and (optionally) wrapping flags from V to this...
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
Definition: Instruction.h:454
bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
const BasicBlock * getParent() const
Definition: Instruction.h:152
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
void swapProfMetadata()
If the instruction has "branch_weights" MD_prof metadata and the MDNode has three operands (including...
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
Definition: Instruction.h:252
InstListType::iterator insertInto(BasicBlock *ParentBB, InstListType::iterator It)
Inserts an unlinked instruction into ParentBB at position It and returns the iterator of the inserted...
This class represents a cast from an integer to a pointer.
IntToPtrInst * cloneImpl() const
Clone an identical IntToPtrInst.
IntToPtrInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
Invoke instruction.
BasicBlock * getUnwindDest() const
void setNormalDest(BasicBlock *B)
InvokeInst * cloneImpl() const
LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
void setUnwindDest(BasicBlock *B)
static InvokeInst * Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, BasicBlock *IfException, ArrayRef< Value * > Args, const Twine &NameStr, BasicBlock::iterator InsertBefore)
BasicBlock * getNormalDest() const
void updateProfWeight(uint64_t S, uint64_t T)
Updates profile metadata by scaling it by S / T.
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
LLVMContextImpl *const pImpl
Definition: LLVMContext.h:69
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
LandingPadInst * cloneImpl() const
void addClause(Constant *ClauseVal)
Add a catch or filter clause to the landing pad.
void setCleanup(bool V)
Indicate that this landingpad instruction is a cleanup.
static LandingPadInst * Create(Type *RetTy, unsigned NumReservedClauses, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructors - NumReservedClauses is a hint for the number of incoming clauses that this landingpad w...
An instruction for reading from memory.
Definition: Instructions.h:184
void setAlignment(Align Align)
Definition: Instructions.h:240
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Definition: Instructions.h:230
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this load instruction.
Definition: Instructions.h:266
LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock::iterator InsertBefore)
LoadInst * cloneImpl() const
AtomicOrdering getOrdering() const
Returns the ordering constraint of this load instruction.
Definition: Instructions.h:245
void setVolatile(bool V)
Specify whether this is a volatile load or not.
Definition: Instructions.h:233
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this load instruction.
Definition: Instructions.h:255
Align getAlign() const
Return the alignment of the access that is being performed.
Definition: Instructions.h:236
MDNode * createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight)
Return metadata containing two branch weights.
Definition: MDBuilder.cpp:37
Metadata node.
Definition: Metadata.h:1067
const MDOperand & getOperand(unsigned I) const
Definition: Metadata.h:1428
unsigned getNumOperands() const
Return number of MDNode operands.
Definition: Metadata.h:1434
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
static MemoryEffectsBase readOnly()
Create MemoryEffectsBase that can read any memory.
Definition: ModRef.h:122
bool onlyWritesMemory() const
Whether this function only (at most) writes memory.
Definition: ModRef.h:198
bool doesNotAccessMemory() const
Whether this function accesses no memory.
Definition: ModRef.h:192
static MemoryEffectsBase argMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access argument memory.
Definition: ModRef.h:132
static MemoryEffectsBase inaccessibleMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible memory.
Definition: ModRef.h:138
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
Definition: ModRef.h:211
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
Definition: ModRef.h:201
bool onlyReadsMemory() const
Whether this function only (at most) reads memory.
Definition: ModRef.h:195
static MemoryEffectsBase writeOnly()
Create MemoryEffectsBase that can write any memory.
Definition: ModRef.h:127
static MemoryEffectsBase inaccessibleOrArgMemOnly(ModRefInfo MR=ModRefInfo::ModRef)
Create MemoryEffectsBase that can only access inaccessible or argument memory.
Definition: ModRef.h:145
static MemoryEffectsBase none()
Create MemoryEffectsBase that cannot read or write any memory.
Definition: ModRef.h:117
bool onlyAccessesInaccessibleOrArgMem() const
Whether this function only (at most) accesses argument and inaccessible memory.
Definition: ModRef.h:217
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:293
A container for an operand bundle being viewed as a set of values rather than a set of uses.
Definition: InstrTypes.h:1447
StringRef getTag() const
Definition: InstrTypes.h:1470
iterator_range< const_block_iterator > blocks() const
void allocHungoffUses(unsigned N)
const_block_iterator block_begin() const
void removeIncomingValueIf(function_ref< bool(unsigned)> Predicate, bool DeletePHIIfEmpty=true)
Remove all incoming values for which the predicate returns true.
Value * removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty=true)
Remove an incoming value.
bool hasConstantOrUndefValue() const
Whether the specified PHI node always merges together the same value, assuming undefs are equal to a ...
void copyIncomingBlocks(iterator_range< const_block_iterator > BBRange, uint32_t ToIdx=0)
Copies the basic blocks from BBRange to the incoming basic block list of this PHINode,...
const_block_iterator block_end() const
Value * getIncomingValue(unsigned i) const
Return incoming value number x.
Value * hasConstantValue() const
If the specified PHI node always merges together the same value, return the value,...
PHINode * cloneImpl() const
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Class to represent pointers.
Definition: DerivedTypes.h:646
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:679
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Definition: Constants.cpp:1827
This class represents a cast from a pointer to an integer.
PtrToIntInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
PtrToIntInst * cloneImpl() const
Clone an identical PtrToIntInst.
Resume the propagation of an exception.
ResumeInst * cloneImpl() const
Return a value (possibly void), from a function.
ReturnInst * cloneImpl() const
This class represents a sign extension of integer types.
SExtInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
SExtInst * cloneImpl() const
Clone an identical SExtInst.
This class represents a cast from signed integer to floating point.
SIToFPInst * cloneImpl() const
Clone an identical SIToFPInst.
SIToFPInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
Class to represent scalable SIMD vectors.
Definition: DerivedTypes.h:586
This class represents the LLVM 'select' instruction.
SelectInst * cloneImpl() const
static const char * areInvalidOperands(Value *Cond, Value *True, Value *False)
Return a string if the specified operands are invalid for a select operation, otherwise return null.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr, BasicBlock::iterator InsertBefore, Instruction *MDFrom=nullptr)
This instruction constructs a fixed permutation of two input vectors.
static bool isZeroEltSplatMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses all elements with the same value as the first element of exa...
ArrayRef< int > getShuffleMask() const
static bool isSpliceMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is a splice mask, concatenating the two inputs together and then ext...
int getMaskValue(unsigned Elt) const
Return the shuffle mask value of this instruction for the given element index.
static bool isValidOperands(const Value *V1, const Value *V2, const Value *Mask)
Return true if a shufflevector instruction can be formed with the specified operands.
ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static bool isSelectMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from its source vectors without lane crossings.
static bool isBitRotateMask(ArrayRef< int > Mask, unsigned EltSizeInBits, unsigned MinSubElts, unsigned MaxSubElts, unsigned &NumSubElts, unsigned &RotateAmt)
Checks if the shuffle is a bit rotation of the first operand across multiple subelements,...
VectorType * getType() const
Overload to return most specific vector type.
bool isIdentityWithExtract() const
Return true if this shuffle extracts the first N elements of exactly one source vector.
static bool isOneUseSingleSourceMask(ArrayRef< int > Mask, int VF)
Return true if this shuffle mask represents "clustered" mask of size VF, i.e.
bool isIdentityWithPadding() const
Return true if this shuffle lengthens exactly one source vector with undefs in the high elements.
static bool isSingleSourceMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector.
bool isConcat() const
Return true if this shuffle concatenates its 2 source vectors.
static bool isDeInterleaveMaskOfFactor(ArrayRef< int > Mask, unsigned Factor, unsigned &Index)
Check if the mask is a DE-interleave mask of the given factor Factor like: <Index,...
ShuffleVectorInst * cloneImpl() const
static bool isIdentityMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask chooses elements from exactly one source vector without lane crossin...
static bool isExtractSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &Index)
Return true if this shuffle mask is an extract subvector mask.
void setShuffleMask(ArrayRef< int > Mask)
bool isInterleave(unsigned Factor)
Return if this shuffle interleaves its two input vectors together.
static bool isReverseMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask swaps the order of elements from exactly one source vector.
static bool isTransposeMask(ArrayRef< int > Mask, int NumSrcElts)
Return true if this shuffle mask is a transpose mask.
void commute()
Swap the operands and adjust the mask to preserve the semantics of the instruction.
static bool isInsertSubvectorMask(ArrayRef< int > Mask, int NumSrcElts, int &NumSubElts, int &Index)
Return true if this shuffle mask is an insert subvector mask.
static Constant * convertShuffleMaskForBitcode(ArrayRef< int > Mask, Type *ResultTy)
static bool isReplicationMask(ArrayRef< int > Mask, int &ReplicationFactor, int &VF)
Return true if this shuffle mask replicates each of the VF elements in a vector ReplicationFactor tim...
static bool isInterleaveMask(ArrayRef< int > Mask, unsigned Factor, unsigned NumInputElts, SmallVectorImpl< unsigned > &StartIndexes)
Return true if the mask interleaves one or more input vectors together.
This is a 'bitvector' (really, a variable-sized bit array), optimized for the case when the array is ...
Implements a dense probed hash-table based set with some number of buckets stored inline.
Definition: DenseSet.h:290
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void assign(size_type NumElts, ValueParamT Elt)
Definition: SmallVector.h:717
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
Definition: SmallVector.h:696
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:317
AtomicOrdering getOrdering() const
Returns the ordering constraint of this store instruction.
Definition: Instructions.h:378
Align getAlign() const
Definition: Instructions.h:369
void setVolatile(bool V)
Specify whether this is a volatile store or not.
Definition: Instructions.h:364
void setAlignment(Align Align)
Definition: Instructions.h:373
StoreInst * cloneImpl() const
StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore)
SyncScope::ID getSyncScopeID() const
Returns the synchronization scope ID of this store instruction.
Definition: Instructions.h:389
bool isVolatile() const
Return true if this is a store to a volatile memory location.
Definition: Instructions.h:361
void setAtomic(AtomicOrdering Ordering, SyncScope::ID SSID=SyncScope::System)
Sets the ordering constraint and the synchronization scope ID of this store instruction.
Definition: Instructions.h:400
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Class to represent struct types.
Definition: DerivedTypes.h:216
void setSuccessorWeight(unsigned idx, CaseWeightOpt W)
Instruction::InstListType::iterator eraseFromParent()
Delegate the call to the underlying SwitchInst::eraseFromParent() and mark this object to not touch t...
void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W)
Delegate the call to the underlying SwitchInst::addCase() and set the specified branch weight for the...
CaseWeightOpt getSuccessorWeight(unsigned idx)
std::optional< uint32_t > CaseWeightOpt
SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I)
Delegate the call to the underlying SwitchInst::removeCase() and remove correspondent branch weight.
void setValue(ConstantInt *V) const
Sets the new value for current case.
void setSuccessor(BasicBlock *S) const
Sets the new successor for current case.
Multiway switch.
SwitchInst * cloneImpl() const
void addCase(ConstantInt *OnVal, BasicBlock *Dest)
Add an entry to the switch instruction.
CaseIteratorImpl< CaseHandle > CaseIt
unsigned getNumCases() const
Return the number of 'cases' in this switch instruction, excluding the default case.
CaseIt removeCase(CaseIt I)
This method removes the specified case and its successor from the switch instruction.
This class represents a truncation of integer types.
TruncInst * cloneImpl() const
Clone an identical TruncInst.
TruncInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition: Twine.h:81
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition: Type.h:234
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
static IntegerType * getInt1Ty(LLVMContext &C)
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
bool isX86_MMXTy() const
Return true if this is X86 MMX.
Definition: Type.h:201
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isFirstClassType() const
Return true if the type is "first class", meaning it is a valid type for a Value.
Definition: Type.h:281
bool isAggregateType() const
Return true if the type is an aggregate type.
Definition: Type.h:295
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
bool isFloatingPointTy() const
Return true if this is one of the floating-point types.
Definition: Type.h:185
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition: Type.h:262
static IntegerType * getInt32Ty(LLVMContext &C)
bool isIntegerTy() const
True if this is an instance of IntegerType.
Definition: Type.h:228
bool isTokenTy() const
Return true if this is 'token'.
Definition: Type.h:225
bool isFPOrFPVectorTy() const
Return true if this is a FP type or a vector of FP.
Definition: Type.h:216
TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition: Type.h:348
This class represents a cast from unsigned integer to floating point.
UIToFPInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
UIToFPInst * cloneImpl() const
Clone an identical UIToFPInst.
UnaryOperator * cloneImpl() const
UnaryOperator(UnaryOps iType, Value *S, Type *Ty, const Twine &Name, BasicBlock::iterator InsertBefore)
static UnaryOperator * Create(UnaryOps Op, Value *S, const Twine &Name, BasicBlock::iterator InsertBefore)
Construct a unary instruction, given the opcode and an operand.
UnaryOps getOpcode() const
Definition: InstrTypes.h:205
static UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
Definition: Constants.cpp:1808
This function has undefined behavior.
UnreachableInst(LLVMContext &C, BasicBlock::iterator InsertBefore)
UnreachableInst * cloneImpl() const
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
void set(Value *Val)
Definition: Value.h:882
const Use * getOperandList() const
Definition: User.h:162
op_range operands()
Definition: User.h:242
void allocHungoffUses(unsigned N, bool IsPhi=false)
Allocate the array of Uses, followed by a pointer (with bottom bit set) to the User.
Definition: User.cpp:50
op_iterator op_begin()
Definition: User.h:234
void setNumHungOffUseOperands(unsigned NumOps)
Subclasses with hung off uses need to manage the operand count themselves.
Definition: User.h:215
Use & Op()
Definition: User.h:133
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
op_iterator op_end()
Definition: User.h:236
void growHungoffUses(unsigned N, bool IsPhi=false)
Grow the number of hung off uses.
Definition: User.cpp:67
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
VAArgInst * cloneImpl() const
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
unsigned char SubclassOptionalData
Hold subclass data that can be dropped.
Definition: Value.h:84
void setName(const Twine &Name)
Change the name of the value.
Definition: Value.cpp:377
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
Definition: Value.cpp:534
LLVMContext & getContext() const
All values hold a context through their type.
Definition: Value.cpp:1074
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
Base class of all SIMD vector types.
Definition: DerivedTypes.h:403
ElementCount getElementCount() const
Return an ElementCount instance to represent the (possibly scalable) number of elements in the vector...
Definition: DerivedTypes.h:641
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Definition: Type.cpp:676
This class represents zero extension of integer types.
ZExtInst(Value *S, Type *Ty, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructor with insert-before-instruction semantics.
ZExtInst * cloneImpl() const
Clone an identical ZExtInst.
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:206
size_type size() const
Definition: DenseSet.h:81
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
Definition: DenseSet.h:185
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition: TypeSize.h:168
An efficient, type-erasing, non-owning reference to a callable.
base_list_type::iterator iterator
Definition: ilist.h:121
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Attrs[]
Key for Kernel::Metadata::mAttrs.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
@ Switch
The "resume-switch" lowering, where there are separate resume and destroy functions that are shared b...
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition: STLExtras.h:1680
unsigned getPointerAddressSpace(const Type *T)
Definition: SPIRVUtils.h:122
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
MDNode * getBranchWeightMDNode(const Instruction &I)
Get the branch weights metadata node.
auto reverse(ContainerTy &&C)
Definition: STLExtras.h:419
constexpr bool isPowerOf2_32(uint32_t Value)
Return true if the argument is a power of two > 0.
Definition: MathExtras.h:275
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2060
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool isPointerTy(const Type *T)
Definition: SPIRVUtils.h:116
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition: Casting.h:548
constexpr int PoisonMaskElem
AtomicOrdering
Atomic ordering for LLVM's memory model.
auto remove_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::remove_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1761
@ Or
Bitwise or logical OR of integers.
@ Mul
Product of integers.
@ Xor
Bitwise or logical XOR of integers.
@ FMul
Product of floats.
@ And
Bitwise or logical AND of integers.
@ Add
Sum of integers.
@ FAdd
Sum of floats.
raw_ostream & operator<<(raw_ostream &OS, const APFixedPoint &FX)
Definition: APFixedPoint.h:293
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1824
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
bool extractBranchWeights(const MDNode *ProfileData, SmallVectorImpl< uint32_t > &Weights)
Extract branch weights from MD_prof metadata.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition: Casting.h:565
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition: STLExtras.h:1879
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition: STLExtras.h:2039
auto seq(T Begin, T End)
Iterate over an integral type from Begin up to - but not including - End.
Definition: Sequence.h:305
@ Default
The result values are uniform if and only if all operands are uniform.
void scaleProfData(Instruction &I, uint64_t S, uint64_t T)
Scaling the profile data attached to 'I' using the ratio of S/T.
cmpResult
IEEE-754R 5.11: Floating Point Comparison Relations.
Definition: APFloat.h:220
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Describes an element of a Bitfield.
Definition: Bitfields.h:223
Used to keep track of an operand bundle.
Definition: InstrTypes.h:2496
uint32_t End
The index in the Use& vector where operands for this operand bundle ends.
Definition: InstrTypes.h:2507
uint32_t Begin
The index in the Use& vector where operands for this operand bundle starts.
Definition: InstrTypes.h:2503
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
Compile-time customization of User operands.
Definition: User.h:42