78#define DEBUG_TYPE "inline-function"
87 cl::desc(
"Convert noalias attributes to metadata during inlining."));
92 cl::desc(
"Use the llvm.experimental.noalias.scope.decl "
93 "intrinsic during inlining."));
101 cl::desc(
"Convert align attributes to assumptions during inlining."));
104 "max-inst-checked-for-throw-during-inlining",
cl::Hidden,
105 cl::desc(
"the maximum number of instructions analyzed for may throw during "
106 "attribute inference in inlined body"),
112 class LandingPadInliningInfo {
123 PHINode *InnerEHValuesPHI =
nullptr;
129 : OuterResumeDest(II->getUnwindDest()) {
135 for (; isa<PHINode>(
I); ++
I) {
138 UnwindDestPHIValues.
push_back(
PHI->getIncomingValueForBlock(InvokeBB));
141 CallerLPad = cast<LandingPadInst>(
I);
147 return OuterResumeDest;
164 void addIncomingPHIValuesFor(
BasicBlock *BB)
const {
165 addIncomingPHIValuesForInto(BB, OuterResumeDest);
170 for (
unsigned i = 0, e = UnwindDestPHIValues.
size(); i != e; ++i, ++
I) {
172 phi->addIncoming(UnwindDestPHIValues[i], src);
180BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
181 if (InnerResumeDest)
return InnerResumeDest;
187 OuterResumeDest->
getName() +
".body");
190 const unsigned PHICapacity = 2;
195 for (
unsigned i = 0, e = UnwindDestPHIValues.
size(); i != e; ++i, ++
I) {
196 PHINode *OuterPHI = cast<PHINode>(
I);
198 OuterPHI->
getName() +
".lpad-body");
209 InnerEHValuesPHI->
addIncoming(CallerLPad, OuterResumeDest);
212 return InnerResumeDest;
219void LandingPadInliningInfo::forwardResume(
228 addIncomingPHIValuesForInto(Src, Dest);
236 if (
auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
237 return FPI->getParentPad();
238 return cast<CatchSwitchInst>(EHPad)->getParentPad();
249 while (!Worklist.
empty()) {
256 Value *UnwindDestToken =
nullptr;
257 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
258 if (CatchSwitch->hasUnwindDest()) {
259 UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
267 for (
auto HI = CatchSwitch->handler_begin(),
268 HE = CatchSwitch->handler_end();
269 HI != HE && !UnwindDestToken; ++HI) {
271 auto *CatchPad = cast<CatchPadInst>(HandlerBlock->
getFirstNonPHI());
277 if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
281 auto Memo = MemoMap.
find(ChildPad);
282 if (Memo == MemoMap.
end()) {
289 Value *ChildUnwindDestToken = Memo->second;
290 if (!ChildUnwindDestToken)
296 if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
297 UnwindDestToken = ChildUnwindDestToken;
305 auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
306 for (
User *U : CleanupPad->users()) {
307 if (
auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
308 if (
BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
309 UnwindDestToken = RetUnwindDest->getFirstNonPHI();
314 Value *ChildUnwindDestToken;
315 if (
auto *Invoke = dyn_cast<InvokeInst>(U)) {
316 ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
317 }
else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
319 auto Memo = MemoMap.
find(ChildPad);
320 if (Memo == MemoMap.
end()) {
327 ChildUnwindDestToken = Memo->second;
328 if (!ChildUnwindDestToken)
337 if (isa<Instruction>(ChildUnwindDestToken) &&
340 UnwindDestToken = ChildUnwindDestToken;
346 if (!UnwindDestToken)
354 if (
auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
357 UnwindParent =
nullptr;
358 bool ExitedOriginalPad =
false;
360 ExitedPad && ExitedPad != UnwindParent;
361 ExitedPad = dyn_cast<Instruction>(
getParentPad(ExitedPad))) {
363 if (isa<CatchPadInst>(ExitedPad))
365 MemoMap[ExitedPad] = UnwindDestToken;
366 ExitedOriginalPad |= (ExitedPad == EHPad);
369 if (ExitedOriginalPad)
370 return UnwindDestToken;
401 if (
auto *CPI = dyn_cast<CatchPadInst>(EHPad))
402 EHPad = CPI->getCatchSwitch();
405 auto Memo = MemoMap.
find(EHPad);
406 if (Memo != MemoMap.
end())
411 assert((UnwindDestToken ==
nullptr) != (MemoMap.
count(EHPad) != 0));
413 return UnwindDestToken;
420 MemoMap[EHPad] =
nullptr;
426 Value *AncestorToken;
428 auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
431 if (isa<CatchPadInst>(AncestorPad))
440 assert(!MemoMap.
count(AncestorPad) || MemoMap[AncestorPad]);
441 auto AncestorMemo = MemoMap.
find(AncestorPad);
442 if (AncestorMemo == MemoMap.
end()) {
445 UnwindDestToken = AncestorMemo->second;
449 LastUselessPad = AncestorPad;
450 MemoMap[LastUselessPad] =
nullptr;
452 TempMemos.
insert(LastUselessPad);
470 while (!Worklist.
empty()) {
472 auto Memo = MemoMap.
find(UselessPad);
473 if (Memo != MemoMap.
end() && Memo->second) {
501 MemoMap[UselessPad] = UnwindDestToken;
502 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
503 assert(CatchSwitch->getUnwindDest() ==
nullptr &&
"Expected useless pad");
504 for (
BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
505 auto *CatchPad = HandlerBlock->getFirstNonPHI();
506 for (
User *U : CatchPad->users()) {
508 (!isa<InvokeInst>(U) ||
510 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
512 "Expected useless pad");
513 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
514 Worklist.
push_back(cast<Instruction>(U));
518 assert(isa<CleanupPadInst>(UselessPad));
520 assert(!isa<CleanupReturnInst>(U) &&
"Expected useless pad");
521 assert((!isa<InvokeInst>(U) ||
523 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
525 "Expected useless pad");
526 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
527 Worklist.
push_back(cast<Instruction>(U));
532 return UnwindDestToken;
558 if (
F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
559 F->getIntrinsicID() == Intrinsic::experimental_guard)
570 auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
571 Value *UnwindDestToken =
573 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
577 if (
auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
578 MemoKey = CatchPad->getCatchSwitch();
580 MemoKey = FuncletPad;
581 assert(FuncletUnwindMap->count(MemoKey) &&
582 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
583 "must get memoized to avoid confusing later searches");
608 LandingPadInliningInfo Invoke(II);
614 if (
InvokeInst *II = dyn_cast<InvokeInst>(
I->getTerminator()))
622 InlinedLPad->reserveClauses(OuterNum);
623 for (
unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
624 InlinedLPad->addClause(OuterLPad->
getClause(OuterIdx));
626 InlinedLPad->setCleanup(
true);
633 &*BB, Invoke.getOuterResumeDest()))
636 Invoke.addIncomingPHIValuesFor(NewBB);
639 if (
ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
640 Invoke.forwardResume(RI, InlinedLPads);
670 UnwindDestPHIValues.
push_back(
PHI.getIncomingValueForBlock(InvokeBB));
677 for (
Value *V : UnwindDestPHIValues) {
679 PHI->addIncoming(V, Src);
689 if (
auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
690 if (CRI->unwindsToCaller()) {
691 auto *CleanupPad = CRI->getCleanupPad();
693 CRI->eraseFromParent();
700 isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
701 FuncletUnwindMap[CleanupPad] =
711 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(
I)) {
712 if (CatchSwitch->unwindsToCaller()) {
713 Value *UnwindDestToken;
714 if (
auto *ParentPad =
715 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
725 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
738 CatchSwitch->getParentPad(), UnwindDest,
739 CatchSwitch->getNumHandlers(), CatchSwitch->
getName(),
740 CatchSwitch->getIterator());
741 for (
BasicBlock *PadBB : CatchSwitch->handlers())
742 NewCatchSwitch->addHandler(PadBB);
747 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
748 Replacement = NewCatchSwitch;
750 }
else if (!isa<FuncletPadInst>(
I)) {
756 I->replaceAllUsesWith(Replacement);
757 I->eraseFromParent();
767 &*BB, UnwindDest, &FuncletUnwindMap))
780 MDNode *CallsiteStackContext) {
786 for (
auto MIBStackIter = MIBStackContext->
op_begin(),
787 CallsiteStackIter = CallsiteStackContext->
op_begin();
788 MIBStackIter != MIBStackContext->
op_end() &&
789 CallsiteStackIter != CallsiteStackContext->
op_end();
790 MIBStackIter++, CallsiteStackIter++) {
791 auto *Val1 = mdconst::dyn_extract<ConstantInt>(*MIBStackIter);
792 auto *Val2 = mdconst::dyn_extract<ConstantInt>(*CallsiteStackIter);
794 if (Val1->getZExtValue() != Val2->getZExtValue())
801 Call->setMetadata(LLVMContext::MD_memprof,
nullptr);
805 Call->setMetadata(LLVMContext::MD_callsite,
nullptr);
809 const std::vector<Metadata *> &MIBList) {
816 CallStack.addCallStack(cast<MDNode>(MIB));
817 bool MemprofMDAttached =
CallStack.buildAndAttachMIBMetadata(CI);
819 if (!MemprofMDAttached)
829 MDNode *InlinedCallsiteMD) {
831 MDNode *ClonedCallsiteMD =
nullptr;
834 if (OrigCallsiteMD) {
839 ClonedCall->
setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);
851 std::vector<Metadata *> NewMIBList;
856 for (
auto &MIBOp : OrigMemProfMD->
operands()) {
857 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
864 NewMIBList.push_back(MIB);
866 if (NewMIBList.empty()) {
882 bool ContainsMemProfMetadata,
887 if (!CallsiteMD && !ContainsMemProfMetadata)
891 for (
const auto &Entry : VMap) {
894 auto *OrigCall = dyn_cast_or_null<CallBase>(Entry.first);
895 auto *ClonedCall = dyn_cast_or_null<CallBase>(Entry.second);
896 if (!OrigCall || !ClonedCall)
915 MDNode *MemParallelLoopAccess =
916 CB.
getMetadata(LLVMContext::MD_mem_parallel_loop_access);
920 if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
926 if (!
I.mayReadOrWriteMemory())
929 if (MemParallelLoopAccess) {
932 I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
933 MemParallelLoopAccess);
934 I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
935 MemParallelLoopAccess);
940 I.getMetadata(LLVMContext::MD_access_group), AccessGroup));
944 I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));
948 I.getMetadata(LLVMContext::MD_noalias), NoAlias));
966 dyn_cast<Function>(
I->getCalledOperand()->stripPointerCasts());
967 if (CalledFn && CalledFn->isIntrinsic() &&
I->doesNotThrow() &&
972 I->getOperandBundlesAsDefs(OpBundles);
977 I->replaceAllUsesWith(NewInst);
978 I->eraseFromParent();
987class ScopedAliasMetadataDeepCloner {
991 void addRecursiveMetadataUses();
994 ScopedAliasMetadataDeepCloner(
const Function *
F);
1006ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
1010 if (
const MDNode *M =
I.getMetadata(LLVMContext::MD_alias_scope))
1012 if (
const MDNode *M =
I.getMetadata(LLVMContext::MD_noalias))
1016 if (
const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&
I))
1017 MD.insert(Decl->getScopeList());
1020 addRecursiveMetadataUses();
1023void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
1025 while (!
Queue.empty()) {
1028 if (
const MDNode *OpMD = dyn_cast<MDNode>(
Op))
1029 if (MD.insert(OpMD))
1030 Queue.push_back(OpMD);
1034void ScopedAliasMetadataDeepCloner::clone() {
1035 assert(MDMap.empty() &&
"clone() already called ?");
1040 MDMap[
I].reset(DummyNodes.
back().get());
1049 if (
const MDNode *M = dyn_cast<MDNode>(
Op))
1056 MDTuple *TempM = cast<MDTuple>(MDMap[
I]);
1073 if (
MDNode *M =
I.getMetadata(LLVMContext::MD_alias_scope))
1074 if (
MDNode *MNew = MDMap.lookup(M))
1075 I.setMetadata(LLVMContext::MD_alias_scope, MNew);
1077 if (
MDNode *M =
I.getMetadata(LLVMContext::MD_noalias))
1078 if (
MDNode *MNew = MDMap.lookup(M))
1079 I.setMetadata(LLVMContext::MD_noalias, MNew);
1081 if (
auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&
I))
1082 if (
MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
1083 Decl->setScopeList(MNew);
1102 if (CB.
paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
1105 if (NoAliasArgs.
empty())
1125 for (
unsigned i = 0, e = NoAliasArgs.
size(); i != e; ++i) {
1128 std::string
Name = std::string(CalledFunc->
getName());
1131 Name +=
A->getName();
1133 Name +=
": argument ";
1141 NewScopes.
insert(std::make_pair(
A, NewScope));
1158 VMI != VMIE; ++VMI) {
1159 if (
const Instruction *
I = dyn_cast<Instruction>(VMI->first)) {
1163 Instruction *NI = dyn_cast<Instruction>(VMI->second);
1167 bool IsArgMemOnlyCall =
false, IsFuncCall =
false;
1170 if (
const LoadInst *LI = dyn_cast<LoadInst>(
I))
1171 PtrArgs.
push_back(LI->getPointerOperand());
1172 else if (
const StoreInst *SI = dyn_cast<StoreInst>(
I))
1173 PtrArgs.
push_back(SI->getPointerOperand());
1174 else if (
const VAArgInst *VAAI = dyn_cast<VAArgInst>(
I))
1175 PtrArgs.
push_back(VAAI->getPointerOperand());
1177 PtrArgs.
push_back(CXI->getPointerOperand());
1179 PtrArgs.
push_back(RMWI->getPointerOperand());
1180 else if (
const auto *Call = dyn_cast<CallBase>(
I)) {
1184 if (Call->doesNotAccessMemory())
1196 IsArgMemOnlyCall =
true;
1199 for (
Value *Arg : Call->args()) {
1203 if (!Arg->getType()->isPointerTy())
1214 if (PtrArgs.
empty() && !IsFuncCall)
1224 for (
const Value *V : PtrArgs) {
1228 for (
const Value *O : Objects)
1234 bool RequiresNoCaptureBefore =
false, UsesAliasingPtr =
false,
1235 UsesUnknownObject =
false;
1236 for (
const Value *V : ObjSet) {
1240 bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1241 isa<ConstantPointerNull>(V) ||
1242 isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1249 if (
const Argument *
A = dyn_cast<Argument>(V)) {
1251 UsesAliasingPtr =
true;
1253 UsesAliasingPtr =
true;
1259 RequiresNoCaptureBefore =
true;
1265 UsesUnknownObject =
true;
1271 if (UsesUnknownObject)
1276 if (IsFuncCall && !IsArgMemOnlyCall)
1277 RequiresNoCaptureBefore =
true;
1295 if (!RequiresNoCaptureBefore ||
1317 bool CanAddScopes = !UsesAliasingPtr;
1318 if (CanAddScopes && IsFuncCall)
1319 CanAddScopes = IsArgMemOnlyCall;
1324 Scopes.push_back(NewScopes[
A]);
1327 if (!Scopes.empty())
1329 LLVMContext::MD_alias_scope,
1340 "Expected to be in same basic block!");
1342 assert(BeginIt !=
End->getIterator() &&
"Non-empty BB has empty iterator");
1383 auto &
Context = CalledFunction->getContext();
1385 for (
auto &BB : *CalledFunction) {
1386 auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
1387 if (!RI || !isa<CallBase>(RI->
getOperand(0)))
1389 auto *RetVal = cast<CallBase>(RI->
getOperand(0));
1393 auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.
lookup(RetVal));
1427 AL.getRetDereferenceableOrNullBytes())
1477 (RetVal->hasOneUse() && !RetVal->hasRetAttr(Attribute::NoUndef)))
1480 NewRetVal->setAttributes(NewAL);
1496 bool DTCalculated =
false;
1500 if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||
1507 if (!DTCalculated) {
1509 DTCalculated =
true;
1518 DL, ArgVal, Alignment->value());
1530 Builder.
getInt64(M->getDataLayout().getTypeStoreSize(ByValType));
1543 CI->
setDebugLoc(DILocation::get(SP->getContext(), 0, 0, SP));
1554 const DataLayout &
DL = Caller->getParent()->getDataLayout();
1580 Align Alignment =
DL.getPrefTypeAlign(ByValType);
1586 Alignment = std::max(Alignment, *ByValAlignment);
1589 nullptr, Alignment, Arg->
getName());
1600 for (
User *U : V->users())
1613 if (Ty == Int8PtrTy)
1618 if (U->getType() != Int8PtrTy)
continue;
1619 if (U->stripPointerCasts() != AI)
continue;
1639 return DILocation::get(Ctx, OrigDL.
getLine(), OrigDL.
getCol(),
1656 InlinedAtNode = DILocation::getDistinct(
1657 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1658 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1667 bool NoInlineLineTables = Fn->
hasFnAttribute(
"no-inline-line-tables");
1673 auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
1675 if (
auto *Loc = dyn_cast_or_null<DILocation>(MD))
1681 if (!NoInlineLineTables)
1689 if (CalleeHasDebugInfo && !NoInlineLineTables)
1699 if (
auto *AI = dyn_cast<AllocaInst>(&
I))
1706 if (isa<PseudoProbeInst>(
I))
1709 I.setDebugLoc(TheCallDL);
1714 assert(DVR->getDebugLoc() &&
"Debug Value must have debug loc");
1715 if (NoInlineLineTables) {
1716 DVR->setDebugLoc(TheCallDL);
1722 DVR->getMarker()->getParent()->
getContext(), IANodes);
1723 DVR->setDebugLoc(IDL);
1727 for (; FI != Fn->
end(); ++FI) {
1731 for (
DbgRecord &DVR : BI->getDbgRecordRange()) {
1737 if (NoInlineLineTables) {
1739 while (BI != FI->end()) {
1740 if (isa<DbgInfoIntrinsic>(BI)) {
1741 BI = BI->eraseFromParent();
1744 BI->dropDbgRecords();
1753#define DEBUG_TYPE "assignment-tracking"
1761 errs() <<
"# Finding caller local variables escaped by callee\n");
1764 if (!Arg->getType()->isPointerTy()) {
1776 assert(Arg->getType()->isPtrOrPtrVectorTy());
1777 APInt TmpOffset(
DL.getIndexTypeSizeInBits(Arg->getType()), 0,
false);
1779 Arg->stripAndAccumulateConstantOffsets(
DL, TmpOffset,
true));
1781 LLVM_DEBUG(
errs() <<
" | SKIP: Couldn't walk back to base storage\n");
1792 auto CollectAssignsForStorage = [&](
auto *DbgAssign) {
1794 if (DbgAssign->getDebugLoc().getInlinedAt())
1802 return EscapedLocals;
1808 << Start->getParent()->getName() <<
" from "
1810 std::unique_ptr<DataLayout>
DL = std::make_unique<DataLayout>(CB.
getModule());
1820 auto GetNewID = [&Map](
Metadata *Old) {
1830 for (
auto BBI = Start; BBI !=
End; ++BBI) {
1833 if (DVR.isDbgAssign())
1834 DVR.setAssignId(GetNewID(DVR.getAssignID()));
1836 if (
auto *
ID =
I.getMetadata(LLVMContext::MD_DIAssignID))
1837 I.setMetadata(LLVMContext::MD_DIAssignID, GetNewID(
ID));
1838 else if (
auto *DAI = dyn_cast<DbgAssignIntrinsic>(&
I))
1839 DAI->setAssignId(GetNewID(DAI->getAssignID()));
1844#define DEBUG_TYPE "inline-function"
1858 for (
auto Entry : VMap) {
1859 if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1861 auto *OrigBB = cast<BasicBlock>(Entry.first);
1862 auto *ClonedBB = cast<BasicBlock>(Entry.second);
1864 if (!ClonedBBs.
insert(ClonedBB).second) {
1876 EntryClone, CallerBFI->
getBlockFreq(CallSiteBlock), ClonedBBs);
1886 auto CallSiteCount =
1889 std::min(CallSiteCount.value_or(0), CalleeEntryCount.
getCount());
1894 Function *Callee, int64_t EntryDelta,
1896 auto CalleeCount = Callee->getEntryCount();
1900 const uint64_t PriorEntryCount = CalleeCount->getCount();
1905 (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
1907 : PriorEntryCount + EntryDelta;
1911 uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
1912 for (
auto Entry : *VMap)
1913 if (isa<CallInst>(Entry.first))
1914 if (
auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1915 CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
1919 Callee->setEntryCount(NewEntryCount);
1923 if (!VMap || VMap->
count(&BB))
1925 if (
CallInst *CI = dyn_cast<CallInst>(&
I))
1926 CI->updateProfWeight(NewEntryCount, PriorEntryCount);
1952 bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
1953 IsUnsafeClaimRV = !IsRetainRV;
1955 for (
auto *RI : Returns) {
1957 bool InsertRetainCall = IsRetainRV;
1966 if (isa<CastInst>(
I))
1969 if (
auto *II = dyn_cast<IntrinsicInst>(&
I)) {
1970 if (II->
getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
1980 if (IsUnsafeClaimRV) {
1987 InsertRetainCall =
false;
1991 auto *CI = dyn_cast<CallInst>(&
I);
2006 NewCall->copyMetadata(*CI);
2007 CI->replaceAllUsesWith(NewCall);
2008 CI->eraseFromParent();
2009 InsertRetainCall =
false;
2013 if (InsertRetainCall) {
2033 bool MergeAttributes,
2035 bool InsertLifetime,
2040 if (isa<CallBrInst>(CB))
2053 Value *ConvergenceControlToken =
nullptr;
2069 ConvergenceControlToken = OBUse.Inputs[0].get();
2088 if (
auto *IntrinsicCall = dyn_cast<IntrinsicInst>(
I)) {
2089 if (IntrinsicCall->getIntrinsicID() ==
2090 Intrinsic::experimental_convergence_entry) {
2091 if (!ConvergenceControlToken) {
2093 "convergent call needs convergencectrl operand");
2110 if (CalledFunc->
hasGC()) {
2111 if (!Caller->hasGC())
2112 Caller->setGC(CalledFunc->
getGC());
2113 else if (CalledFunc->
getGC() != Caller->getGC())
2127 Caller->hasPersonalityFn()
2128 ? Caller->getPersonalityFn()->stripPointerCasts()
2130 if (CalledPersonality) {
2131 if (!CallerPersonality)
2132 Caller->setPersonalityFn(CalledPersonality);
2137 else if (CalledPersonality != CallerPersonality)
2144 if (CallerPersonality) {
2147 std::optional<OperandBundleUse> ParentFunclet =
2150 CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
2154 if (CallSiteEHPad) {
2155 if (Personality == EHPersonality::MSVC_CXX) {
2158 if (isa<CleanupPadInst>(CallSiteEHPad)) {
2161 for (
const BasicBlock &CalledBB : *CalledFunc) {
2162 if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
2169 for (
const BasicBlock &CalledBB : *CalledFunc) {
2170 if (CalledBB.isEHPad())
2180 bool EHPadForCallUnwindsLocally =
false;
2181 if (CallSiteEHPad && isa<CallInst>(CB)) {
2183 Value *CallSiteUnwindDestToken =
2186 EHPadForCallUnwindsLocally =
2187 CallSiteUnwindDestToken &&
2188 !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
2219 auto &
DL = Caller->getParent()->getDataLayout();
2226 E = CalledFunc->
arg_end();
I != E; ++
I, ++AI, ++ArgNo) {
2227 Value *ActualArg = *AI;
2235 &CB, CalledFunc, IFI,
2237 if (ActualArg != *AI)
2242 VMap[&*
I] = ActualArg;
2262 false, Returns,
".i",
2263 &InlinedFunctionInfo);
2265 FirstNewBlock = LastBlock; ++FirstNewBlock;
2269 if (RVCallKind != objcarc::ARCInstKind::None)
2280 CalledFunc->
front());
2288 for (ByValInit &
Init : ByValInits)
2290 &*FirstNewBlock, IFI, CalledFunc);
2292 std::optional<OperandBundleUse> ParentDeopt =
2298 CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
2319 std::vector<Value *> MergedDeoptArgs;
2320 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
2321 ChildOB.Inputs.size());
2326 OpDefs.
emplace_back(
"deopt", std::move(MergedDeoptArgs));
2356 SAMetadataCloner.clone();
2357 SAMetadataCloner.remap(FirstNewBlock, Caller->end());
2375 make_range(FirstNewBlock->getIterator(), Caller->end()))
2377 if (
auto *II = dyn_cast<AssumeInst>(&
I))
2381 if (ConvergenceControlToken) {
2382 auto *
I = FirstNewBlock->getFirstNonPHI();
2383 if (
auto *IntrinsicCall = dyn_cast<IntrinsicInst>(
I)) {
2384 if (IntrinsicCall->getIntrinsicID() ==
2385 Intrinsic::experimental_convergence_entry) {
2386 IntrinsicCall->replaceAllUsesWith(ConvergenceControlToken);
2387 IntrinsicCall->eraseFromParent();
2399 E = FirstNewBlock->end();
I != E; ) {
2418 while (isa<AllocaInst>(
I) &&
2419 !cast<AllocaInst>(
I)->use_empty() &&
2429 Caller->getEntryBlock().splice(InsertPoint, &*FirstNewBlock,
2442 bool InlinedMustTailCalls =
false, InlinedDeoptimizeCalls =
false;
2445 if (
CallInst *CI = dyn_cast<CallInst>(&CB))
2446 CallSiteTailKind = CI->getTailCallKind();
2461 if (!VarArgsToForward.
empty() &&
2462 ((ForwardVarArgsTo &&
2468 if (!Attrs.isEmpty() || !VarArgsAttrs.
empty()) {
2469 for (
unsigned ArgNo = 0;
2471 ArgAttrs.
push_back(Attrs.getParamAttrs(ArgNo));
2477 Attrs.getRetAttrs(), ArgAttrs);
2492 InlinedDeoptimizeCalls |=
2493 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2512 ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2531 if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
2533 IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
2534 for (
unsigned ai = 0, ae = IFI.
StaticAllocas.size(); ai != ae; ++ai) {
2549 auto &
DL = Caller->getParent()->getDataLayout();
2551 TypeSize AllocaTypeSize =
DL.getTypeAllocSize(AllocaType);
2552 uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2555 if (AllocaArraySize == 0)
2561 AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2562 std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2565 AllocaArraySize * AllocaTypeSize);
2573 if (InlinedMustTailCalls &&
2576 if (InlinedDeoptimizeCalls &&
2608 if (
auto *II = dyn_cast<InvokeInst>(&CB)) {
2611 if (isa<LandingPadInst>(FirstNonPHI)) {
2621 if (CallSiteEHPad) {
2632 if (
auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2633 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2640 if (
auto *CatchSwitch = dyn_cast<CatchSwitchInst>(
I)) {
2641 if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2642 CatchSwitch->setParentPad(CallSiteEHPad);
2644 auto *FPI = cast<FuncletPadInst>(
I);
2645 if (isa<ConstantTokenNone>(FPI->getParentPad()))
2646 FPI->setParentPad(CallSiteEHPad);
2651 if (InlinedDeoptimizeCalls) {
2657 if (Caller->getReturnType() == CB.
getType()) {
2659 return RI->
getParent()->getTerminatingDeoptimizeCall() !=
nullptr;
2664 Caller->getParent(), Intrinsic::experimental_deoptimize,
2665 {Caller->getReturnType()});
2691 "Expected at least the deopt operand bundle");
2695 Builder.
CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2716 if (InlinedMustTailCalls) {
2718 Type *NewRetTy = Caller->getReturnType();
2726 if (!ReturnedMustTail) {
2735 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2738 OldCast->eraseFromParent();
2758 make_range(FirstNewBlock->getIterator(), Caller->end()))
2760 if (
auto *CB = dyn_cast<CallBase>(&
I))
2769 if (Returns.
size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2772 FirstNewBlock->end());
2774 Caller->back().eraseFromParent();
2778 if (
InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2787 if (&CB == R->getReturnValue())
2796 Returns[0]->eraseFromParent();
2798 if (MergeAttributes)
2812 BranchInst *CreatedBranchToNormalDest =
nullptr;
2813 if (
InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2823 CalledFunc->
getName() +
".exit");
2830 CalledFunc->
getName() +
".exit");
2844 "splitBasicBlock broken!");
2850 Caller->splice(AfterCallBB->
getIterator(), Caller, FirstNewBlock,
2858 if (Returns.
size() > 1) {
2863 PHI->insertBefore(AfterCallBB->
begin());
2872 for (
unsigned i = 0, e = Returns.
size(); i != e; ++i) {
2875 "Ret value not consistent in function!");
2882 for (
unsigned i = 0, e = Returns.
size(); i != e; ++i) {
2893 if (CreatedBranchToNormalDest)
2895 }
else if (!Returns.
empty()) {
2899 if (&CB == Returns[0]->getReturnValue())
2906 BasicBlock *ReturnBB = Returns[0]->getParent();
2911 AfterCallBB->
splice(AfterCallBB->
begin(), ReturnBB);
2913 if (CreatedBranchToNormalDest)
2917 Returns[0]->eraseFromParent();
2930 if (InlinedMustTailCalls &&
pred_empty(AfterCallBB))
2935 assert(cast<BranchInst>(Br)->isUnconditional() &&
"splitBasicBlock broken!");
2936 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
2955 auto &
DL = Caller->getParent()->getDataLayout();
2957 PHI->replaceAllUsesWith(V);
2958 PHI->eraseFromParent();
2962 if (MergeAttributes)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static void UpdatePHINodes(BasicBlock *OrigBB, BasicBlock *NewBB, ArrayRef< BasicBlock * > Preds, BranchInst *BI, bool HasLoopExit)
Update the PHI nodes in OrigBB to include the values coming from NewBB.
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static cl::opt< bool > NoAliases("csky-no-aliases", cl::desc("Disable the emission of assembler pseudo instructions"), cl::init(false), cl::Hidden)
This file provides interfaces used to build and manipulate a call graph, which is a very useful tool ...
This file contains the declarations for the subclasses of Constant, which represent the different fla...
This file defines the DenseMap class.
This file provides various utilities for inspecting and working with the control flow graph in LLVM I...
static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB)
static at::StorageToVarsMap collectEscapedLocals(const DataLayout &DL, const CallBase &CB)
Find Alloca and linked DbgAssignIntrinsic for locals escaped by CB.
static void fixupLineNumbers(Function *Fn, Function::iterator FI, Instruction *TheCall, bool CalleeHasDebugInfo)
Update inlined instructions' line numbers to encode the location where these instructions are inlined.
static void removeCallsiteMetadata(CallBase *Call)
static void propagateMemProfHelper(const CallBase *OrigCall, CallBase *ClonedCall, MDNode *InlinedCallsiteMD)
static Value * getUnwindDestToken(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Given an EH pad, find where it unwinds.
static cl::opt< bool > PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining", cl::init(false), cl::Hidden, cl::desc("Convert align attributes to assumptions during inlining."))
static void AddReturnAttributes(CallBase &CB, ValueToValueMapTy &VMap)
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
static Value * getUnwindDestTokenHelper(Instruction *EHPad, UnwindDestMemoTy &MemoMap)
Helper for getUnwindDestToken that does the descendant-ward part of the search.
static BasicBlock * HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge, UnwindDestMemoTy *FuncletUnwindMap=nullptr)
When we inline a basic block into an invoke, we have to turn all of the calls that can throw into inv...
static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &IANodes)
Returns a DebugLoc for a new DILocation which is a clone of OrigDL inlined at InlinedAt.
static cl::opt< bool > UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden, cl::init(true), cl::desc("Use the llvm.experimental.noalias.scope.decl " "intrinsic during inlining."))
static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart, Function::iterator FEnd)
When inlining a call site that has !llvm.mem.parallel_loop_access, !llvm.access.group,...
static AttrBuilder IdentifyValidPoisonGeneratingAttributes(CallBase &CB)
static void propagateMemProfMetadata(Function *Callee, CallBase &CB, bool ContainsMemProfMetadata, const ValueMap< const Value *, WeakTrackingVH > &VMap)
static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap, const ProfileCount &CalleeEntryCount, const CallBase &TheCall, ProfileSummaryInfo *PSI, BlockFrequencyInfo *CallerBFI)
Update the branch metadata for cloned call instructions.
static void updateCallerBFI(BasicBlock *CallSiteBlock, const ValueToValueMapTy &VMap, BlockFrequencyInfo *CallerBFI, BlockFrequencyInfo *CalleeBFI, const BasicBlock &CalleeEntryBlock)
Update the block frequencies of the caller after a callee has been inlined.
static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin, ReturnInst *End)
static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src, Module *M, BasicBlock *InsertBlock, InlineFunctionInfo &IFI, Function *CalledFunc)
static cl::opt< bool > EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true), cl::Hidden, cl::desc("Convert noalias attributes to metadata during inlining."))
static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap, const DataLayout &DL, AAResults *CalleeAAR, ClonedCodeInfo &InlinedFunctionInfo)
If the inlined function has noalias arguments, then add new alias scopes for each noalias argument,...
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock, ClonedCodeInfo &InlinedCodeInfo)
If we inlined an invoke site, we need to convert calls in the body of the inlined function into invok...
static void inlineRetainOrClaimRVCalls(CallBase &CB, objcarc::ARCInstKind RVCallKind, const SmallVectorImpl< ReturnInst * > &Returns)
An operand bundle "clang.arc.attachedcall" on a call indicates the call result is implicitly consumed...
static Value * getParentPad(Value *EHPad)
Helper for getUnwindDestToken/getUnwindDestTokenHelper.
static void fixupAssignments(Function::iterator Start, Function::iterator End)
Update inlined instructions' DIAssignID metadata.
static bool allocaWouldBeStaticInEntry(const AllocaInst *AI)
Return the result of AI->isStaticAlloca() if AI were moved to the entry block.
static bool isUsedByLifetimeMarker(Value *V)
static void removeMemProfMetadata(CallBase *Call)
static Value * HandleByValArgument(Type *ByValType, Value *Arg, Instruction *TheCall, const Function *CalledFunc, InlineFunctionInfo &IFI, MaybeAlign ByValAlignment)
When inlining a call site that has a byval argument, we have to make the implicit memcpy explicit by ...
static void AddAlignmentAssumptions(CallBase &CB, InlineFunctionInfo &IFI)
If the inlined function has non-byval align arguments, then add @llvm.assume-based alignment assumpti...
static void trackInlinedStores(Function::iterator Start, Function::iterator End, const CallBase &CB)
static cl::opt< unsigned > InlinerAttributeWindow("max-inst-checked-for-throw-during-inlining", cl::Hidden, cl::desc("the maximum number of instructions analyzed for may throw during " "attribute inference in inlined body"), cl::init(4))
static bool haveCommonPrefix(MDNode *MIBStackContext, MDNode *CallsiteStackContext)
static void PropagateOperandBundles(Function::iterator InlinedBB, Instruction *CallSiteEHPad)
Bundle operands of the inlined function must be added to inlined call sites.
static bool hasLifetimeMarkers(AllocaInst *AI)
static void updateMemprofMetadata(CallBase *CI, const std::vector< Metadata * > &MIBList)
static DebugLoc getDebugLoc(MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
Return the first found DebugLoc that has a DILocation, given a range of instructions.
Module.h This file contains the declarations for the Module class.
This file defines common analysis utilities used by the ObjC ARC Optimizer.
This file defines ARC utility functions which are used by various parts of the compiler.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallVector class.
MemoryEffects getMemoryEffects(const CallBase *Call)
Return the behavior of the given call site.
Class for arbitrary precision integers.
an instruction to allocate memory on the stack
bool isSwiftError() const
Return true if this alloca is used as a swifterror argument to a call.
PointerType * getType() const
Overload to return most specific pointer type.
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
bool isUsedWithInAlloca() const
Return true if this alloca is used as an inalloca argument to a call.
const Value * getArraySize() const
Get the number of elements allocated.
This class represents an incoming formal argument to a Function.
A cache of @llvm.assume calls within a function.
void registerAssumption(AssumeInst *CI)
Add an @llvm.assume intrinsic to this function's cache.
An instruction that atomically checks whether a specified value is in a memory location,...
an instruction that atomically reads a memory location, combines it with another value,...
AttrBuilder & addAlignmentAttr(MaybeAlign Align)
This turns an alignment into the form used internally in Attribute.
uint64_t getDereferenceableBytes() const
Retrieve the number of dereferenceable bytes, if the dereferenceable attribute exists (zero is return...
bool hasAttributes() const
Return true if the builder has IR-level attributes.
AttrBuilder & addAttribute(Attribute::AttrKind Val)
Add an attribute to the builder.
MaybeAlign getAlignment() const
Retrieve the alignment attribute, if it exists.
AttrBuilder & addDereferenceableAttr(uint64_t Bytes)
This turns the number of dereferenceable bytes into the form used internally in Attribute.
uint64_t getDereferenceableOrNullBytes() const
Retrieve the number of dereferenceable_or_null bytes, if the dereferenceable_or_null attribute exists...
AttrBuilder & removeAttribute(Attribute::AttrKind Val)
Remove an attribute from the builder.
AttrBuilder & addDereferenceableOrNullAttr(uint64_t Bytes)
This turns the number of dereferenceable_or_null bytes into the form used internally in Attribute.
AttributeList addRetAttributes(LLVMContext &C, const AttrBuilder &B) const
Add a return value attribute to the list.
static AttributeList get(LLVMContext &C, ArrayRef< std::pair< unsigned, Attribute > > Attrs)
Create an AttributeList with the specified parameters in it.
AttributeSet getParamAttrs(unsigned ArgNo) const
The attributes for the argument or parameter at the given index are returned.
LLVM Basic Block Representation.
iterator begin()
Instruction iterator methods.
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const Instruction * getFirstNonPHI() const
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
BasicBlock * splitBasicBlock(iterator I, const Twine &BBName="", bool Before=false)
Split the basic block into two basic blocks at the specified instruction.
const CallInst * getTerminatingDeoptimizeCall() const
Returns the call instruction calling @llvm.experimental.deoptimize prior to the terminating return in...
const Function * getParent() const
Return the enclosing method, or null if none.
SymbolTableList< BasicBlock >::iterator eraseFromParent()
Unlink 'this' from the containing function and delete it.
InstListType::iterator iterator
Instruction iterators...
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
const CallInst * getTerminatingMustTailCall() const
Returns the call instruction marked 'musttail' prior to the terminating return instruction of this ba...
void splice(BasicBlock::iterator ToIt, BasicBlock *FromBB)
Transfer all instructions from FromBB to this basic block at ToIt.
void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs=false)
Update PHI nodes in this BasicBlock before removal of predecessor Pred.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
void setBlockFreq(const BasicBlock *BB, BlockFrequency Freq)
void setBlockFreqAndScale(const BasicBlock *ReferenceBB, BlockFrequency Freq, SmallPtrSetImpl< BasicBlock * > &BlocksToScale)
Set the frequency of ReferenceBB to Freq and scale the frequencies of the blocks in BlocksToScale suc...
BlockFrequency getBlockFreq(const BasicBlock *BB) const
getBlockFreq - Return block frequency.
Conditional or Unconditional Branch instruction.
static BranchInst * Create(BasicBlock *IfTrue, BasicBlock::iterator InsertBefore)
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
void setCallingConv(CallingConv::ID CC)
MaybeAlign getRetAlign() const
Extract the alignment of the return value.
void getOperandBundlesAsDefs(SmallVectorImpl< OperandBundleDef > &Defs) const
Return the list of operand bundles attached to this instruction as a vector of OperandBundleDefs.
OperandBundleUse getOperandBundleAt(unsigned Index) const
Return the operand bundle at a specific index.
std::optional< OperandBundleUse > getOperandBundle(StringRef Name) const
Return an operand bundle by name, if present.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
void removeRetAttrs(const AttributeMask &AttrsToRemove)
Removes the attributes from the return value.
bool hasRetAttr(Attribute::AttrKind Kind) const
Determine whether the return value has the given attribute.
unsigned getNumOperandBundles() const
Return the number of operand bundles associated with this User.
CallingConv::ID getCallingConv() const
bool paramHasAttr(unsigned ArgNo, Attribute::AttrKind Kind) const
Determine whether the argument or parameter has the given attribute.
User::op_iterator arg_begin()
Return the iterator pointing to the beginning of the argument list.
bool isByValArgument(unsigned ArgNo) const
Determine whether this argument is passed by value.
static CallBase * Create(CallBase *CB, ArrayRef< OperandBundleDef > Bundles, BasicBlock::iterator InsertPt)
Create a clone of CB with a different set of operand bundles and insert it before InsertPt.
Type * getParamByValType(unsigned ArgNo) const
Extract the byval type for a call or parameter.
Value * getCalledOperand() const
void setAttributes(AttributeList A)
Set the parameter attributes for this call.
bool doesNotThrow() const
Determine if the call cannot unwind.
Value * getArgOperand(unsigned i) const
uint64_t getRetDereferenceableBytes() const
Extract the number of dereferenceable bytes for a call or parameter (0=unknown).
bool isConvergent() const
Determine if the invoke is convergent.
FunctionType * getFunctionType() const
Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
uint64_t getRetDereferenceableOrNullBytes() const
Extract the number of dereferenceable_or_null bytes for a call (0=unknown).
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
AttributeList getAttributes() const
Return the parameter attributes for this call.
static CallBase * addOperandBundle(CallBase *CB, uint32_t ID, OperandBundleDef OB, Instruction *InsertPt=nullptr)
Create a clone of CB with operand bundle OB added.
bool hasOperandBundles() const
Return true if this User has any operand bundles.
Function * getCaller()
Helper to get the caller (the parent function).
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr, BasicBlock::iterator InsertBefore)
void setTailCallKind(TailCallKind TCK)
TailCallKind getTailCallKind() const
bool isMustTailCall() const
static CatchSwitchInst * Create(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumHandlers, const Twine &NameStr, BasicBlock::iterator InsertBefore)
static CleanupReturnInst * Create(Value *CleanupPad, BasicBlock *UnwindBB, BasicBlock::iterator InsertBefore)
This is the shared class of boolean and integer constants.
static ConstantTokenNone * get(LLVMContext &Context)
Return the ConstantTokenNone.
This is an important base class in LLVM.
const Constant * stripPointerCasts() const
static DIAssignID * getDistinct(LLVMContext &Context)
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Base class for non-instruction debug metadata records that have positions within IR.
Record of a variable value-assignment, aka a non instruction representation of the dbg....
DILocation * get() const
Get the underlying DILocation.
MDNode * getScope() const
static DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt, LLVMContext &Ctx, DenseMap< const MDNode *, MDNode * > &Cache)
Rebuild the entire inlined-at chain for this instruction so that the top of the chain now is inlined-...
iterator find(const_arg_type_t< KeyT > Val)
size_type count(const_arg_type_t< KeyT > Val) const
Return 1 if the specified key is in the map, 0 otherwise.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
void recalculate(ParentType &Func)
recalculate - compute a dominator tree for the given function
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
unsigned getNumParams() const
Return the number of fixed parameters this function type requires.
Class to represent profile counts.
uint64_t getCount() const
const BasicBlock & getEntryBlock() const
BasicBlockListType::iterator iterator
FunctionType * getFunctionType() const
Returns the FunctionType for me.
const BasicBlock & front() const
iterator_range< arg_iterator > args()
DISubprogram * getSubprogram() const
Get the attached subprogram.
bool hasGC() const
hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm to use during code generatio...
CallingConv::ID getCallingConv() const
getCallingConv()/setCallingConv(CC) - These method get and set the calling convention of this functio...
bool hasPersonalityFn() const
Check whether this function has a personality function.
Constant * getPersonalityFn() const
Get the personality function associated with this function.
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
MaybeAlign getParamAlign(unsigned ArgNo) const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
const std::string & getGC() const
std::optional< ProfileCount > getEntryCount(bool AllowSynthetic=false) const
Get the entry count for this function.
Type * getReturnType() const
Returns the type of the ret val.
void setCallingConv(CallingConv::ID CC)
bool onlyReadsMemory() const
Determine if the function does not access or only reads memory.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
Module * getParent()
Get the module that this global value is contained inside of...
CallInst * CreateStackSave(const Twine &Name="")
Create a call to llvm.stacksave.
CallInst * CreateLifetimeStart(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.start intrinsic.
CallInst * CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, unsigned Alignment, Value *OffsetValue=nullptr)
Create an assume intrinsic call that represents an alignment assumption on the provided pointer.
ReturnInst * CreateRet(Value *V)
Create a 'ret <val>' instruction.
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
ReturnInst * CreateRetVoid()
Create a 'ret void' instruction.
CallInst * CreateLifetimeEnd(Value *Ptr, ConstantInt *Size=nullptr)
Create a lifetime.end intrinsic.
CallInst * CreateStackRestore(Value *Ptr, const Twine &Name="")
Create a call to llvm.stackrestore.
void SetInsertPoint(BasicBlock *TheBB)
This specifies that created instructions should be appended to the end of the specified block.
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args=std::nullopt, const Twine &Name="", MDNode *FPMathTag=nullptr)
CallInst * CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, uint64_t Size, bool isVolatile=false, MDNode *TBAATag=nullptr, MDNode *TBAAStructTag=nullptr, MDNode *ScopeTag=nullptr, MDNode *NoAliasTag=nullptr)
Create and insert a memcpy between the specified pointers.
Instruction * CreateNoAliasScopeDeclaration(Value *Scope)
Create a llvm.experimental.noalias.scope.decl intrinsic call.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
This class captures the data input to the InlineFunction call, and records the auxiliary results prod...
bool UpdateProfile
Update profile for callee as well as cloned version.
function_ref< AssumptionCache &(Function &)> GetAssumptionCache
If non-null, InlineFunction will update the callgraph to reflect the changes it makes.
BlockFrequencyInfo * CalleeBFI
SmallVector< AllocaInst *, 4 > StaticAllocas
InlineFunction fills this in with all static allocas that get copied into the caller.
BlockFrequencyInfo * CallerBFI
SmallVector< CallBase *, 8 > InlinedCallSites
All of the new call sites inlined into the caller.
InlineResult is basically true or false.
static InlineResult success()
static InlineResult failure(const char *Reason)
bool isLifetimeStartOrEnd() const LLVM_READONLY
Return true if the instruction is a llvm.lifetime.start or llvm.lifetime.end marker.
void insertBefore(Instruction *InsertPos)
Insert an unlinked instruction into a basic block immediately before the specified instruction.
const DebugLoc & getDebugLoc() const
Return the debug location for this node as a DebugLoc.
const Module * getModule() const
Return the module owning the function this instruction belongs to, or nullptr if the function does not...
bool hasMetadata() const
Return true if this instruction has any metadata attached to it.
bool isEHPad() const
Return true if the instruction is a variety of EH-block.
const BasicBlock * getParent() const
InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
const Function * getFunction() const
Return the function this instruction belongs to.
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
unsigned getOpcode() const
Returns a member of one of the enums like Instruction::Add.
void setDebugLoc(DebugLoc Loc)
Set the debug location information for this instruction.
A wrapper class for inspecting calls to intrinsic functions.
static bool mayLowerToFunctionCall(Intrinsic::ID IID)
Check if the intrinsic might lower into a regular function call in the course of IR transformations.
BasicBlock * getUnwindDest() const
LandingPadInst * getLandingPadInst() const
Get the landingpad instruction from the landing pad block (the unwind destination).
BasicBlock * getNormalDest() const
This is an important class for using LLVM in a threaded context.
@ OB_clang_arc_attachedcall
The landingpad instruction holds all of the information necessary to generate correct exception handl...
bool isCleanup() const
Return 'true' if this landingpad instruction is a cleanup.
unsigned getNumClauses() const
Get the number of clauses for this landing pad.
Constant * getClause(unsigned Idx) const
Get the value of the clause at index Idx.
An instruction for reading from memory.
MDNode * createAnonymousAliasScope(MDNode *Domain, StringRef Name=StringRef())
Return metadata appropriate for an alias scope root node.
MDNode * createAnonymousAliasScopeDomain(StringRef Name=StringRef())
Return metadata appropriate for an alias scope domain node.
void replaceAllUsesWith(Metadata *MD)
RAUW a temporary.
static MDNode * concatenate(MDNode *A, MDNode *B)
Methods for metadata merging.
ArrayRef< MDOperand > operands() const
op_iterator op_end() const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned getNumOperands() const
Return number of MDNode operands.
op_iterator op_begin() const
LLVMContext & getContext() const
static TempMDTuple getTemporary(LLVMContext &Context, ArrayRef< Metadata * > MDs)
Return a temporary node.
bool onlyAccessesInaccessibleMem() const
Whether this function only (at most) accesses inaccessible memory.
bool onlyAccessesArgPointees() const
Whether this function only (at most) accesses argument memory.
A Module instance is used to store all the information related to an LLVM module.
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
A container for an operand bundle being viewed as a set of values rather than a set of uses.
void addIncoming(Value *V, BasicBlock *BB)
Add an incoming value to the end of the PHI list.
static PHINode * Create(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, BasicBlock::iterator InsertBefore)
Constructors - NumReservedValues is a hint for the number of incoming edges that this phi node will h...
static PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
Analysis providing profile information.
std::optional< uint64_t > getProfileCount(const CallBase &CallInst, BlockFrequencyInfo *BFI, bool AllowSynthetic=false) const
Returns the profile count for CallInst.
Resume the propagation of an exception.
Return a value (possibly void), from a function.
Value * getReturnValue() const
Convenience accessor. Returns null if there is no return value.
A vector that has set insertion semantics.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
reference emplace_back(ArgTypes &&... Args)
void reserve(size_type N)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
The instances of the Type class are immutable: once they are created, they are never changed.
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
static IntegerType * getInt64Ty(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
void setOperand(unsigned i, Value *Val)
Value * getOperand(unsigned i) const
This class represents the va_arg llvm instruction, which returns an argument of the specified type gi...
ValueT lookup(const KeyT &Val) const
lookup - Return the entry for the specified key, or a default constructed value if no such entry exis...
size_type count(const KeyT &Val) const
Return 1 if the specified key is in the map, 0 otherwise.
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
void replaceAllUsesWith(Value *V)
Change all uses of this to point to a new Value.
iterator_range< user_iterator > users()
bool hasNUses(unsigned N) const
Return true if this Value has exactly N uses.
LLVMContext & getContext() const
All values hold a context through their type.
StringRef getName() const
Return a constant reference to the value's name.
void takeName(Value *V)
Transfer the name from V to this value.
constexpr ScalarTy getFixedValue() const
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
self_iterator getIterator()
Class to build a trie of call stack contexts for a particular profiled allocation call,...
Helper class to iterate through stack ids in both metadata (memprof MIB and callsite) and the corresp...
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
void mergeAttributesForInlining(Function &Caller, const Function &Callee)
Merge caller's and callee's attributes.
AttributeMask typeIncompatible(Type *Ty, AttributeSafetyKind ASK=ASK_ALL)
Which attributes cannot be applied to a type.
Function * getDeclaration(Module *M, ID id, ArrayRef< Type * > Tys=std::nullopt)
Create or insert an LLVM Function declaration for an intrinsic, and return it.
AssignmentMarkerRange getAssignmentMarkers(DIAssignID *ID)
Return a range of dbg.assign intrinsics which use \ID as an operand.
void trackAssignments(Function::iterator Start, Function::iterator End, const StorageToVarsMap &Vars, const DataLayout &DL, bool DebugPrints=false)
Track assignments to Vars between Start and End.
SmallVector< DbgVariableRecord * > getDVRAssignmentMarkers(const Instruction *Inst)
initializer< Ty > init(const Ty &Val)
MDNode * getMIBStackNode(const MDNode *MIB)
Returns the stack node from an MIB metadata node.
ARCInstKind getAttachedARCFunctionKind(const CallBase *CB)
This function returns the ARCInstKind of the function attached to operand bundle clang_arc_attachedca...
ARCInstKind
Equivalence classes of instructions in the ARC Model.
std::optional< Function * > getAttachedARCFunction(const CallBase *CB)
This function returns operand bundle clang_arc_attachedcall's argument, which is the address of the A...
bool isRetainOrClaimRV(ARCInstKind Kind)
Check whether the function is retainRV/unsafeClaimRV.
const Value * GetRCIdentityRoot(const Value *V)
The RCIdentity root of a value V is a dominating value U for which retaining or releasing U is equiva...
bool hasAttachedCallOpBundle(const CallBase *CB)
This is an optimization pass for GlobalISel generic memory operations.
UnaryFunction for_each(R &&Range, UnaryFunction F)
Provide wrappers to std::for_each which take ranges instead of having to pass begin/end explicitly.
BasicBlock * changeToInvokeAndSplitBasicBlock(CallInst *CI, BasicBlock *UnwindEdge, DomTreeUpdater *DTU=nullptr)
Convert the CallInst to InvokeInst with the specified unwind edge basic block.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures, bool StoreCaptures, const Instruction *I, const DominatorTree *DT, bool IncludeI=false, unsigned MaxUsesToExplore=0, const LoopInfo *LI=nullptr)
PointerMayBeCapturedBefore - Return true if this pointer value may be captured by the enclosing funct...
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
iterator_range< early_inc_iterator_impl< detail::IterOfRange< RangeT > > > make_early_inc_range(RangeT &&Range)
Make a range that does early increment to allow mutation of the underlying range without disrupting i...
bool isScopedEHPersonality(EHPersonality Pers)
Returns true if this personality uses scope-style EH IR instructions: catchswitch,...
Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
Align getKnownAlignment(Value *V, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to infer an alignment for the specified pointer.
Align getOrEnforceKnownAlignment(Value *V, MaybeAlign PrefAlign, const DataLayout &DL, const Instruction *CxtI=nullptr, AssumptionCache *AC=nullptr, const DominatorTree *DT=nullptr)
Try to ensure that the alignment of V is at least PrefAlign bytes.
void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc, ValueToValueMapTy &VMap, bool ModuleLevelChanges, SmallVectorImpl< ReturnInst * > &Returns, const char *NameSuffix="", ClonedCodeInfo *CodeInfo=nullptr)
This works exactly like CloneFunctionInto, except that it does some simple constant prop and DCE on t...
EHPersonality classifyEHPersonality(const Value *Pers)
See if the given exception handling personality function is one that we understand.
unsigned changeToUnreachable(Instruction *I, bool PreserveLCSSA=false, DomTreeUpdater *DTU=nullptr, MemorySSAUpdater *MSSAU=nullptr)
Insert an unreachable instruction before the specified instruction, making it and the rest of the cod...
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
bool salvageKnowledge(Instruction *I, AssumptionCache *AC=nullptr, DominatorTree *DT=nullptr)
Calls BuildAssumeFromInst and if the resulting llvm.assume is valid insert if before I.
void updateProfileCallee(Function *Callee, int64_t EntryDelta, const ValueMap< const Value *, WeakTrackingVH > *VMap=nullptr)
Updates profile information by adjusting the entry count by adding EntryDelta then scaling callsite i...
bool isAssignmentTrackingEnabled(const Module &M)
Return true if assignment tracking is enabled for module M.
MDNode * uniteAccessGroups(MDNode *AccGroups1, MDNode *AccGroups2)
Compute the union of two access-group lists.
InlineResult InlineFunction(CallBase &CB, InlineFunctionInfo &IFI, bool MergeAttributes=false, AAResults *CalleeAAR=nullptr, bool InsertLifetime=true, Function *ForwardVarArgsTo=nullptr)
This function inlines the called function into the basic block of the caller.
bool isAsynchronousEHPersonality(EHPersonality Pers)
Returns true if this personality function catches asynchronous exceptions.
bool isGuaranteedToTransferExecutionToSuccessor(const Instruction *I)
Return true if this function can prove that the instruction I will always transfer execution to one o...
bool isEscapeSource(const Value *V)
Returns true if the pointer is one which would have been considered an escape by isNonEscapingLocalOb...
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
bool pred_empty(const BasicBlock *BB)
static auto filterDbgVars(iterator_range< simple_ilist< DbgRecord >::iterator > R)
Filter the DbgRecord range to DbgVariableRecord types only and downcast.
void updateLoopMetadataDebugLocations(Instruction &I, function_ref< Metadata *(Metadata *)> Updater)
Update the debug locations contained within the MD_loop metadata attached to the instruction I,...
bool isIdentifiedObject(const Value *V)
Return true if this pointer refers to a distinct and identifiable object.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
This struct can be used to capture information about code being cloned, while it is being cloned.
bool ContainsDynamicAllocas
This is set to true if the cloned code contains a 'dynamic' alloca.
bool isSimplified(const Value *From, const Value *To) const
bool ContainsCalls
This is set to true if the cloned code contains a normal call instruction.
bool ContainsMemProfMetadata
This is set to true if there is memprof related metadata (memprof or callsite metadata) in the cloned...
std::vector< WeakTrackingVH > OperandBundleCallSites
All cloned call sites that have operand bundles attached are appended to this vector.
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Align valueOrOne() const
For convenience, returns a valid alignment or 1 if undefined.
Helper struct for trackAssignments, below.