1//===- InlineFunction.cpp - Code to perform function inlining -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements inlining of a function into a call site, resolving
10// parameters and the return value as appropriate.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/ADT/DenseMap.h"
15#include "llvm/ADT/STLExtras.h"
16#include "llvm/ADT/SetVector.h"
34#include "llvm/IR/Argument.h"
35#include "llvm/IR/BasicBlock.h"
36#include "llvm/IR/CFG.h"
37#include "llvm/IR/Constant.h"
38#include "llvm/IR/Constants.h"
39#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/DebugInfo.h"
42#include "llvm/IR/DebugLoc.h"
44#include "llvm/IR/Dominators.h"
46#include "llvm/IR/Function.h"
47#include "llvm/IR/IRBuilder.h"
48#include "llvm/IR/InlineAsm.h"
49#include "llvm/IR/InstrTypes.h"
50#include "llvm/IR/Instruction.h"
53#include "llvm/IR/Intrinsics.h"
54#include "llvm/IR/LLVMContext.h"
55#include "llvm/IR/MDBuilder.h"
56#include "llvm/IR/Metadata.h"
57#include "llvm/IR/Module.h"
58#include "llvm/IR/Type.h"
59#include "llvm/IR/User.h"
60#include "llvm/IR/Value.h"
68#include <algorithm>
69#include <cassert>
70#include <cstdint>
71#include <iterator>
72#include <limits>
73#include <optional>
74#include <string>
75#include <utility>
76#include <vector>
77
78#define DEBUG_TYPE "inline-function"
79
80using namespace llvm;
81using namespace llvm::memprof;
83
84static cl::opt<bool>
85EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
86 cl::Hidden,
87 cl::desc("Convert noalias attributes to metadata during inlining."));
88
89static cl::opt<bool>
90 UseNoAliasIntrinsic("use-noalias-intrinsic-during-inlining", cl::Hidden,
91 cl::init(true),
92 cl::desc("Use the llvm.experimental.noalias.scope.decl "
93 "intrinsic during inlining."));
94
95// Disabled by default, because the added alignment assumptions may increase
96// compile-time and block optimizations. This option is not suitable for use
97// with frontends that emit comprehensive parameter alignment annotations.
98static cl::opt<bool>
99PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
100 cl::init(false), cl::Hidden,
101 cl::desc("Convert align attributes to assumptions during inlining."));
102
104 "max-inst-checked-for-throw-during-inlining", cl::Hidden,
105 cl::desc("the maximum number of instructions analyzed for may throw during "
106 "attribute inference in inlined body"),
107 cl::init(4));
108
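// Illustrative usage (not part of the original source): these cl::opt flags
// can be toggled on the opt command line when experimenting with inlining,
// for example:
//   opt -passes=inline -enable-noalias-to-md-conversion=false \
//       -max-inst-checked-for-throw-during-inlining=8 -S input.ll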
109namespace {
110
111 /// A class for recording information about inlining a landing pad.
112 class LandingPadInliningInfo {
113 /// Destination of the invoke's unwind.
114 BasicBlock *OuterResumeDest;
115
116 /// Destination for the callee's resume.
117 BasicBlock *InnerResumeDest = nullptr;
118
119 /// LandingPadInst associated with the invoke.
120 LandingPadInst *CallerLPad = nullptr;
121
122 /// PHI for EH values from landingpad insts.
123 PHINode *InnerEHValuesPHI = nullptr;
124
125 SmallVector<Value*, 8> UnwindDestPHIValues;
126
127 public:
128 LandingPadInliningInfo(InvokeInst *II)
129 : OuterResumeDest(II->getUnwindDest()) {
130 // If there are PHI nodes in the unwind destination block, we need to keep
131 // track of which values came into them from the invoke before removing
132 // the edge from this block.
133 BasicBlock *InvokeBB = II->getParent();
134 BasicBlock::iterator I = OuterResumeDest->begin();
135 for (; isa<PHINode>(I); ++I) {
136 // Save the value to use for this edge.
137 PHINode *PHI = cast<PHINode>(I);
138 UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
139 }
140
141 CallerLPad = cast<LandingPadInst>(I);
142 }
143
144 /// The outer unwind destination is the target of
145 /// unwind edges introduced for calls within the inlined function.
146 BasicBlock *getOuterResumeDest() const {
147 return OuterResumeDest;
148 }
149
150 BasicBlock *getInnerResumeDest();
151
152 LandingPadInst *getLandingPadInst() const { return CallerLPad; }
153
154 /// Forward the 'resume' instruction to the caller's landing pad block.
155 /// When the landing pad block has only one predecessor, this is
156 /// a simple branch. When there is more than one predecessor, we need to
157 /// split the landing pad block after the landingpad instruction and jump
158 /// to there.
159 void forwardResume(ResumeInst *RI,
160 SmallVectorImpl<Value *> &InlinedLPads);
161
162 /// Add incoming-PHI values to the unwind destination block for the given
163 /// basic block, using the values for the original invoke's source block.
164 void addIncomingPHIValuesFor(BasicBlock *BB) const {
165 addIncomingPHIValuesForInto(BB, OuterResumeDest);
166 }
167
168 void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
169 BasicBlock::iterator I = dest->begin();
170 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
171 PHINode *phi = cast<PHINode>(I);
172 phi->addIncoming(UnwindDestPHIValues[i], src);
173 }
174 }
175 };
176
177} // end anonymous namespace
178
179/// Get or create a target for the branch from ResumeInsts.
180BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
181 if (InnerResumeDest) return InnerResumeDest;
182
183 // Split the landing pad.
184 BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
185 InnerResumeDest =
186 OuterResumeDest->splitBasicBlock(SplitPoint,
187 OuterResumeDest->getName() + ".body");
188
189 // The number of incoming edges we expect to the inner landing pad.
190 const unsigned PHICapacity = 2;
191
192 // Create corresponding new PHIs for all the PHIs in the outer landing pad.
193 BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
194 BasicBlock::iterator I = OuterResumeDest->begin();
195 for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
196 PHINode *OuterPHI = cast<PHINode>(I);
197 PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
198 OuterPHI->getName() + ".lpad-body");
199 InnerPHI->insertBefore(InsertPoint);
200 OuterPHI->replaceAllUsesWith(InnerPHI);
201 InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
202 }
203
204 // Create a PHI for the exception values.
205 InnerEHValuesPHI =
206 PHINode::Create(CallerLPad->getType(), PHICapacity, "eh.lpad-body");
207 InnerEHValuesPHI->insertBefore(InsertPoint);
208 CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
209 InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);
210
211 // All done.
212 return InnerResumeDest;
213}
214
215/// Forward the 'resume' instruction to the caller's landing pad block.
216/// When the landing pad block has only one predecessor, this is a simple
217/// branch. When there is more than one predecessor, we need to split the
218/// landing pad block after the landingpad instruction and jump to there.
219void LandingPadInliningInfo::forwardResume(
220 ResumeInst *RI, SmallVectorImpl<Value *> &InlinedLPads) {
221 BasicBlock *Dest = getInnerResumeDest();
222 BasicBlock *Src = RI->getParent();
223
224 BranchInst::Create(Dest, Src);
225
226 // Update the PHIs in the destination. They were inserted in an order which
227 // makes this work.
228 addIncomingPHIValuesForInto(Src, Dest);
229
230 InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
231 RI->eraseFromParent();
232}
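// Illustrative sketch (not part of the original source): after inlining a
// callee with its own landingpad into an invoke site, a 'resume' in the
// inlined body such as
//   resume { ptr, i32 } %lpad.val
// is rewritten by forwardResume() into an unconditional branch to the split
// ".body" block of the caller's landing pad, with %lpad.val fed into the
// "eh.lpad-body" PHI created by getInnerResumeDest().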
233
234/// Helper for getUnwindDestToken/getUnwindDestTokenHelper.
235static Value *getParentPad(Value *EHPad) {
236 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
237 return FPI->getParentPad();
238 return cast<CatchSwitchInst>(EHPad)->getParentPad();
239}
240
241using UnwindDestMemoTy = DenseMap<Instruction *, Value *>;
242
243/// Helper for getUnwindDestToken that does the descendant-ward part of
244/// the search.
245static Value *getUnwindDestTokenHelper(Instruction *EHPad,
246 UnwindDestMemoTy &MemoMap) {
247 SmallVector<Instruction *, 8> Worklist(1, EHPad);
248
249 while (!Worklist.empty()) {
250 Instruction *CurrentPad = Worklist.pop_back_val();
251 // We only put pads on the worklist that aren't in the MemoMap. When
252 // we find an unwind dest for a pad we may update its ancestors, but
253 // the queue only ever contains uncles/great-uncles/etc. of CurrentPad,
254 // so they should never get updated while queued on the worklist.
255 assert(!MemoMap.count(CurrentPad));
256 Value *UnwindDestToken = nullptr;
257 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(CurrentPad)) {
258 if (CatchSwitch->hasUnwindDest()) {
259 UnwindDestToken = CatchSwitch->getUnwindDest()->getFirstNonPHI();
260 } else {
261 // Catchswitch doesn't have a 'nounwind' variant, and one might be
262 // annotated as "unwinds to caller" when really it's nounwind (see
263 // e.g. SimplifyCFGOpt::SimplifyUnreachable), so we can't infer the
264 // parent's unwind dest from this. We can check its catchpads'
265 // descendants, since they might include a cleanuppad with an
266 // "unwinds to caller" cleanupret, which can be trusted.
267 for (auto HI = CatchSwitch->handler_begin(),
268 HE = CatchSwitch->handler_end();
269 HI != HE && !UnwindDestToken; ++HI) {
270 BasicBlock *HandlerBlock = *HI;
271 auto *CatchPad = cast<CatchPadInst>(HandlerBlock->getFirstNonPHI());
272 for (User *Child : CatchPad->users()) {
273 // Intentionally ignore invokes here -- since the catchswitch is
274 // marked "unwind to caller", it would be a verifier error if it
275 // contained an invoke which unwinds out of it, so any invoke we'd
276 // encounter must unwind to some child of the catch.
277 if (!isa<CleanupPadInst>(Child) && !isa<CatchSwitchInst>(Child))
278 continue;
279
280 Instruction *ChildPad = cast<Instruction>(Child);
281 auto Memo = MemoMap.find(ChildPad);
282 if (Memo == MemoMap.end()) {
283 // Haven't figured out this child pad yet; queue it.
284 Worklist.push_back(ChildPad);
285 continue;
286 }
287 // We've already checked this child, but might have found that
288 // it offers no proof either way.
289 Value *ChildUnwindDestToken = Memo->second;
290 if (!ChildUnwindDestToken)
291 continue;
292 // We already know the child's unwind dest, which can either
293 // be ConstantTokenNone to indicate unwind to caller, or can
294 // be another child of the catchpad. Only the former indicates
295 // the unwind dest of the catchswitch.
296 if (isa<ConstantTokenNone>(ChildUnwindDestToken)) {
297 UnwindDestToken = ChildUnwindDestToken;
298 break;
299 }
300 assert(getParentPad(ChildUnwindDestToken) == CatchPad);
301 }
302 }
303 }
304 } else {
305 auto *CleanupPad = cast<CleanupPadInst>(CurrentPad);
306 for (User *U : CleanupPad->users()) {
307 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(U)) {
308 if (BasicBlock *RetUnwindDest = CleanupRet->getUnwindDest())
309 UnwindDestToken = RetUnwindDest->getFirstNonPHI();
310 else
311 UnwindDestToken = ConstantTokenNone::get(CleanupPad->getContext());
312 break;
313 }
314 Value *ChildUnwindDestToken;
315 if (auto *Invoke = dyn_cast<InvokeInst>(U)) {
316 ChildUnwindDestToken = Invoke->getUnwindDest()->getFirstNonPHI();
317 } else if (isa<CleanupPadInst>(U) || isa<CatchSwitchInst>(U)) {
318 Instruction *ChildPad = cast<Instruction>(U);
319 auto Memo = MemoMap.find(ChildPad);
320 if (Memo == MemoMap.end()) {
321 // Haven't resolved this child yet; queue it and keep searching.
322 Worklist.push_back(ChildPad);
323 continue;
324 }
325 // We've checked this child, but still need to ignore it if it
326 // had no proof either way.
327 ChildUnwindDestToken = Memo->second;
328 if (!ChildUnwindDestToken)
329 continue;
330 } else {
331 // Not a relevant user of the cleanuppad
332 continue;
333 }
334 // In a well-formed program, the child/invoke must either unwind to
335 // an(other) child of the cleanup, or exit the cleanup. In the
336 // first case, continue searching.
337 if (isa<Instruction>(ChildUnwindDestToken) &&
338 getParentPad(ChildUnwindDestToken) == CleanupPad)
339 continue;
340 UnwindDestToken = ChildUnwindDestToken;
341 break;
342 }
343 }
344 // If we haven't found an unwind dest for CurrentPad, we may have queued its
345 // children, so move on to the next in the worklist.
346 if (!UnwindDestToken)
347 continue;
348
349 // Now we know that CurrentPad unwinds to UnwindDestToken. It also exits
350 // any ancestors of CurrentPad up to but not including UnwindDestToken's
351 // parent pad. Record this in the memo map, and check to see if the
352 // original EHPad being queried is one of the ones exited.
353 Value *UnwindParent;
354 if (auto *UnwindPad = dyn_cast<Instruction>(UnwindDestToken))
355 UnwindParent = getParentPad(UnwindPad);
356 else
357 UnwindParent = nullptr;
358 bool ExitedOriginalPad = false;
359 for (Instruction *ExitedPad = CurrentPad;
360 ExitedPad && ExitedPad != UnwindParent;
361 ExitedPad = dyn_cast<Instruction>(getParentPad(ExitedPad))) {
362 // Skip over catchpads since they just follow their catchswitches.
363 if (isa<CatchPadInst>(ExitedPad))
364 continue;
365 MemoMap[ExitedPad] = UnwindDestToken;
366 ExitedOriginalPad |= (ExitedPad == EHPad);
367 }
368
369 if (ExitedOriginalPad)
370 return UnwindDestToken;
371
372 // Continue the search.
373 }
374
375 // No definitive information is contained within this funclet.
376 return nullptr;
377}
378
379/// Given an EH pad, find where it unwinds. If it unwinds to an EH pad,
380/// return that pad instruction. If it unwinds to caller, return
381/// ConstantTokenNone. If it does not have a definitive unwind destination,
382/// return nullptr.
383///
384/// This routine gets invoked for calls in funclets in inlinees when inlining
385/// an invoke. Since many funclets don't have calls inside them, it's queried
386/// on-demand rather than building a map of pads to unwind dests up front.
387/// Determining a funclet's unwind dest may require recursively searching its
388/// descendants, and also ancestors and cousins if the descendants don't provide
389/// an answer. Since most funclets will have their unwind dest immediately
390/// available as the unwind dest of a catchswitch or cleanupret, this routine
391/// searches top-down from the given pad and then up. To avoid worst-case
392/// quadratic run-time given that approach, it uses a memo map to avoid
393/// re-processing funclet trees. The callers that rewrite the IR as they go
394/// take advantage of this, for correctness, by checking/forcing rewritten
395/// pads' entries to match the original callee view.
396static Value *getUnwindDestToken(Instruction *EHPad,
397 UnwindDestMemoTy &MemoMap) {
398 // Catchpads unwind to the same place as their catchswitch;
399 // redirect any queries on catchpads so the code below can
400 // deal with just catchswitches and cleanuppads.
401 if (auto *CPI = dyn_cast<CatchPadInst>(EHPad))
402 EHPad = CPI->getCatchSwitch();
403
404 // Check if we've already determined the unwind dest for this pad.
405 auto Memo = MemoMap.find(EHPad);
406 if (Memo != MemoMap.end())
407 return Memo->second;
408
409 // Search EHPad and, if necessary, its descendants.
410 Value *UnwindDestToken = getUnwindDestTokenHelper(EHPad, MemoMap);
411 assert((UnwindDestToken == nullptr) != (MemoMap.count(EHPad) != 0));
412 if (UnwindDestToken)
413 return UnwindDestToken;
414
415 // No information is available for this EHPad from itself or any of its
416 // descendants. An unwind all the way out to a pad in the caller would
417 // need also to agree with the unwind dest of the parent funclet, so
418 // search up the chain to try to find a funclet with information. Put
419 // null entries in the memo map to avoid re-processing as we go up.
420 MemoMap[EHPad] = nullptr;
421#ifndef NDEBUG
422 SmallPtrSet<Instruction *, 4> TempMemos;
423 TempMemos.insert(EHPad);
424#endif
425 Instruction *LastUselessPad = EHPad;
426 Value *AncestorToken;
427 for (AncestorToken = getParentPad(EHPad);
428 auto *AncestorPad = dyn_cast<Instruction>(AncestorToken);
429 AncestorToken = getParentPad(AncestorToken)) {
430 // Skip over catchpads since they just follow their catchswitches.
431 if (isa<CatchPadInst>(AncestorPad))
432 continue;
433 // If the MemoMap had an entry mapping AncestorPad to nullptr, since we
434 // haven't yet called getUnwindDestTokenHelper for AncestorPad in this
435 // call to getUnwindDestToken, that would mean that AncestorPad had no
436 // information in itself, its descendants, or its ancestors. If that
437 // were the case, then we should also have recorded the lack of information
438 // for the descendant that we're coming from. So assert that we don't
439 // find a null entry in the MemoMap for AncestorPad.
440 assert(!MemoMap.count(AncestorPad) || MemoMap[AncestorPad]);
441 auto AncestorMemo = MemoMap.find(AncestorPad);
442 if (AncestorMemo == MemoMap.end()) {
443 UnwindDestToken = getUnwindDestTokenHelper(AncestorPad, MemoMap);
444 } else {
445 UnwindDestToken = AncestorMemo->second;
446 }
447 if (UnwindDestToken)
448 break;
449 LastUselessPad = AncestorPad;
450 MemoMap[LastUselessPad] = nullptr;
451#ifndef NDEBUG
452 TempMemos.insert(LastUselessPad);
453#endif
454 }
455
456 // We know that getUnwindDestTokenHelper was called on LastUselessPad and
457 // returned nullptr (and likewise for EHPad and any of its ancestors up to
458 // LastUselessPad), so LastUselessPad has no information from below. Since
459 // getUnwindDestTokenHelper must investigate all downward paths through
460 // no-information nodes to prove that a node has no information like this,
461 // and since any time it finds information it records it in the MemoMap for
462 // not just the immediately-containing funclet but also any ancestors also
463 // exited, it must be the case that, walking downward from LastUselessPad,
464 // visiting just those nodes which have not been mapped to an unwind dest
465 // by getUnwindDestTokenHelper (the nullptr TempMemos notwithstanding, since
466 // they are just used to keep getUnwindDestTokenHelper from repeating work),
467 // any node visited must have been exhaustively searched with no information
468 // for it found.
469 SmallVector<Instruction *, 8> Worklist(1, LastUselessPad);
470 while (!Worklist.empty()) {
471 Instruction *UselessPad = Worklist.pop_back_val();
472 auto Memo = MemoMap.find(UselessPad);
473 if (Memo != MemoMap.end() && Memo->second) {
474 // Here the name 'UselessPad' is a bit of a misnomer, because we've found
475 // that it is a funclet that does have information about unwinding to
476 // a particular destination; its parent was a useless pad.
477 // Since its parent has no information, the unwind edge must not escape
478 // the parent, and must target a sibling of this pad. This local unwind
479 // gives us no information about EHPad. Leave it and the subtree rooted
480 // at it alone.
481 assert(getParentPad(Memo->second) == getParentPad(UselessPad));
482 continue;
483 }
484 // We know we don't have information for UselessPad. If it has an entry in
485 // the MemoMap (mapping it to nullptr), it must be one of the TempMemos
486 // added on this invocation of getUnwindDestToken; if a previous invocation
487 // recorded nullptr, it would have had to prove that the ancestors of
488 // UselessPad, which include LastUselessPad, had no information, and that
489 // in turn would have required proving that the descendants of
490 // LastUselessPad, which include EHPad, have no information about
491 // LastUselessPad, which would imply that EHPad was mapped to nullptr in
492 // the MemoMap on that invocation, which isn't the case if we got here.
493 assert(!MemoMap.count(UselessPad) || TempMemos.count(UselessPad));
494 // Assert as we enumerate users that 'UselessPad' doesn't have any unwind
495 // information that we'd be contradicting by making a map entry for it
496 // (which is something that getUnwindDestTokenHelper must have proved for
497 // us to get here). Just assert on its direct users here; the checks in
498 // this downward walk at its descendants will verify that they don't have
499 // any unwind edges that exit 'UselessPad' either (i.e. they either have no
500 // unwind edges or unwind to a sibling).
501 MemoMap[UselessPad] = UnwindDestToken;
502 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(UselessPad)) {
503 assert(CatchSwitch->getUnwindDest() == nullptr && "Expected useless pad");
504 for (BasicBlock *HandlerBlock : CatchSwitch->handlers()) {
505 auto *CatchPad = HandlerBlock->getFirstNonPHI();
506 for (User *U : CatchPad->users()) {
507 assert(
508 (!isa<InvokeInst>(U) ||
509 (getParentPad(
510 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
511 CatchPad)) &&
512 "Expected useless pad");
513 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
514 Worklist.push_back(cast<Instruction>(U));
515 }
516 }
517 } else {
518 assert(isa<CleanupPadInst>(UselessPad));
519 for (User *U : UselessPad->users()) {
520 assert(!isa<CleanupReturnInst>(U) && "Expected useless pad");
521 assert((!isa<InvokeInst>(U) ||
522 (getParentPad(
523 cast<InvokeInst>(U)->getUnwindDest()->getFirstNonPHI()) ==
524 UselessPad)) &&
525 "Expected useless pad");
526 if (isa<CatchSwitchInst>(U) || isa<CleanupPadInst>(U))
527 Worklist.push_back(cast<Instruction>(U));
528 }
529 }
530 }
531
532 return UnwindDestToken;
533}
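// Illustrative sketch (not part of the original source): for a cleanup funclet
//   %cp = cleanuppad within none []
//   ...
//   cleanupret from %cp unwind to caller
// getUnwindDestToken(%cp, ...) yields ConstantTokenNone ("unwind to caller"),
// whereas "cleanupret from %cp unwind label %dest" would make it yield the
// first non-PHI instruction (the EH pad) of %dest.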
534
535/// When we inline a basic block into an invoke,
536/// we have to turn all of the calls that can throw into invokes.
537/// This function analyzes BB to see if there are any calls, and if so,
538/// it rewrites them to be invokes that jump to InvokeDest and fills in the PHI
539/// nodes in that block with the values specified in InvokeDestPHIValues.
540static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
541 BasicBlock *BB, BasicBlock *UnwindEdge,
542 UnwindDestMemoTy *FuncletUnwindMap = nullptr) {
543 for (Instruction &I : llvm::make_early_inc_range(*BB)) {
544 // We only need to check for function calls: inlined invoke
545 // instructions require no special handling.
546 CallInst *CI = dyn_cast<CallInst>(&I);
547
548 if (!CI || CI->doesNotThrow())
549 continue;
550
551 // We do not need to (and in fact, cannot) convert possibly throwing calls
552 // to @llvm.experimental.deoptimize (resp. @llvm.experimental.guard) into
553 // invokes. The caller's "segment" of the deoptimization continuation
554 // attached to the newly inlined @llvm.experimental.deoptimize
555 // (resp. @llvm.experimental.guard) call should contain the exception
556 // handling logic, if any.
557 if (auto *F = CI->getCalledFunction())
558 if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
559 F->getIntrinsicID() == Intrinsic::experimental_guard)
560 continue;
561
562 if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {
563 // This call is nested inside a funclet. If that funclet has an unwind
564 // destination within the inlinee, then unwinding out of this call would
565 // be UB. Rewriting this call to an invoke which targets the inlined
566 // invoke's unwind dest would give the call's parent funclet multiple
567 // unwind destinations, which is something that subsequent EH table
568 // generation can't handle and that the verifier rejects. So when we
569 // see such a call, leave it as a call.
570 auto *FuncletPad = cast<Instruction>(FuncletBundle->Inputs[0]);
571 Value *UnwindDestToken =
572 getUnwindDestToken(FuncletPad, *FuncletUnwindMap);
573 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
574 continue;
575#ifndef NDEBUG
576 Instruction *MemoKey;
577 if (auto *CatchPad = dyn_cast<CatchPadInst>(FuncletPad))
578 MemoKey = CatchPad->getCatchSwitch();
579 else
580 MemoKey = FuncletPad;
581 assert(FuncletUnwindMap->count(MemoKey) &&
582 (*FuncletUnwindMap)[MemoKey] == UnwindDestToken &&
583 "must get memoized to avoid confusing later searches");
584#endif // NDEBUG
585 }
586
587 changeToInvokeAndSplitBasicBlock(CI, UnwindEdge);
588 return BB;
589 }
590 return nullptr;
591}
592
593/// If we inlined an invoke site, we need to convert calls
594/// in the body of the inlined function into invokes.
595///
596/// II is the invoke instruction being inlined. FirstNewBlock is the first
597/// block of the inlined code (the last block is the end of the function),
598/// and InlineCodeInfo is information about the code that got inlined.
599static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
600 ClonedCodeInfo &InlinedCodeInfo) {
601 BasicBlock *InvokeDest = II->getUnwindDest();
602
603 Function *Caller = FirstNewBlock->getParent();
604
605 // The inlined code is currently at the end of the function, scan from the
606 // start of the inlined code to its end, checking for stuff we need to
607 // rewrite.
608 LandingPadInliningInfo Invoke(II);
609
610 // Get all of the inlined landing pad instructions.
611 SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
612 for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
613 I != E; ++I)
614 if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
615 InlinedLPads.insert(II->getLandingPadInst());
616
617 // Append the clauses from the outer landing pad instruction into the inlined
618 // landing pad instructions.
619 LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
620 for (LandingPadInst *InlinedLPad : InlinedLPads) {
621 unsigned OuterNum = OuterLPad->getNumClauses();
622 InlinedLPad->reserveClauses(OuterNum);
623 for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
624 InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
625 if (OuterLPad->isCleanup())
626 InlinedLPad->setCleanup(true);
627 }
628
629 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
630 BB != E; ++BB) {
631 if (InlinedCodeInfo.ContainsCalls)
632 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
633 &*BB, Invoke.getOuterResumeDest()))
634 // Update any PHI nodes in the exceptional block to indicate that there
635 // is now a new entry in them.
636 Invoke.addIncomingPHIValuesFor(NewBB);
637
638 // Forward any resumes that are remaining here.
639 if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
640 Invoke.forwardResume(RI, InlinedLPads);
641 }
642
643 // Now that everything is happy, we have one final detail. The PHI nodes in
644 // the exception destination block still have entries due to the original
645 // invoke instruction. Eliminate these entries (which might even delete the
646 // PHI node) now.
647 InvokeDest->removePredecessor(II->getParent());
648}
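// Illustrative sketch (not part of the original source): when a block of the
// inlined body contains a potentially-throwing call such as
//   call void @may_throw()
// HandleCallsInBlockInlinedThroughInvoke() rewrites it via
// changeToInvokeAndSplitBasicBlock() into
//   invoke void @may_throw() to label %split unwind label %lpad
// so that exceptions now unwind to the landing pad of the original invoke site.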
649
650/// If we inlined an invoke site, we need to convert calls
651/// in the body of the inlined function into invokes.
652///
653/// II is the invoke instruction being inlined. FirstNewBlock is the first
654/// block of the inlined code (the last block is the end of the function),
655/// and InlineCodeInfo is information about the code that got inlined.
656static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
657 ClonedCodeInfo &InlinedCodeInfo) {
658 BasicBlock *UnwindDest = II->getUnwindDest();
659 Function *Caller = FirstNewBlock->getParent();
660
661 assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");
662
663 // If there are PHI nodes in the unwind destination block, we need to keep
664 // track of which values came into them from the invoke before removing the
665 // edge from this block.
666 SmallVector<Value *, 8> UnwindDestPHIValues;
667 BasicBlock *InvokeBB = II->getParent();
668 for (PHINode &PHI : UnwindDest->phis()) {
669 // Save the value to use for this edge.
670 UnwindDestPHIValues.push_back(PHI.getIncomingValueForBlock(InvokeBB));
671 }
672
673 // Add incoming-PHI values to the unwind destination block for the given basic
674 // block, using the values for the original invoke's source block.
675 auto UpdatePHINodes = [&](BasicBlock *Src) {
676 BasicBlock::iterator I = UnwindDest->begin();
677 for (Value *V : UnwindDestPHIValues) {
678 PHINode *PHI = cast<PHINode>(I);
679 PHI->addIncoming(V, Src);
680 ++I;
681 }
682 };
683
684 // This connects all the instructions which 'unwind to caller' to the invoke
685 // destination.
686 UnwindDestMemoTy FuncletUnwindMap;
687 for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
688 BB != E; ++BB) {
689 if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
690 if (CRI->unwindsToCaller()) {
691 auto *CleanupPad = CRI->getCleanupPad();
692 CleanupReturnInst::Create(CleanupPad, UnwindDest, CRI->getIterator());
693 CRI->eraseFromParent();
694 UpdatePHINodes(&*BB);
695 // Finding a cleanupret with an unwind destination would confuse
696 // subsequent calls to getUnwindDestToken, so map the cleanuppad
697 // to short-circuit any such calls and recognize this as an "unwind
698 // to caller" cleanup.
699 assert(!FuncletUnwindMap.count(CleanupPad) ||
700 isa<ConstantTokenNone>(FuncletUnwindMap[CleanupPad]));
701 FuncletUnwindMap[CleanupPad] =
702 ConstantTokenNone::get(Caller->getContext());
703 }
704 }
705
706 Instruction *I = BB->getFirstNonPHI();
707 if (!I->isEHPad())
708 continue;
709
710 Instruction *Replacement = nullptr;
711 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
712 if (CatchSwitch->unwindsToCaller()) {
713 Value *UnwindDestToken;
714 if (auto *ParentPad =
715 dyn_cast<Instruction>(CatchSwitch->getParentPad())) {
716 // This catchswitch is nested inside another funclet. If that
717 // funclet has an unwind destination within the inlinee, then
718 // unwinding out of this catchswitch would be UB. Rewriting this
719 // catchswitch to unwind to the inlined invoke's unwind dest would
720 // give the parent funclet multiple unwind destinations, which is
721 // something that subsequent EH table generation can't handle and
722 // that the verifier rejects. So when we see such a call, leave it
723 // as "unwind to caller".
724 UnwindDestToken = getUnwindDestToken(ParentPad, FuncletUnwindMap);
725 if (UnwindDestToken && !isa<ConstantTokenNone>(UnwindDestToken))
726 continue;
727 } else {
728 // This catchswitch has no parent to inherit constraints from, and
729 // none of its descendants can have an unwind edge that exits it and
730 // targets another funclet in the inlinee. It may or may not have a
731 // descendant that definitively has an unwind to caller. In either
732 // case, we'll have to assume that any unwinds out of it may need to
733 // be routed to the caller, so treat it as though it has a definitive
734 // unwind to caller.
735 UnwindDestToken = ConstantTokenNone::get(Caller->getContext());
736 }
737 auto *NewCatchSwitch = CatchSwitchInst::Create(
738 CatchSwitch->getParentPad(), UnwindDest,
739 CatchSwitch->getNumHandlers(), CatchSwitch->getName(),
740 CatchSwitch->getIterator());
741 for (BasicBlock *PadBB : CatchSwitch->handlers())
742 NewCatchSwitch->addHandler(PadBB);
743 // Propagate info for the old catchswitch over to the new one in
744 // the unwind map. This also serves to short-circuit any subsequent
745 // checks for the unwind dest of this catchswitch, which would get
746 // confused if they found the outer handler in the callee.
747 FuncletUnwindMap[NewCatchSwitch] = UnwindDestToken;
748 Replacement = NewCatchSwitch;
749 }
750 } else if (!isa<FuncletPadInst>(I)) {
751 llvm_unreachable("unexpected EHPad!");
752 }
753
754 if (Replacement) {
755 Replacement->takeName(I);
756 I->replaceAllUsesWith(Replacement);
757 I->eraseFromParent();
758 UpdatePHINodes(&*BB);
759 }
760 }
761
762 if (InlinedCodeInfo.ContainsCalls)
763 for (Function::iterator BB = FirstNewBlock->getIterator(),
764 E = Caller->end();
765 BB != E; ++BB)
766 if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
767 &*BB, UnwindDest, &FuncletUnwindMap))
768 // Update any PHI nodes in the exceptional block to indicate that there
769 // is now a new entry in them.
770 UpdatePHINodes(NewBB);
771
772 // Now that everything is happy, we have one final detail. The PHI nodes in
773 // the exception destination block still have entries due to the original
774 // invoke instruction. Eliminate these entries (which might even delete the
775 // PHI node) now.
776 UnwindDest->removePredecessor(InvokeBB);
777}
778
779static bool haveCommonPrefix(MDNode *MIBStackContext,
780 MDNode *CallsiteStackContext) {
781 assert(MIBStackContext->getNumOperands() > 0 &&
782 CallsiteStackContext->getNumOperands() > 0);
783 // Because of the context trimming performed during matching, the callsite
784 // context could have more stack ids than the MIB. We match up to the end of
785 // the shortest stack context.
786 for (auto MIBStackIter = MIBStackContext->op_begin(),
787 CallsiteStackIter = CallsiteStackContext->op_begin();
788 MIBStackIter != MIBStackContext->op_end() &&
789 CallsiteStackIter != CallsiteStackContext->op_end();
790 MIBStackIter++, CallsiteStackIter++) {
791 auto *Val1 = mdconst::dyn_extract<ConstantInt>(*MIBStackIter);
792 auto *Val2 = mdconst::dyn_extract<ConstantInt>(*CallsiteStackIter);
793 assert(Val1 && Val2);
794 if (Val1->getZExtValue() != Val2->getZExtValue())
795 return false;
796 }
797 return true;
798}
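// Illustrative example (not part of the original source): for an MIB stack
// context of !{i64 111, i64 222, i64 333} and a callsite context of
// !{i64 111, i64 222}, haveCommonPrefix() returns true (the contexts agree up
// to the end of the shorter one); a callsite context of !{i64 111, i64 999}
// would not match.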
799
800static void removeMemProfMetadata(CallBase *Call) {
801 Call->setMetadata(LLVMContext::MD_memprof, nullptr);
802}
803
804static void removeCallsiteMetadata(CallBase *Call) {
805 Call->setMetadata(LLVMContext::MD_callsite, nullptr);
806}
807
808static void updateMemprofMetadata(CallBase *CI,
809 const std::vector<Metadata *> &MIBList) {
810 assert(!MIBList.empty());
811 // Remove existing memprof, which will either be replaced or may not be needed
812 // if we are able to use a single allocation type function attribute.
813 removeMemProfMetadata(CI);
814 CallStackTrie CallStack;
815 for (Metadata *MIB : MIBList)
816 CallStack.addCallStack(cast<MDNode>(MIB));
817 bool MemprofMDAttached = CallStack.buildAndAttachMIBMetadata(CI);
818 assert(MemprofMDAttached == CI->hasMetadata(LLVMContext::MD_memprof));
819 if (!MemprofMDAttached)
820 // If we used a function attribute remove the callsite metadata as well.
821 removeCallsiteMetadata(CI);
822}
823
824// Update the metadata on the inlined copy ClonedCall of a call OrigCall in the
825// inlined callee body, based on the callsite metadata InlinedCallsiteMD from
826// the call that was inlined.
827static void propagateMemProfHelper(const CallBase *OrigCall,
828 CallBase *ClonedCall,
829 MDNode *InlinedCallsiteMD) {
830 MDNode *OrigCallsiteMD = ClonedCall->getMetadata(LLVMContext::MD_callsite);
831 MDNode *ClonedCallsiteMD = nullptr;
832 // Check if the call originally had callsite metadata, and update it for the
833 // new call in the inlined body.
834 if (OrigCallsiteMD) {
835 // The cloned call's context is now the concatenation of the original call's
836 // callsite metadata and the callsite metadata on the call where it was
837 // inlined.
838 ClonedCallsiteMD = MDNode::concatenate(OrigCallsiteMD, InlinedCallsiteMD);
839 ClonedCall->setMetadata(LLVMContext::MD_callsite, ClonedCallsiteMD);
840 }
841
842 // Update any memprof metadata on the cloned call.
843 MDNode *OrigMemProfMD = ClonedCall->getMetadata(LLVMContext::MD_memprof);
844 if (!OrigMemProfMD)
845 return;
846 // We currently expect that allocations with memprof metadata also have
847 // callsite metadata for the allocation's part of the context.
848 assert(OrigCallsiteMD);
849
850 // New call's MIB list.
851 std::vector<Metadata *> NewMIBList;
852
853 // For each MIB metadata, check if its call stack context starts with the
854 // new clone's callsite metadata. If so, that MIB goes onto the cloned call in
855 // the inlined body. If not, it stays on the out-of-line original call.
856 for (auto &MIBOp : OrigMemProfMD->operands()) {
857 MDNode *MIB = dyn_cast<MDNode>(MIBOp);
858 // Stack is first operand of MIB.
859 MDNode *StackMD = getMIBStackNode(MIB);
860 assert(StackMD);
861 // See if the new cloned callsite context matches this profiled context.
862 if (haveCommonPrefix(StackMD, ClonedCallsiteMD))
863 // Add it to the cloned call's MIB list.
864 NewMIBList.push_back(MIB);
865 }
866 if (NewMIBList.empty()) {
867 removeMemProfMetadata(ClonedCall);
868 removeCallsiteMetadata(ClonedCall);
869 return;
870 }
871 if (NewMIBList.size() < OrigMemProfMD->getNumOperands())
872 updateMemprofMetadata(ClonedCall, NewMIBList);
873}
874
875// Update memprof related metadata (!memprof and !callsite) based on the
876// inlining of Callee into the callsite at CB. The updates include merging the
877// inlined callee's callsite metadata with that of the inlined call,
878// and moving the subset of any memprof contexts to the inlined callee
879// allocations if they match the new inlined call stack.
880static void
881propagateMemProfMetadata(Function *Callee, CallBase &CB,
882 bool ContainsMemProfMetadata,
883 const ValueToValueMapTy &VMap) {
884 MDNode *CallsiteMD = CB.getMetadata(LLVMContext::MD_callsite);
885 // Only need to update if the inlined callsite had callsite metadata, or if
886 // there was any memprof metadata inlined.
887 if (!CallsiteMD && !ContainsMemProfMetadata)
888 return;
889
890 // Propagate metadata onto the cloned calls in the inlined callee.
891 for (const auto &Entry : VMap) {
892 // See if this is a call that has been inlined and remapped, and not
893 // simplified away in the process.
894 auto *OrigCall = dyn_cast_or_null<CallBase>(Entry.first);
895 auto *ClonedCall = dyn_cast_or_null<CallBase>(Entry.second);
896 if (!OrigCall || !ClonedCall)
897 continue;
898 // If the inlined callsite did not have any callsite metadata, then it isn't
899 // involved in any profiled call contexts, and we can remove any memprof
900 // metadata on the cloned call.
901 if (!CallsiteMD) {
902 removeMemProfMetadata(ClonedCall);
903 removeCallsiteMetadata(ClonedCall);
904 continue;
905 }
906 propagateMemProfHelper(OrigCall, ClonedCall, CallsiteMD);
907 }
908}
909
910/// When inlining a call site that has !llvm.mem.parallel_loop_access,
911/// !llvm.access.group, !alias.scope or !noalias metadata, that metadata should
912/// be propagated to all memory-accessing cloned instructions.
913static void PropagateCallSiteMetadata(CallBase &CB, Function::iterator FStart,
914 Function::iterator FEnd) {
915 MDNode *MemParallelLoopAccess =
916 CB.getMetadata(LLVMContext::MD_mem_parallel_loop_access);
917 MDNode *AccessGroup = CB.getMetadata(LLVMContext::MD_access_group);
918 MDNode *AliasScope = CB.getMetadata(LLVMContext::MD_alias_scope);
919 MDNode *NoAlias = CB.getMetadata(LLVMContext::MD_noalias);
920 if (!MemParallelLoopAccess && !AccessGroup && !AliasScope && !NoAlias)
921 return;
922
923 for (BasicBlock &BB : make_range(FStart, FEnd)) {
924 for (Instruction &I : BB) {
925 // This metadata is only relevant for instructions that access memory.
926 if (!I.mayReadOrWriteMemory())
927 continue;
928
929 if (MemParallelLoopAccess) {
930 // TODO: This probably should not overwrite MemParallelLoopAccess.
931 MemParallelLoopAccess = MDNode::concatenate(
932 I.getMetadata(LLVMContext::MD_mem_parallel_loop_access),
933 MemParallelLoopAccess);
934 I.setMetadata(LLVMContext::MD_mem_parallel_loop_access,
935 MemParallelLoopAccess);
936 }
937
938 if (AccessGroup)
939 I.setMetadata(LLVMContext::MD_access_group, uniteAccessGroups(
940 I.getMetadata(LLVMContext::MD_access_group), AccessGroup));
941
942 if (AliasScope)
943 I.setMetadata(LLVMContext::MD_alias_scope, MDNode::concatenate(
944 I.getMetadata(LLVMContext::MD_alias_scope), AliasScope));
945
946 if (NoAlias)
947 I.setMetadata(LLVMContext::MD_noalias, MDNode::concatenate(
948 I.getMetadata(LLVMContext::MD_noalias), NoAlias));
949 }
950 }
951}
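// Illustrative sketch (not part of the original source): if the inlined call
// site itself carries, say, !noalias !1, then every memory-accessing
// instruction cloned from the callee has !1 concatenated onto its own !noalias
// metadata, so aliasing guarantees that held at the call site keep holding
// inside the inlined body; !llvm.mem.parallel_loop_access, !llvm.access.group
// and !alias.scope are propagated likewise.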
952
953/// Bundle operands of the inlined function must be added to inlined call sites.
954static void PropagateOperandBundles(Function::iterator InlinedBB,
955 Instruction *CallSiteEHPad) {
956 for (Instruction &II : llvm::make_early_inc_range(*InlinedBB)) {
957 CallBase *I = dyn_cast<CallBase>(&II);
958 if (!I)
959 continue;
960 // Skip call sites which already have a "funclet" bundle.
961 if (I->getOperandBundle(LLVMContext::OB_funclet))
962 continue;
963 // Skip call sites which are nounwind intrinsics (as long as they don't
964 // lower into regular function calls in the course of IR transformations).
965 auto *CalledFn =
966 dyn_cast<Function>(I->getCalledOperand()->stripPointerCasts());
967 if (CalledFn && CalledFn->isIntrinsic() && I->doesNotThrow() &&
968 !IntrinsicInst::mayLowerToFunctionCall(CalledFn->getIntrinsicID()))
969 continue;
970
971 SmallVector<OperandBundleDef, 1> OpBundles;
972 I->getOperandBundlesAsDefs(OpBundles);
973 OpBundles.emplace_back("funclet", CallSiteEHPad);
974
975 Instruction *NewInst = CallBase::Create(I, OpBundles, I->getIterator());
976 NewInst->takeName(I);
977 I->replaceAllUsesWith(NewInst);
978 I->eraseFromParent();
979 }
980}
981
982namespace {
983/// Utility for cloning !noalias and !alias.scope metadata. When a code region
984/// using scoped alias metadata is inlined, the aliasing relationships may not
985/// hold between the two versions. It is necessary to create a deep clone of the
986/// metadata, putting the two versions in separate scope domains.
987class ScopedAliasMetadataDeepCloner {
988 using MetadataMap = DenseMap<const MDNode *, TrackingMDNodeRef>;
989 SetVector<const MDNode *> MD;
990 MetadataMap MDMap;
991 void addRecursiveMetadataUses();
992
993public:
994 ScopedAliasMetadataDeepCloner(const Function *F);
995
996 /// Create a new clone of the scoped alias metadata, which will be used by
997 /// subsequent remap() calls.
998 void clone();
999
1000 /// Remap instructions in the given range from the original to the cloned
1001 /// metadata.
1002 void remap(Function::iterator FStart, Function::iterator FEnd);
1003};
1004} // namespace
1005
1006ScopedAliasMetadataDeepCloner::ScopedAliasMetadataDeepCloner(
1007 const Function *F) {
1008 for (const BasicBlock &BB : *F) {
1009 for (const Instruction &I : BB) {
1010 if (const MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
1011 MD.insert(M);
1012 if (const MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
1013 MD.insert(M);
1014
1015 // We also need to clone the metadata in noalias intrinsics.
1016 if (const auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
1017 MD.insert(Decl->getScopeList());
1018 }
1019 }
1020 addRecursiveMetadataUses();
1021}
1022
1023void ScopedAliasMetadataDeepCloner::addRecursiveMetadataUses() {
1024 SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
1025 while (!Queue.empty()) {
1026 const MDNode *M = cast<MDNode>(Queue.pop_back_val());
1027 for (const Metadata *Op : M->operands())
1028 if (const MDNode *OpMD = dyn_cast<MDNode>(Op))
1029 if (MD.insert(OpMD))
1030 Queue.push_back(OpMD);
1031 }
1032}
1033
1034void ScopedAliasMetadataDeepCloner::clone() {
1035 assert(MDMap.empty() && "clone() already called ?");
1036
1037 SmallVector<TempMDTuple, 16> DummyNodes;
1038 for (const MDNode *I : MD) {
1039 DummyNodes.push_back(MDTuple::getTemporary(I->getContext(), std::nullopt));
1040 MDMap[I].reset(DummyNodes.back().get());
1041 }
1042
1043 // Create new metadata nodes to replace the dummy nodes, replacing old
1044 // metadata references with either a dummy node or an already-created new
1045 // node.
1046 SmallVector<Metadata *, 4> NewOps;
1047 for (const MDNode *I : MD) {
1048 for (const Metadata *Op : I->operands()) {
1049 if (const MDNode *M = dyn_cast<MDNode>(Op))
1050 NewOps.push_back(MDMap[M]);
1051 else
1052 NewOps.push_back(const_cast<Metadata *>(Op));
1053 }
1054
1055 MDNode *NewM = MDNode::get(I->getContext(), NewOps);
1056 MDTuple *TempM = cast<MDTuple>(MDMap[I]);
1057 assert(TempM->isTemporary() && "Expected temporary node");
1058
1059 TempM->replaceAllUsesWith(NewM);
1060 NewOps.clear();
1061 }
1062}
1063
1064void ScopedAliasMetadataDeepCloner::remap(Function::iterator FStart,
1065 Function::iterator FEnd) {
1066 if (MDMap.empty())
1067 return; // Nothing to do.
1068
1069 for (BasicBlock &BB : make_range(FStart, FEnd)) {
1070 for (Instruction &I : BB) {
1071 // TODO: The null checks for the MDMap.lookup() results should no longer
1072 // be necessary.
1073 if (MDNode *M = I.getMetadata(LLVMContext::MD_alias_scope))
1074 if (MDNode *MNew = MDMap.lookup(M))
1075 I.setMetadata(LLVMContext::MD_alias_scope, MNew);
1076
1077 if (MDNode *M = I.getMetadata(LLVMContext::MD_noalias))
1078 if (MDNode *MNew = MDMap.lookup(M))
1079 I.setMetadata(LLVMContext::MD_noalias, MNew);
1080
1081 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
1082 if (MDNode *MNew = MDMap.lookup(Decl->getScopeList()))
1083 Decl->setScopeList(MNew);
1084 }
1085 }
1086}
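// Illustrative sketch (not part of the original source): if the callee tags
// its accesses with !alias.scope !10 and !noalias !11, each inlining of that
// callee recreates the !10/!11 scope (and domain) nodes as fresh metadata, so
// the scopes of one inlined copy cannot be conflated with those of another
// copy or with the callee's original body.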
1087
1088/// If the inlined function has noalias arguments,
1089/// then add new alias scopes for each noalias argument, tag the mapped noalias
1090/// parameters with noalias metadata specifying the new scope, and tag all
1091/// non-derived loads, stores and memory intrinsics with the new alias scopes.
1092static void AddAliasScopeMetadata(CallBase &CB, ValueToValueMapTy &VMap,
1093 const DataLayout &DL, AAResults *CalleeAAR,
1094 ClonedCodeInfo &InlinedFunctionInfo) {
1095 if (!EnableNoAliasConversion)
1096 return;
1097
1098 const Function *CalledFunc = CB.getCalledFunction();
1099 SmallVector<const Argument *, 4> NoAliasArgs;
1100
1101 for (const Argument &Arg : CalledFunc->args())
1102 if (CB.paramHasAttr(Arg.getArgNo(), Attribute::NoAlias) && !Arg.use_empty())
1103 NoAliasArgs.push_back(&Arg);
1104
1105 if (NoAliasArgs.empty())
1106 return;
1107
1108 // To do a good job, if a noalias variable is captured, we need to know if
1109 // the capture point dominates the particular use we're considering.
1110 DominatorTree DT;
1111 DT.recalculate(const_cast<Function&>(*CalledFunc));
1112
1113 // noalias indicates that pointer values based on the argument do not alias
1114 // pointer values which are not based on it. So we add a new "scope" for each
1115 // noalias function argument. Accesses using pointers based on that argument
1116 // become part of that alias scope, accesses using pointers not based on that
1117 // argument are tagged as noalias with that scope.
1118
1119 DenseMap<const Argument *, MDNode *> NewScopes;
1120 MDBuilder MDB(CalledFunc->getContext());
1121
1122 // Create a new scope domain for this function.
1123 MDNode *NewDomain =
1124 MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
1125 for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
1126 const Argument *A = NoAliasArgs[i];
1127
1128 std::string Name = std::string(CalledFunc->getName());
1129 if (A->hasName()) {
1130 Name += ": %";
1131 Name += A->getName();
1132 } else {
1133 Name += ": argument ";
1134 Name += utostr(i);
1135 }
1136
1137 // Note: We always create a new anonymous root here. This is true regardless
1138 // of the linkage of the callee because the aliasing "scope" is not just a
1139 // property of the callee, but also all control dependencies in the caller.
1140 MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
1141 NewScopes.insert(std::make_pair(A, NewScope));
1142
1143 if (UseNoAliasIntrinsic) {
1144 // Introduce a llvm.experimental.noalias.scope.decl for the noalias
1145 // argument.
1146 MDNode *AScopeList = MDNode::get(CalledFunc->getContext(), NewScope);
1147 auto *NoAliasDecl =
1148 IRBuilder<>(&CB).CreateNoAliasScopeDeclaration(AScopeList);
1149 // Ignore the result for now. The result will be used when the
1150 // llvm.noalias intrinsic is introduced.
1151 (void)NoAliasDecl;
1152 }
1153 }
1154
1155 // Iterate over all new instructions in the map; for all memory-access
1156 // instructions, add the alias scope metadata.
1157 for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
1158 VMI != VMIE; ++VMI) {
1159 if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
1160 if (!VMI->second)
1161 continue;
1162
1163 Instruction *NI = dyn_cast<Instruction>(VMI->second);
1164 if (!NI || InlinedFunctionInfo.isSimplified(I, NI))
1165 continue;
1166
1167 bool IsArgMemOnlyCall = false, IsFuncCall = false;
1168 SmallVector<const Value *, 2> PtrArgs;
1169
1170 if (const LoadInst *LI = dyn_cast<LoadInst>(I))
1171 PtrArgs.push_back(LI->getPointerOperand());
1172 else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
1173 PtrArgs.push_back(SI->getPointerOperand());
1174 else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
1175 PtrArgs.push_back(VAAI->getPointerOperand());
1176 else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
1177 PtrArgs.push_back(CXI->getPointerOperand());
1178 else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
1179 PtrArgs.push_back(RMWI->getPointerOperand());
1180 else if (const auto *Call = dyn_cast<CallBase>(I)) {
1181 // If we know that the call does not access memory, then we'll still
1182 // know that about the inlined clone of this call site, and we don't
1183 // need to add metadata.
1184 if (Call->doesNotAccessMemory())
1185 continue;
1186
1187 IsFuncCall = true;
1188 if (CalleeAAR) {
1189 MemoryEffects ME = CalleeAAR->getMemoryEffects(Call);
1190
1191 // We'll retain this knowledge without additional metadata.
1192 if (ME.onlyAccessesInaccessibleMem())
1193 continue;
1194
1195 if (ME.onlyAccessesArgPointees())
1196 IsArgMemOnlyCall = true;
1197 }
1198
1199 for (Value *Arg : Call->args()) {
1200 // Only care about pointer arguments. If a noalias argument is
1201 // accessed through a non-pointer argument, it must be captured
1202 // first (e.g. via ptrtoint), and we protect against captures below.
1203 if (!Arg->getType()->isPointerTy())
1204 continue;
1205
1206 PtrArgs.push_back(Arg);
1207 }
1208 }
1209
1210 // If we found no pointers, then this instruction is not suitable for
1211 // pairing with an instruction to receive aliasing metadata.
1212 // However, if this is a call, we might just alias with none of the
1213 // noalias arguments.
1214 if (PtrArgs.empty() && !IsFuncCall)
1215 continue;
1216
1217 // It is possible that there is only one underlying object, but you
1218 // need to go through several PHIs to see it, and thus could be
1219 // repeated in the Objects list.
1220 SmallPtrSet<const Value *, 4> ObjSet;
1221 SmallVector<Metadata *, 4> Scopes, NoAliases;
1222
1224 for (const Value *V : PtrArgs) {
1225 SmallVector<const Value *, 4> Objects;
1226 getUnderlyingObjects(V, Objects, /* LI = */ nullptr);
1227
1228 for (const Value *O : Objects)
1229 ObjSet.insert(O);
1230 }
1231
1232 // Figure out if we're derived from anything that is not a noalias
1233 // argument.
1234 bool RequiresNoCaptureBefore = false, UsesAliasingPtr = false,
1235 UsesUnknownObject = false;
1236 for (const Value *V : ObjSet) {
1237 // Is this value a constant that cannot be derived from any pointer
1238 // value (we need to exclude constant expressions, for example, that
1239 // are formed from arithmetic on global symbols).
1240 bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
1241 isa<ConstantPointerNull>(V) ||
1242 isa<ConstantDataVector>(V) || isa<UndefValue>(V);
1243 if (IsNonPtrConst)
1244 continue;
1245
1246 // If this is anything other than a noalias argument, then we cannot
1247 // completely describe the aliasing properties using alias.scope
1248 // metadata (and, thus, won't add any).
1249 if (const Argument *A = dyn_cast<Argument>(V)) {
1250 if (!CB.paramHasAttr(A->getArgNo(), Attribute::NoAlias))
1251 UsesAliasingPtr = true;
1252 } else {
1253 UsesAliasingPtr = true;
1254 }
1255
1256 if (isEscapeSource(V)) {
1257 // An escape source can only alias with a noalias argument if it has
1258 // been captured beforehand.
1259 RequiresNoCaptureBefore = true;
1260 } else if (!isa<Argument>(V) && !isIdentifiedObject(V)) {
1261 // If this is neither an escape source, nor some identified object
1262 // (which cannot directly alias a noalias argument), nor some other
1263 // argument (which, by definition, also cannot alias a noalias
1264 // argument), conservatively do not make any assumptions.
1265 UsesUnknownObject = true;
1266 }
1267 }
1268
1269 // Nothing we can do if the used underlying object cannot be reliably
1270 // determined.
1271 if (UsesUnknownObject)
1272 continue;
1273
1274 // A function call can always get captured noalias pointers (via other
1275 // parameters, globals, etc.).
1276 if (IsFuncCall && !IsArgMemOnlyCall)
1277 RequiresNoCaptureBefore = true;
1278
1279 // First, we want to figure out all of the sets with which we definitely
1280 // don't alias. Iterate over all noalias set, and add those for which:
1281 // 1. The noalias argument is not in the set of objects from which we
1282 // definitely derive.
1283 // 2. The noalias argument has not yet been captured.
1284 // An arbitrary function that might load pointers could see captured
1285 // noalias arguments via other noalias arguments or globals, and so we
1286 // must always check for prior capture.
1287 for (const Argument *A : NoAliasArgs) {
1288 if (ObjSet.contains(A))
1289 continue; // May be based on a noalias argument.
1290
1291 // It might be tempting to skip the PointerMayBeCapturedBefore check if
1292 // A->hasNoCaptureAttr() is true, but this is incorrect because
1293 // nocapture only guarantees that no copies outlive the function, not
1294 // that the value cannot be locally captured.
1295 if (!RequiresNoCaptureBefore ||
1296 !PointerMayBeCapturedBefore(A, /* ReturnCaptures */ false,
1297 /* StoreCaptures */ false, I, &DT))
1298 NoAliases.push_back(NewScopes[A]);
1299 }
1300
1301 if (!NoAliases.empty())
1302 NI->setMetadata(LLVMContext::MD_noalias,
1303 MDNode::concatenate(
1304 NI->getMetadata(LLVMContext::MD_noalias),
1305 MDNode::get(CalledFunc->getContext(), NoAliases)));
1306
1307 // Next, we want to figure out all of the sets to which we might belong.
1308 // We might belong to a set if the noalias argument is in the set of
1309 // underlying objects. If there is some non-noalias argument in our list
1310 // of underlying objects, then we cannot add a scope because the fact
1311 // that some access does not alias with any set of our noalias arguments
1312 // cannot itself guarantee that it does not alias with this access
1313 // (because there is some pointer of unknown origin involved and the
1314 // other access might also depend on this pointer). We also cannot add
1315 // scopes to arbitrary functions unless we know they don't access any
1316 // non-parameter pointer-values.
1317 bool CanAddScopes = !UsesAliasingPtr;
1318 if (CanAddScopes && IsFuncCall)
1319 CanAddScopes = IsArgMemOnlyCall;
1320
1321 if (CanAddScopes)
1322 for (const Argument *A : NoAliasArgs) {
1323 if (ObjSet.count(A))
1324 Scopes.push_back(NewScopes[A]);
1325 }
1326
1327 if (!Scopes.empty())
1328 NI->setMetadata(
1329 LLVMContext::MD_alias_scope,
1330 MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
1331 MDNode::get(CalledFunc->getContext(), Scopes)));
1332 }
1333 }
1334}
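// Illustrative sketch (not part of the original source): when inlining
//   define void @callee(ptr noalias %p, ptr %q) { ... }
// a new scope domain and a scope for %p are created; cloned accesses derived
// from %p receive !alias.scope for that scope, while accesses provably not
// based on %p (and with no prior capture of %p) receive !noalias on the same
// scope, preserving the noalias guarantee after the argument attribute itself
// is gone.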
1335
1336static bool MayContainThrowingOrExitingCallAfterCB(CallBase *Begin,
1337 ReturnInst *End) {
1338
1339 assert(Begin->getParent() == End->getParent() &&
1340 "Expected to be in same basic block!");
1341 auto BeginIt = Begin->getIterator();
1342 assert(BeginIt != End->getIterator() && "Non-empty BB has empty iterator");
1343 return !llvm::isGuaranteedToTransferExecutionToSuccessor(
1344 ++BeginIt, End->getIterator(), InlinerAttributeWindow + 1);
1345}
1346
1347// Add attributes from CB params and Fn attributes that can always be propagated
1348// to the corresponding argument / inner callbases.
1349static void AddParamAndFnBasicAttributes(const CallBase &CB,
1350 ValueToValueMapTy &VMap) {
1351 auto *CalledFunction = CB.getCalledFunction();
1352 auto &Context = CalledFunction->getContext();
1353
1354 // Collect valid attributes for all params.
1355 SmallVector<AttrBuilder> ValidParamAttrs;
1356 bool HasAttrToPropagate = false;
1357
1358 for (unsigned I = 0, E = CB.arg_size(); I < E; ++I) {
1359 ValidParamAttrs.emplace_back(AttrBuilder{CB.getContext()});
1360 // Access attributes can be propagated to any param with the same underlying
1361 // object as the argument.
1362 if (CB.paramHasAttr(I, Attribute::ReadNone))
1363 ValidParamAttrs.back().addAttribute(Attribute::ReadNone);
1364 if (CB.paramHasAttr(I, Attribute::ReadOnly))
1365 ValidParamAttrs.back().addAttribute(Attribute::ReadOnly);
1366 if (CB.paramHasAttr(I, Attribute::WriteOnly))
1367 ValidParamAttrs.back().addAttribute(Attribute::WriteOnly);
1368 HasAttrToPropagate |= ValidParamAttrs.back().hasAttributes();
1369 }
1370
1371 // Won't be able to propagate anything.
1372 if (!HasAttrToPropagate)
1373 return;
1374
1375 for (BasicBlock &BB : *CalledFunction) {
1376 for (Instruction &Ins : BB) {
1377 const auto *InnerCB = dyn_cast<CallBase>(&Ins);
1378 if (!InnerCB)
1379 continue;
1380 auto *NewInnerCB = dyn_cast_or_null<CallBase>(VMap.lookup(InnerCB));
1381 if (!NewInnerCB)
1382 continue;
1383 AttributeList AL = NewInnerCB->getAttributes();
1384 for (unsigned I = 0, E = InnerCB->arg_size(); I < E; ++I) {
1385 // Check if the underlying value for the parameter is an argument.
1386 const Value *UnderlyingV =
1387 getUnderlyingObject(InnerCB->getArgOperand(I));
1388 const Argument *Arg = dyn_cast<Argument>(UnderlyingV);
1389 if (!Arg)
1390 continue;
1391
1392 unsigned ArgNo = Arg->getArgNo();
1393 // If so, propagate its access attributes.
1394 AL = AL.addParamAttributes(Context, I, ValidParamAttrs[ArgNo]);
1395 // We can have conflicting attributes from the inner callsite and
1396 // to-be-inlined callsite. In that case, choose the most
1397 // restrictive.
1398
1399 // readonly + writeonly means we can never deref so make readnone.
1400 if (AL.hasParamAttr(I, Attribute::ReadOnly) &&
1401 AL.hasParamAttr(I, Attribute::WriteOnly))
1402 AL = AL.addParamAttribute(Context, I, Attribute::ReadNone);
1403
1404 // If we have readnone, we need to clear readonly/writeonly
1405 if (AL.hasParamAttr(I, Attribute::ReadNone)) {
1406 AL = AL.removeParamAttribute(Context, I, Attribute::ReadOnly);
1407 AL = AL.removeParamAttribute(Context, I, Attribute::WriteOnly);
1408 }
1409
1410 // Writable cannot exist in conjunction w/ readonly/readnone
1411 if (AL.hasParamAttr(I, Attribute::ReadOnly) ||
1412 AL.hasParamAttr(I, Attribute::ReadNone))
1413 AL = AL.removeParamAttribute(Context, I, Attribute::Writable);
1414 }
1415 NewInnerCB->setAttributes(AL);
1416 }
1417 }
1418}
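// Illustrative sketch (not part of the original source): for a call site such
// as
//   call void @callee(ptr readonly %buf)
// an inner call inside @callee whose pointer argument is based on that
// parameter gets the readonly attribute added on the cloned call in the
// caller; conflicting combinations are resolved toward the more restrictive
// form (readonly + writeonly becomes readnone, and writable is dropped).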
1419
1420// Only allow these white listed attributes to be propagated back to the
1421// callee. This is because other attributes may only be valid on the call
1422// itself, i.e. attributes such as signext and zeroext.
1423
1424// Attributes that are always okay to propagate, as violating them is
1425// immediate UB.
1426static AttrBuilder IdentifyValidUBGeneratingAttributes(CallBase &CB) {
1427 AttrBuilder Valid(CB.getContext());
1428 if (auto DerefBytes = CB.getRetDereferenceableBytes())
1429 Valid.addDereferenceableAttr(DerefBytes);
1430 if (auto DerefOrNullBytes = CB.getRetDereferenceableOrNullBytes())
1431 Valid.addDereferenceableOrNullAttr(DerefOrNullBytes);
1432 if (CB.hasRetAttr(Attribute::NoAlias))
1433 Valid.addAttribute(Attribute::NoAlias);
1434 if (CB.hasRetAttr(Attribute::NoUndef))
1435 Valid.addAttribute(Attribute::NoUndef);
1436 return Valid;
1437}
1438
1439// Attributes that need additional checks as propagating them may change
1440// behavior or cause new UB.
1442 AttrBuilder Valid(CB.getContext());
1443 if (CB.hasRetAttr(Attribute::NonNull))
1444 Valid.addAttribute(Attribute::NonNull);
1445 if (CB.hasRetAttr(Attribute::Alignment))
1446 Valid.addAlignmentAttr(CB.getRetAlign());
1447 return Valid;
1448}
1449
1453 if (!ValidUB.hasAttributes() && !ValidPG.hasAttributes())
1454 return;
1455 auto *CalledFunction = CB.getCalledFunction();
1456 auto &Context = CalledFunction->getContext();
1457
1458 for (auto &BB : *CalledFunction) {
1459 auto *RI = dyn_cast<ReturnInst>(BB.getTerminator());
1460 if (!RI || !isa<CallBase>(RI->getOperand(0)))
1461 continue;
1462 auto *RetVal = cast<CallBase>(RI->getOperand(0));
1463 // Check that the cloned RetVal exists and is a call, otherwise we cannot
1464 // add the attributes on the cloned RetVal. Simplification during inlining
1465 // could have transformed the cloned instruction.
1466 auto *NewRetVal = dyn_cast_or_null<CallBase>(VMap.lookup(RetVal));
1467 if (!NewRetVal)
1468 continue;
1469 // Backward propagation of attributes to the returned value may be incorrect
1470 // if it is control flow dependent.
1471 // Consider:
1472 // @callee {
1473 // %rv = call @foo()
1474 // %rv2 = call @bar()
1475 // if (%rv2 != null)
1476 // return %rv2
1477 // if (%rv == null)
1478 // exit()
1479 // return %rv
1480 // }
1481 // caller() {
1482 // %val = call nonnull @callee()
1483 // }
1484 // Here we cannot add the nonnull attribute on either foo or bar. So, we
1485 // require that RetVal and RI are in the same basic block and that there are
1486 // no throwing/exiting instructions between these two instructions.
1487 if (RI->getParent() != RetVal->getParent() ||
1489 continue;
1490 // Add to the existing attributes of NewRetVal, i.e. the cloned call
1491 // instruction.
1492 // NB! When we have the same attribute already existing on NewRetVal, but
1493 // with a differing value, the AttributeList's merge API honours the already
1494 // existing attribute value (i.e. attributes such as dereferenceable,
1495 // dereferenceable_or_null etc). See AttrBuilder::merge for more details.
1496 AttributeList AL = NewRetVal->getAttributes();
1497 if (ValidUB.getDereferenceableBytes() < AL.getRetDereferenceableBytes())
1498 ValidUB.removeAttribute(Attribute::Dereferenceable);
1499 if (ValidUB.getDereferenceableOrNullBytes() <
1500 AL.getRetDereferenceableOrNullBytes())
1501 ValidUB.removeAttribute(Attribute::DereferenceableOrNull);
1502 AttributeList NewAL = AL.addRetAttributes(Context, ValidUB);
1503 // Attributes that may generate poison returns are a bit tricky. If we
1504 // propagate them, other uses of the callsite might have their behavior
1505 // change or cause UB (if they have noundef) because of the new potential
1506 // poison.
1507 // Take the following three cases:
1508 //
1509 // 1)
1510 // define nonnull ptr @foo() {
1511 // %p = call ptr @bar()
1512 // call void @use(ptr %p) willreturn nounwind
1513 // ret ptr %p
1514 // }
1515 //
1516 // 2)
1517 // define noundef nonnull ptr @foo() {
1518 // %p = call ptr @bar()
1519 // call void @use(ptr %p) willreturn nounwind
1520 // ret ptr %p
1521 // }
1522 //
1523 // 3)
1524 // define nonnull ptr @foo() {
1525 // %p = call noundef ptr @bar()
1526 // ret ptr %p
1527 // }
1528 //
1529 // In case 1, we can't propagate nonnull because a poison value in @use may
1530 // change behavior or trigger UB.
1531 // In case 2, we don't need to be concerned about propagating nonnull, as
1532 // any new poison at @use will trigger UB anyways.
1533 // In case 3, we can never propagate nonnull because it may create UB due to
1534 // the noundef on @bar.
1535 if (ValidPG.getAlignment().valueOrOne() < AL.getRetAlignment().valueOrOne())
1536 ValidPG.removeAttribute(Attribute::Alignment);
1537 if (ValidPG.hasAttributes()) {
1538 // Three checks.
1539 // If the callsite has `noundef`, then a poison due to violating the
1540 // return attribute will create UB anyway, so we can always propagate.
1541 // Otherwise, if the return value (callee to be inlined) has `noundef`, we
1542 // can't propagate as a new poison return will cause UB.
1543 // Finally, check if the return value has no uses whose behavior may
1544 // change/may cause UB if we potentially return poison. At the moment this
1545 // is implemented overly conservatively with a single-use check.
1546 // TODO: Update the single-use check to iterate through uses and only bail
1547 // if we have a potentially dangerous use.
1548
1549 if (CB.hasRetAttr(Attribute::NoUndef) ||
1550 (RetVal->hasOneUse() && !RetVal->hasRetAttr(Attribute::NoUndef)))
1551 NewAL = NewAL.addRetAttributes(Context, ValidPG);
1552 }
1553 NewRetVal->setAttributes(NewAL);
1554 }
1555}
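// Illustrative sketch (not from the source; names are hypothetical): for a
// call site
//   %val = call noundef nonnull ptr @callee()
// where @callee ends with
//   %rv = call ptr @foo()
//   ret ptr %rv
// the cloned call to @foo can be given the noundef (UB-implying) and nonnull
// (poison-generating) return attributes, subject to the checks above.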
1556
1557/// If the inlined function has non-byval align arguments, then
1558/// add @llvm.assume-based alignment assumptions to preserve this information.
1561 return;
1562
1564 auto &DL = CB.getCaller()->getParent()->getDataLayout();
1565
1566 // To avoid inserting redundant assumptions, we should check for assumptions
1567 // already in the caller. To do this, we might need a DT of the caller.
1568 DominatorTree DT;
1569 bool DTCalculated = false;
1570
1571 Function *CalledFunc = CB.getCalledFunction();
1572 for (Argument &Arg : CalledFunc->args()) {
1573 if (!Arg.getType()->isPointerTy() || Arg.hasPassPointeeByValueCopyAttr() ||
1574 Arg.hasNUses(0))
1575 continue;
1576 MaybeAlign Alignment = Arg.getParamAlign();
1577 if (!Alignment)
1578 continue;
1579
1580 if (!DTCalculated) {
1581 DT.recalculate(*CB.getCaller());
1582 DTCalculated = true;
1583 }
1584 // If we can already prove the asserted alignment in the context of the
1585 // caller, then don't bother inserting the assumption.
1586 Value *ArgVal = CB.getArgOperand(Arg.getArgNo());
1587 if (getKnownAlignment(ArgVal, DL, &CB, AC, &DT) >= *Alignment)
1588 continue;
1589
1591 DL, ArgVal, Alignment->value());
1592 AC->registerAssumption(cast<AssumeInst>(NewAsmp));
1593 }
1594}
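// Illustrative sketch (not from the source; names are hypothetical): for a
// callee parameter declared "ptr align 16 %arg" whose alignment cannot already
// be proven at the call site, the inserted assumption takes roughly the form
//   call void @llvm.assume(i1 true) [ "align"(ptr %argval, i64 16) ]
// which later passes can use to recover the alignment fact.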
1595
1596static void HandleByValArgumentInit(Type *ByValType, Value *Dst, Value *Src,
1597 Module *M, BasicBlock *InsertBlock,
1598 InlineFunctionInfo &IFI,
1599 Function *CalledFunc) {
1600 IRBuilder<> Builder(InsertBlock, InsertBlock->begin());
1601
1602 Value *Size =
1603 Builder.getInt64(M->getDataLayout().getTypeStoreSize(ByValType));
1604
1605 // Always generate a memcpy of alignment 1 here because we don't know
1606 // the alignment of the src pointer. Other optimizations can infer
1607 // better alignment.
1608 CallInst *CI = Builder.CreateMemCpy(Dst, /*DstAlign*/ Align(1), Src,
1609 /*SrcAlign*/ Align(1), Size);
1610
1611 // The verifier requires that all calls of debug-info-bearing functions
1612 // from debug-info-bearing functions have a debug location (for inlining
1613 // purposes). Assign a dummy location to satisfy the constraint.
1614 if (!CI->getDebugLoc() && InsertBlock->getParent()->getSubprogram())
1615 if (DISubprogram *SP = CalledFunc->getSubprogram())
1616 CI->setDebugLoc(DILocation::get(SP->getContext(), 0, 0, SP));
1617}
1618
1619/// When inlining a call site that has a byval argument,
1620/// we have to make the implicit memcpy explicit by adding it.
1621static Value *HandleByValArgument(Type *ByValType, Value *Arg,
1622 Instruction *TheCall,
1623 const Function *CalledFunc,
1624 InlineFunctionInfo &IFI,
1625 MaybeAlign ByValAlignment) {
1626 Function *Caller = TheCall->getFunction();
1627 const DataLayout &DL = Caller->getParent()->getDataLayout();
1628
1629 // If the called function is readonly, then it could not mutate the caller's
1630 // copy of the byval'd memory. In this case, it is safe to elide the copy and
1631 // temporary.
1632 if (CalledFunc->onlyReadsMemory()) {
1633 // If the byval argument has a specified alignment that is greater than the
1634 // passed in pointer, then we either have to round up the input pointer or
1635 // give up on this transformation.
1636 if (ByValAlignment.valueOrOne() == 1)
1637 return Arg;
1638
1639 AssumptionCache *AC =
1640 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
1641
1642 // If the pointer is already known to be sufficiently aligned, or if we can
1643 // round it up to a larger alignment, then we don't need a temporary.
1644 if (getOrEnforceKnownAlignment(Arg, *ByValAlignment, DL, TheCall, AC) >=
1645 *ByValAlignment)
1646 return Arg;
1647
1648 // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
1649 // for code quality, but rarely happens and is required for correctness.
1650 }
1651
1652 // Create the alloca, using the DataLayout's preferred alignment for the type.
1653 Align Alignment = DL.getPrefTypeAlign(ByValType);
1654
1655 // If the byval had an alignment specified, we *must* use at least that
1656 // alignment, as it is required by the byval argument (and uses of the
1657 // pointer inside the callee).
1658 if (ByValAlignment)
1659 Alignment = std::max(Alignment, *ByValAlignment);
1660
1661 AllocaInst *NewAlloca = new AllocaInst(ByValType, DL.getAllocaAddrSpace(),
1662 nullptr, Alignment, Arg->getName());
1663 NewAlloca->insertBefore(Caller->begin()->begin());
1664 IFI.StaticAllocas.push_back(NewAlloca);
1665
1666 // Uses of the argument in the function should use our new alloca
1667 // instead.
1668 return NewAlloca;
1669}
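// Illustrative sketch (not from the source; names are hypothetical): inlining
//   call void @callee(ptr byval(%struct.S) %p)
// where @callee may write to its argument introduces, roughly,
//   %p.copy = alloca %struct.S     ; placed in the caller's entry block
//   call void @llvm.memcpy.p0.p0.i64(ptr %p.copy, ptr %p, i64 <size>, i1 false)
// and the inlined body then uses %p.copy in place of the byval argument.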
1670
1671// Check whether this Value is used by a lifetime intrinsic.
1673 for (User *U : V->users())
1674 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U))
1675 if (II->isLifetimeStartOrEnd())
1676 return true;
1677 return false;
1678}
1679
1680// Check whether the given alloca already has
1681// lifetime.start or lifetime.end intrinsics.
1683 Type *Ty = AI->getType();
1684 Type *Int8PtrTy =
1685 PointerType::get(Ty->getContext(), Ty->getPointerAddressSpace());
1686 if (Ty == Int8PtrTy)
1687 return isUsedByLifetimeMarker(AI);
1688
1689 // Do a scan to find all the casts to i8*.
1690 for (User *U : AI->users()) {
1691 if (U->getType() != Int8PtrTy) continue;
1692 if (U->stripPointerCasts() != AI) continue;
1694 return true;
1695 }
1696 return false;
1697}
1698
1699/// Return the result of AI->isStaticAlloca() if AI were moved to the entry
1700/// block. Allocas used in inalloca calls and allocas of dynamic array size
1701/// cannot be static.
1703 return isa<Constant>(AI->getArraySize()) && !AI->isUsedWithInAlloca();
1704}
1705
1706/// Returns a DebugLoc for a new DILocation which is a clone of \p OrigDL
1707/// inlined at \p InlinedAt. \p IANodes is an inlined-at cache.
1708static DebugLoc inlineDebugLoc(DebugLoc OrigDL, DILocation *InlinedAt,
1709 LLVMContext &Ctx,
1711 auto IA = DebugLoc::appendInlinedAt(OrigDL, InlinedAt, Ctx, IANodes);
1712 return DILocation::get(Ctx, OrigDL.getLine(), OrigDL.getCol(),
1713 OrigDL.getScope(), IA);
1714}
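// Illustrative sketch (not from the source; metadata numbers are hypothetical):
// a callee debug location
//   !10 = !DILocation(line: 5, column: 3, scope: !7)
// cloned for a call site whose location is !20 becomes
//   !11 = !DILocation(line: 5, column: 3, scope: !7, inlinedAt: !20)
// keeping the original line/column and scope while the inlined-at chain records
// where the code was inlined.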
1715
1716/// Update inlined instructions' line numbers to encode the location where
1717/// these instructions are inlined.
1719 Instruction *TheCall, bool CalleeHasDebugInfo) {
1720 const DebugLoc &TheCallDL = TheCall->getDebugLoc();
1721 if (!TheCallDL)
1722 return;
1723
1724 auto &Ctx = Fn->getContext();
1725 DILocation *InlinedAtNode = TheCallDL;
1726
1727 // Create a unique call site, not to be confused with any other call from the
1728 // same location.
1729 InlinedAtNode = DILocation::getDistinct(
1730 Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
1731 InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());
1732
1733 // Cache the inlined-at nodes as they're built so they are reused; without
1734 // this, every instruction's inlined-at chain would become distinct from
1735 // every other.
1737
1738 // Check if we are not generating inline line tables and want to use
1739 // the call site location instead.
1740 bool NoInlineLineTables = Fn->hasFnAttribute("no-inline-line-tables");
1741
1742 // Helper-util for updating the metadata attached to an instruction.
1743 auto UpdateInst = [&](Instruction &I) {
1744 // Loop metadata needs to be updated so that the start and end locs
1745 // reference inlined-at locations.
1746 auto updateLoopInfoLoc = [&Ctx, &InlinedAtNode,
1747 &IANodes](Metadata *MD) -> Metadata * {
1748 if (auto *Loc = dyn_cast_or_null<DILocation>(MD))
1749 return inlineDebugLoc(Loc, InlinedAtNode, Ctx, IANodes).get();
1750 return MD;
1751 };
1752 updateLoopMetadataDebugLocations(I, updateLoopInfoLoc);
1753
1754 if (!NoInlineLineTables)
1755 if (DebugLoc DL = I.getDebugLoc()) {
1756 DebugLoc IDL =
1757 inlineDebugLoc(DL, InlinedAtNode, I.getContext(), IANodes);
1758 I.setDebugLoc(IDL);
1759 return;
1760 }
1761
1762 if (CalleeHasDebugInfo && !NoInlineLineTables)
1763 return;
1764
1765 // If the inlined instruction has no line number, or if inline info
1766 // is not being generated, make it look as if it originates from the call
1767 // location. This is important for __attribute__((always_inline, nodebug))
1768 // functions which must use the caller location for all instructions in
1769 // their function body.
1770
1771 // Don't update static allocas, as they may get moved later.
1772 if (auto *AI = dyn_cast<AllocaInst>(&I))
1774 return;
1775
1776 // Do not force a debug loc for pseudo probes, since they do not need to
1777 // be debuggable, and also they are expected to have a zero/null dwarf
1778 // discriminator at this point which could be violated otherwise.
1779 if (isa<PseudoProbeInst>(I))
1780 return;
1781
1782 I.setDebugLoc(TheCallDL);
1783 };
1784
1785 // Helper-util for updating debug-info records attached to instructions.
1786 auto UpdateDVR = [&](DbgRecord *DVR) {
1787 assert(DVR->getDebugLoc() && "Debug Value must have debug loc");
1788 if (NoInlineLineTables) {
1789 DVR->setDebugLoc(TheCallDL);
1790 return;
1791 }
1792 DebugLoc DL = DVR->getDebugLoc();
1793 DebugLoc IDL =
1794 inlineDebugLoc(DL, InlinedAtNode,
1795 DVR->getMarker()->getParent()->getContext(), IANodes);
1796 DVR->setDebugLoc(IDL);
1797 };
1798
1799 // Iterate over all instructions, updating metadata and debug-info records.
1800 for (; FI != Fn->end(); ++FI) {
1801 for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE;
1802 ++BI) {
1803 UpdateInst(*BI);
1804 for (DbgRecord &DVR : BI->getDbgRecordRange()) {
1805 UpdateDVR(&DVR);
1806 }
1807 }
1808
1809 // Remove debug info intrinsics if we're not keeping inline info.
1810 if (NoInlineLineTables) {
1811 BasicBlock::iterator BI = FI->begin();
1812 while (BI != FI->end()) {
1813 if (isa<DbgInfoIntrinsic>(BI)) {
1814 BI = BI->eraseFromParent();
1815 continue;
1816 } else {
1817 BI->dropDbgRecords();
1818 }
1819 ++BI;
1820 }
1821 }
1822 }
1823}
1824
1825#undef DEBUG_TYPE
1826#define DEBUG_TYPE "assignment-tracking"
1827/// Find Alloca and linked DbgAssignIntrinsic for locals escaped by \p CB.
1829 const CallBase &CB) {
1830 at::StorageToVarsMap EscapedLocals;
1832
1833 LLVM_DEBUG(
1834 errs() << "# Finding caller local variables escaped by callee\n");
1835 for (const Value *Arg : CB.args()) {
1836 LLVM_DEBUG(errs() << "INSPECT: " << *Arg << "\n");
1837 if (!Arg->getType()->isPointerTy()) {
1838 LLVM_DEBUG(errs() << " | SKIP: Not a pointer\n");
1839 continue;
1840 }
1841
1842 const Instruction *I = dyn_cast<Instruction>(Arg);
1843 if (!I) {
1844 LLVM_DEBUG(errs() << " | SKIP: Not result of instruction\n");
1845 continue;
1846 }
1847
1848 // Walk back to the base storage.
1849 assert(Arg->getType()->isPtrOrPtrVectorTy());
1850 APInt TmpOffset(DL.getIndexTypeSizeInBits(Arg->getType()), 0, false);
1851 const AllocaInst *Base = dyn_cast<AllocaInst>(
1852 Arg->stripAndAccumulateConstantOffsets(DL, TmpOffset, true));
1853 if (!Base) {
1854 LLVM_DEBUG(errs() << " | SKIP: Couldn't walk back to base storage\n");
1855 continue;
1856 }
1857
1858 assert(Base);
1859 LLVM_DEBUG(errs() << " | BASE: " << *Base << "\n");
1860 // We only need to process each base address once - skip any duplicates.
1861 if (!SeenBases.insert(Base).second)
1862 continue;
1863
1864 // Find all local variables associated with the backing storage.
1865 auto CollectAssignsForStorage = [&](auto *DbgAssign) {
1866 // Skip variables from inlined functions - they are not local variables.
1867 if (DbgAssign->getDebugLoc().getInlinedAt())
1868 return;
1869 LLVM_DEBUG(errs() << " > DEF : " << *DbgAssign << "\n");
1870 EscapedLocals[Base].insert(at::VarRecord(DbgAssign));
1871 };
1872 for_each(at::getAssignmentMarkers(Base), CollectAssignsForStorage);
1873 for_each(at::getDVRAssignmentMarkers(Base), CollectAssignsForStorage);
1874 }
1875 return EscapedLocals;
1876}
1877
1879 const CallBase &CB) {
1880 LLVM_DEBUG(errs() << "trackInlinedStores into "
1881 << Start->getParent()->getName() << " from "
1882 << CB.getCalledFunction()->getName() << "\n");
1883 std::unique_ptr<DataLayout> DL = std::make_unique<DataLayout>(CB.getModule());
1885}
1886
1887/// Update inlined instructions' DIAssignID metadata. We need to do this
1888/// because otherwise a function inlined more than once into the same function
1889/// would cause DIAssignIDs to be shared by many instructions.
1891 // Map {Old, New} metadata. Not used directly - use GetNewID.
1893 auto GetNewID = [&Map](Metadata *Old) {
1894 DIAssignID *OldID = cast<DIAssignID>(Old);
1895 if (DIAssignID *NewID = Map.lookup(OldID))
1896 return NewID;
1898 Map[OldID] = NewID;
1899 return NewID;
1900 };
1901 // Loop over all the inlined instructions. If we find a DIAssignID
1902 // attachment or use, replace it with a new version.
1903 for (auto BBI = Start; BBI != End; ++BBI) {
1904 for (Instruction &I : *BBI) {
1905 for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) {
1906 if (DVR.isDbgAssign())
1907 DVR.setAssignId(GetNewID(DVR.getAssignID()));
1908 }
1909 if (auto *ID = I.getMetadata(LLVMContext::MD_DIAssignID))
1910 I.setMetadata(LLVMContext::MD_DIAssignID, GetNewID(ID));
1911 else if (auto *DAI = dyn_cast<DbgAssignIntrinsic>(&I))
1912 DAI->setAssignId(GetNewID(DAI->getAssignID()));
1913 }
1914 }
1915}
1916#undef DEBUG_TYPE
1917#define DEBUG_TYPE "inline-function"
1918
1919/// Update the block frequencies of the caller after a callee has been inlined.
1920///
1921/// Each block cloned into the caller has its block frequency scaled by the
1922/// ratio of CallSiteFreq/CalleeEntryFreq. This ensures that the cloned copy of
1923/// callee's entry block gets the same frequency as the callsite block and the
1924/// relative frequencies of all cloned blocks remain the same after cloning.
1925static void updateCallerBFI(BasicBlock *CallSiteBlock,
1926 const ValueToValueMapTy &VMap,
1927 BlockFrequencyInfo *CallerBFI,
1928 BlockFrequencyInfo *CalleeBFI,
1929 const BasicBlock &CalleeEntryBlock) {
1931 for (auto Entry : VMap) {
1932 if (!isa<BasicBlock>(Entry.first) || !Entry.second)
1933 continue;
1934 auto *OrigBB = cast<BasicBlock>(Entry.first);
1935 auto *ClonedBB = cast<BasicBlock>(Entry.second);
1936 BlockFrequency Freq = CalleeBFI->getBlockFreq(OrigBB);
1937 if (!ClonedBBs.insert(ClonedBB).second) {
1938 // Multiple blocks in the callee might get mapped to one cloned block in
1939 // the caller since we prune the callee as we clone it. When that happens,
1940 // we want to use the maximum among the original blocks' frequencies.
1941 BlockFrequency NewFreq = CallerBFI->getBlockFreq(ClonedBB);
1942 if (NewFreq > Freq)
1943 Freq = NewFreq;
1944 }
1945 CallerBFI->setBlockFreq(ClonedBB, Freq);
1946 }
1947 BasicBlock *EntryClone = cast<BasicBlock>(VMap.lookup(&CalleeEntryBlock));
1948 CallerBFI->setBlockFreqAndScale(
1949 EntryClone, CallerBFI->getBlockFreq(CallSiteBlock), ClonedBBs);
1950}
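// Worked example with hypothetical numbers: if the callee's entry frequency is
// 8, one of its blocks has frequency 24, and the call site block's frequency
// is 2, the cloned block is assigned 24 * (2 / 8) = 6 in the caller, preserving
// the callee-relative ratios.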
1951
1952/// Update the branch metadata for cloned call instructions.
1953static void updateCallProfile(Function *Callee, const ValueToValueMapTy &VMap,
1954 const ProfileCount &CalleeEntryCount,
1955 const CallBase &TheCall, ProfileSummaryInfo *PSI,
1956 BlockFrequencyInfo *CallerBFI) {
1957 if (CalleeEntryCount.isSynthetic() || CalleeEntryCount.getCount() < 1)
1958 return;
1959 auto CallSiteCount =
1960 PSI ? PSI->getProfileCount(TheCall, CallerBFI) : std::nullopt;
1961 int64_t CallCount =
1962 std::min(CallSiteCount.value_or(0), CalleeEntryCount.getCount());
1963 updateProfileCallee(Callee, -CallCount, &VMap);
1964}
1965
1967 Function *Callee, int64_t EntryDelta,
1969 auto CalleeCount = Callee->getEntryCount();
1970 if (!CalleeCount)
1971 return;
1972
1973 const uint64_t PriorEntryCount = CalleeCount->getCount();
1974
1975 // Since CallSiteCount is an estimate, it could exceed the original callee
1976 // count; clamp the new count to 0 to guard against underflow.
1977 const uint64_t NewEntryCount =
1978 (EntryDelta < 0 && static_cast<uint64_t>(-EntryDelta) > PriorEntryCount)
1979 ? 0
1980 : PriorEntryCount + EntryDelta;
1981
1982 // Are we updating counts during inlining (i.e. was a VMap provided)?
1983 if (VMap) {
1984 uint64_t CloneEntryCount = PriorEntryCount - NewEntryCount;
1985 for (auto Entry : *VMap) {
1986 if (isa<CallInst>(Entry.first))
1987 if (auto *CI = dyn_cast_or_null<CallInst>(Entry.second))
1988 CI->updateProfWeight(CloneEntryCount, PriorEntryCount);
1989 if (isa<InvokeInst>(Entry.first))
1990 if (auto *II = dyn_cast_or_null<InvokeInst>(Entry.second))
1991 II->updateProfWeight(CloneEntryCount, PriorEntryCount);
1992 }
1993 }
1994
1995 if (EntryDelta) {
1996 Callee->setEntryCount(NewEntryCount);
1997
1998 for (BasicBlock &BB : *Callee)
1999 // No need to update the callsite if it is pruned during inlining.
2000 if (!VMap || VMap->count(&BB))
2001 for (Instruction &I : BB) {
2002 if (CallInst *CI = dyn_cast<CallInst>(&I))
2003 CI->updateProfWeight(NewEntryCount, PriorEntryCount);
2004 if (InvokeInst *II = dyn_cast<InvokeInst>(&I))
2005 II->updateProfWeight(NewEntryCount, PriorEntryCount);
2006 }
2007 }
2008}
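// Worked example with hypothetical numbers: if the callee's entry count is 100
// and the inlined call site accounts for 30 of it (EntryDelta = -30), the
// callee's entry count becomes 70, profile weights on the cloned calls are
// scaled by 30/100, and weights on the remaining calls in the original callee
// are scaled by 70/100.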
2009
2010/// An operand bundle "clang.arc.attachedcall" on a call indicates the call
2011/// result is implicitly consumed by a call to retainRV or claimRV immediately
2012/// after the call. This function inlines the retainRV/claimRV calls.
2013///
2014/// There are three cases to consider:
2015///
2016/// 1. If there is a call to autoreleaseRV that takes a pointer to the returned
2017/// object in the callee return block, the autoreleaseRV call and the
2018/// retainRV/claimRV call in the caller cancel out. If the call in the caller
2019/// is a claimRV call, a call to objc_release is emitted.
2020///
2021/// 2. If there is a call in the callee return block that doesn't have operand
2022/// bundle "clang.arc.attachedcall", the operand bundle on the original call
2023/// is transferred to the call in the callee.
2024///
2025/// 3. Otherwise, a call to objc_retain is inserted if the call in the caller is
2026/// a retainRV call.
2027static void
2029 const SmallVectorImpl<ReturnInst *> &Returns) {
2030 Module *Mod = CB.getModule();
2031 assert(objcarc::isRetainOrClaimRV(RVCallKind) && "unexpected ARC function");
2032 bool IsRetainRV = RVCallKind == objcarc::ARCInstKind::RetainRV,
2033 IsUnsafeClaimRV = !IsRetainRV;
2034
2035 for (auto *RI : Returns) {
2036 Value *RetOpnd = objcarc::GetRCIdentityRoot(RI->getOperand(0));
2037 bool InsertRetainCall = IsRetainRV;
2038 IRBuilder<> Builder(RI->getContext());
2039
2040 // Walk backwards through the basic block looking for either a matching
2041 // autoreleaseRV call or an unannotated call.
2042 auto InstRange = llvm::make_range(++(RI->getIterator().getReverse()),
2043 RI->getParent()->rend());
2044 for (Instruction &I : llvm::make_early_inc_range(InstRange)) {
2045 // Ignore casts.
2046 if (isa<CastInst>(I))
2047 continue;
2048
2049 if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
2050 if (II->getIntrinsicID() != Intrinsic::objc_autoreleaseReturnValue ||
2051 !II->hasNUses(0) ||
2052 objcarc::GetRCIdentityRoot(II->getOperand(0)) != RetOpnd)
2053 break;
2054
2055 // If we've found a matching autoreleaseRV call:
2056 // - If claimRV is attached to the call, insert a call to objc_release
2057 // and erase the autoreleaseRV call.
2058 // - If retainRV is attached to the call, just erase the autoreleaseRV
2059 // call.
2060 if (IsUnsafeClaimRV) {
2061 Builder.SetInsertPoint(II);
2062 Function *IFn =
2063 Intrinsic::getDeclaration(Mod, Intrinsic::objc_release);
2064 Builder.CreateCall(IFn, RetOpnd, "");
2065 }
2066 II->eraseFromParent();
2067 InsertRetainCall = false;
2068 break;
2069 }
2070
2071 auto *CI = dyn_cast<CallInst>(&I);
2072
2073 if (!CI)
2074 break;
2075
2076 if (objcarc::GetRCIdentityRoot(CI) != RetOpnd ||
2078 break;
2079
2080 // If we've found an unannotated call that defines RetOpnd, add a
2081 // "clang.arc.attachedcall" operand bundle.
2082 Value *BundleArgs[] = {*objcarc::getAttachedARCFunction(&CB)};
2083 OperandBundleDef OB("clang.arc.attachedcall", BundleArgs);
2084 auto *NewCall = CallBase::addOperandBundle(
2085 CI, LLVMContext::OB_clang_arc_attachedcall, OB, CI->getIterator());
2086 NewCall->copyMetadata(*CI);
2087 CI->replaceAllUsesWith(NewCall);
2088 CI->eraseFromParent();
2089 InsertRetainCall = false;
2090 break;
2091 }
2092
2093 if (InsertRetainCall) {
2094 // The retainRV is attached to the call and we've failed to find a
2095 // matching autoreleaseRV or an annotated call in the callee. Emit a call
2096 // to objc_retain.
2097 Builder.SetInsertPoint(RI);
2098 Function *IFn = Intrinsic::getDeclaration(Mod, Intrinsic::objc_retain);
2099 Builder.CreateCall(IFn, RetOpnd, "");
2100 }
2101 }
2102}
2103
2104/// This function inlines the called function into the basic block of the
2105/// caller. This returns false if it is not possible to inline this call.
2106/// The program is still in a well defined state if this occurs though.
2107///
2108/// Note that this only does one level of inlining. For example, if the
2109/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
2110/// exists in the instruction stream. Similarly this will inline a recursive
2111/// function by one level.
2113 bool MergeAttributes,
2114 AAResults *CalleeAAR,
2115 bool InsertLifetime,
2116 Function *ForwardVarArgsTo) {
2117 assert(CB.getParent() && CB.getFunction() && "Instruction not in function!");
2118
2119 // FIXME: we don't inline callbr yet.
2120 if (isa<CallBrInst>(CB))
2121 return InlineResult::failure("We don't inline callbr yet.");
2122
2123 // If IFI has any state in it, zap it before we fill it in.
2124 IFI.reset();
2125
2126 Function *CalledFunc = CB.getCalledFunction();
2127 if (!CalledFunc || // Can't inline external function or indirect
2128 CalledFunc->isDeclaration()) // call!
2129 return InlineResult::failure("external or indirect");
2130
2131 // The inliner does not know how to inline through calls with operand bundles
2132 // in general ...
2133 Value *ConvergenceControlToken = nullptr;
2134 if (CB.hasOperandBundles()) {
2135 for (int i = 0, e = CB.getNumOperandBundles(); i != e; ++i) {
2136 auto OBUse = CB.getOperandBundleAt(i);
2137 uint32_t Tag = OBUse.getTagID();
2138 // ... but it knows how to inline through "deopt" operand bundles ...
2140 continue;
2141 // ... and "funclet" operand bundles.
2143 continue;
2145 continue;
2147 continue;
2149 ConvergenceControlToken = OBUse.Inputs[0].get();
2150 continue;
2151 }
2152
2153 return InlineResult::failure("unsupported operand bundle");
2154 }
2155 }
2156
2157 // FIXME: The check below is redundant and incomplete. According to spec, if a
2158 // convergent call is missing a token, then the caller is using uncontrolled
2159 // convergence. If the callee has an entry intrinsic, then the callee is using
2160 // controlled convergence, and the call cannot be inlined. A proper
2161 // implementation of this check requires a whole new analysis that identifies
2162 // convergence in every function. For now, we skip that and just do this one
2163 // cursory check. The underlying assumption is that in a compiler flow that
2164 // fully implements convergence control tokens, there is no mixing of
2165 // controlled and uncontrolled convergent operations in the whole program.
2166 if (CB.isConvergent()) {
2167 auto *I = CalledFunc->getEntryBlock().getFirstNonPHI();
2168 if (auto *IntrinsicCall = dyn_cast<IntrinsicInst>(I)) {
2169 if (IntrinsicCall->getIntrinsicID() ==
2170 Intrinsic::experimental_convergence_entry) {
2171 if (!ConvergenceControlToken) {
2172 return InlineResult::failure(
2173 "convergent call needs convergencectrl operand");
2174 }
2175 }
2176 }
2177 }
2178
2179 // If the call to the callee cannot throw, set the 'nounwind' flag on any
2180 // calls that we inline.
2181 bool MarkNoUnwind = CB.doesNotThrow();
2182
2183 BasicBlock *OrigBB = CB.getParent();
2184 Function *Caller = OrigBB->getParent();
2185
2186 // GC poses two hazards to inlining, which only occur when the callee has GC:
2187 // 1. If the caller has no GC, then the callee's GC must be propagated to the
2188 // caller.
2189 // 2. If the caller has a differing GC, it is invalid to inline.
2190 if (CalledFunc->hasGC()) {
2191 if (!Caller->hasGC())
2192 Caller->setGC(CalledFunc->getGC());
2193 else if (CalledFunc->getGC() != Caller->getGC())
2194 return InlineResult::failure("incompatible GC");
2195 }
2196
2197 // Get the personality function from the callee if it contains a landing pad.
2198 Constant *CalledPersonality =
2199 CalledFunc->hasPersonalityFn()
2200 ? CalledFunc->getPersonalityFn()->stripPointerCasts()
2201 : nullptr;
2202
2203 // Find the personality function used by the landing pads of the caller. If it
2204 // exists, then check to see that it matches the personality function used in
2205 // the callee.
2206 Constant *CallerPersonality =
2207 Caller->hasPersonalityFn()
2208 ? Caller->getPersonalityFn()->stripPointerCasts()
2209 : nullptr;
2210 if (CalledPersonality) {
2211 if (!CallerPersonality)
2212 Caller->setPersonalityFn(CalledPersonality);
2213 // If the personality functions match, then we can perform the
2214 // inlining. Otherwise, we can't inline.
2215 // TODO: This isn't 100% true. Some personality functions are proper
2216 // supersets of others and can be used in place of the other.
2217 else if (CalledPersonality != CallerPersonality)
2218 return InlineResult::failure("incompatible personality");
2219 }
2220
2221 // We need to figure out which funclet the callsite was in so that we may
2222 // properly nest the callee.
2223 Instruction *CallSiteEHPad = nullptr;
2224 if (CallerPersonality) {
2225 EHPersonality Personality = classifyEHPersonality(CallerPersonality);
2226 if (isScopedEHPersonality(Personality)) {
2227 std::optional<OperandBundleUse> ParentFunclet =
2229 if (ParentFunclet)
2230 CallSiteEHPad = cast<FuncletPadInst>(ParentFunclet->Inputs.front());
2231
2232 // OK, the inlining site is legal. What about the target function?
2233
2234 if (CallSiteEHPad) {
2235 if (Personality == EHPersonality::MSVC_CXX) {
2236 // The MSVC personality cannot tolerate catches getting inlined into
2237 // cleanup funclets.
2238 if (isa<CleanupPadInst>(CallSiteEHPad)) {
2239 // Ok, the call site is within a cleanuppad. Let's check the callee
2240 // for catchpads.
2241 for (const BasicBlock &CalledBB : *CalledFunc) {
2242 if (isa<CatchSwitchInst>(CalledBB.getFirstNonPHI()))
2243 return InlineResult::failure("catch in cleanup funclet");
2244 }
2245 }
2246 } else if (isAsynchronousEHPersonality(Personality)) {
2247 // SEH is even less tolerant; there may not be any sort of exceptional
2248 // funclet in the callee.
2249 for (const BasicBlock &CalledBB : *CalledFunc) {
2250 if (CalledBB.isEHPad())
2251 return InlineResult::failure("SEH in cleanup funclet");
2252 }
2253 }
2254 }
2255 }
2256 }
2257
2258 // Determine if we are dealing with a call in an EHPad which does not unwind
2259 // to caller.
2260 bool EHPadForCallUnwindsLocally = false;
2261 if (CallSiteEHPad && isa<CallInst>(CB)) {
2262 UnwindDestMemoTy FuncletUnwindMap;
2263 Value *CallSiteUnwindDestToken =
2264 getUnwindDestToken(CallSiteEHPad, FuncletUnwindMap);
2265
2266 EHPadForCallUnwindsLocally =
2267 CallSiteUnwindDestToken &&
2268 !isa<ConstantTokenNone>(CallSiteUnwindDestToken);
2269 }
2270
2271 // Get an iterator to the last basic block in the function, which will have
2272 // the new function inlined after it.
2273 Function::iterator LastBlock = --Caller->end();
2274
2275 // Make sure to capture all of the return instructions from the cloned
2276 // function.
2278 ClonedCodeInfo InlinedFunctionInfo;
2279 Function::iterator FirstNewBlock;
2280
2281 { // Scope to destroy VMap after cloning.
2282 ValueToValueMapTy VMap;
2283 struct ByValInit {
2284 Value *Dst;
2285 Value *Src;
2286 Type *Ty;
2287 };
2288 // Keep a list of pair (dst, src) to emit byval initializations.
2289 SmallVector<ByValInit, 4> ByValInits;
2290
2291 // When inlining a function that contains noalias scope metadata,
2292 // this metadata needs to be cloned so that the inlined blocks
2293 // have different "unique scopes" at every call site.
2294 // Track the metadata that must be cloned. Do this before other changes to
2295 // the function, so that we do not get in trouble when inlining caller ==
2296 // callee.
2297 ScopedAliasMetadataDeepCloner SAMetadataCloner(CB.getCalledFunction());
2298
2299 auto &DL = Caller->getParent()->getDataLayout();
2300
2301 // Calculate the vector of arguments to pass into the function cloner, which
2302 // matches up the formal to the actual argument values.
2303 auto AI = CB.arg_begin();
2304 unsigned ArgNo = 0;
2305 for (Function::arg_iterator I = CalledFunc->arg_begin(),
2306 E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
2307 Value *ActualArg = *AI;
2308
2309 // When byval arguments are actually inlined, we need to make the copy implied
2310 // by them explicit. However, we don't do this if the callee is readonly
2311 // or readnone, because the copy would be unneeded: the callee doesn't
2312 // modify the struct.
2313 if (CB.isByValArgument(ArgNo)) {
2314 ActualArg = HandleByValArgument(CB.getParamByValType(ArgNo), ActualArg,
2315 &CB, CalledFunc, IFI,
2316 CalledFunc->getParamAlign(ArgNo));
2317 if (ActualArg != *AI)
2318 ByValInits.push_back(
2319 {ActualArg, (Value *)*AI, CB.getParamByValType(ArgNo)});
2320 }
2321
2322 VMap[&*I] = ActualArg;
2323 }
2324
2325 // TODO: Remove this when users have been updated to the assume bundles.
2326 // Add alignment assumptions if necessary. We do this before the inlined
2327 // instructions are actually cloned into the caller so that we can easily
2328 // check what will be known at the start of the inlined code.
2329 AddAlignmentAssumptions(CB, IFI);
2330
2331 AssumptionCache *AC =
2332 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
2333
2334 /// Preserve all attributes of the call and its parameters.
2335 salvageKnowledge(&CB, AC);
2336
2337 // We want the inliner to prune the code as it copies. We would LOVE to
2338 // have no dead or constant instructions leftover after inlining occurs
2339 // (which can happen, e.g., because an argument was constant), but we'll be
2340 // happy with whatever the cloner can do.
2341 CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
2342 /*ModuleLevelChanges=*/false, Returns, ".i",
2343 &InlinedFunctionInfo);
2344 // Remember the first block that is newly cloned over.
2345 FirstNewBlock = LastBlock; ++FirstNewBlock;
2346
2347 // Insert retainRV/claimRV runtime calls.
2349 if (RVCallKind != objcarc::ARCInstKind::None)
2350 inlineRetainOrClaimRVCalls(CB, RVCallKind, Returns);
2351
2352 // Update caller/callee profiles only when requested. For sample loader
2353 // inlining, the context-sensitive inlinee profile doesn't need to be
2354 // subtracted from callee profile, and the inlined clone also doesn't need
2355 // to be scaled based on call site count.
2356 if (IFI.UpdateProfile) {
2357 if (IFI.CallerBFI != nullptr && IFI.CalleeBFI != nullptr)
2358 // Update the BFI of blocks cloned into the caller.
2359 updateCallerBFI(OrigBB, VMap, IFI.CallerBFI, IFI.CalleeBFI,
2360 CalledFunc->front());
2361
2362 if (auto Profile = CalledFunc->getEntryCount())
2363 updateCallProfile(CalledFunc, VMap, *Profile, CB, IFI.PSI,
2364 IFI.CallerBFI);
2365 }
2366
2367 // Inject byval arguments initialization.
2368 for (ByValInit &Init : ByValInits)
2369 HandleByValArgumentInit(Init.Ty, Init.Dst, Init.Src, Caller->getParent(),
2370 &*FirstNewBlock, IFI, CalledFunc);
2371
2372 std::optional<OperandBundleUse> ParentDeopt =
2374 if (ParentDeopt) {
2376
2377 for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
2378 CallBase *ICS = dyn_cast_or_null<CallBase>(VH);
2379 if (!ICS)
2380 continue; // instruction was DCE'd or RAUW'ed to undef
2381
2382 OpDefs.clear();
2383
2384 OpDefs.reserve(ICS->getNumOperandBundles());
2385
2386 for (unsigned COBi = 0, COBe = ICS->getNumOperandBundles(); COBi < COBe;
2387 ++COBi) {
2388 auto ChildOB = ICS->getOperandBundleAt(COBi);
2389 if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
2390 // If the inlined call has other operand bundles, let them be
2391 OpDefs.emplace_back(ChildOB);
2392 continue;
2393 }
2394
2395 // It may be useful to separate this logic (of handling operand
2396 // bundles) out to a separate "policy" component if this gets crowded.
2397 // Prepend the parent's deoptimization continuation to the newly
2398 // inlined call's deoptimization continuation.
2399 std::vector<Value *> MergedDeoptArgs;
2400 MergedDeoptArgs.reserve(ParentDeopt->Inputs.size() +
2401 ChildOB.Inputs.size());
2402
2403 llvm::append_range(MergedDeoptArgs, ParentDeopt->Inputs);
2404 llvm::append_range(MergedDeoptArgs, ChildOB.Inputs);
2405
2406 OpDefs.emplace_back("deopt", std::move(MergedDeoptArgs));
2407 }
2408
2409 Instruction *NewI = CallBase::Create(ICS, OpDefs, ICS->getIterator());
2410
2411 // Note: the RAUW does the appropriate fixup in VMap, so we need to do
2412 // this even if the call returns void.
2413 ICS->replaceAllUsesWith(NewI);
2414
2415 VH = nullptr;
2416 ICS->eraseFromParent();
2417 }
2418 }
2419
2420 // For 'nodebug' functions, the associated DISubprogram is always null.
2421 // Conservatively avoid propagating the callsite debug location to
2422 // instructions inlined from a function whose DISubprogram is not null.
2423 fixupLineNumbers(Caller, FirstNewBlock, &CB,
2424 CalledFunc->getSubprogram() != nullptr);
2425
2426 if (isAssignmentTrackingEnabled(*Caller->getParent())) {
2427 // Interpret inlined stores to caller-local variables as assignments.
2428 trackInlinedStores(FirstNewBlock, Caller->end(), CB);
2429
2430 // Update DIAssignID metadata attachments and uses so that they are
2431 // unique to this inlined instance.
2432 fixupAssignments(FirstNewBlock, Caller->end());
2433 }
2434
2435 // Now clone the inlined noalias scope metadata.
2436 SAMetadataCloner.clone();
2437 SAMetadataCloner.remap(FirstNewBlock, Caller->end());
2438
2439 // Add noalias metadata if necessary.
2440 AddAliasScopeMetadata(CB, VMap, DL, CalleeAAR, InlinedFunctionInfo);
2441
2442 // Clone return attributes on the callsite into the calls within the inlined
2443 // function which feed into its return value.
2444 AddReturnAttributes(CB, VMap);
2445
2446 // Clone attributes on the params of the callsite to calls within the
2447 // inlined function which use the same param.
2449
2450 propagateMemProfMetadata(CalledFunc, CB,
2451 InlinedFunctionInfo.ContainsMemProfMetadata, VMap);
2452
2453 // Propagate metadata on the callsite if necessary.
2454 PropagateCallSiteMetadata(CB, FirstNewBlock, Caller->end());
2455
2456 // Register any cloned assumptions.
2457 if (IFI.GetAssumptionCache)
2458 for (BasicBlock &NewBlock :
2459 make_range(FirstNewBlock->getIterator(), Caller->end()))
2460 for (Instruction &I : NewBlock)
2461 if (auto *II = dyn_cast<AssumeInst>(&I))
2462 IFI.GetAssumptionCache(*Caller).registerAssumption(II);
2463 }
2464
2465 if (ConvergenceControlToken) {
2466 auto *I = FirstNewBlock->getFirstNonPHI();
2467 if (auto *IntrinsicCall = dyn_cast<IntrinsicInst>(I)) {
2468 if (IntrinsicCall->getIntrinsicID() ==
2469 Intrinsic::experimental_convergence_entry) {
2470 IntrinsicCall->replaceAllUsesWith(ConvergenceControlToken);
2471 IntrinsicCall->eraseFromParent();
2472 }
2473 }
2474 }
2475
2476 // If there are any alloca instructions in the block that used to be the entry
2477 // block for the callee, move them to the entry block of the caller. First
2478 // calculate which instruction they should be inserted before. We insert the
2479 // instructions at the end of the current alloca list.
2480 {
2481 BasicBlock::iterator InsertPoint = Caller->begin()->begin();
2482 for (BasicBlock::iterator I = FirstNewBlock->begin(),
2483 E = FirstNewBlock->end(); I != E; ) {
2484 AllocaInst *AI = dyn_cast<AllocaInst>(I++);
2485 if (!AI) continue;
2486
2487 // If the alloca is now dead, remove it. This often occurs due to code
2488 // specialization.
2489 if (AI->use_empty()) {
2490 AI->eraseFromParent();
2491 continue;
2492 }
2493
2495 continue;
2496
2497 // Keep track of the static allocas that we inline into the caller.
2498 IFI.StaticAllocas.push_back(AI);
2499
2500 // Scan for the block of allocas that we can move over, and move them
2501 // all at once.
2502 while (isa<AllocaInst>(I) &&
2503 !cast<AllocaInst>(I)->use_empty() &&
2504 allocaWouldBeStaticInEntry(cast<AllocaInst>(I))) {
2505 IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
2506 ++I;
2507 }
2508
2509 // Transfer all of the allocas over in a block. Using splice means
2510 // that the instructions aren't removed from the symbol table, then
2511 // reinserted.
2512 I.setTailBit(true);
2513 Caller->getEntryBlock().splice(InsertPoint, &*FirstNewBlock,
2514 AI->getIterator(), I);
2515 }
2516 }
2517
2518 SmallVector<Value*,4> VarArgsToForward;
2519 SmallVector<AttributeSet, 4> VarArgsAttrs;
2520 for (unsigned i = CalledFunc->getFunctionType()->getNumParams();
2521 i < CB.arg_size(); i++) {
2522 VarArgsToForward.push_back(CB.getArgOperand(i));
2523 VarArgsAttrs.push_back(CB.getAttributes().getParamAttrs(i));
2524 }
2525
2526 bool InlinedMustTailCalls = false, InlinedDeoptimizeCalls = false;
2527 if (InlinedFunctionInfo.ContainsCalls) {
2528 CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
2529 if (CallInst *CI = dyn_cast<CallInst>(&CB))
2530 CallSiteTailKind = CI->getTailCallKind();
2531
2532 // For inlining purposes, the "notail" marker is the same as no marker.
2533 if (CallSiteTailKind == CallInst::TCK_NoTail)
2534 CallSiteTailKind = CallInst::TCK_None;
2535
2536 for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
2537 ++BB) {
2539 CallInst *CI = dyn_cast<CallInst>(&I);
2540 if (!CI)
2541 continue;
2542
2543 // Forward varargs from inlined call site to calls to the
2544 // ForwardVarArgsTo function, if requested, and to musttail calls.
2545 if (!VarArgsToForward.empty() &&
2546 ((ForwardVarArgsTo &&
2547 CI->getCalledFunction() == ForwardVarArgsTo) ||
2548 CI->isMustTailCall())) {
2549 // Collect attributes for non-vararg parameters.
2550 AttributeList Attrs = CI->getAttributes();
2552 if (!Attrs.isEmpty() || !VarArgsAttrs.empty()) {
2553 for (unsigned ArgNo = 0;
2554 ArgNo < CI->getFunctionType()->getNumParams(); ++ArgNo)
2555 ArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
2556 }
2557
2558 // Add VarArg attributes.
2559 ArgAttrs.append(VarArgsAttrs.begin(), VarArgsAttrs.end());
2560 Attrs = AttributeList::get(CI->getContext(), Attrs.getFnAttrs(),
2561 Attrs.getRetAttrs(), ArgAttrs);
2562 // Add VarArgs to existing parameters.
2563 SmallVector<Value *, 6> Params(CI->args());
2564 Params.append(VarArgsToForward.begin(), VarArgsToForward.end());
2565 CallInst *NewCI = CallInst::Create(
2566 CI->getFunctionType(), CI->getCalledOperand(), Params, "", CI->getIterator());
2567 NewCI->setDebugLoc(CI->getDebugLoc());
2568 NewCI->setAttributes(Attrs);
2569 NewCI->setCallingConv(CI->getCallingConv());
2570 CI->replaceAllUsesWith(NewCI);
2571 CI->eraseFromParent();
2572 CI = NewCI;
2573 }
2574
2575 if (Function *F = CI->getCalledFunction())
2576 InlinedDeoptimizeCalls |=
2577 F->getIntrinsicID() == Intrinsic::experimental_deoptimize;
2578
2579 // We need to reduce the strength of any inlined tail calls. For
2580 // musttail, we have to avoid introducing potential unbounded stack
2581 // growth. For example, if functions 'f' and 'g' are mutually recursive
2582 // with musttail, we can inline 'g' into 'f' so long as we preserve
2583 // musttail on the cloned call to 'f'. If either the inlined call site
2584 // or the cloned call site is *not* musttail, the program already has
2585 // one frame of stack growth, so it's safe to remove musttail. Here is
2586 // a table of example transformations:
2587 //
2588 // f -> musttail g -> musttail f ==> f -> musttail f
2589 // f -> musttail g -> tail f ==> f -> tail f
2590 // f -> g -> musttail f ==> f -> f
2591 // f -> g -> tail f ==> f -> f
2592 //
2593 // Inlined notail calls should remain notail calls.
2594 CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
2595 if (ChildTCK != CallInst::TCK_NoTail)
2596 ChildTCK = std::min(CallSiteTailKind, ChildTCK);
2597 CI->setTailCallKind(ChildTCK);
2598 InlinedMustTailCalls |= CI->isMustTailCall();
2599
2600 // Call sites inlined through a 'nounwind' call site should be
2601 // 'nounwind' as well. However, avoid marking call sites explicitly
2602 // where possible. This helps expose more opportunities for CSE after
2603 // inlining, commonly when the callee is an intrinsic.
2604 if (MarkNoUnwind && !CI->doesNotThrow())
2605 CI->setDoesNotThrow();
2606 }
2607 }
2608 }
2609
2610 // Leave lifetime markers for the static allocas, scoping them to the
2611 // function we just inlined.
2612 // We need to insert lifetime intrinsics even at O0 to avoid invalid
2613 // access caused by multithreaded coroutines. The check
2614 // `Caller->isPresplitCoroutine()` would affect AlwaysInliner at O0 only.
2615 if ((InsertLifetime || Caller->isPresplitCoroutine()) &&
2616 !IFI.StaticAllocas.empty()) {
2617 IRBuilder<> builder(&*FirstNewBlock, FirstNewBlock->begin());
2618 for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
2619 AllocaInst *AI = IFI.StaticAllocas[ai];
2620 // Don't mark swifterror allocas. They can't have bitcast uses.
2621 if (AI->isSwiftError())
2622 continue;
2623
2624 // If the alloca is already scoped to something smaller than the whole
2625 // function then there's no need to add redundant, less accurate markers.
2626 if (hasLifetimeMarkers(AI))
2627 continue;
2628
2629 // Try to determine the size of the allocation.
2630 ConstantInt *AllocaSize = nullptr;
2631 if (ConstantInt *AIArraySize =
2632 dyn_cast<ConstantInt>(AI->getArraySize())) {
2633 auto &DL = Caller->getParent()->getDataLayout();
2634 Type *AllocaType = AI->getAllocatedType();
2635 TypeSize AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
2636 uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
2637
2638 // Don't add markers for zero-sized allocas.
2639 if (AllocaArraySize == 0)
2640 continue;
2641
2642 // Check that array size doesn't saturate uint64_t and doesn't
2643 // overflow when it's multiplied by type size.
2644 if (!AllocaTypeSize.isScalable() &&
2645 AllocaArraySize != std::numeric_limits<uint64_t>::max() &&
2646 std::numeric_limits<uint64_t>::max() / AllocaArraySize >=
2647 AllocaTypeSize.getFixedValue()) {
2648 AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
2649 AllocaArraySize * AllocaTypeSize);
2650 }
2651 }
2652
2653 builder.CreateLifetimeStart(AI, AllocaSize);
2654 for (ReturnInst *RI : Returns) {
2655 // Don't insert llvm.lifetime.end calls between a musttail or deoptimize
2656 // call and a return. The return kills all local allocas.
2657 if (InlinedMustTailCalls &&
2659 continue;
2660 if (InlinedDeoptimizeCalls &&
2662 continue;
2663 IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
2664 }
2665 }
2666 }
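// Illustrative sketch (not from the source; names are hypothetical): for a
// static alloca %ai of known size, the inserted markers take roughly the form
//   call void @llvm.lifetime.start.p0(i64 <size>, ptr %ai)  ; start of inlined code
//   call void @llvm.lifetime.end.p0(i64 <size>, ptr %ai)    ; before each return
// scoping the alloca's lifetime to the inlined body.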
2667
2668 // If the inlined code contained dynamic alloca instructions, wrap the inlined
2669 // code with llvm.stacksave/llvm.stackrestore intrinsics.
2670 if (InlinedFunctionInfo.ContainsDynamicAllocas) {
2671 // Insert the llvm.stacksave.
2672 CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
2673 .CreateStackSave("savedstack");
2674
2675 // Insert a call to llvm.stackrestore before any return instructions in the
2676 // inlined function.
2677 for (ReturnInst *RI : Returns) {
2678 // Don't insert llvm.stackrestore calls between a musttail or deoptimize
2679 // call and a return. The return will restore the stack pointer.
2680 if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
2681 continue;
2682 if (InlinedDeoptimizeCalls && RI->getParent()->getTerminatingDeoptimizeCall())
2683 continue;
2684 IRBuilder<>(RI).CreateStackRestore(SavedPtr);
2685 }
2686 }
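// Illustrative sketch (not from the source; names are hypothetical; intrinsic
// name mangling omitted): with dynamic allocas in the inlinee, the wrapper is
// roughly
//   %savedstack = call ptr @llvm.stacksave()        ; start of inlined code
//   call void @llvm.stackrestore(ptr %savedstack)   ; before each return
// so stack space allocated by the inlined code is released on exit.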
2687
2688 // If we are inlining for an invoke instruction, we must make sure to rewrite
2689 // any call instructions into invoke instructions. This is sensitive to which
2690 // funclet pads were top-level in the inlinee, so must be done before
2691 // rewriting the "parent pad" links.
2692 if (auto *II = dyn_cast<InvokeInst>(&CB)) {
2693 BasicBlock *UnwindDest = II->getUnwindDest();
2694 Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
2695 if (isa<LandingPadInst>(FirstNonPHI)) {
2696 HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2697 } else {
2698 HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
2699 }
2700 }
2701
2702 // Update the lexical scopes of the new funclets and callsites.
2703 // Anything that had 'none' as its parent is now nested inside the callsite's
2704 // EHPad.
2705 if (CallSiteEHPad) {
2706 for (Function::iterator BB = FirstNewBlock->getIterator(),
2707 E = Caller->end();
2708 BB != E; ++BB) {
2709 // Add bundle operands to inlined call sites.
2710 PropagateOperandBundles(BB, CallSiteEHPad);
2711
2712 // It is problematic if the inlinee has a cleanupret which unwinds to
2713 // caller and we inline it into a call site which doesn't unwind but into
2714 // an EH pad that does. Such an edge must be dynamically unreachable.
2715 // As such, we replace the cleanupret with unreachable.
2716 if (auto *CleanupRet = dyn_cast<CleanupReturnInst>(BB->getTerminator()))
2717 if (CleanupRet->unwindsToCaller() && EHPadForCallUnwindsLocally)
2718 changeToUnreachable(CleanupRet);
2719
2720 Instruction *I = BB->getFirstNonPHI();
2721 if (!I->isEHPad())
2722 continue;
2723
2724 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(I)) {
2725 if (isa<ConstantTokenNone>(CatchSwitch->getParentPad()))
2726 CatchSwitch->setParentPad(CallSiteEHPad);
2727 } else {
2728 auto *FPI = cast<FuncletPadInst>(I);
2729 if (isa<ConstantTokenNone>(FPI->getParentPad()))
2730 FPI->setParentPad(CallSiteEHPad);
2731 }
2732 }
2733 }
2734
2735 if (InlinedDeoptimizeCalls) {
2736 // We need to at least remove the deoptimizing returns from the Return set,
2737 // so that the control flow from those returns does not get merged into the
2738 // caller (but terminate it instead). If the caller's return type does not
2739 // match the callee's return type, we also need to change the return type of
2740 // the intrinsic.
2741 if (Caller->getReturnType() == CB.getType()) {
2742 llvm::erase_if(Returns, [](ReturnInst *RI) {
2743 return RI->getParent()->getTerminatingDeoptimizeCall() != nullptr;
2744 });
2745 } else {
2746 SmallVector<ReturnInst *, 8> NormalReturns;
2747 Function *NewDeoptIntrinsic = Intrinsic::getDeclaration(
2748 Caller->getParent(), Intrinsic::experimental_deoptimize,
2749 {Caller->getReturnType()});
2750
2751 for (ReturnInst *RI : Returns) {
2752 CallInst *DeoptCall = RI->getParent()->getTerminatingDeoptimizeCall();
2753 if (!DeoptCall) {
2754 NormalReturns.push_back(RI);
2755 continue;
2756 }
2757
2758 // The calling convention on the deoptimize call itself may be bogus,
2759 // since the code we're inlining may have undefined behavior (and may
2760 // never actually execute at runtime); but all
2761 // @llvm.experimental.deoptimize declarations have to have the same
2762 // calling convention in a well-formed module.
2763 auto CallingConv = DeoptCall->getCalledFunction()->getCallingConv();
2764 NewDeoptIntrinsic->setCallingConv(CallingConv);
2765 auto *CurBB = RI->getParent();
2766 RI->eraseFromParent();
2767
2768 SmallVector<Value *, 4> CallArgs(DeoptCall->args());
2769
2771 DeoptCall->getOperandBundlesAsDefs(OpBundles);
2772 auto DeoptAttributes = DeoptCall->getAttributes();
2773 DeoptCall->eraseFromParent();
2774 assert(!OpBundles.empty() &&
2775 "Expected at least the deopt operand bundle");
2776
2777 IRBuilder<> Builder(CurBB);
2778 CallInst *NewDeoptCall =
2779 Builder.CreateCall(NewDeoptIntrinsic, CallArgs, OpBundles);
2780 NewDeoptCall->setCallingConv(CallingConv);
2781 NewDeoptCall->setAttributes(DeoptAttributes);
2782 if (NewDeoptCall->getType()->isVoidTy())
2783 Builder.CreateRetVoid();
2784 else
2785 Builder.CreateRet(NewDeoptCall);
2786 // Since the ret type is changed, remove the incompatible attributes.
2787 NewDeoptCall->removeRetAttrs(
2788 AttributeFuncs::typeIncompatible(NewDeoptCall->getType()));
2789 }
2790
2791 // Leave behind the normal returns so we can merge control flow.
2792 std::swap(Returns, NormalReturns);
2793 }
2794 }
2795
2796 // Handle any inlined musttail call sites. In order for a new call site to be
2797 // musttail, the source of the clone and the inlined call site must have been
2798 // musttail. Therefore it's safe to return without merging control into the
2799 // phi below.
2800 if (InlinedMustTailCalls) {
2801 // Check if we need to bitcast the result of any musttail calls.
2802 Type *NewRetTy = Caller->getReturnType();
2803 bool NeedBitCast = !CB.use_empty() && CB.getType() != NewRetTy;
2804
2805 // Handle the returns preceded by musttail calls separately.
2806 SmallVector<ReturnInst *, 8> NormalReturns;
2807 for (ReturnInst *RI : Returns) {
2808 CallInst *ReturnedMustTail =
2810 if (!ReturnedMustTail) {
2811 NormalReturns.push_back(RI);
2812 continue;
2813 }
2814 if (!NeedBitCast)
2815 continue;
2816
2817 // Delete the old return and any preceding bitcast.
2818 BasicBlock *CurBB = RI->getParent();
2819 auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
2820 RI->eraseFromParent();
2821 if (OldCast)
2822 OldCast->eraseFromParent();
2823
2824 // Insert a new bitcast and return with the right type.
2825 IRBuilder<> Builder(CurBB);
2826 Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
2827 }
2828
2829 // Leave behind the normal returns so we can merge control flow.
2830 std::swap(Returns, NormalReturns);
2831 }
2832
2833 // Now that all of the transforms on the inlined code have taken place but
2834 // before we splice the inlined code into the CFG and lose track of which
2835 // blocks were actually inlined, collect the call sites. We only do this if
2836 // call graph updates weren't requested, as those provide value handle based
2837 // tracking of inlined call sites instead. Calls to intrinsics are not
2838 // collected because they are not inlineable.
2839 if (InlinedFunctionInfo.ContainsCalls) {
2840 // Otherwise just collect the raw call sites that were inlined.
2841 for (BasicBlock &NewBB :
2842 make_range(FirstNewBlock->getIterator(), Caller->end()))
2843 for (Instruction &I : NewBB)
2844 if (auto *CB = dyn_cast<CallBase>(&I))
2845 if (!(CB->getCalledFunction() &&
2847 IFI.InlinedCallSites.push_back(CB);
2848 }
2849
2850 // If we cloned in _exactly one_ basic block, and if that block ends in a
2851 // return instruction, we splice the body of the inlined callee directly into
2852 // the calling basic block.
2853 if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
2854 // Move all of the instructions right before the call.
2855 OrigBB->splice(CB.getIterator(), &*FirstNewBlock, FirstNewBlock->begin(),
2856 FirstNewBlock->end());
2857 // Remove the cloned basic block.
2858 Caller->back().eraseFromParent();
2859
2860 // If the call site was an invoke instruction, add a branch to the normal
2861 // destination.
2862 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2864 NewBr->setDebugLoc(Returns[0]->getDebugLoc());
2865 }
2866
2867 // If the return instruction returned a value, replace uses of the call with
2868 // uses of the returned value.
2869 if (!CB.use_empty()) {
2870 ReturnInst *R = Returns[0];
2871 if (&CB == R->getReturnValue())
2873 else
2874 CB.replaceAllUsesWith(R->getReturnValue());
2875 }
2876 // Since we are now done with the Call/Invoke, we can delete it.
2877 CB.eraseFromParent();
2878
2879 // Since we are now done with the return instruction, delete it also.
2880 Returns[0]->eraseFromParent();
2881
2882 if (MergeAttributes)
2883 AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);
2884
2885 // We are now done with the inlining.
2886 return InlineResult::success();
2887 }
2888
2889 // Otherwise, we have the normal case, of more than one block to inline or
2890 // multiple return sites.
2891
2892 // We want to clone the entire callee function into the hole between the
2893 // "starter" and "ender" blocks. How we accomplish this depends on whether
2894 // this is an invoke instruction or a call instruction.
2895 BasicBlock *AfterCallBB;
2896 BranchInst *CreatedBranchToNormalDest = nullptr;
2897 if (InvokeInst *II = dyn_cast<InvokeInst>(&CB)) {
2898
2899 // Add an unconditional branch to make this look like the CallInst case...
2900 CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), CB.getIterator());
2901
2902 // Split the basic block. This guarantees that no PHI nodes will have to be
2903 // updated due to new incoming edges, and makes the invoke case more
2904 // symmetric to the call case.
2905 AfterCallBB =
2906 OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
2907 CalledFunc->getName() + ".exit");
2908
2909 } else { // It's a call
2910 // If this is a call instruction, we need to split the basic block that
2911 // the call lives in.
2912 //
2913 AfterCallBB = OrigBB->splitBasicBlock(CB.getIterator(),
2914 CalledFunc->getName() + ".exit");
2915 }
2916
2917 if (IFI.CallerBFI) {
2918 // Copy original BB's block frequency to AfterCallBB
2919 IFI.CallerBFI->setBlockFreq(AfterCallBB,
2920 IFI.CallerBFI->getBlockFreq(OrigBB));
2921 }
2922
2923 // Change the branch that used to go to AfterCallBB to branch to the first
2924 // basic block of the inlined function.
2925 //
2926 Instruction *Br = OrigBB->getTerminator();
2927 assert(Br && Br->getOpcode() == Instruction::Br &&
2928 "splitBasicBlock broken!");
2929 Br->setOperand(0, &*FirstNewBlock);
2930
2931 // Now that the function is correct, make it a little bit nicer. In
2932 // particular, move the basic blocks inserted from the end of the function
2933 // into the space made by splitting the source basic block.
2934 Caller->splice(AfterCallBB->getIterator(), Caller, FirstNewBlock,
2935 Caller->end());
2936
2937 // Handle all of the return instructions that we just cloned in, and eliminate
2938 // any users of the original call/invoke instruction.
2939 Type *RTy = CalledFunc->getReturnType();
2940
2941 PHINode *PHI = nullptr;
2942 if (Returns.size() > 1) {
2943 // The PHI node should go at the front of the new basic block to merge all
2944 // possible incoming values.
2945 if (!CB.use_empty()) {
2946 PHI = PHINode::Create(RTy, Returns.size(), CB.getName());
2947 PHI->insertBefore(AfterCallBB->begin());
2948 // Anything that used the result of the function call should now use the
2949 // PHI node as their operand.
2950 CB.replaceAllUsesWith(PHI);
2951 }
2952
2953 // Loop over all of the return instructions adding entries to the PHI node
2954 // as appropriate.
2955 if (PHI) {
2956 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2957 ReturnInst *RI = Returns[i];
2958 assert(RI->getReturnValue()->getType() == PHI->getType() &&
2959 "Ret value not consistent in function!");
2960 PHI->addIncoming(RI->getReturnValue(), RI->getParent());
2961 }
2962 }
2963
2964 // Add a branch to the merge points and remove return instructions.
2965 DebugLoc Loc;
2966 for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
2967 ReturnInst *RI = Returns[i];
2968 BranchInst* BI = BranchInst::Create(AfterCallBB, RI->getIterator());
2969 Loc = RI->getDebugLoc();
2970 BI->setDebugLoc(Loc);
2971 RI->eraseFromParent();
2972 }
2973 // We need to set the debug location to *somewhere* inside the
2974 // inlined function. The line number may be nonsensical, but the
2975 // instruction will at least be associated with the right
2976 // function.
2977 if (CreatedBranchToNormalDest)
2978 CreatedBranchToNormalDest->setDebugLoc(Loc);
2979 } else if (!Returns.empty()) {
2980 // Otherwise, if there is exactly one return value, just replace anything
2981 // using the return value of the call with the computed value.
2982 if (!CB.use_empty()) {
2983 if (&CB == Returns[0]->getReturnValue())
2984 CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
2985 else
2986 CB.replaceAllUsesWith(Returns[0]->getReturnValue());
2987 }
2988
2989 // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
2990 BasicBlock *ReturnBB = Returns[0]->getParent();
2991 ReturnBB->replaceAllUsesWith(AfterCallBB);
2992
2993 // Splice the code from the return block into the block that it will return
2994 // to, which contains the code that was after the call.
2995 AfterCallBB->splice(AfterCallBB->begin(), ReturnBB);
2996
2997 if (CreatedBranchToNormalDest)
2998 CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());
2999
3000 // Delete the return instruction now and empty ReturnBB now.
3001 Returns[0]->eraseFromParent();
3002 ReturnBB->eraseFromParent();
3003 } else if (!CB.use_empty()) {
3004 // No returns, but something is using the return value of the call. Just
3005 // nuke the result.
3006 CB.replaceAllUsesWith(PoisonValue::get(CB.getType()));
3007 }
3008
3009 // Since we are now done with the Call/Invoke, we can delete it.
3010 CB.eraseFromParent();
3011
3012 // If we inlined any musttail calls and the original return is now
3013 // unreachable, delete it. It can only contain a bitcast and ret.
3014 if (InlinedMustTailCalls && pred_empty(AfterCallBB))
3015 AfterCallBB->eraseFromParent();
3016
3017 // We should always be able to fold the entry block of the function into the
3018 // single predecessor of the block...
3019 assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
3020 BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);
3021
3022 // Splice the code entry block into calling block, right before the
3023 // unconditional branch.
3024 CalleeEntry->replaceAllUsesWith(OrigBB); // Update PHI nodes
3025 OrigBB->splice(Br->getIterator(), CalleeEntry);
3026
3027 // Remove the unconditional branch.
3028 Br->eraseFromParent();
3029
3030 // Now we can remove the CalleeEntry block, which is now empty.
3031 CalleeEntry->eraseFromParent();
3032
3033 // If we inserted a phi node, check to see if it has a single value (e.g. all
3034 // the entries are the same or undef). If so, remove the PHI so it doesn't
3035 // block other optimizations.
3036 if (PHI) {
3037 AssumptionCache *AC =
3038 IFI.GetAssumptionCache ? &IFI.GetAssumptionCache(*Caller) : nullptr;
3039 auto &DL = Caller->getParent()->getDataLayout();
3040 if (Value *V = simplifyInstruction(PHI, {DL, nullptr, nullptr, AC})) {
3041 PHI->replaceAllUsesWith(V);
3042 PHI->eraseFromParent();
3043 }
3044 }
3045
3046 if (MergeAttributes)
3047 AttributeFuncs::mergeAttributesForInlining(*Caller, *CalledFunc);
3048
3049 return InlineResult::success();
3050}
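
The listing above ends with the multi-return merge path of InlineFunction. As a rough orientation for how this utility is typically driven, here is a minimal sketch that is not part of InlineFunction.cpp: it inlines every direct call to a given callee inside one caller. The helper name inlineCallsTo and the choice of MergeAttributes=true are illustrative assumptions, not something this file prescribes.

#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Transforms/Utils/Cloning.h"

using namespace llvm;

// Hypothetical driver: inline every direct call from Caller to Callee.
static bool inlineCallsTo(Function &Caller, Function &Callee) {
  // Gather the call sites first; InlineFunction erases the CallBase it
  // inlines, so we must not walk the instruction list while mutating it.
  SmallVector<CallBase *, 8> Sites;
  for (Instruction &I : instructions(Caller))
    if (auto *CB = dyn_cast<CallBase>(&I))
      if (CB->getCalledFunction() == &Callee)
        Sites.push_back(CB);

  bool Changed = false;
  for (CallBase *CB : Sites) {
    // Default-constructed info: no assumption-cache, profile, or BFI updates.
    InlineFunctionInfo IFI;
    InlineResult Res = InlineFunction(*CB, IFI, /*MergeAttributes=*/true);
    Changed |= Res.isSuccess();
  }
  return Changed;
}

After a successful call, IFI.InlinedCallSites holds the call sites cloned into the caller (populated by the loop over FirstNewBlock..Caller->end() in the listing above), which an inliner can queue for further inlining.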