llvm.org GIT mirror - llvm / commit 6091514

refactor the interface to InlineFunction so that most of the in/out
arguments are handled with a new InlineFunctionInfo class.  This makes it
easier to extend InlineFunction to return more info in the future.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@102137 91177308-0d34-0410-b5e6-96231b3b80d8

Chris Lattner
6 changed files with 64 additions and 47 deletions.
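For context, a minimal sketch of what the interface change means for a
caller.  The names CS, CG, and TD stand for a call site, call graph, and
target data assumed to be in scope; the snippet is illustrative, not code
taken from the patch:

    // Before r102137: inputs and the out-list were passed one by one.
    //   SmallVector<AllocaInst*, 4> StaticAllocas;
    //   InlineFunction(CS, &CG, TD, &StaticAllocas);

    // After r102137: everything travels in one InlineFunctionInfo object.
    InlineFunctionInfo IFI(&CG, TD);
    if (InlineFunction(CS, IFI)) {
      // On success, IFI.StaticAllocas lists the static allocas that were
      // copied into the caller.
    }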
include/llvm/Transforms/Utils/Cloning.h

 #define LLVM_TRANSFORMS_UTILS_CLONING_H

 #include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Twine.h"

 namespace llvm {
...
 class Loop;
 class LoopInfo;
 class AllocaInst;
-template <typename T> class SmallVectorImpl;

 /// CloneModule - Return an exact copy of the specified module
 ///
...
                                const TargetData *TD = 0,
                                Instruction *TheCall = 0);

+
+/// InlineFunctionInfo - This class captures the data input to the
+/// InlineFunction call, and records the auxiliary results produced by it.
+class InlineFunctionInfo {
+public:
+  explicit InlineFunctionInfo(CallGraph *cg = 0, const TargetData *td = 0)
+    : CG(cg), TD(td) {}
+
+  /// CG - If non-null, InlineFunction will update the callgraph to reflect the
+  /// changes it makes.
+  CallGraph *CG;
+  const TargetData *TD;
+
+  /// StaticAllocas - InlineFunction fills this in with all static allocas that
+  /// get copied into the caller.
+  SmallVector<AllocaInst*, 4> StaticAllocas;
+
+  void reset() {
+    StaticAllocas.clear();
+  }
+};
+
 /// InlineFunction - This function inlines the called function into the basic
 /// block of the caller.  This returns false if it is not possible to inline
 /// this call.  The program is still in a well defined state if this occurs
...
 /// exists in the instruction stream.  Similarly this will inline a recursive
 /// function by one level.
 ///
-/// If a non-null callgraph pointer is provided, these functions update the
-/// CallGraph to represent the program after inlining.
-///
-/// If StaticAllocas is non-null, InlineFunction populates it with all of the
-/// static allocas that it inlines into the caller.
-///
-bool InlineFunction(CallInst *C, CallGraph *CG = 0, const TargetData *TD = 0,
-                    SmallVectorImpl<AllocaInst*> *StaticAllocas = 0);
-bool InlineFunction(InvokeInst *II, CallGraph *CG = 0, const TargetData *TD = 0,
-                    SmallVectorImpl<AllocaInst*> *StaticAllocas = 0);
-bool InlineFunction(CallSite CS, CallGraph *CG = 0, const TargetData *TD = 0,
-                    SmallVectorImpl<AllocaInst*> *StaticAllocas = 0);
+bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI);
+bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI);
+bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI);

 } // End llvm namespace

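A hypothetical caller of the new declarations, shown as a self-contained
sketch.  The helper inlineAll and its parameters are illustrative
assumptions, not part of the patch; since InlineFunction resets the info
object on entry (see InlineFunction.cpp below), one InlineFunctionInfo can
be reused across call sites and always describes the most recent call:

    #include "llvm/Transforms/Utils/Cloning.h"
    #include "llvm/Analysis/CallGraph.h"
    #include "llvm/Support/CallSite.h"
    using namespace llvm;

    // Inline every call site in Sites, reusing one InlineFunctionInfo.
    static unsigned inlineAll(SmallVectorImpl<CallSite> &Sites,
                              CallGraph *CG, const TargetData *TD) {
      InlineFunctionInfo IFI(CG, TD);
      unsigned NumInlined = 0;
      for (unsigned i = 0, e = Sites.size(); i != e; ++i)
        if (InlineFunction(Sites[i], IFI))
          ++NumInlined;
      return NumInlined;
    }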
lib/Transforms/IPO/Inliner.cpp

 /// available from other functions inlined into the caller.  If we are able to
 /// inline this call site we attempt to reuse already available allocas or add
 /// any new allocas to the set if not possible.
-static bool InlineCallIfPossible(CallSite CS, CallGraph &CG,
-                                 const TargetData *TD,
+static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
                                  InlinedArrayAllocasTy &InlinedArrayAllocas) {
   Function *Callee = CS.getCalledFunction();
   Function *Caller = CS.getCaller();

   // Try to inline the function.  Get the list of static allocas that were
   // inlined.
-  SmallVector<AllocaInst*, 4> StaticAllocas;
-  if (!InlineFunction(CS, &CG, TD, &StaticAllocas))
+  if (!InlineFunction(CS, IFI))
     return false;

   // If the inlined function had a higher stack protection level than the
...

   // Loop over all the allocas we have so far and see if they can be merged with
   // a previously inlined alloca.  If not, remember that we had it.
-  for (unsigned AllocaNo = 0, e = StaticAllocas.size();
+  for (unsigned AllocaNo = 0, e = IFI.StaticAllocas.size();
        AllocaNo != e; ++AllocaNo) {
-    AllocaInst *AI = StaticAllocas[AllocaNo];
+    AllocaInst *AI = IFI.StaticAllocas[AllocaNo];

     // Don't bother trying to merge array allocations (they will usually be
     // canonicalized to be an allocation *of* an array), or allocations whose
...

   InlinedArrayAllocasTy InlinedArrayAllocas;
+  InlineFunctionInfo InlineInfo(&CG, TD);

   // Now that we have all of the call sites, loop over them and inline them if
   // it looks profitable to do so.
...
         continue;

       // Attempt to inline the function...
-      if (!InlineCallIfPossible(CS, CG, TD, InlinedArrayAllocas))
+      if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas))
         continue;
       ++NumInlined;

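Worth noting as a design choice in this hunk: the pass builds a single
InlineFunctionInfo outside the inlining loop and passes the same object to
InlineCallIfPossible for every candidate, relying on the reset() that
InlineFunction performs on entry to clear the per-call state.  Roughly,
with hypothetical loop scaffolding:

    InlineFunctionInfo InlineInfo(&CG, TD);   // constructed once
    for (unsigned i = 0; i != CallSites.size(); ++i) {
      if (!InlineCallIfPossible(CallSites[i], InlineInfo,
                                InlinedArrayAllocas))
        continue;
      // InlineInfo.StaticAllocas now describes only this inlined call.
    }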
lib/Transforms/IPO/PartialInlining.cpp

   // Extract the body of the if.
   Function* extractedFunction = ExtractCodeRegion(DT, toExtract);

+  InlineFunctionInfo IFI;
+
   // Inline the top-level if test into all callers.
   std::vector<User*> Users(duplicateFunction->use_begin(),
                            duplicateFunction->use_end());
   for (std::vector<User*>::iterator UI = Users.begin(), UE = Users.end();
        UI != UE; ++UI)
-    if (CallInst* CI = dyn_cast<CallInst>(*UI))
-      InlineFunction(CI);
-    else if (InvokeInst* II = dyn_cast<InvokeInst>(*UI))
-      InlineFunction(II);
+    if (CallInst *CI = dyn_cast<CallInst>(*UI))
+      InlineFunction(CI, IFI);
+    else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI))
+      InlineFunction(II, IFI);

   // Ditch the duplicate, since we're done with it, and rewrite all remaining
   // users (function pointers, etc.) back to the original function.
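Here a default-constructed InlineFunctionInfo (null CallGraph, null
TargetData) stands in for the old zero-argument calls, so behavior is
unchanged.  A sketch of the equivalence, assuming a CallInst *CI in scope:

    InlineFunctionInfo IFI;    // CG == 0 and TD == 0, as before
    InlineFunction(CI, IFI);   // same effect as the old InlineFunction(CI)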
lib/Transforms/Scalar/SimplifyHalfPowrLibCalls.cpp

   // Inline the call, taking care of what code ends up where.
   NewBlock = SplitBlock(NextInst->getParent(), NextInst, this);

-  bool B = InlineFunction(Call, 0, TD);
+  InlineFunctionInfo IFI(0, TD);
+  bool B = InlineFunction(Call, IFI);
   assert(B && "half_powr didn't inline?"); B=B;

   BasicBlock *NewBody = NewBlock->getSinglePredecessor();
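An incidental idiom in the context line above: the B=B self-assignment
exists only to silence the unused-variable warning in NDEBUG builds, where
the assert compiles away.  A common alternative spelling, offered as a
suggestion rather than as part of the patch:

    InlineFunctionInfo IFI(0, TD);
    bool B = InlineFunction(Call, IFI);
    assert(B && "half_powr didn't inline?");
    (void)B;  // keeps -Wunused-variable quiet when asserts are disabled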
lib/Transforms/Utils/BasicInliner.cpp

       }

       // Inline
-      if (InlineFunction(CS, NULL, TD)) {
+      InlineFunctionInfo IFI(0, TD);
+      if (InlineFunction(CS, IFI)) {
         if (Callee->use_empty() && (Callee->hasLocalLinkage() ||
                                     Callee->hasAvailableExternallyLinkage()))
           DeadFunctions.insert(Callee);
lib/Transforms/Utils/InlineFunction.cpp

 #include "llvm/Support/CallSite.h"
 using namespace llvm;

-bool llvm::InlineFunction(CallInst *CI, CallGraph *CG, const TargetData *TD,
-                          SmallVectorImpl<AllocaInst*> *StaticAllocas) {
-  return InlineFunction(CallSite(CI), CG, TD, StaticAllocas);
+bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI) {
+  return InlineFunction(CallSite(CI), IFI);
 }
-bool llvm::InlineFunction(InvokeInst *II, CallGraph *CG, const TargetData *TD,
-                          SmallVectorImpl<AllocaInst*> *StaticAllocas) {
-  return InlineFunction(CallSite(II), CG, TD, StaticAllocas);
+bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI) {
+  return InlineFunction(CallSite(II), IFI);
 }

...
 // exists in the instruction stream.  Similarly this will inline a recursive
 // function by one level.
 //
-bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD,
-                          SmallVectorImpl<AllocaInst*> *StaticAllocas) {
+bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) {
   Instruction *TheCall = CS.getInstruction();
   LLVMContext &Context = TheCall->getContext();
   assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
          "Instruction not in function!");

+  // If IFI has any state in it, zap it before we fill it in.
+  IFI.reset();
+
   const Function *CalledFunc = CS.getCalledFunction();
   if (CalledFunc == 0 ||           // Can't inline external function or indirect
       CalledFunc->isDeclaration() || // call, or call to a vararg function!
...

       // Create the alloca.  If we have TargetData, use nice alignment.
       unsigned Align = 1;
-      if (TD) Align = TD->getPrefTypeAlignment(AggTy);
+      if (IFI.TD) Align = IFI.TD->getPrefTypeAlignment(AggTy);
       Value *NewAlloca = new AllocaInst(AggTy, 0, Align,
                                         I->getName(),
                                         &*Caller->begin()->begin());
...
       Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);

       Value *Size;
-      if (TD == 0)
+      if (IFI.TD == 0)
         Size = ConstantExpr::getSizeOf(AggTy);
       else
         Size = ConstantInt::get(Type::getInt64Ty(Context),
-                                TD->getTypeStoreSize(AggTy));
+                                IFI.TD->getTypeStoreSize(AggTy));

       // Always generate a memcpy of alignment 1 here because we don't know
       // the alignment of the src pointer.  Other optimizations can infer
...
       CallInst::Create(MemCpyFn, CallArgs, CallArgs+5, "", TheCall);

       // If we have a call graph, update it.
-      if (CG) {
+      if (CallGraph *CG = IFI.CG) {
         CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
         CallGraphNode *CallerNode = (*CG)[Caller];
         CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
...
     // (which can happen, e.g., because an argument was constant), but we'll be
     // happy with whatever the cloner can do.
     CloneAndPruneFunctionInto(Caller, CalledFunc, ValueMap, Returns, ".i",
-                              &InlinedFunctionInfo, TD, TheCall);
+                              &InlinedFunctionInfo, IFI.TD, TheCall);

     // Remember the first block that is newly cloned over.
     FirstNewBlock = LastBlock; ++FirstNewBlock;

     // Update the callgraph if requested.
-    if (CG)
-      UpdateCallGraphAfterInlining(CS, FirstNewBlock, ValueMap, *CG);
+    if (IFI.CG)
+      UpdateCallGraphAfterInlining(CS, FirstNewBlock, ValueMap, *IFI.CG);
   }

   // If there are any alloca instructions in the block that used to be the entry
...

       // Keep track of the static allocas that we inline into the caller if the
       // StaticAllocas pointer is non-null.
-      if (StaticAllocas) StaticAllocas->push_back(AI);
+      IFI.StaticAllocas.push_back(AI);

       // Scan for the block of allocas that we can move over, and move them
       // all at once.
       while (isa<AllocaInst>(I) &&
              isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
-        if (StaticAllocas) StaticAllocas->push_back(cast<AllocaInst>(I));
+        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
         ++I;
       }

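For readers unfamiliar with the term, a "static" alloca in this scan is an
alloca in the inlined entry block whose array size is a constant; only
those are recorded in IFI.StaticAllocas.  A hypothetical helper (not in
the patch) spelling out the predicate the while-loop tests:

    #include "llvm/Instructions.h"
    #include "llvm/Constants.h"
    using namespace llvm;

    // What this scan treats as a static alloca, written out explicitly.
    static bool isStaticAllocaForInlining(const Instruction *I) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(I))
        return isa<Constant>(AI->getArraySize());
      return false;
    }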
...
     // If we are preserving the callgraph, add edges to the stacksave/restore
     // functions for the calls we insert.
     CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0;
-    if (CG) {
+    if (CallGraph *CG = IFI.CG) {
       StackSaveCGN    = CG->getOrInsertFunction(StackSave);
       StackRestoreCGN = CG->getOrInsertFunction(StackRestore);
       CallerNode = (*CG)[Caller];
...
     // Insert the llvm.stacksave.
     CallInst *SavedPtr = CallInst::Create(StackSave, "savedstack",
                                           FirstNewBlock->begin());
-    if (CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);
+    if (IFI.CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);

     // Insert a call to llvm.stackrestore before any return instructions in the
     // inlined function.
     for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
       CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", Returns[i]);
-      if (CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
+      if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
     }

     // Count the number of StackRestore calls we insert.
...
          BB != E; ++BB)
       if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
         CallInst *CI = CallInst::Create(StackRestore, SavedPtr, "", UI);
-        if (CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
+        if (IFI.CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
         ++NumStackRestores;
       }
   }
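The save/restore pairing in this hunk matters when the callee contains
dynamic allocas: the stack pointer is saved at the top of the inlined body
and restored before every return (and unwind), so repeated inlined calls
do not accumulate stack space.  A source-level analogy, offered only as an
illustration and using the common alloca() extension:

    #include <alloca.h>   // common extension; not standard C++
    void use(void *);
    void g(unsigned n) { use(alloca(n)); }  // callee with a dynamic alloca
    void f() { for (int i = 0; i != 1000; ++i) g(64); }
    // After g is inlined into f, each alloca would otherwise live until f
    // returns; the llvm.stacksave/llvm.stackrestore pair inserted above
    // releases it again at the point where g would have returned.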