llvm.org GIT mirror llvm / b0f1e17
Add a new codegen pass that normalizes dwarf exception handling code in preparation for code generation. The main thing it does is handle the case when eh.exception calls (and, in a future patch, eh.selector calls) are far away from landing pads. Right now in practice you only find eh.exception calls close to landing pads: either in a landing pad (the common case) or in a landing pad successor, due to loop passes shifting them about. However future exception handling improvements will result in calls far from landing pads: (1) Inlining of rewinds. Consider the following case: In function @f: ... invoke @g to label %normal unwind label %unwinds ... unwinds: %ex = call i8* @llvm.eh.exception() ... In function @g: ... invoke @something to label %continue unwind label %handler ... handler: %ex = call i8* @llvm.eh.exception() ... perform cleanups ... "rethrow exception" Now inline @g into @f. Currently this is turned into: In function @f: ... invoke @something to label %continue unwind label %handler ... handler: %ex = call i8* @llvm.eh.exception() ... perform cleanups ... invoke "rethrow exception" to label %normal unwind label %unwinds unwinds: %ex = call i8* @llvm.eh.exception() ... However we would like to simplify invoke of "rethrow exception" into a branch to the %unwinds label. Then %unwinds is no longer a landing pad, and the eh.exception call there is then far away from any landing pads. (2) Using the unwind instruction for cleanups. It would be nice to have codegen handle the following case: invoke @something to label %continue unwind label %run_cleanups ... handler: ... perform cleanups ... unwind This requires turning "unwind" into a library call, which necessarily takes a pointer to the exception as an argument (this patch also does this unwind lowering). But that means you are using eh.exception again far from a landing pad. (3) Bugpoint simplifications. 
When bugpoint is simplifying exception handling code it often generates eh.exception calls far from a landing pad, which then causes codegen to assert. Bugpoint then latches on to this assertion and loses sight of the original problem. Note that it is currently rare for this pass to actually do anything. And in fact it normally shouldn't do anything at all given the code coming out of llvm-gcc! But it does fire a few times in the testsuite. As far as I can see this is almost always due to the LoopStrengthReduce codegen pass introducing pointless loop preheader blocks which are landing pads and only contain a branch to another block. This other block contains an eh.exception call. So probably by tweaking LoopStrengthReduce a bit this can be avoided. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@72276 91177308-0d34-0410-b5e6-96231b3b80d8 Duncan Sands 10 years ago
10 changed file(s) with 419 addition(s) and 13 deletion(s). Raw diff Collapse all Expand all
202202 /// registers. Useful before LiveVariables has run.
203203 FunctionPass *createMachineVerifierPass(bool allowDoubleDefs);
204204
205 /// createDwarfEHPass - This pass mulches exception handling code into a form
206 /// adapted to code generation. Required if using dwarf exception handling.
207 FunctionPass *createDwarfEHPass(const TargetLowering *tli, bool fast);
208
205209 } // End llvm namespace
206210
207211 #endif
219219 O_F32,
220220 O_F64,
221221
222 // EXCEPTION HANDLING
223 UNWIND_RESUME,
224
222225 UNKNOWN_LIBCALL
223226 };
224227
11 BranchFolding.cpp
22 CodePlacementOpt.cpp
33 DeadMachineInstructionElim.cpp
4 DwarfEHPrepare.cpp
45 ELFWriter.cpp
56 GCMetadata.cpp
67 GCMetadataPrinter.cpp
0 //===-- DwarfEHPrepare - Prepare exception handling for code generation ---===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass mulches exception handling code into a form adapted to code
10 // generation. Required if using dwarf exception handling.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #define DEBUG_TYPE "dwarfehprepare"
15 #include "llvm/ADT/Statistic.h"
16 #include "llvm/Analysis/Dominators.h"
17 #include "llvm/CodeGen/Passes.h"
18 #include "llvm/Function.h"
19 #include "llvm/Instructions.h"
20 #include "llvm/IntrinsicInst.h"
21 #include "llvm/Module.h"
22 #include "llvm/Pass.h"
23 #include "llvm/Support/Compiler.h"
24 #include "llvm/Target/TargetLowering.h"
25 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
26 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
27 using namespace llvm;
28
// Counters printed by -stats; one per transformation this pass performs.
STATISTIC(NumLandingPadsSplit,     "Number of landing pads split");
STATISTIC(NumUnwindsLowered,       "Number of unwind instructions lowered");
STATISTIC(NumExceptionValuesMoved, "Number of eh.exception calls moved");
STATISTIC(NumStackTempsIntroduced, "Number of stack temporaries introduced");
33
34 namespace {
35 class VISIBILITY_HIDDEN DwarfEHPrepare : public FunctionPass {
36 const TargetLowering *TLI;
37 bool CompileFast;
38
39 // The eh.exception intrinsic.
40 Function *ExceptionValueIntrinsic;
41
42 // _Unwind_Resume or the target equivalent.
43 Constant *RewindFunction;
44
45 // Dominator info is used when turning stack temporaries into registers.
46 DominatorTree *DT;
47 DominanceFrontier *DF;
48
49 // The function we are running on.
50 Function *F;
51
52 // The landing pads for this function.
53 typedef SmallPtrSet BBSet;
54 BBSet LandingPads;
55
56 // Stack temporary used to hold eh.exception values.
57 AllocaInst *ExceptionValueVar;
58
59 bool NormalizeLandingPads();
60 bool LowerUnwinds();
61 bool MoveExceptionValueCalls();
62 bool FinishStackTemporaries();
63 bool PromoteStackTemporaries();
64
65 Instruction *CreateExceptionValueCall(BasicBlock *BB);
66 Instruction *CreateValueLoad(BasicBlock *BB);
67
68 /// CreateReadOfExceptionValue - Return the result of the eh.exception
69 /// intrinsic by calling the intrinsic if in a landing pad, or loading
70 /// it from the exception value variable otherwise.
71 Instruction *CreateReadOfExceptionValue(BasicBlock *BB) {
72 return LandingPads.count(BB) ?
73 CreateExceptionValueCall(BB) : CreateValueLoad(BB);
74 }
75
76 public:
77 static char ID; // Pass identification, replacement for typeid.
78 DwarfEHPrepare(const TargetLowering *tli, bool fast) :
79 FunctionPass(&ID), TLI(tli), CompileFast(fast),
80 ExceptionValueIntrinsic(0), RewindFunction(0) {}
81
82 virtual bool runOnFunction(Function &Fn);
83
84 // getAnalysisUsage - We need dominance frontiers for memory promotion.
85 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
86 if (!CompileFast)
87 AU.addRequired();
88 AU.addPreserved();
89 if (!CompileFast)
90 AU.addRequired();
91 AU.addPreserved();
92 }
93
94 const char *getPassName() const {
95 return "Exception handling preparation";
96 }
97
98 };
99 } // end anonymous namespace
100
char DwarfEHPrepare::ID = 0;

/// createDwarfEHPass - Factory for the pass.  TLI supplies target-specific
/// details (e.g. the name of the rewind libcall); 'fast' requests minimal
/// compile time, skipping the mem2reg cleanup of introduced stack slots.
FunctionPass *llvm::createDwarfEHPass(const TargetLowering *tli, bool fast) {
  return new DwarfEHPrepare(tli, fast);
}
106
107 /// NormalizeLandingPads - Normalize and discover landing pads, noting them
108 /// in the LandingPads set. A landing pad is normal if the only CFG edges
109 /// that end at it are unwind edges from invoke instructions.
110 /// Abnormal landing pads are fixed up by redirecting all unwind edges to
111 /// a new basic block which falls through to the original.
112 bool DwarfEHPrepare::NormalizeLandingPads() {
113 bool Changed = false;
114
115 for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
116 TerminatorInst *TI = I->getTerminator();
117 if (!isa(TI))
118 continue;
119 BasicBlock *LPad = TI->getSuccessor(1);
120 // Skip landing pads that have already been normalized.
121 if (LandingPads.count(LPad))
122 continue;
123
124 // Check that only invoke unwind edges end at the landing pad.
125 bool OnlyUnwoundTo = true;
126 for (pred_iterator PI = pred_begin(LPad), PE = pred_end(LPad);
127 PI != PE; ++PI) {
128 TerminatorInst *PT = (*PI)->getTerminator();
129 if (!isa(PT) || LPad == PT->getSuccessor(0)) {
130 OnlyUnwoundTo = false;
131 break;
132 }
133 }
134 if (OnlyUnwoundTo) {
135 // Only unwind edges lead to the landing pad. Remember the landing pad.
136 LandingPads.insert(LPad);
137 continue;
138 }
139
140 // At least one normal edge ends at the landing pad. Redirect the unwind
141 // edges to a new basic block which falls through into this one.
142
143 // Create the new basic block.
144 BasicBlock *NewBB = BasicBlock::Create(LPad->getName() + "_unwind_edge");
145
146 // Insert it into the function right before the original landing pad.
147 LPad->getParent()->getBasicBlockList().insert(LPad, NewBB);
148
149 // Redirect unwind edges from the original landing pad to NewBB.
150 for (pred_iterator PI = pred_begin(LPad), PE = pred_end(LPad); PI != PE; ) {
151 TerminatorInst *PT = (*PI++)->getTerminator();
152 if (isa(PT) && PT->getSuccessor(1) == LPad)
153 // Unwind to the new block.
154 PT->setSuccessor(1, NewBB);
155 }
156
157 // If there are any PHI nodes in LPad, we need to update them so that they
158 // merge incoming values from NewBB instead.
159 for (BasicBlock::iterator II = LPad->begin(); isa(II); ++II) {
160 PHINode *PN = cast(II);
161 pred_iterator PB = pred_begin(NewBB), PE = pred_end(NewBB);
162
163 // Check to see if all of the values coming in via unwind edges are the
164 // same. If so, we don't need to create a new PHI node.
165 Value *InVal = PN->getIncomingValueForBlock(*PB);
166 for (pred_iterator PI = PB; PI != PE; ++PI) {
167 if (PI != PB && InVal != PN->getIncomingValueForBlock(*PI)) {
168 InVal = 0;
169 break;
170 }
171 }
172
173 if (InVal == 0) {
174 // Different unwind edges have different values. Create a new PHI node
175 // in NewBB.
176 PHINode *NewPN = PHINode::Create(PN->getType(), PN->getName()+".unwind",
177 NewBB);
178 // Add an entry for each unwind edge, using the value from the old PHI.
179 for (pred_iterator PI = PB; PI != PE; ++PI)
180 NewPN->addIncoming(PN->getIncomingValueForBlock(*PI), *PI);
181
182 // Now use this new PHI as the common incoming value for NewBB in PN.
183 InVal = NewPN;
184 }
185
186 // Revector exactly one entry in the PHI node to come from NewBB
187 // and delete all other entries that come from unwind edges. If
188 // there are both normal and unwind edges from the same predecessor,
189 // this leaves an entry for the normal edge.
190 for (pred_iterator PI = PB; PI != PE; ++PI)
191 PN->removeIncomingValue(*PI);
192 PN->addIncoming(InVal, NewBB);
193 }
194
195 // Add a fallthrough from NewBB to the original landing pad.
196 BranchInst::Create(LPad, NewBB);
197
198 // Now update DominatorTree and DominanceFrontier analysis information.
199 if (DT)
200 DT->splitBlock(NewBB);
201 if (DF)
202 DF->splitBlock(NewBB);
203
204 // Remember the newly constructed landing pad. The original landing pad
205 // LPad is no longer a landing pad now that all unwind edges have been
206 // revectored to NewBB.
207 LandingPads.insert(NewBB);
208 ++NumLandingPadsSplit;
209 Changed = true;
210 }
211
212 return Changed;
213 }
214
215 /// LowerUnwinds - Turn unwind instructions into calls to _Unwind_Resume,
216 /// rethrowing any previously caught exception. This will crash horribly
217 /// at runtime if there is no such exception: using unwind to throw a new
218 /// exception is currently not supported.
219 bool DwarfEHPrepare::LowerUnwinds() {
220 bool Changed = false;
221
222 for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
223 TerminatorInst *TI = I->getTerminator();
224 if (!isa(TI))
225 continue;
226
227 // Replace the unwind instruction with a call to _Unwind_Resume (or the
228 // appropriate target equivalent) followed by an UnreachableInst.
229
230 // Find the rewind function if we didn't already.
231 if (!RewindFunction) {
232 std::vector Params(1, PointerType::getUnqual(Type::Int8Ty));
233 FunctionType *FTy = FunctionType::get(Type::VoidTy, Params, false);
234 const char *RewindName = TLI->getLibcallName(RTLIB::UNWIND_RESUME);
235 RewindFunction = F->getParent()->getOrInsertFunction(RewindName, FTy);
236 }
237
238 // Create the call...
239 CallInst::Create(RewindFunction, CreateReadOfExceptionValue(I), "", TI);
240 // ...followed by an UnreachableInst.
241 new UnreachableInst(TI);
242
243 // Nuke the unwind instruction.
244 TI->eraseFromParent();
245 ++NumUnwindsLowered;
246 Changed = true;
247 }
248
249 return Changed;
250 }
251
252 /// MoveExceptionValueCalls - Ensure that eh.exception is only ever called from
253 /// landing pads by replacing calls outside of landing pads with loads from a
254 /// stack temporary. Move eh.exception calls inside landing pads to the start
255 /// of the landing pad (optional, but may make things simpler for later passes).
256 bool DwarfEHPrepare::MoveExceptionValueCalls() {
257 // If the eh.exception intrinsic is not declared in the module then there is
258 // nothing to do. Speed up compilation by checking for this common case.
259 if (!ExceptionValueIntrinsic &&
260 !F->getParent()->getFunction(Intrinsic::getName(Intrinsic::eh_exception)))
261 return false;
262
263 bool Changed = false;
264
265 for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
266 for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;)
267 if (IntrinsicInst *CI = dyn_cast(II++))
268 if (CI->getIntrinsicID() == Intrinsic::eh_exception) {
269 if (!CI->use_empty()) {
270 Value *ExceptionValue = CreateReadOfExceptionValue(BB);
271 if (CI == ExceptionValue) {
272 // The call was at the start of a landing pad - leave it alone.
273 assert(LandingPads.count(BB) &&
274 "Created eh.exception call outside landing pad!");
275 continue;
276 }
277 CI->replaceAllUsesWith(ExceptionValue);
278 }
279 CI->eraseFromParent();
280 ++NumExceptionValuesMoved;
281 Changed = true;
282 }
283 }
284
285 return Changed;
286 }
287
288 /// FinishStackTemporaries - If we introduced a stack variable to hold the
289 /// exception value then initialize it in each landing pad.
290 bool DwarfEHPrepare::FinishStackTemporaries() {
291 if (!ExceptionValueVar)
292 // Nothing to do.
293 return false;
294
295 bool Changed = false;
296
297 // Make sure that there is a store of the exception value at the start of
298 // each landing pad.
299 for (BBSet::iterator LI = LandingPads.begin(), LE = LandingPads.end();
300 LI != LE; ++LI) {
301 Instruction *ExceptionValue = CreateReadOfExceptionValue(*LI);
302 Instruction *Store = new StoreInst(ExceptionValue, ExceptionValueVar);
303 Store->insertAfter(ExceptionValue);
304 Changed = true;
305 }
306
307 return Changed;
308 }
309
310 /// PromoteStackTemporaries - Turn any stack temporaries we introduced into
311 /// registers if possible.
312 bool DwarfEHPrepare::PromoteStackTemporaries() {
313 if (ExceptionValueVar && DT && DF && isAllocaPromotable(ExceptionValueVar)) {
314 // Turn the exception temporary into registers and phi nodes if possible.
315 std::vector Allocas(1, ExceptionValueVar);
316 PromoteMemToReg(Allocas, *DT, *DF);
317 return true;
318 }
319 return false;
320 }
321
322 /// CreateExceptionValueCall - Insert a call to the eh.exception intrinsic at
323 /// the start of the basic block (unless there already is one, in which case
324 /// the existing call is returned).
325 Instruction *DwarfEHPrepare::CreateExceptionValueCall(BasicBlock *BB) {
326 Instruction *Start = BB->getFirstNonPHI();
327 // Is this a call to eh.exception?
328 if (IntrinsicInst *CI = dyn_cast(Start))
329 if (CI->getIntrinsicID() == Intrinsic::eh_exception)
330 // Reuse the existing call.
331 return Start;
332
333 // Find the eh.exception intrinsic if we didn't already.
334 if (!ExceptionValueIntrinsic)
335 ExceptionValueIntrinsic = Intrinsic::getDeclaration(F->getParent(),
336 Intrinsic::eh_exception);
337
338 // Create the call.
339 return CallInst::Create(ExceptionValueIntrinsic, "eh.value.call", Start);
340 }
341
342 /// CreateValueLoad - Insert a load of the exception value stack variable
343 /// (creating it if necessary) at the start of the basic block (unless
344 /// there already is a load, in which case the existing load is returned).
345 Instruction *DwarfEHPrepare::CreateValueLoad(BasicBlock *BB) {
346 Instruction *Start = BB->getFirstNonPHI();
347 // Is this a load of the exception temporary?
348 if (ExceptionValueVar)
349 if (LoadInst* LI = dyn_cast(Start))
350 if (LI->getPointerOperand() == ExceptionValueVar)
351 // Reuse the existing load.
352 return Start;
353
354 // Create the temporary if we didn't already.
355 if (!ExceptionValueVar) {
356 ExceptionValueVar = new AllocaInst(PointerType::getUnqual(Type::Int8Ty),
357 "eh.value", F->begin()->begin());
358 ++NumStackTempsIntroduced;
359 }
360
361 // Load the value.
362 return new LoadInst(ExceptionValueVar, "eh.value.load", Start);
363 }
364
365 bool DwarfEHPrepare::runOnFunction(Function &Fn) {
366 bool Changed = false;
367
368 // Initialize internal state.
369 DT = getAnalysisIfAvailable();
370 DF = getAnalysisIfAvailable();
371 ExceptionValueVar = 0;
372 F = &Fn;
373
374 // Ensure that only unwind edges end at landing pads (a landing pad is a
375 // basic block where an invoke unwind edge ends).
376 Changed |= NormalizeLandingPads();
377
378 // Turn unwind instructions into libcalls.
379 Changed |= LowerUnwinds();
380
381 // TODO: Move eh.selector calls to landing pads and combine them.
382
383 // Move eh.exception calls to landing pads.
384 Changed |= MoveExceptionValueCalls();
385
386 // Initialize any stack temporaries we introduced.
387 Changed |= FinishStackTemporaries();
388
389 // Turn any stack temporaries into registers if possible.
390 if (!CompileFast)
391 Changed |= PromoteStackTemporaries();
392
393 LandingPads.clear();
394
395 return Changed;
396 }
157157 PM.add(createPrintFunctionPass("\n\n*** Code after LSR ***\n", &errs()));
158158 }
159159
160 PM.add(createGCLoweringPass());
161
160 // Turn exception handling constructs into something the code generators can
161 // handle.
162162 if (!getTargetAsmInfo()->doesSupportExceptionHandling())
163163 PM.add(createLowerInvokePass(getTargetLowering()));
164 else
165 PM.add(createDwarfEHPass(getTargetLowering(), OptLevel==CodeGenOpt::None));
166
167 PM.add(createGCLoweringPass());
164168
165169 // Make sure that no unreachable blocks are instruction selected.
166170 PM.add(createUnreachableBlockEliminationPass());
454454 switch (TLI.getOperationAction(ISD::EXCEPTIONADDR, VT)) {
455455 default: break;
456456 case TargetLowering::Expand: {
457 if (!MBB->isLandingPad()) {
458 // FIXME: Mark exception register as live in. Hack for PR1508.
459 unsigned Reg = TLI.getExceptionAddressRegister();
460 if (Reg) MBB->addLiveIn(Reg);
461 }
457 assert(MBB->isLandingPad() && "Call to eh.exception not in landing pad!");
462458 unsigned Reg = TLI.getExceptionAddressRegister();
463459 const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
464460 unsigned ResultReg = createResultReg(RC);
40544054 return 0;
40554055 }
40564056 case Intrinsic::eh_exception: {
4057 if (!CurMBB->isLandingPad()) {
4058 // FIXME: Mark exception register as live in. Hack for PR1508.
4059 unsigned Reg = TLI.getExceptionAddressRegister();
4060 if (Reg) CurMBB->addLiveIn(Reg);
4061 }
40624057 // Insert the EXCEPTIONADDR instruction.
4058 assert(CurMBB->isLandingPad() &&"Call to eh.exception not in landing pad!");
40634059 SDVTList VTs = DAG.getVTList(TLI.getPointerTy(), MVT::Other);
40644060 SDValue Ops[1];
40654061 Ops[0] = DAG.getRoot();
234234 Names[RTLIB::UO_F64] = "__unorddf2";
235235 Names[RTLIB::O_F32] = "__unordsf2";
236236 Names[RTLIB::O_F64] = "__unorddf2";
237 Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
237238 }
238239
239240 /// getFPEXT - Return the FPEXT_*_* value for the given types, or
0 ; RUN: llvm-as < %s | llc -march=x86 -enable-eh -asm-verbose -o - | \
1 ; RUN: grep -A 3 {Llabel137.*Region start} | grep {3.*Action}
1 ; RUN: grep -A 3 {Llabel138.*Region start} | grep {3.*Action}
22 ; PR1422
33 ; PR1508
44
312312 >
313313
314314
315 RelativePath="..\..\lib\CodeGen\DwarfEHPrepare.cpp"
316 >
317
318
315319 RelativePath="..\..\lib\CodeGen\ELFWriter.cpp"
316320 >
317321