llvm.org GIT mirror: llvm / 5eb6d65

Move several SelectionDAG-independent utility functions out of the SelectionDAG directory and into a new Analysis.cpp file.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@101975 91177308-0d34-0410-b5e6-96231b3b80d8

Dan Gohman, 10 years ago. 5 changed files with 367 additions and 310 deletions.

include/llvm/CodeGen/Analysis.h (new file):

//===- CodeGen/Analysis.h - CodeGen LLVM IR Analysis Utilities --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_ANALYSIS_H
#define LLVM_CODEGEN_ANALYSIS_H

#include "llvm/Instructions.h"
#include "llvm/InlineAsm.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/Support/CallSite.h"

namespace llvm {

class TargetLowering;
class GlobalVariable;

/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
                            const unsigned *Indices,
                            const unsigned *IndicesEnd,
                            unsigned CurIndex = 0);

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
                     SmallVectorImpl<EVT> &ValueVTs,
                     SmallVectorImpl<uint64_t> *Offsets = 0,
                     uint64_t StartingOffset = 0);

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalVariable *ExtractTypeInfo(Value *V);

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
                               const TargetLowering &TLI);

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred);

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
                          const TargetLowering &TLI);

} // End llvm namespace

#endif

lib/CodeGen/Analysis.cpp (new file):

//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
using namespace llvm;

/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
///
unsigned llvm::ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}

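A minimal usage sketch (not part of the commit, assuming an LLVM tree with this header): for the aggregate type { i32, { float, double }, i8 } the non-aggregate leaves are numbered depth-first as 0:i32, 1:float, 2:double, 3:i8, so the insertvalue index path {1, 1} linearizes to 2. TLI and Aggregate are placeholders for whatever target lowering and aggregate type are in scope.

#include "llvm/CodeGen/Analysis.h"
using namespace llvm;

unsigned exampleLinearIndex(const TargetLowering &TLI, const Type *Aggregate) {
  // Index path: outer field 1 (the nested struct), then its field 1 (double).
  unsigned Indices[] = { 1, 1 };
  return ComputeLinearIndex(TLI, Aggregate, Indices, Indices + 2); // == 2
}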

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
                           SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    const Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

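A companion sketch (again illustrative, not from the commit): flattening { i32, [2 x float] } yields ValueVTs = { i32, f32, f32 }, and with a typical layout Offsets = { 0, 4, 8 }; the exact offsets are target-dependent and come from the target's TargetData.

#include "llvm/CodeGen/Analysis.h"
#include "llvm/ADT/SmallVector.h"
using namespace llvm;

void exampleValueVTs(const TargetLowering &TLI, const Type *Ty) {
  SmallVector<EVT, 4> ValueVTs;     // one entry per non-aggregate leaf
  SmallVector<uint64_t, 4> Offsets; // parallel in-memory offsets
  ComputeValueVTs(TLI, Ty, ValueVTs, &Offsets);
}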

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);

  if (GV && GV->getName() == ".llvm.eh.catch.all.value") {
    assert(GV->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = GV->getInitializer();
    GV = dyn_cast<GlobalVariable>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

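For context, a sketch (illustrative; SelectorArg is a hypothetical name): V is typically a typeinfo operand of an eh.selector call, i.e. a possibly bitcast typeinfo global such as @_ZTIi, a null pointer for a catch-all, or the special .llvm.eh.catch.all.value global whose initializer holds the real typeinfo.

#include "llvm/CodeGen/Analysis.h"
using namespace llvm;

GlobalVariable *exampleTypeInfo(Value *SelectorArg) {
  // Returns the underlying typeinfo global, or null for a catch-all.
  return ExtractTypeInfo(SelectorArg);
}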

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

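A short usage sketch (illustrative; IA is a hypothetical InlineAsm* in scope, and in LLVM of this vintage the constraint list comes from InlineAsm::ParseConstraints()):

#include "llvm/CodeGen/Analysis.h"
#include "llvm/InlineAsm.h"
using namespace llvm;

bool exampleUsesMemory(const InlineAsm *IA, const TargetLowering &TLI) {
  std::vector<InlineAsm::ConstraintInfo> CInfos = IA->ParseConstraints();
  return hasInlineAsmMemConstraint(CInfos, TLI);
}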

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  ISD::CondCode FPC, FOC;
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO; break;
  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO; break;
  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
  default:
    llvm_unreachable("Invalid FCmp predicate opcode!");
    FOC = FPC = ISD::SETFALSE;
    break;
  }
  if (FiniteOnlyFPMath())
    return FOC;
  else
    return FPC;
}

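The FOC/FPC pair encodes the effect of the global math flags: when FiniteOnlyFPMath() is set, NaNs are assumed not to occur, so the ordered/unordered distinction collapses to the plain codes. A one-line sketch (illustrative, not from the commit):

#include "llvm/CodeGen/Analysis.h"
using namespace llvm;

ISD::CondCode exampleFCmp() {
  // SETUEQ under the default FP model, plain SETEQ under FiniteOnlyFPMath.
  return getFCmpCondCode(FCmpInst::FCMP_UEQ);
}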

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
    return ISD::SETNE;
  }
}

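The integer mapping is a direct table lookup, e.g. (illustrative):

#include "llvm/CodeGen/Analysis.h"
using namespace llvm;

ISD::CondCode exampleICmp() {
  return getICmpCondCode(ICmpInst::ICMP_SLT); // yields ISD::SETLT
}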

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
                                const TargetLowering &TLI) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
  const Function *F = ExitBB->getParent();

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term))) return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !I->isSafeToSpeculativelyExecute())
    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
         --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !BBI->isSafeToSpeculativelyExecute())
        return false;
    }

  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
    return false;

  // Otherwise, make sure the unmodified return value of I is the return value.
  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
       U = dyn_cast<Instruction>(U->getOperand(0))) {
    if (!U)
      return false;
    if (!U->hasOneUse())
      return false;
    if (U == I)
      break;
    // Check for a truly no-op truncate.
    if (isa<TruncInst>(U) &&
        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
      continue;
    // Check for a truly no-op bitcast.
    if (isa<BitCastInst>(U) &&
        (U->getOperand(0)->getType() == U->getType() ||
         (U->getOperand(0)->getType()->isPointerTy() &&
          U->getType()->isPointerTy())))
      continue;
    // Otherwise it's not a true no-op.
    return false;
  }

  return true;
}
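A final sketch (illustrative; CI, CalleeRetAttr, and TLI are assumed to be in scope): a call qualifies when its unmodified result feeds straight into the return, as in

  %r = tail call i32 @f(i32 %x)
  ret i32 %r

while an interposing load or store, or a return-attribute mismatch, disqualifies it.

#include "llvm/CodeGen/Analysis.h"
using namespace llvm;

bool exampleTailPosition(const CallInst *CI, Attributes CalleeRetAttr,
                         const TargetLowering &TLI) {
  return isInTailCallPosition(ImmutableCallSite(CI), CalleeRetAttr, TLI);
}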

lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp (diff):

...
 #define DEBUG_TYPE "function-lowering-info"
 #include "FunctionLoweringInfo.h"
-#include "llvm/CallingConv.h"
 #include "llvm/DerivedTypes.h"
 #include "llvm/Function.h"
 #include "llvm/Instructions.h"
 #include "llvm/IntrinsicInst.h"
 #include "llvm/LLVMContext.h"
 #include "llvm/Module.h"
+#include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineModuleInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/Analysis/DebugInfo.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetData.h"
 #include "llvm/Target/TargetFrameInfo.h"
 #include "llvm/Target/TargetIntrinsicInfo.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetOptions.h"
-#include "llvm/Support/Compiler.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
-#include "llvm/Support/raw_ostream.h"
 #include <algorithm>
 using namespace llvm;
-
-/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
-/// of insertvalue or extractvalue indices that identify a member, return
-/// the linearized index of the start of the member.
-///
-unsigned llvm::ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
-                                  const unsigned *Indices,
-                                  const unsigned *IndicesEnd,
-                                  unsigned CurIndex) {
-  // Base case: We're done.
-  if (Indices && Indices == IndicesEnd)
-    return CurIndex;
-
-  // Given a struct type, recursively traverse the elements.
-  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
-    for (StructType::element_iterator EB = STy->element_begin(),
-                                      EI = EB,
-                                      EE = STy->element_end();
-         EI != EE; ++EI) {
-      if (Indices && *Indices == unsigned(EI - EB))
-        return ComputeLinearIndex(TLI, *EI, Indices+1, IndicesEnd, CurIndex);
-      CurIndex = ComputeLinearIndex(TLI, *EI, 0, 0, CurIndex);
-    }
-    return CurIndex;
-  }
-  // Given an array type, recursively traverse the elements.
-  else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
-    const Type *EltTy = ATy->getElementType();
-    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
-      if (Indices && *Indices == i)
-        return ComputeLinearIndex(TLI, EltTy, Indices+1, IndicesEnd, CurIndex);
-      CurIndex = ComputeLinearIndex(TLI, EltTy, 0, 0, CurIndex);
-    }
-    return CurIndex;
-  }
-  // We haven't found the type we're looking for, so keep searching.
-  return CurIndex + 1;
-}
-
-/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
-/// EVTs that represent all the individual underlying
-/// non-aggregate types that comprise it.
-///
-/// If Offsets is non-null, it points to a vector to be filled in
-/// with the in-memory offsets of each of the individual values.
-///
-void llvm::ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
-                           SmallVectorImpl<EVT> &ValueVTs,
-                           SmallVectorImpl<uint64_t> *Offsets,
-                           uint64_t StartingOffset) {
-  // Given a struct type, recursively traverse the elements.
-  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
-    const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy);
-    for (StructType::element_iterator EB = STy->element_begin(),
-                                      EI = EB,
-                                      EE = STy->element_end();
-         EI != EE; ++EI)
-      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
-                      StartingOffset + SL->getElementOffset(EI - EB));
-    return;
-  }
-  // Given an array type, recursively traverse the elements.
-  if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
-    const Type *EltTy = ATy->getElementType();
-    uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy);
-    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
-      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
-                      StartingOffset + i * EltSize);
-    return;
-  }
-  // Interpret void as zero return values.
-  if (Ty->isVoidTy())
-    return;
-  // Base case: we can get an EVT for this LLVM IR type.
-  ValueVTs.push_back(TLI.getValueType(Ty));
-  if (Offsets)
-    Offsets->push_back(StartingOffset);
-}
 
 /// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
 /// PHI nodes or outside of the basic block that defines it, or used by a
...
   return FirstReg;
 }
 
-/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
-GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
-  V = V->stripPointerCasts();
-  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);
-
-  if (GV && GV->getName() == ".llvm.eh.catch.all.value") {
-    assert(GV->hasInitializer() &&
-           "The EH catch-all value must have an initializer");
-    Value *Init = GV->getInitializer();
-    GV = dyn_cast<GlobalVariable>(Init);
-    if (!GV) V = cast<ConstantPointerNull>(Init);
-  }
-
-  assert((GV || isa<ConstantPointerNull>(V)) &&
-         "TypeInfo must be a global variable or NULL");
-  return GV;
-}
-
 /// AddCatchInfo - Extract the personality and type infos from an eh.selector
 /// call, and add them to the specified machine basic block.
 void llvm::AddCatchInfo(const CallInst &I, MachineModuleInfo *MMI,
...
 #endif
   }
 }
-
-/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
-/// processed uses a memory 'm' constraint.
-bool
-llvm::hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
-                                const TargetLowering &TLI) {
-  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
-    InlineAsm::ConstraintInfo &CI = CInfos[i];
-    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
-      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
-      if (CType == TargetLowering::C_Memory)
-        return true;
-    }
-
-    // Indirect operand accesses access memory.
-    if (CI.isIndirect)
-      return true;
-  }
-
-  return false;
-}
-
-/// getFCmpCondCode - Return the ISD condition code corresponding to
-/// the given LLVM IR floating-point condition code. This includes
-/// consideration of global floating-point math flags.
-///
-ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
-  ISD::CondCode FPC, FOC;
-  switch (Pred) {
-  case FCmpInst::FCMP_FALSE: FOC = FPC = ISD::SETFALSE; break;
-  case FCmpInst::FCMP_OEQ:   FOC = ISD::SETEQ; FPC = ISD::SETOEQ; break;
-  case FCmpInst::FCMP_OGT:   FOC = ISD::SETGT; FPC = ISD::SETOGT; break;
-  case FCmpInst::FCMP_OGE:   FOC = ISD::SETGE; FPC = ISD::SETOGE; break;
-  case FCmpInst::FCMP_OLT:   FOC = ISD::SETLT; FPC = ISD::SETOLT; break;
-  case FCmpInst::FCMP_OLE:   FOC = ISD::SETLE; FPC = ISD::SETOLE; break;
-  case FCmpInst::FCMP_ONE:   FOC = ISD::SETNE; FPC = ISD::SETONE; break;
-  case FCmpInst::FCMP_ORD:   FOC = FPC = ISD::SETO; break;
-  case FCmpInst::FCMP_UNO:   FOC = FPC = ISD::SETUO; break;
-  case FCmpInst::FCMP_UEQ:   FOC = ISD::SETEQ; FPC = ISD::SETUEQ; break;
-  case FCmpInst::FCMP_UGT:   FOC = ISD::SETGT; FPC = ISD::SETUGT; break;
-  case FCmpInst::FCMP_UGE:   FOC = ISD::SETGE; FPC = ISD::SETUGE; break;
-  case FCmpInst::FCMP_ULT:   FOC = ISD::SETLT; FPC = ISD::SETULT; break;
-  case FCmpInst::FCMP_ULE:   FOC = ISD::SETLE; FPC = ISD::SETULE; break;
-  case FCmpInst::FCMP_UNE:   FOC = ISD::SETNE; FPC = ISD::SETUNE; break;
-  case FCmpInst::FCMP_TRUE:  FOC = FPC = ISD::SETTRUE; break;
-  default:
-    llvm_unreachable("Invalid FCmp predicate opcode!");
-    FOC = FPC = ISD::SETFALSE;
-    break;
-  }
-  if (FiniteOnlyFPMath())
-    return FOC;
-  else
-    return FPC;
-}
-
-/// getICmpCondCode - Return the ISD condition code corresponding to
-/// the given LLVM IR integer condition code.
-///
-ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
-  switch (Pred) {
-  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
-  case ICmpInst::ICMP_NE:  return ISD::SETNE;
-  case ICmpInst::ICMP_SLE: return ISD::SETLE;
-  case ICmpInst::ICMP_ULE: return ISD::SETULE;
-  case ICmpInst::ICMP_SGE: return ISD::SETGE;
-  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
-  case ICmpInst::ICMP_SLT: return ISD::SETLT;
-  case ICmpInst::ICMP_ULT: return ISD::SETULT;
-  case ICmpInst::ICMP_SGT: return ISD::SETGT;
-  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
-  default:
-    llvm_unreachable("Invalid ICmp predicate opcode!");
-    return ISD::SETNE;
-  }
-}
-
-/// Test if the given instruction is in a position to be optimized
-/// with a tail-call. This roughly means that it's in a block with
-/// a return and there's nothing that needs to be scheduled
-/// between it and the return.
-///
-/// This function only tests target-independent requirements.
-bool llvm::isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
-                                const TargetLowering &TLI) {
-  const Instruction *I = CS.getInstruction();
-  const BasicBlock *ExitBB = I->getParent();
-  const TerminatorInst *Term = ExitBB->getTerminator();
-  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);
-  const Function *F = ExitBB->getParent();
-
-  // The block must end in a return statement or unreachable.
-  //
-  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
-  // an unreachable, for now. The way tailcall optimization is currently
-  // implemented means it will add an epilogue followed by a jump. That is
-  // not profitable. Also, if the callee is a special function (e.g.
-  // longjmp on x86), it can end up causing miscompilation that has not
-  // been fully understood.
-  if (!Ret &&
-      (!GuaranteedTailCallOpt || !isa<UnreachableInst>(Term))) return false;
-
-  // If I will have a chain, make sure no other instruction that will have a
-  // chain interposes between I and the return.
-  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
-      !I->isSafeToSpeculativelyExecute())
-    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
-         --BBI) {
-      if (&*BBI == I)
-        break;
-      // Debug info intrinsics do not get in the way of tail call optimization.
-      if (isa<DbgInfoIntrinsic>(BBI))
-        continue;
-      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
-          !BBI->isSafeToSpeculativelyExecute())
-        return false;
-    }
-
-  // If the block ends with a void return or unreachable, it doesn't matter
-  // what the call's return type is.
-  if (!Ret || Ret->getNumOperands() == 0) return true;
-
-  // If the return value is undef, it doesn't matter what the call's
-  // return type is.
-  if (isa<UndefValue>(Ret->getOperand(0))) return true;
-
-  // Conservatively require the attributes of the call to match those of
-  // the return. Ignore noalias because it doesn't affect the call sequence.
-  unsigned CallerRetAttr = F->getAttributes().getRetAttributes();
-  if ((CalleeRetAttr ^ CallerRetAttr) & ~Attribute::NoAlias)
-    return false;
-
-  // It's not safe to eliminate the sign / zero extension of the return value.
-  if ((CallerRetAttr & Attribute::ZExt) || (CallerRetAttr & Attribute::SExt))
-    return false;
-
-  // Otherwise, make sure the unmodified return value of I is the return value.
-  for (const Instruction *U = dyn_cast<Instruction>(Ret->getOperand(0)); ;
-       U = dyn_cast<Instruction>(U->getOperand(0))) {
-    if (!U)
-      return false;
-    if (!U->hasOneUse())
-      return false;
-    if (U == I)
-      break;
-    // Check for a truly no-op truncate.
-    if (isa<TruncInst>(U) &&
-        TLI.isTruncateFree(U->getOperand(0)->getType(), U->getType()))
-      continue;
-    // Check for a truly no-op bitcast.
-    if (isa<BitCastInst>(U) &&
-        (U->getOperand(0)->getType() == U->getType() ||
-         (U->getOperand(0)->getType()->isPointerTy() &&
-          U->getType()->isPointerTy())))
-      continue;
-    // Otherwise it's not a true no-op.
-    return false;
-  }
-
-  return true;
-}

lib/CodeGen/SelectionDAG/FunctionLoweringInfo.h (diff):

...
   }
 };
 
-/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
-/// of insertvalue or extractvalue indices that identify a member, return
-/// the linearized index of the start of the member.
-///
-unsigned ComputeLinearIndex(const TargetLowering &TLI, const Type *Ty,
-                            const unsigned *Indices,
-                            const unsigned *IndicesEnd,
-                            unsigned CurIndex = 0);
-
-/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
-/// EVTs that represent all the individual underlying
-/// non-aggregate types that comprise it.
-///
-/// If Offsets is non-null, it points to a vector to be filled in
-/// with the in-memory offsets of each of the individual values.
-///
-void ComputeValueVTs(const TargetLowering &TLI, const Type *Ty,
-                     SmallVectorImpl<EVT> &ValueVTs,
-                     SmallVectorImpl<uint64_t> *Offsets = 0,
-                     uint64_t StartingOffset = 0);
-
-/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
-GlobalVariable *ExtractTypeInfo(Value *V);
-
 /// AddCatchInfo - Extract the personality and type infos from an eh.selector
 /// call, and add them to the specified machine basic block.
 void AddCatchInfo(const CallInst &I,
...
 void CopyCatchInfo(const BasicBlock *SrcBB, const BasicBlock *DestBB,
                    MachineModuleInfo *MMI, FunctionLoweringInfo &FLI);
 
-/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
-/// processed uses a memory 'm' constraint.
-bool hasInlineAsmMemConstraint(std::vector<InlineAsm::ConstraintInfo> &CInfos,
-                               const TargetLowering &TLI);
-
-/// getFCmpCondCode - Return the ISD condition code corresponding to
-/// the given LLVM IR floating-point condition code. This includes
-/// consideration of global floating-point math flags.
-///
-ISD::CondCode getFCmpCondCode(FCmpInst::Predicate Pred);
-
-/// getICmpCondCode - Return the ISD condition code corresponding to
-/// the given LLVM IR integer condition code.
-///
-ISD::CondCode getICmpCondCode(ICmpInst::Predicate Pred);
-
-/// Test if the given instruction is in a position to be optimized
-/// with a tail-call. This roughly means that it's in a block with
-/// a return and there's nothing that needs to be scheduled
-/// between it and the return.
-///
-/// This function only tests target-independent requirements.
-bool isInTailCallPosition(ImmutableCallSite CS, Attributes CalleeRetAttr,
-                          const TargetLowering &TLI);
-
 } // end namespace llvm
 
 #endif

lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp (diff):

...
 #include "llvm/IntrinsicInst.h"
 #include "llvm/LLVMContext.h"
 #include "llvm/Module.h"
+#include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/FastISel.h"
 #include "llvm/CodeGen/GCStrategy.h"
 #include "llvm/CodeGen/GCMetadata.h"