llvm.org GIT mirror llvm / ece6c6b
Revert the series of commits starting with r166578 which introduced the getIntPtrType support for multiple address spaces via a pointer type, and also introduced a crasher bug in the constant folder reported in PR14233. These commits also contained several problems that should really be addressed before they are re-committed. I have avoided reverting various cleanups to the DataLayout APIs that are reasonable to have moving forward in order to reduce the amount of churn, and minimize the number of commits that were reverted. I've also manually updated merge conflicts and manually arranged for the getIntPtrType function to stay in DataLayout and to be defined in a plausible way after this revert. Thanks to Duncan for working through this exact strategy with me, and Nick Lewycky for tracking down the really annoying crasher this triggered. (Test case to follow in its own commit.) After discussing with Duncan extensively, and based on a note from Micah, I'm going to continue to back out some more of the more problematic patches in this series in order to ensure we go into the LLVM 3.2 branch with a reasonable story here. I'll send a note to llvmdev explaining what's going on and why. Summary of reverted revisions: r166634: Fix a compiler warning with an unused variable. r166607: Add some cleanup to the DataLayout changes requested by Chandler. r166596: Revert "Back out r166591, not sure why this made it through since I cancelled the command. Bleh, sorry about this! r166591: Delete a directory that wasn't supposed to be checked in yet. r166578: Add in support for getIntPtrType to get the pointer type based on the address space. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@167221 91177308-0d34-0410-b5e6-96231b3b80d8 Chandler Carruth 6 years ago
46 changed file(s) with 472 addition(s) and 814 deletion(s). Raw diff Collapse all Expand all
167167
168168 public:
169169 ObjectSizeOffsetVisitor(const DataLayout *TD, const TargetLibraryInfo *TLI,
170 LLVMContext &Context, bool RoundToAlign = false,
171 unsigned AS = 0);
170 LLVMContext &Context, bool RoundToAlign = false);
172171
173172 SizeOffsetType compute(Value *V);
174173
229228
230229 public:
231230 ObjectSizeOffsetEvaluator(const DataLayout *TD, const TargetLibraryInfo *TLI,
232 LLVMContext &Context, unsigned AS = 0);
231 LLVMContext &Context);
233232 SizeOffsetEvalType compute(Value *V);
234233
235234 bool knownSize(SizeOffsetEvalType SizeOffset) {
627627
628628 /// getSizeOfExpr - Return an expression for sizeof on the given type.
629629 ///
630 const SCEV *getSizeOfExpr(Type *AllocTy, Type *IntPtrTy);
630 const SCEV *getSizeOfExpr(Type *AllocTy);
631631
632632 /// getAlignOfExpr - Return an expression for alignof on the given type.
633633 ///
635635
636636 /// getOffsetOfExpr - Return an expression for offsetof on the given field.
637637 ///
638 const SCEV *getOffsetOfExpr(StructType *STy, Type *IntPtrTy,
639 unsigned FieldNo);
638 const SCEV *getOffsetOfExpr(StructType *STy, unsigned FieldNo);
640639
641640 /// getOffsetOfExpr - Return an expression for offsetof on the given field.
642641 ///
257257 unsigned getPointerSizeInBits(unsigned AS) const {
258258 return getPointerSize(AS) * 8;
259259 }
260 /// Layout pointer size, in bits, based on the type.
261 /// If this function is called with a pointer type, then
262 /// the type size of the pointer is returned.
263 /// If this function is called with a vector of pointers,
264 /// then the type size of the pointer is returned.
265 /// Otherwise the type size of a default pointer is returned.
266 unsigned getPointerTypeSizeInBits(Type* Ty) const;
267
268260 /// Size examples:
269261 ///
270262 /// Type SizeInBits StoreSizeInBits AllocSizeInBits[*]
342334
343335 /// getIntPtrType - Return an integer type with size at least as big as that
344336 /// of a pointer in the given address space.
345 IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace) const;
337 IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const;
346338
347339 /// getIntPtrType - Return an integer (vector of integer) type with size at
348340 /// least as big as that of a pointer of the given pointer (vector of pointer)
1616 #define LLVM_INSTRUCTION_TYPES_H
1717
1818 #include "llvm/Instruction.h"
19 #include "llvm/DataLayout.h"
2019 #include "llvm/OperandTraits.h"
2120 #include "llvm/DerivedTypes.h"
2221 #include "llvm/ADT/Twine.h"
575574 bool isNoopCast(
576575 Type *IntPtrTy ///< Integer type corresponding to pointer
577576 ) const;
578
579 /// @brief Determine if this cast is a no-op cast.
580 bool isNoopCast(
581 const DataLayout &DL ///< DataLayout to get the Int Ptr type from.
582 ) const;
583577
584578 /// Determine how a pair of casts can be eliminated, if they can be at all.
585579 /// This is a helper function for both CastInst and ConstantExpr.
178178 template
179179 Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
180180 bool NoAssumptions = false) {
181 unsigned AS = cast(GEP)->getPointerAddressSpace();
182181 gep_type_iterator GTI = gep_type_begin(GEP);
183 Type *IntPtrTy = TD.getIntPtrType(GEP->getContext(), AS);
182 Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
184183 Value *Result = Constant::getNullValue(IntPtrTy);
185184
186185 // If the GEP is inbounds, we know that none of the addressing operations will
188187 bool isInBounds = cast(GEP)->isInBounds() && !NoAssumptions;
189188
190189 // Build a mask for high order bits.
190 unsigned AS = cast(GEP)->getPointerAddressSpace();
191191 unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
192192 uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
193193
4040 // Constant Folding internal helper functions
4141 //===----------------------------------------------------------------------===//
4242
43 /// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
43 /// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
4444 /// DataLayout. This always returns a non-null constant, but it may be a
4545 /// ConstantExpr if unfoldable.
4646 static Constant *FoldBitCast(Constant *C, Type *DestTy,
5858 return ConstantExpr::getBitCast(C, DestTy);
5959
6060 unsigned NumSrcElts = CDV->getType()->getNumElements();
61
61
6262 Type *SrcEltTy = CDV->getType()->getElementType();
63
63
6464 // If the vector is a vector of floating point, convert it to vector of int
6565 // to simplify things.
6666 if (SrcEltTy->isFloatingPointTy()) {
7171 C = ConstantExpr::getBitCast(C, SrcIVTy);
7272 CDV = cast(C);
7373 }
74
74
7575 // Now that we know that the input value is a vector of integers, just shift
7676 // and insert them into our result.
7777 unsigned BitShift = TD.getTypeAllocSizeInBits(SrcEltTy);
8383 else
8484 Result |= CDV->getElementAsInteger(i);
8585 }
86
86
8787 return ConstantInt::get(IT, Result);
8888 }
89
89
9090 // The code below only handles casts to vectors currently.
9191 VectorType *DestVTy = dyn_cast(DestTy);
9292 if (DestVTy == 0)
9393 return ConstantExpr::getBitCast(C, DestTy);
94
94
9595 // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
9696 // vector so the code below can handle it uniformly.
9797 if (isa(C) || isa(C)) {
9898 Constant *Ops = C; // don't take the address of C!
9999 return FoldBitCast(ConstantVector::get(Ops), DestTy, TD);
100100 }
101
101
102102 // If this is a bitcast from constant vector -> vector, fold it.
103103 if (!isa(C) && !isa(C))
104104 return ConstantExpr::getBitCast(C, DestTy);
105
105
106106 // If the element types match, VMCore can fold it.
107107 unsigned NumDstElt = DestVTy->getNumElements();
108108 unsigned NumSrcElt = C->getType()->getVectorNumElements();
109109 if (NumDstElt == NumSrcElt)
110110 return ConstantExpr::getBitCast(C, DestTy);
111
111
112112 Type *SrcEltTy = C->getType()->getVectorElementType();
113113 Type *DstEltTy = DestVTy->getElementType();
114
115 // Otherwise, we're changing the number of elements in a vector, which
114
115 // Otherwise, we're changing the number of elements in a vector, which
116116 // requires endianness information to do the right thing. For example,
117117 // bitcast (<2 x i64> to <4 x i32>)
118118 // folds to (little endian):
119119 // <4 x i32>
120120 // and to (big endian):
121121 // <4 x i32>
122
122
123123 // First thing is first. We only want to think about integer here, so if
124124 // we have something in FP form, recast it as integer.
125125 if (DstEltTy->isFloatingPointTy()) {
129129 VectorType::get(IntegerType::get(C->getContext(), FPWidth), NumDstElt);
130130 // Recursively handle this integer conversion, if possible.
131131 C = FoldBitCast(C, DestIVTy, TD);
132
132
133133 // Finally, VMCore can handle this now that #elts line up.
134134 return ConstantExpr::getBitCast(C, DestTy);
135135 }
136
136
137137 // Okay, we know the destination is integer, if the input is FP, convert
138138 // it to integer first.
139139 if (SrcEltTy->isFloatingPointTy()) {
147147 !isa(C))
148148 return C;
149149 }
150
150
151151 // Now we know that the input and output vectors are both integer vectors
152152 // of the same size, and that their #elements is not the same. Do the
153153 // conversion here, which depends on whether the input or output has
154154 // more elements.
155155 bool isLittleEndian = TD.isLittleEndian();
156
156
157157 SmallVector Result;
158158 if (NumDstElt < NumSrcElt) {
159159 // Handle: bitcast (<4 x i32> to <2 x i64>)
169169 Constant *Src =dyn_cast(C->getAggregateElement(SrcElt++));
170170 if (!Src) // Reject constantexpr elements.
171171 return ConstantExpr::getBitCast(C, DestTy);
172
172
173173 // Zero extend the element to the right size.
174174 Src = ConstantExpr::getZExt(Src, Elt->getType());
175
175
176176 // Shift it to the right place, depending on endianness.
177 Src = ConstantExpr::getShl(Src,
177 Src = ConstantExpr::getShl(Src,
178178 ConstantInt::get(Src->getType(), ShiftAmt));
179179 ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
180
180
181181 // Mix it in.
182182 Elt = ConstantExpr::getOr(Elt, Src);
183183 }
185185 }
186186 return ConstantVector::get(Result);
187187 }
188
188
189189 // Handle: bitcast (<2 x i64> to <4 x i32>)
190190 unsigned Ratio = NumDstElt/NumSrcElt;
191191 unsigned DstBitSize = DstEltTy->getPrimitiveSizeInBits();
192
192
193193 // Loop over each source value, expanding into multiple results.
194194 for (unsigned i = 0; i != NumSrcElt; ++i) {
195195 Constant *Src = dyn_cast(C->getAggregateElement(i));
196196 if (!Src) // Reject constantexpr elements.
197197 return ConstantExpr::getBitCast(C, DestTy);
198
198
199199 unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
200200 for (unsigned j = 0; j != Ratio; ++j) {
201201 // Shift the piece of the value into the right place, depending on
202202 // endianness.
203 Constant *Elt = ConstantExpr::getLShr(Src,
203 Constant *Elt = ConstantExpr::getLShr(Src,
204204 ConstantInt::get(Src->getType(), ShiftAmt));
205205 ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
206
206
207207 // Truncate and remember this piece.
208208 Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
209209 }
210210 }
211
211
212212 return ConstantVector::get(Result);
213213 }
214214
223223 Offset = 0;
224224 return true;
225225 }
226
226
227227 // Otherwise, if this isn't a constant expr, bail out.
228228 ConstantExpr *CE = dyn_cast(C);
229229 if (!CE) return false;
230
230
231231 // Look through ptr->int and ptr->ptr casts.
232232 if (CE->getOpcode() == Instruction::PtrToInt ||
233233 CE->getOpcode() == Instruction::BitCast)
234234 return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD);
235
236 // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
235
236 // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
237237 if (CE->getOpcode() == Instruction::GetElementPtr) {
238238 // Cannot compute this if the element type of the pointer is missing size
239239 // info.
240240 if (!cast(CE->getOperand(0)->getType())
241241 ->getElementType()->isSized())
242242 return false;
243
243
244244 // If the base isn't a global+constant, we aren't either.
245245 if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, TD))
246246 return false;
247
247
248248 // Otherwise, add any offset that our operands provide.
249249 gep_type_iterator GTI = gep_type_begin(CE);
250250 for (User::const_op_iterator i = CE->op_begin() + 1, e = CE->op_end();
252252 ConstantInt *CI = dyn_cast(*i);
253253 if (!CI) return false; // Index isn't a simple constant?
254254 if (CI->isZero()) continue; // Not adding anything.
255
255
256256 if (StructType *ST = dyn_cast(*GTI)) {
257257 // N = N + Offset
258258 Offset += TD.getStructLayout(ST)->getElementOffset(CI->getZExtValue());
263263 }
264264 return true;
265265 }
266
266
267267 return false;
268268 }
269269
276276 const DataLayout &TD) {
277277 assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&
278278 "Out of range access");
279
279
280280 // If this element is zero or undefined, we can just return since *CurPtr is
281281 // zero initialized.
282282 if (isa(C) || isa(C))
283283 return true;
284
284
285285 if (ConstantInt *CI = dyn_cast(C)) {
286286 if (CI->getBitWidth() > 64 ||
287287 (CI->getBitWidth() & 7) != 0)
288288 return false;
289
289
290290 uint64_t Val = CI->getZExtValue();
291291 unsigned IntBytes = unsigned(CI->getBitWidth()/8);
292
292
293293 for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
294294 CurPtr[i] = (unsigned char)(Val >> (ByteOffset * 8));
295295 ++ByteOffset;
296296 }
297297 return true;
298298 }
299
299
300300 if (ConstantFP *CFP = dyn_cast(C)) {
301301 if (CFP->getType()->isDoubleTy()) {
302302 C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), TD);
308308 }
309309 return false;
310310 }
311
311
312312 if (ConstantStruct *CS = dyn_cast(C)) {
313313 const StructLayout *SL = TD.getStructLayout(CS->getType());
314314 unsigned Index = SL->getElementContainingOffset(ByteOffset);
315315 uint64_t CurEltOffset = SL->getElementOffset(Index);
316316 ByteOffset -= CurEltOffset;
317
317
318318 while (1) {
319319 // If the element access is to the element itself and not to tail padding,
320320 // read the bytes from the element.
324324 !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
325325 BytesLeft, TD))
326326 return false;
327
327
328328 ++Index;
329
329
330330 // Check to see if we read from the last struct element, if so we're done.
331331 if (Index == CS->getType()->getNumElements())
332332 return true;
374374 }
375375 return true;
376376 }
377
377
378378 if (ConstantExpr *CE = dyn_cast(C)) {
379379 if (CE->getOpcode() == Instruction::IntToPtr &&
380 CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getType()))
381 return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
380 CE->getOperand(0)->getType() == TD.getIntPtrType(CE->getContext()))
381 return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
382382 BytesLeft, TD);
383383 }
384384
390390 const DataLayout &TD) {
391391 Type *LoadTy = cast(C->getType())->getElementType();
392392 IntegerType *IntType = dyn_cast(LoadTy);
393
393
394394 // If this isn't an integer load we can't fold it directly.
395395 if (!IntType) {
396396 // If this is a float/double load, we can try folding it as an int32/64 load
414414 return FoldBitCast(Res, LoadTy, TD);
415415 return 0;
416416 }
417
417
418418 unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
419419 if (BytesLoaded > 32 || BytesLoaded == 0) return 0;
420
420
421421 GlobalValue *GVal;
422422 int64_t Offset;
423423 if (!IsConstantOffsetFromGlobal(C, GVal, Offset, TD))
424424 return 0;
425
425
426426 GlobalVariable *GV = dyn_cast(GVal);
427427 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
428428 !GV->getInitializer()->getType()->isSized())
431431 // If we're loading off the beginning of the global, some bytes may be valid,
432432 // but we don't try to handle this.
433433 if (Offset < 0) return 0;
434
434
435435 // If we're not accessing anything in this constant, the result is undefined.
436436 if (uint64_t(Offset) >= TD.getTypeAllocSize(GV->getInitializer()->getType()))
437437 return UndefValue::get(IntType);
438
438
439439 unsigned char RawBytes[32] = {0};
440440 if (!ReadDataFromGlobal(GV->getInitializer(), Offset, RawBytes,
441441 BytesLoaded, TD))
463463 // If the loaded value isn't a constant expr, we can't handle it.
464464 ConstantExpr *CE = dyn_cast(C);
465465 if (!CE) return 0;
466
466
467467 if (CE->getOpcode() == Instruction::GetElementPtr) {
468468 if (GlobalVariable *GV = dyn_cast(CE->getOperand(0)))
469469 if (GV->isConstant() && GV->hasDefinitiveInitializer())
470 if (Constant *V =
470 if (Constant *V =
471471 ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
472472 return V;
473473 }
474
474
475475 // Instead of loading constant c string, use corresponding integer value
476476 // directly if string length is small enough.
477477 StringRef Str;
499499 SingleChar = 0;
500500 StrVal = (StrVal << 8) | SingleChar;
501501 }
502
502
503503 Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
504504 if (Ty->isFloatingPointTy())
505505 Res = ConstantExpr::getBitCast(Res, Ty);
506506 return Res;
507507 }
508508 }
509
509
510510 // If this load comes from anywhere in a constant global, and if the global
511511 // is all undef or zero, we know what it loads.
512512 if (GlobalVariable *GV =
519519 return UndefValue::get(ResTy);
520520 }
521521 }
522
522
523523 // Try hard to fold loads from bitcasted strange and non-type-safe things. We
524524 // currently don't do any of this for big endian systems. It can be
525525 // generalized in the future if someone is interested.
530530
531531 static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
532532 if (LI->isVolatile()) return 0;
533
533
534534 if (Constant *C = dyn_cast(LI->getOperand(0)))
535535 return ConstantFoldLoadFromConstPtr(C, TD);
536536
539539
540540 /// SymbolicallyEvaluateBinop - One of Op0/Op1 is a constant expression.
541541 /// Attempt to symbolically evaluate the result of a binary operator merging
542 /// these together. If target data info is available, it is provided as TD,
542 /// these together. If target data info is available, it is provided as TD,
543543 /// otherwise TD is null.
544544 static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
545545 Constant *Op1, const DataLayout *TD){
546546 // SROA
547
547
548548 // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
549549 // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
550550 // bits.
551
552
551
552
553553 // If the constant expr is something like &A[123] - &A[4].f, fold this into a
554554 // constant. This happens frequently when iterating over a global array.
555555 if (Opc == Instruction::Sub && TD) {
556556 GlobalValue *GV1, *GV2;
557557 int64_t Offs1, Offs2;
558
558
559559 if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, *TD))
560560 if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *TD) &&
561561 GV1 == GV2) {
563563 return ConstantInt::get(Op0->getType(), Offs1-Offs2);
564564 }
565565 }
566
566
567567 return 0;
568568 }
569569
574574 Type *ResultTy, const DataLayout *TD,
575575 const TargetLibraryInfo *TLI) {
576576 if (!TD) return 0;
577 Type *IntPtrTy = TD->getIntPtrType(ResultTy);
577 Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
578578
579579 bool Any = false;
580580 SmallVector NewIdxs;
627627 if (!TD || !cast(Ptr->getType())->getElementType()->isSized() ||
628628 !Ptr->getType()->isPointerTy())
629629 return 0;
630
631 unsigned AS = cast(Ptr->getType())->getAddressSpace();
632 Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext(), AS);
630
631 Type *IntPtrTy = TD->getIntPtrType(Ptr->getContext());
633632
634633 // If this is a constant expr gep that is effectively computing an
635634 // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'
636635 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
637636 if (!isa(Ops[i])) {
638
637
639638 // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
640639 // "inttoptr (sub (ptrtoint Ptr), V)"
641640 if (Ops.size() == 2 &&
702701 // Also, this helps GlobalOpt do SROA on GlobalVariables.
703702 Type *Ty = Ptr->getType();
704703 assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type");
705 assert(Ty->getPointerAddressSpace() == AS
706 && "Operand and result of GEP should be in the same address space.");
707704 SmallVector NewIdxs;
708705 do {
709706 if (SequentialType *ATy = dyn_cast(Ty)) {
711708 // The only pointer indexing we'll do is on the first index of the GEP.
712709 if (!NewIdxs.empty())
713710 break;
714
711
715712 // Only handle pointers to sized types, not pointers to functions.
716713 if (!ATy->getElementType()->isSized())
717714 return 0;
718715 }
719
716
720717 // Determine which element of the array the offset points into.
721718 APInt ElemSize(BitWidth, TD->getTypeAllocSize(ATy->getElementType()));
722 IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext(), AS);
719 IntegerType *IntPtrTy = TD->getIntPtrType(Ty->getContext());
723720 if (ElemSize == 0)
724721 // The element size is 0. This may be [0 x Ty]*, so just use a zero
725722 // index for this level and proceed to the next level to see if it can
839836 if (const CmpInst *CI = dyn_cast(I))
840837 return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
841838 TD, TLI);
842
839
843840 if (const LoadInst *LI = dyn_cast(I))
844841 return ConstantFoldLoadInst(LI, TD);
845842
889886 /// information, due to only being passed an opcode and operands. Constant
890887 /// folding using this function strips this information.
891888 ///
892 Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
889 Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
893890 ArrayRef Ops,
894891 const DataLayout *TD,
895 const TargetLibraryInfo *TLI) {
892 const TargetLibraryInfo *TLI) {
896893 // Handle easy binops first.
897894 if (Instruction::isBinaryOp(Opcode)) {
898895 if (isa(Ops[0]) || isa(Ops[1]))
899896 if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD))
900897 return C;
901
898
902899 return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
903900 }
904
901
905902 switch (Opcode) {
906903 default: return 0;
907904 case Instruction::ICmp:
920917 unsigned InWidth = Input->getType()->getScalarSizeInBits();
921918 unsigned AS = cast(CE->getType())->getAddressSpace();
922919 if (TD->getPointerSizeInBits(AS) < InWidth) {
923 Constant *Mask =
920 Constant *Mask =
924921 ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth,
925922 TD->getPointerSizeInBits(AS)));
926923 Input = ConstantExpr::getAnd(Input, Mask);
936933 // pointer, so it can't be done in ConstantExpr::getCast.
937934 if (ConstantExpr *CE = dyn_cast(Ops[0]))
938935 if (TD && CE->getOpcode() == Instruction::PtrToInt &&
939 TD->getTypeSizeInBits(CE->getOperand(0)->getType())
936 TD->getPointerSizeInBits(
937 cast(CE->getOperand(0)->getType())->getAddressSpace())
940938 <= CE->getType()->getScalarSizeInBits())
941939 return FoldBitCast(CE->getOperand(0), DestTy, *TD);
942940
968966 return C;
969967 if (Constant *C = SymbolicallyEvaluateGEP(Ops, DestTy, TD, TLI))
970968 return C;
971
969
972970 return ConstantExpr::getGetElementPtr(Ops[0], Ops.slice(1));
973971 }
974972 }
978976 /// returns a constant expression of the specified operands.
979977 ///
980978 Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
981 Constant *Ops0, Constant *Ops1,
979 Constant *Ops0, Constant *Ops1,
982980 const DataLayout *TD,
983981 const TargetLibraryInfo *TLI) {
984982 // fold: icmp (inttoptr x), null -> icmp x, 0
989987 // ConstantExpr::getCompare cannot do this, because it doesn't have TD
990988 // around to know if bit truncation is happening.
991989 if (ConstantExpr *CE0 = dyn_cast(Ops0)) {
992 Type *IntPtrTy = NULL;
993990 if (TD && Ops1->isNullValue()) {
991 Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
994992 if (CE0->getOpcode() == Instruction::IntToPtr) {
995 IntPtrTy = TD->getIntPtrType(CE0->getType());
996993 // Convert the integer value to the right size to ensure we get the
997994 // proper extension or truncation.
998995 Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
1000997 Constant *Null = Constant::getNullValue(C->getType());
1001998 return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
1002999 }
1003
1000
10041001 // Only do this transformation if the int is intptrty in size, otherwise
10051002 // there is a truncation or extension that we aren't modeling.
1006 if (CE0->getOpcode() == Instruction::PtrToInt) {
1007 IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType());
1008 if (CE0->getType() == IntPtrTy) {
1009 Constant *C = CE0->getOperand(0);
1010 Constant *Null = Constant::getNullValue(C->getType());
1011 return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
1012 }
1013 }
1014 }
1015
1003 if (CE0->getOpcode() == Instruction::PtrToInt &&
1004 CE0->getType() == IntPtrTy) {
1005 Constant *C = CE0->getOperand(0);
1006 Constant *Null = Constant::getNullValue(C->getType());
1007 return ConstantFoldCompareInstOperands(Predicate, C, Null, TD, TLI);
1008 }
1009 }
1010
10161011 if (ConstantExpr *CE1 = dyn_cast(Ops1)) {
10171012 if (TD && CE0->getOpcode() == CE1->getOpcode()) {
1013 Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
10181014
10191015 if (CE0->getOpcode() == Instruction::IntToPtr) {
1020 Type *IntPtrTy = TD->getIntPtrType(CE0->getType());
10211016 // Convert the integer value to the right size to ensure we get the
10221017 // proper extension or truncation.
10231018 Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
10261021 IntPtrTy, false);
10271022 return ConstantFoldCompareInstOperands(Predicate, C0, C1, TD, TLI);
10281023 }
1029 }
1030
1031 // Only do this transformation if the int is intptrty in size, otherwise
1032 // there is a truncation or extension that we aren't modeling.
1033 if (CE0->getOpcode() == Instruction::PtrToInt) {
1034 IntPtrTy = TD->getIntPtrType(CE0->getOperand(0)->getType());
1035 if (CE0->getType() == IntPtrTy &&
1036 CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType())
1024
1025 // Only do this transformation if the int is intptrty in size, otherwise
1026 // there is a truncation or extension that we aren't modeling.
1027 if ((CE0->getOpcode() == Instruction::PtrToInt &&
1028 CE0->getType() == IntPtrTy &&
1029 CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()))
10371030 return ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0),
1038 CE1->getOperand(0), TD, TLI);
1039 }
1040 }
1041
1031 CE1->getOperand(0), TD, TLI);
1032 }
1033 }
1034
10421035 // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
10431036 // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
10441037 if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
10451038 CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
1046 Constant *LHS =
1039 Constant *LHS =
10471040 ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(0), Ops1,
10481041 TD, TLI);
1049 Constant *RHS =
1042 Constant *RHS =
10501043 ConstantFoldCompareInstOperands(Predicate, CE0->getOperand(1), Ops1,
10511044 TD, TLI);
1052 unsigned OpC =
1045 unsigned OpC =
10531046 Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
10541047 Constant *Ops[] = { LHS, RHS };
10551048 return ConstantFoldInstOperands(OpC, LHS->getType(), Ops, TD, TLI);
10561049 }
10571050 }
1058
1051
10591052 return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
10601053 }
10611054
10631056 /// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
10641057 /// getelementptr constantexpr, return the constant value being addressed by the
10651058 /// constant expression, or null if something is funny and we can't decide.
1066 Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
1059 Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
10671060 ConstantExpr *CE) {
10681061 if (!CE->getOperand(1)->isNullValue())
10691062 return 0; // Do not allow stepping over the value!
11331126
11341127 if (!F->hasName()) return false;
11351128 StringRef Name = F->getName();
1136
1129
11371130 // In these cases, the check of the length is required. We don't want to
11381131 // return true for a name like "cos\0blah" which strcmp would return equal to
11391132 // "cos", but has length 8.
11401133 switch (Name[0]) {
11411134 default: return false;
11421135 case 'a':
1143 return Name == "acos" || Name == "asin" ||
1136 return Name == "acos" || Name == "asin" ||
11441137 Name == "atan" || Name == "atan2";
11451138 case 'c':
11461139 return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
11601153 }
11611154 }
11621155
1163 static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
1156 static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
11641157 Type *Ty) {
11651158 sys::llvm_fenv_clearexcept();
11661159 V = NativeFP(V);
11681161 sys::llvm_fenv_clearexcept();
11691162 return 0;
11701163 }
1171
1164
11721165 if (Ty->isFloatTy())
11731166 return ConstantFP::get(Ty->getContext(), APFloat((float)V));
11741167 if (Ty->isDoubleTy())
11841177 sys::llvm_fenv_clearexcept();
11851178 return 0;
11861179 }
1187
1180
11881181 if (Ty->isFloatTy())
11891182 return ConstantFP::get(Ty->getContext(), APFloat((float)V));
11901183 if (Ty->isDoubleTy())
12781271 case 'e':
12791272 if (Name == "exp" && TLI->has(LibFunc::exp))
12801273 return ConstantFoldFP(exp, V, Ty);
1281
1274
12821275 if (Name == "exp2" && TLI->has(LibFunc::exp2)) {
12831276 // Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
12841277 // C99 library.
13541347 }
13551348
13561349 // Support ConstantVector in case we have an Undef in the top.
1357 if (isa(Operands[0]) ||
1350 if (isa(Operands[0]) ||
13581351 isa(Operands[0])) {
13591352 Constant *Op = cast(Operands[0]);
13601353 switch (F->getIntrinsicID()) {
13731366 case Intrinsic::x86_sse2_cvttsd2si64:
13741367 if (ConstantFP *FPOp =
13751368 dyn_cast_or_null(Op->getAggregateElement(0U)))
1376 return ConstantFoldConvertToInt(FPOp->getValueAPF(),
1369 return ConstantFoldConvertToInt(FPOp->getValueAPF(),
13771370 /*roundTowardZero=*/true, Ty);
13781371 }
13791372 }
1380
1373
13811374 if (isa(Operands[0])) {
13821375 if (F->getIntrinsicID() == Intrinsic::bswap)
13831376 return Operands[0];
13911384 if (ConstantFP *Op1 = dyn_cast(Operands[0])) {
13921385 if (!Ty->isFloatTy() && !Ty->isDoubleTy())
13931386 return 0;
1394 double Op1V = Ty->isFloatTy() ?
1387 double Op1V = Ty->isFloatTy() ?
13951388 (double)Op1->getValueAPF().convertToFloat() :
13961389 Op1->getValueAPF().convertToDouble();
13971390 if (ConstantFP *Op2 = dyn_cast(Operands[1])) {
13981391 if (Op2->getType() != Op1->getType())
13991392 return 0;
14001393
1401 double Op2V = Ty->isFloatTy() ?
1394 double Op2V = Ty->isFloatTy() ?
14021395 (double)Op2->getValueAPF().convertToFloat():
14031396 Op2->getValueAPF().convertToDouble();
14041397
14251418 }
14261419 return 0;
14271420 }
1428
1421
14291422 if (ConstantInt *Op1 = dyn_cast(Operands[0])) {
14301423 if (ConstantInt *Op2 = dyn_cast(Operands[1])) {
14311424 switch (F->getIntrinsicID()) {
14751468 return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
14761469 }
14771470 }
1478
1471
14791472 return 0;
14801473 }
14811474 return 0;
787787 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
788788 } while (Visited.insert(V));
789789
790 Type *IntPtrTy = TD->getIntPtrType(V->getType());
790 Type *IntPtrTy = TD->getIntPtrType(V->getContext());
791791 return cast(ConstantInt::get(IntPtrTy, Offset));
792792 }
793793
827827 // size of the byval type by the target's pointer size.
828828 PointerType *PTy = cast(CS.getArgument(I)->getType());
829829 unsigned TypeSize = TD->getTypeSizeInBits(PTy->getElementType());
830 unsigned PointerSize = TD->getTypeSizeInBits(PTy);
830 unsigned AS = PTy->getAddressSpace();
831 unsigned PointerSize = TD->getPointerSizeInBits(AS);
831832 // Ceiling division.
832833 unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
833834
727727 assert(V->getType()->isPointerTy() && "Unexpected operand type!");
728728 } while (Visited.insert(V));
729729
730 Type *IntPtrTy = TD.getIntPtrType(V->getContext(), AS);
730 Type *IntPtrTy = TD.getIntPtrType(V->getContext());
731731 return ConstantInt::get(IntPtrTy, Offset);
732732 }
733733
18791879 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
18801880 // if the integer type is the same size as the pointer type.
18811881 if (MaxRecurse && Q.TD && isa(LI) &&
1882 Q.TD->getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
1882 Q.TD->getPointerSizeInBits(
1883 cast(LI)->getPointerAddressSpace()) ==
1884 DstTy->getPrimitiveSizeInBits()) {
18831885 if (Constant *RHSC = dyn_cast(RHS)) {
18841886 // Transfer the cast to the constant.
18851887 if (Value *V = SimplifyICmpInst(Pred, SrcOp,
625625 if (W != V)
626626 return findValueImpl(W, OffsetOk, Visited);
627627 } else if (CastInst *CI = dyn_cast(V)) {
628 if (CI->isNoopCast(*TD))
628 if (CI->isNoopCast(TD ? TD->getIntPtrType(V->getContext()) :
629 Type::getInt64Ty(V->getContext())))
629630 return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
630631 } else if (ExtractValueInst *Ex = dyn_cast(V)) {
631632 if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
638639 if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
639640 CE->getOperand(0)->getType(),
640641 CE->getType(),
641 TD ? TD->getIntPtrType(CE->getType()) :
642 TD ? TD->getIntPtrType(V->getContext()) :
642643 Type::getInt64Ty(V->getContext())))
643644 return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
644645 } else if (CE->getOpcode() == Instruction::ExtractValue) {
375375 ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout *TD,
376376 const TargetLibraryInfo *TLI,
377377 LLVMContext &Context,
378 bool RoundToAlign,
379 unsigned AS)
378 bool RoundToAlign)
380379 : TD(TD), TLI(TLI), RoundToAlign(RoundToAlign) {
381 IntegerType *IntTy = TD->getIntPtrType(Context, AS);
380 IntegerType *IntTy = TD->getIntPtrType(Context);
382381 IntTyBits = IntTy->getBitWidth();
383382 Zero = APInt::getNullValue(IntTyBits);
384383 }
561560
562561 ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const DataLayout *TD,
563562 const TargetLibraryInfo *TLI,
564 LLVMContext &Context,
565 unsigned AS)
563 LLVMContext &Context)
566564 : TD(TD), TLI(TLI), Context(Context), Builder(Context, TargetFolder(TD)) {
567 IntTy = TD->getIntPtrType(Context, AS);
565 IntTy = TD->getIntPtrType(Context);
568566 Zero = ConstantInt::get(IntTy, 0);
569567 }
570568
25852585 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
25862586 }
25872587
2588 const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy, Type *IntPtrTy) {
2588 const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
25892589 // If we have DataLayout, we can bypass creating a target-independent
25902590 // constant expression and then folding it back into a ConstantInt.
25912591 // This is just a compile-time optimization.
25922592 if (TD)
2593 return getConstant(IntPtrTy, TD->getTypeAllocSize(AllocTy));
2593 return getConstant(TD->getIntPtrType(getContext()),
2594 TD->getTypeAllocSize(AllocTy));
25942595
25952596 Constant *C = ConstantExpr::getSizeOf(AllocTy);
25962597 if (ConstantExpr *CE = dyn_cast(C))
26092610 return getTruncateOrZeroExtend(getSCEV(C), Ty);
26102611 }
26112612
2612 const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy, Type *IntPtrTy,
2613 const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
26132614 unsigned FieldNo) {
26142615 // If we have DataLayout, we can bypass creating a target-independent
26152616 // constant expression and then folding it back into a ConstantInt.
26162617 // This is just a compile-time optimization.
26172618 if (TD)
2618 return getConstant(IntPtrTy,
2619 return getConstant(TD->getIntPtrType(getContext()),
26192620 TD->getStructLayout(STy)->getElementOffset(FieldNo));
26202621
26212622 Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
27022703
27032704 // The only other support type is pointer.
27042705 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
2705 if (TD) return TD->getIntPtrType(Ty);
2706 if (TD) return TD->getIntPtrType(getContext());
27062707
27072708 // Without DataLayout, conservatively assume pointers are 64-bit.
27082709 return Type::getInt64Ty(getContext());
31553156 if (StructType *STy = dyn_cast(*GTI++)) {
31563157 // For a struct, add the member offset.
31573158 unsigned FieldNo = cast(Index)->getZExtValue();
3158 const SCEV *FieldOffset = getOffsetOfExpr(STy, IntPtrTy, FieldNo);
3159 const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
31593160
31603161 // Add the field offset to the running total offset.
31613162 TotalOffset = getAddExpr(TotalOffset, FieldOffset);
31623163 } else {
31633164 // For an array, add the element offset, explicitly scaled.
3164 const SCEV *ElementSize = getSizeOfExpr(*GTI, IntPtrTy);
3165 const SCEV *ElementSize = getSizeOfExpr(*GTI);
31653166 const SCEV *IndexS = getSCEV(Index);
31663167 // Getelementptr indices are signed.
31673168 IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
416416 // array indexing.
417417 SmallVector ScaledOps;
418418 if (ElTy->isSized()) {
419 Type *IntPtrTy = SE.TD ? SE.TD->getIntPtrType(PTy) :
420 IntegerType::getInt64Ty(PTy->getContext());
421 const SCEV *ElSize = SE.getSizeOfExpr(ElTy, IntPtrTy);
419 const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
422420 if (!ElSize->isZero()) {
423421 SmallVector NewOps;
424422 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
384384 // - __tlv_bootstrap - used to make sure support exists
385385 // - spare pointer, used when mapped by the runtime
386386 // - pointer to mangled symbol above with initializer
387 assert(GV->getType()->isPointerTy() && "GV must be a pointer type!");
388 unsigned PtrSize = TD->getTypeSizeInBits(GV->getType())/8;
387 unsigned AS = GV->getType()->getAddressSpace();
388 unsigned PtrSize = TD->getPointerSizeInBits(AS)/8;
389389 OutStreamer.EmitSymbolValue(GetExternalSymbolSymbol("_tlv_bootstrap"),
390390 PtrSize, 0);
391391 OutStreamer.EmitIntValue(0, PtrSize, 0);
14801480 if (Offset == 0)
14811481 return Base;
14821482
1483 assert(CE->getType()->isPointerTy() && "We must have a pointer type!");
1483 unsigned AS = cast(CE->getType())->getAddressSpace();
14841484 // Truncate/sext the offset to the pointer size.
1485 unsigned Width = TD.getTypeSizeInBits(CE->getType());
1485 unsigned Width = TD.getPointerSizeInBits(AS);
14861486 if (Width < 64)
14871487 Offset = SignExtend64(Offset, Width);
14881488
15041504 // Handle casts to pointers by changing them into casts to the appropriate
15051505 // integer type. This promotes constant folding and simplifies this code.
15061506 Constant *Op = CE->getOperand(0);
1507 Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CE->getType()),
1507 Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CV->getContext()),
15081508 false/*ZExt*/);
15091509 return lowerConstant(Op, AP);
15101510 }
114114 Type::getInt8PtrTy(Context),
115115 Type::getInt8PtrTy(Context),
116116 Type::getInt8PtrTy(Context),
117 TD.getIntPtrType(Context, 0), (Type *)0);
117 TD.getIntPtrType(Context), (Type *)0);
118118 break;
119119 case Intrinsic::memmove:
120120 M.getOrInsertFunction("memmove",
121121 Type::getInt8PtrTy(Context),
122122 Type::getInt8PtrTy(Context),
123123 Type::getInt8PtrTy(Context),
124 TD.getIntPtrType(Context, 0), (Type *)0);
124 TD.getIntPtrType(Context), (Type *)0);
125125 break;
126126 case Intrinsic::memset:
127127 M.getOrInsertFunction("memset",
128128 Type::getInt8PtrTy(Context),
129129 Type::getInt8PtrTy(Context),
130130 Type::getInt32Ty(M.getContext()),
131 TD.getIntPtrType(Context, 0), (Type *)0);
131 TD.getIntPtrType(Context), (Type *)0);
132132 break;
133133 case Intrinsic::sqrt:
134134 EnsureFPIntrinsicsExist(M, I, "sqrtf", "sqrt", "sqrtl");
456456 break; // Strip out annotate intrinsic
457457
458458 case Intrinsic::memcpy: {
459 Type *IntPtr = TD.getIntPtrType(CI->getArgOperand(0)->getType());
459 Type *IntPtr = TD.getIntPtrType(Context);
460460 Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
461461 /* isSigned */ false);
462462 Value *Ops[3];
467467 break;
468468 }
469469 case Intrinsic::memmove: {
470 Type *IntPtr = TD.getIntPtrType(CI->getArgOperand(0)->getType());
470 Type *IntPtr = TD.getIntPtrType(Context);
471471 Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
472472 /* isSigned */ false);
473473 Value *Ops[3];
478478 break;
479479 }
480480 case Intrinsic::memset: {
481 Type *IntPtr = TD.getIntPtrType(CI->getArgOperand(0)->getType());
481 Type *IntPtr = TD.getIntPtrType(Context);
482482 Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
483483 /* isSigned */ false);
484484 Value *Ops[3];
100100
101101 // No-op casts are trivially coalesced by fast-isel.
102102 if (const CastInst *Cast = dyn_cast(I))
103 if (Cast->isNoopCast(TD) && !hasTrivialKill(Cast->getOperand(0)))
103 if (Cast->isNoopCast(TD.getIntPtrType(Cast->getContext())) &&
104 !hasTrivialKill(Cast->getOperand(0)))
104105 return false;
105106
106107 // GEPs with all zero indices are trivially coalesced by fast-isel.
173174 // Translate this as an integer zero so that it can be
174175 // local-CSE'd with actual integer zeros.
175176 Reg =
176 getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getType())));
177 getRegForValue(Constant::getNullValue(TD.getIntPtrType(V->getContext())));
177178 } else if (const ConstantFP *CF = dyn_cast(V)) {
178179 if (CF->isNullValue()) {
179180 Reg = TargetMaterializeFloatZero(CF);
37903790 // Emit a library call.
37913791 TargetLowering::ArgListTy Args;
37923792 TargetLowering::ArgListEntry Entry;
3793 unsigned AS = SrcPtrInfo.getAddrSpace();
3794 Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext(), AS);
3793 Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
37953794 Entry.Node = Dst; Args.push_back(Entry);
37963795 Entry.Node = Src; Args.push_back(Entry);
37973796 Entry.Node = Size; Args.push_back(Entry);
38463845 // Emit a library call.
38473846 TargetLowering::ArgListTy Args;
38483847 TargetLowering::ArgListEntry Entry;
3849 unsigned AS = SrcPtrInfo.getAddrSpace();
3850 Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext(), AS);
3848 Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
38513849 Entry.Node = Dst; Args.push_back(Entry);
38523850 Entry.Node = Src; Args.push_back(Entry);
38533851 Entry.Node = Size; Args.push_back(Entry);
38963894 return Result;
38973895
38983896 // Emit a library call.
3899 unsigned AS = DstPtrInfo.getAddrSpace();
3900 Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*getContext(), AS);
3897 Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*getContext());
39013898 TargetLowering::ArgListTy Args;
39023899 TargetLowering::ArgListEntry Entry;
39033900 Entry.Node = Dst; Entry.Ty = IntPtrTy;
154154 TargetLowering::ArgListEntry Entry;
155155
156156 // First argument: data pointer
157 unsigned AS = DstPtrInfo.getAddrSpace();
158 Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*DAG.getContext(), AS);
157 Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*DAG.getContext());
159158 Entry.Node = Dst;
160159 Entry.Ty = IntPtrTy;
161160 Args.push_back(Entry);
125125 return Base;
126126
127127 // Truncate/sext the offset to the pointer size.
128 unsigned PtrSize = TD.getPointerTypeSizeInBits(PtrVal->getType());
129 if (PtrSize != 64) {
130 int SExtAmount = 64-PtrSize;
128 unsigned AS = PtrVal->getType()->isPointerTy() ?
129 cast(PtrVal->getType())->getAddressSpace() : 0;
130 if (TD.getPointerSizeInBits(AS) != 64) {
131 int SExtAmount = 64-TD.getPointerSizeInBits(AS);
131132 Offset = (Offset << SExtAmount) >> SExtAmount;
132133 }
133134
149150 // Handle casts to pointers by changing them into casts to the appropriate
150151 // integer type. This promotes constant folding and simplifies this code.
151152 Constant *Op = CE->getOperand(0);
152 Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CE->getType()),
153 Op = ConstantExpr::getIntegerCast(Op, TD.getIntPtrType(CV->getContext()),
153154 false/*ZExt*/);
154155 return LowerConstant(Op, AP);
155156 }
15111511
15121512 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
15131513 bool isPPC64 = (PtrVT == MVT::i64);
1514 unsigned AS = 0;
15151514 Type *IntPtrTy =
15161515 DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType(
1517 *DAG.getContext(), AS);
1516 *DAG.getContext());
15181517
15191518 TargetLowering::ArgListTy Args;
15201519 TargetLowering::ArgListEntry Entry;
6363 }
6464
6565 LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef TD) {
66 return wrap(unwrap(TD)->getIntPtrType(getGlobalContext(), 0));
66 return wrap(unwrap(TD)->getIntPtrType(getGlobalContext()));
6767 }
6868
6969 LLVMTypeRef LLVMIntPtrTypeForAS(LLVMTargetDataRef TD, unsigned AS) {
281281 bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
282282 const X86AddressMode &AM) {
283283 // Handle 'null' like i32/i64 0.
284 if (isa(Val)) {
285 Val = Constant::getNullValue(TD.getIntPtrType(Val->getType()));
286 }
284 if (isa(Val))
285 Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext()));
287286
288287 // If this is a store of a simple constant, fold the constant into the store.
289288 if (const ConstantInt *CI = dyn_cast(Val)) {
894893 if (Op0Reg == 0) return false;
895894
896895 // Handle 'null' like i32/i64 0.
897 if (isa(Op1)) {
898 Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getType()));
899 }
896 if (isa(Op1))
897 Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getContext()));
900898
901899 // We have two options: compare with register or immediate. If the RHS of
902900 // the compare is an immediate that we can fold into this compare, use
5353 if (const char *bzeroEntry = V &&
5454 V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
5555 EVT IntPtr = TLI.getPointerTy();
56 unsigned AS = DstPtrInfo.getAddrSpace();
57 Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext(), AS);
56 Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
5857 TargetLowering::ArgListTy Args;
5958 TargetLowering::ArgListEntry Entry;
6059 Entry.Node = Dst;
476476 }
477477
478478 // Lower to a call to __misaligned_load(BasePtr).
479 unsigned AS = LD->getAddressSpace();
480 Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext(), AS);
479 Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
481480 TargetLowering::ArgListTy Args;
482481 TargetLowering::ArgListEntry Entry;
483482
536535 }
537536
538537 // Lower to a call to __misaligned_store(BasePtr, Value).
539 unsigned AS = ST->getAddressSpace();
540 Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext(), AS);
538 Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
541539 TargetLowering::ArgListTy Args;
542540 TargetLowering::ArgListEntry Entry;
543541
14991499 unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
15001500 if (StructType *ST = dyn_cast(FieldTy))
15011501 TypeSize = TD->getStructLayout(ST)->getSizeInBytes();
1502 Type *IntPtrTy = TD->getIntPtrType(GV->getType());
1502 Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
15031503 Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
15041504 ConstantInt::get(IntPtrTy, TypeSize),
15051505 NElems, 0,
17291729 // If this is a fixed size array, transform the Malloc to be an alloc of
17301730 // structs. malloc [100 x struct],1 -> malloc struct, 100
17311731 if (ArrayType *AT = dyn_cast(getMallocAllocatedType(CI, TLI))) {
1732 Type *IntPtrTy = TD->getIntPtrType(GV->getType());
1732 Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
17331733 unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
17341734 Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
17351735 Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
205205 return true;
206206 if (Ty1->getTypeID() != Ty2->getTypeID()) {
207207 if (TD) {
208 if (isa(Ty1) && Ty2 == TD->getIntPtrType(Ty1)) return true;
209 if (isa(Ty2) && Ty1 == TD->getIntPtrType(Ty2)) return true;
208 LLVMContext &Ctx = Ty1->getContext();
209 if (isa(Ty1) && Ty2 == TD->getIntPtrType(Ctx)) return true;
210 if (isa(Ty2) && Ty1 == TD->getIntPtrType(Ctx)) return true;
210211 }
211212 return false;
212213 }
207207 bool ShouldChangeType(Type *From, Type *To) const;
208208 Value *dyn_castNegVal(Value *V) const;
209209 Value *dyn_castFNegVal(Value *V) const;
210 Type *FindElementAtOffset(Type *Ty, int64_t Offset, Type *IntPtrTy,
210 Type *FindElementAtOffset(Type *Ty, int64_t Offset,
211211 SmallVectorImpl &NewIndices);
212212 Instruction *FoldOpIntoSelect(Instruction &Op, SelectInst *SI);
213213
995995 // Conversion is ok if changing from one pointer type to another or from
996996 // a pointer to an integer of the same size.
997997 !((OldRetTy->isPointerTy() || !TD ||
998 OldRetTy == TD->getIntPtrType(NewRetTy)) &&
998 OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
999999 (NewRetTy->isPointerTy() || !TD ||
1000 NewRetTy == TD->getIntPtrType(OldRetTy))))
1000 NewRetTy == TD->getIntPtrType(Caller->getContext()))))
10011001 return false; // Cannot transform this return value.
10021002
10031003 if (!Caller->use_empty() &&
10561056
10571057 // Converting from one pointer type to another or between a pointer and an
10581058 // integer of the same size is safe even if we do not have a body.
1059 // FIXME: Not sure what to do here, so setting AS to 0.
1060 // How can the AS for a function call be outside the default?
10611059 bool isConvertible = ActTy == ParamTy ||
10621060 (TD && ((ParamTy->isPointerTy() ||
1063 ParamTy == TD->getIntPtrType(ActTy)) &&
1061 ParamTy == TD->getIntPtrType(Caller->getContext())) &&
10641062 (ActTy->isPointerTy() ||
1065 ActTy == TD->getIntPtrType(ParamTy))));
1063 ActTy == TD->getIntPtrType(Caller->getContext()))));
10661064 if (Callee->isDeclaration() && !isConvertible) return false;
10671065 }
10681066
2929 Scale = 0;
3030 return ConstantInt::get(Val->getType(), 0);
3131 }
32
32
3333 if (BinaryOperator *I = dyn_cast(Val)) {
3434 // Cannot look past anything that might overflow.
3535 OverflowingBinaryOperator *OBI = dyn_cast(Val);
4646 Offset = 0;
4747 return I->getOperand(0);
4848 }
49
49
5050 if (I->getOpcode() == Instruction::Mul) {
5151 // This value is scaled by 'RHS'.
5252 Scale = RHS->getZExtValue();
5353 Offset = 0;
5454 return I->getOperand(0);
5555 }
56
56
5757 if (I->getOpcode() == Instruction::Add) {
58 // We have X+C. Check to see if we really have (X*C2)+C1,
58 // We have X+C. Check to see if we really have (X*C2)+C1,
5959 // where C1 is divisible by C2.
6060 unsigned SubScale;
61 Value *SubVal =
61 Value *SubVal =
6262 DecomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
6363 Offset += RHS->getZExtValue();
6464 Scale = SubScale;
8181 if (!TD) return 0;
8282
8383 PointerType *PTy = cast(CI.getType());
84
84
8585 BuilderTy AllocaBuilder(*Builder);
8686 AllocaBuilder.SetInsertPoint(AI.getParent(), &AI);
8787
109109 uint64_t ArrayOffset;
110110 Value *NumElements = // See if the array size is a decomposable linear expr.
111111 DecomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);
112
112
113113 // If we can now satisfy the modulus, by using a non-1 scale, we really can
114114 // do the xform.
115115 if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
124124 // Insert before the alloca, not before the cast.
125125 Amt = AllocaBuilder.CreateMul(Amt, NumElements);
126126 }
127
127
128128 if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
129129 Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
130130 Offset, true);
131131 Amt = AllocaBuilder.CreateAdd(Amt, Off);
132132 }
133
133
134134 AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
135135 New->setAlignment(AI.getAlignment());
136136 New->takeName(&AI);
137
137
138138 // If the allocation has multiple real uses, insert a cast and change all
139139 // things that used it to use the new cast. This will also hack on CI, but it
140140 // will die soon.
147147 return ReplaceInstUsesWith(CI, New);
148148 }
149149
150 /// EvaluateInDifferentType - Given an expression that
150 /// EvaluateInDifferentType - Given an expression that
151151 /// CanEvaluateTruncated or CanEvaluateSExtd returns true for, actually
152152 /// insert the code to evaluate the expression.
153 Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
153 Value *InstCombiner::EvaluateInDifferentType(Value *V, Type *Ty,
154154 bool isSigned) {
155155 if (Constant *C = dyn_cast(V)) {
156156 C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
180180 Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
181181 Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
182182 break;
183 }
183 }
184184 case Instruction::Trunc:
185185 case Instruction::ZExt:
186186 case Instruction::SExt:
189189 // new.
190190 if (I->getOperand(0)->getType() == Ty)
191191 return I->getOperand(0);
192
192
193193 // Otherwise, must be the same type of cast, so just reinsert a new one.
194194 // This also handles the case of zext(trunc(x)) -> zext(x).
195195 Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
211211 Res = NPN;
212212 break;
213213 }
214 default:
214 default:
215215 // TODO: Can handle more cases here.
216216 llvm_unreachable("Unreachable!");
217217 }
218
218
219219 Res->takeName(I);
220220 return InsertNewInstWith(Res, *I);
221221 }
223223
224224 /// This function is a wrapper around CastInst::isEliminableCastPair. It
225225 /// simply extracts arguments and returns what that function returns.
226 static Instruction::CastOps
226 static Instruction::CastOps
227227 isEliminableCastPair(
228228 const CastInst *CI, ///< The first cast instruction
229229 unsigned opcode, ///< The opcode of the second cast instruction
252252 if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
253253 (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
254254 Res = 0;
255
255
256256 return Instruction::CastOps(Res);
257257 }
258258
264264 Type *Ty) {
265265 // Noop casts and casts of constants should be eliminated trivially.
266266 if (V->getType() == Ty || isa(V)) return false;
267
267
268268 // If this is another cast that can be eliminated, we prefer to have it
269269 // eliminated.
270270 if (const CastInst *CI = dyn_cast(V))
271271 if (isEliminableCastPair(CI, opc, Ty, TD))
272272 return false;
273
273
274274 // If this is a vector sext from a compare, then we don't want to break the
275275 // idiom where each element of the extended vector is either zero or all ones.
276276 if (opc == Instruction::SExt && isa(V) && Ty->isVectorTy())
277277 return false;
278
278
279279 return true;
280280 }
281281
287287 // Many cases of "cast of a cast" are eliminable. If it's eliminable we just
288288 // eliminate it now.
289289 if (CastInst *CSrc = dyn_cast(Src)) { // A->B->C cast
290 if (Instruction::CastOps opc =
290 if (Instruction::CastOps opc =
291291 isEliminableCastPair(CSrc, CI.getOpcode(), CI.getType(), TD)) {
292292 // The first cast (CSrc) is eliminable so we need to fix up or replace
293293 // the second cast (CI). CSrc will then have a good chance of being dead.
310310 if (Instruction *NV = FoldOpIntoPhi(CI))
311311 return NV;
312312 }
313
313
314314 return 0;
315315 }
316316
329329 // We can always evaluate constants in another type.
330330 if (isa(V))
331331 return true;
332
332
333333 Instruction *I = dyn_cast(V);
334334 if (!I) return false;
335
335
336336 Type *OrigTy = V->getType();
337
337
338338 // If this is an extension from the dest type, we can eliminate it, even if it
339339 // has multiple uses.
340 if ((isa(I) || isa(I)) &&
340 if ((isa(I) || isa(I)) &&
341341 I->getOperand(0)->getType() == Ty)
342342 return true;
343343
422422 // TODO: Can handle more cases here.
423423 break;
424424 }
425
425
426426 return false;
427427 }
428428
429429 Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
430430 if (Instruction *Result = commonCastTransforms(CI))
431431 return Result;
432
433 // See if we can simplify any instructions used by the input whose sole
432
433 // See if we can simplify any instructions used by the input whose sole
434434 // purpose is to compute bits we don't care about.
435435 if (SimplifyDemandedInstructionBits(CI))
436436 return &CI;
437
437
438438 Value *Src = CI.getOperand(0);
439439 Type *DestTy = CI.getType(), *SrcTy = Src->getType();
440
440
441441 // Attempt to truncate the entire input expression tree to the destination
442442 // type. Only do this if the dest type is a simple type, don't convert the
443443 // expression tree to something weird like i93 unless the source is also
444444 // strange.
445445 if ((DestTy->isVectorTy() || ShouldChangeType(SrcTy, DestTy)) &&
446446 CanEvaluateTruncated(Src, DestTy)) {
447
447
448448 // If this cast is a truncate, evaluting in a different type always
449449 // eliminates the cast, so it is always a win.
450450 DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
461461 Value *Zero = Constant::getNullValue(Src->getType());
462462 return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
463463 }
464
464
465465 // Transform trunc(lshr (zext A), Cst) to eliminate one type conversion.
466466 Value *A = 0; ConstantInt *Cst = 0;
467467 if (Src->hasOneUse() &&
471471 // ASize < MidSize and MidSize > ResultSize, but don't know the relation
472472 // between ASize and ResultSize.
473473 unsigned ASize = A->getType()->getPrimitiveSizeInBits();
474
474
475475 // If the shift amount is larger than the size of A, then the result is
476476 // known to be zero because all the input bits got shifted out.
477477 if (Cst->getZExtValue() >= ASize)
484484 Shift->takeName(Src);
485485 return CastInst::CreateIntegerCast(Shift, CI.getType(), false);
486486 }
487
487
488488 // Transform "trunc (and X, cst)" -> "and (trunc X), cst" so long as the dest
489489 // type isn't non-native.
490490 if (Src->hasOneUse() && isa(Src->getType()) &&
507507 // cast to integer to avoid the comparison.
508508 if (ConstantInt *Op1C = dyn_cast(ICI->getOperand(1))) {
509509 const APInt &Op1CV = Op1C->getValue();
510
510
511511 // zext (x x>>u31 true if signbit set.
512512 // zext (x >s -1) to i32 --> (x>>u31)^1 true if signbit clear.
513513 if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV == 0) ||
537537 // zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set.
538538 // zext (X != 1) to i32 --> X^1 iff X has only the low bit set.
539539 // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
540 if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
540 if ((Op1CV == 0 || Op1CV.isPowerOf2()) &&
541541 // This only works for EQ and NE
542542 ICI->isEquality()) {
543543 // If Op1C some other power of two, convert:
544544 uint32_t BitWidth = Op1C->getType()->getBitWidth();
545545 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
546546 ComputeMaskedBits(ICI->getOperand(0), KnownZero, KnownOne);
547
547
548548 APInt KnownZeroMask(~KnownZero);
549549 if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
550550 if (!DoXform) return ICI;
558558 Res = ConstantExpr::getZExt(Res, CI.getType());
559559 return ReplaceInstUsesWith(CI, Res);
560560 }
561
561
562562 uint32_t ShiftAmt = KnownZeroMask.logBase2();
563563 Value *In = ICI->getOperand(0);
564564 if (ShiftAmt) {
567567 In = Builder->CreateLShr(In, ConstantInt::get(In->getType(),ShiftAmt),
568568 In->getName()+".lobit");
569569 }
570
570
571571 if ((Op1CV != 0) == isNE) { // Toggle the low bit.
572572 Constant *One = ConstantInt::get(In->getType(), 1);
573573 In = Builder->CreateXor(In, One);
574574 }
575
575
576576 if (CI.getType() == In->getType())
577577 return ReplaceInstUsesWith(CI, In);
578578 return CastInst::CreateIntegerCast(In, CI.getType(), false/*ZExt*/);
645645 BitsToClear = 0;
646646 if (isa(V))
647647 return true;
648
648
649649 Instruction *I = dyn_cast(V);
650650 if (!I) return false;
651
651
652652 // If the input is a truncate from the destination type, we can trivially
653653 // eliminate it.
654654 if (isa(I) && I->getOperand(0)->getType() == Ty)
655655 return true;
656
656
657657 // We can't extend or shrink something that has multiple uses: doing so would
658658 // require duplicating the instruction in general, which isn't profitable.
659659 if (!I->hasOneUse()) return false;
660
660
661661 unsigned Opc = I->getOpcode(), Tmp;
662662 switch (Opc) {
663663 case Instruction::ZExt: // zext(zext(x)) -> zext(x).
677677 // These can all be promoted if neither operand has 'bits to clear'.
678678 if (BitsToClear == 0 && Tmp == 0)
679679 return true;
680
680
681681 // If the operation is an AND/OR/XOR and the bits to clear are zero in the
682682 // other side, BitsToClear is ok.
683683 if (Tmp == 0 &&
690690 APInt::getHighBitsSet(VSize, BitsToClear)))
691691 return true;
692692 }
693
693
694694 // Otherwise, we don't know how to analyze this BitsToClear case yet.
695695 return false;
696
696
697697 case Instruction::LShr:
698698 // We can promote lshr(x, cst) if we can promote x. This requires the
699699 // ultimate 'and' to clear out the high zero bits we're clearing out though.
715715 Tmp != BitsToClear)
716716 return false;
717717 return true;
718
718
719719 case Instruction::PHI: {
720720 // We can change a phi if we can change all operands. Note that we never
721721 // get into trouble with cyclic PHIs here because we only consider
742742 // eliminated before we try to optimize this zext.
743743 if (CI.hasOneUse() && isa(CI.use_back()))
744744 return 0;
745
745
746746 // If one of the common conversion will work, do it.
747747 if (Instruction *Result = commonCastTransforms(CI))
748748 return Result;
749749
750 // See if we can simplify any instructions used by the input whose sole
750 // See if we can simplify any instructions used by the input whose sole
751751 // purpose is to compute bits we don't care about.
752752 if (SimplifyDemandedInstructionBits(CI))
753753 return &CI;
754
754
755755 Value *Src = CI.getOperand(0);
756756 Type *SrcTy = Src->getType(), *DestTy = CI.getType();
757
757
758758 // Attempt to extend the entire input expression tree to the destination
759759 // type. Only do this if the dest type is a simple type, don't convert the
760760 // expression tree to something weird like i93 unless the source is also
761761 // strange.
762762 unsigned BitsToClear;
763763 if ((DestTy->isVectorTy() || ShouldChangeType(SrcTy, DestTy)) &&
764 CanEvaluateZExtd(Src, DestTy, BitsToClear)) {
764 CanEvaluateZExtd(Src, DestTy, BitsToClear)) {
765765 assert(BitsToClear < SrcTy->getScalarSizeInBits() &&
766766 "Unreasonable BitsToClear");
767
767
768768 // Okay, we can transform this! Insert the new expression now.
769769 DEBUG(dbgs() << "ICE: EvaluateInDifferentType converting expression type"
770770 " to avoid zero extend: " << CI);
771771 Value *Res = EvaluateInDifferentType(Src, DestTy, false);
772772 assert(Res->getType() == DestTy);
773
773
774774 uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits()-BitsToClear;
775775 uint32_t DestBitSize = DestTy->getScalarSizeInBits();
776
776
777777 // If the high bits are already filled with zeros, just replace this
778778 // cast with the result.
779779 if (MaskedValueIsZero(Res, APInt::getHighBitsSet(DestBitSize,
780780 DestBitSize-SrcBitsKept)))
781781 return ReplaceInstUsesWith(CI, Res);
782
782
783783 // We need to emit an AND to clear the high bits.
784784 Constant *C = ConstantInt::get(Res->getType(),
785785 APInt::getLowBitsSet(DestBitSize, SrcBitsKept));
791791 // 'and' which will be much cheaper than the pair of casts.
792792 if (TruncInst *CSrc = dyn_cast(Src)) { // A->B->C cast
793793 // TODO: Subsume this into EvaluateInDifferentType.
794
794
795795 // Get the sizes of the types involved. We know that the intermediate type
796796 // will be smaller than A or C, but don't know the relation between A and C.
797797 Value *A = CSrc->getOperand(0);
808808 Value *And = Builder->CreateAnd(A, AndConst, CSrc->getName()+".mask");
809809 return new ZExtInst(And, CI.getType());
810810 }
811
811
812812 if (SrcSize == DstSize) {
813813 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
814814 return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
817817 if (SrcSize > DstSize) {
818818 Value *Trunc = Builder->CreateTrunc(A, CI.getType());
819819 APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
820 return BinaryOperator::CreateAnd(Trunc,
820 return BinaryOperator::CreateAnd(Trunc,
821821 ConstantInt::get(Trunc->getType(),
822822 AndValue));
823823 }
875875 Value *New = Builder->CreateZExt(X, CI.getType());
876876 return BinaryOperator::CreateXor(New, ConstantInt::get(CI.getType(), 1));
877877 }
878
878
879879 return 0;
880880 }
881881
988988 // If this is a constant, it can be trivially promoted.
989989 if (isa(V))
990990 return true;
991
991
992992 Instruction *I = dyn_cast(V);
993993 if (!I) return false;
994
994
995995 // If this is a truncate from the dest type, we can trivially eliminate it.
996996 if (isa(I) && I->getOperand(0)->getType() == Ty)
997997 return true;
998
998
999999 // We can't extend or shrink something that has multiple uses: doing so would
10001000 // require duplicating the instruction in general, which isn't profitable.
10011001 if (!I->hasOneUse()) return false;
10141014 // These operators can all arbitrarily be extended if their inputs can.
10151015 return CanEvaluateSExtd(I->getOperand(0), Ty) &&
10161016 CanEvaluateSExtd(I->getOperand(1), Ty);
1017
1017
10181018 //case Instruction::Shl: TODO
10191019 //case Instruction::LShr: TODO
1020
1020
10211021 case Instruction::Select:
10221022 return CanEvaluateSExtd(I->getOperand(1), Ty) &&
10231023 CanEvaluateSExtd(I->getOperand(2), Ty);
1024
1024
10251025 case Instruction::PHI: {
10261026 // We can change a phi if we can change all operands. Note that we never
10271027 // get into trouble with cyclic PHIs here because we only consider
10351035 // TODO: Can handle more cases here.
10361036 break;
10371037 }
1038
1038
10391039 return false;
10401040 }
10411041
10441044 // eliminated before we try to optimize this zext.
10451045 if (CI.hasOneUse() && isa(CI.use_back()))
10461046 return 0;
1047
1047
10481048 if (Instruction *I = commonCastTransforms(CI))
10491049 return I;
1050
1051 // See if we can simplify any instructions used by the input whose sole
1050
1051 // See if we can simplify any instructions used by the input whose sole
10521052 // purpose is to compute bits we don't care about.
10531053 if (SimplifyDemandedInstructionBits(CI))
10541054 return &CI;
1055
1055
10561056 Value *Src = CI.getOperand(0);
10571057 Type *SrcTy = Src->getType(), *DestTy = CI.getType();
10581058
10751075 // cast with the result.
10761076 if (ComputeNumSignBits(Res) > DestBitSize - SrcBitSize)
10771077 return ReplaceInstUsesWith(CI, Res);
1078
1078
10791079 // We need to emit a shl + ashr to do the sign extend.
10801080 Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
10811081 return BinaryOperator::CreateAShr(Builder->CreateShl(Res, ShAmt, "sext"),
10881088 if (TI->hasOneUse() && TI->getOperand(0)->getType() == DestTy) {
10891089 uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
10901090 uint32_t DestBitSize = DestTy->getScalarSizeInBits();
1091
1091
10921092 // We need to emit a shl + ashr to do the sign extend.
10931093 Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
10941094 Value *Res = Builder->CreateShl(TI->getOperand(0), ShAmt, "sext");
11241124 A = Builder->CreateShl(A, ShAmtV, CI.getName());
11251125 return BinaryOperator::CreateAShr(A, ShAmtV);
11261126 }
1127
1127
11281128 return 0;
11291129 }
11301130
11461146 if (Instruction *I = dyn_cast(V))
11471147 if (I->getOpcode() == Instruction::FPExt)
11481148 return LookThroughFPExtensions(I->getOperand(0));
1149
1149
11501150 // If this value is a constant, return the constant in the smallest FP type
11511151 // that can accurately represent it. This allows us to turn
11521152 // (float)((double)X+2.0) into x+2.0f.
11651165 return V;
11661166 // Don't try to shrink to various long double types.
11671167 }
1168
1168
11691169 return V;
11701170 }
11711171
11721172 Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
11731173 if (Instruction *I = commonCastTransforms(CI))
11741174 return I;
1175
1175
11761176 // If we have fptrunc(fadd (fpextend x), (fpextend y)), where x and y are
11771177 // smaller than the destination type, we can eliminate the truncate by doing
11781178 // the add as the smaller type. This applies to fadd/fsub/fmul/fdiv as well
11891189 Type *SrcTy = OpI->getType();
11901190 Value *LHSTrunc = LookThroughFPExtensions(OpI->getOperand(0));
11911191 Value *RHSTrunc = LookThroughFPExtensions(OpI->getOperand(1));
1192 if (LHSTrunc->getType() != SrcTy &&
1192 if (LHSTrunc->getType() != SrcTy &&
11931193 RHSTrunc->getType() != SrcTy) {
11941194 unsigned DstSize = CI.getType()->getScalarSizeInBits();
11951195 // If the source types were both smaller than the destination type of
12011201 return BinaryOperator::Create(OpI->getOpcode(), LHSTrunc, RHSTrunc);
12021202 }
12031203 }
1204 break;
1205 }
1206 }
1207
1204 break;
1205 }
1206 }
1207
12081208 // Fold (fptrunc (sqrt (fpext x))) -> (sqrtf x)
12091209 CallInst *Call = dyn_cast(CI.getOperand(0));
12101210 if (Call && Call->getCalledFunction() && TLI->has(LibFunc::sqrtf) &&
12191219 Arg->getOperand(0)->getType()->isFloatTy()) {
12201220 Function *Callee = Call->getCalledFunction();
12211221 Module *M = CI.getParent()->getParent()->getParent();
1222 Constant *SqrtfFunc = M->getOrInsertFunction("sqrtf",
1222 Constant *SqrtfFunc = M->getOrInsertFunction("sqrtf",
12231223 Callee->getAttributes(),
12241224 Builder->getFloatTy(),
12251225 Builder->getFloatTy(),
12271227 CallInst *ret = CallInst::Create(SqrtfFunc, Arg->getOperand(0),
12281228 "sqrtfcall");
12291229 ret->setAttributes(Callee->getAttributes());
1230
1231
1230
1231
12321232 // Remove the old Call. With -fmath-errno, it won't get marked readnone.
12331233 ReplaceInstUsesWith(*Call, UndefValue::get(Call->getType()));
12341234 EraseInstFromFunction(*Call);
12351235 return ret;
12361236 }
12371237 }
1238
1238
12391239 return 0;
12401240 }
12411241
12531253 // This is safe if the intermediate type has enough bits in its mantissa to
12541254 // accurately represent all values of X. For example, do not do this with
12551255 // i64->float->i64. This is also safe for sitofp case, because any negative
1256 // 'X' value would cause an undefined result for the fptoui.
1256 // 'X' value would cause an undefined result for the fptoui.
12571257 if ((isa(OpI) || isa(OpI)) &&
12581258 OpI->getOperand(0)->getType() == FI.getType() &&
12591259 (int)FI.getType()->getScalarSizeInBits() < /*extra bit for sign */
12671267 Instruction *OpI = dyn_cast(FI.getOperand(0));
12681268 if (OpI == 0)
12691269 return commonCastTransforms(FI);
1270
1270
12711271 // fptosi(sitofp(X)) --> X
12721272 // fptosi(uitofp(X)) --> X
12731273 // This is safe if the intermediate type has enough bits in its mantissa to
12741274 // accurately represent all values of X. For example, do not do this with
12751275 // i64->float->i64. This is also safe for sitofp case, because any negative
1276 // 'X' value would cause an undefined result for the fptoui.
1276 // 'X' value would cause an undefined result for the fptoui.
12771277 if ((isa(OpI) || isa(OpI)) &&
12781278 OpI->getOperand(0)->getType() == FI.getType() &&
12791279 (int)FI.getType()->getScalarSizeInBits() <=
12801280 OpI->getType()->getFPMantissaWidth())
12811281 return ReplaceInstUsesWith(FI, OpI->getOperand(0));
1282
1282
12831283 return commonCastTransforms(FI);
12841284 }
12851285
13001300 if (CI.getOperand(0)->getType()->getScalarSizeInBits() >
13011301 TD->getPointerSizeInBits(AS)) {
13021302 Value *P = Builder->CreateTrunc(CI.getOperand(0),
1303 TD->getIntPtrType(CI.getType()));
1303 TD->getIntPtrType(CI.getContext()));
13041304 return new IntToPtrInst(P, CI.getType());
13051305 }
13061306 if (CI.getOperand(0)->getType()->getScalarSizeInBits() <
13071307 TD->getPointerSizeInBits(AS)) {
13081308 Value *P = Builder->CreateZExt(CI.getOperand(0),
1309 TD->getIntPtrType(CI.getType()));
1309 TD->getIntPtrType(CI.getContext()));
13101310 return new IntToPtrInst(P, CI.getType());
13111311 }
13121312 }
1313
1313
13141314 if (Instruction *I = commonCastTransforms(CI))
13151315 return I;
13161316
13201320 /// @brief Implement the transforms for cast of pointer (bitcast/ptrtoint)
13211321 Instruction *InstCombiner::commonPointerCastTransforms(CastInst &CI) {
13221322 Value *Src = CI.getOperand(0);
1323
1323
13241324 if (GetElementPtrInst *GEP = dyn_cast(Src)) {
13251325 // If casting the result of a getelementptr instruction with no offset, turn
13261326 // this into a cast of the original pointer!
13271327 if (GEP->hasAllZeroIndices()) {
13281328 // Changing the cast operand is usually not a good idea but it is safe
1329 // here because the pointer operand is being replaced with another
1329 // here because the pointer operand is being replaced with another
13301330 // pointer operand so the opcode doesn't need to change.
13311331 Worklist.Add(GEP);
13321332 CI.setOperand(0, GEP->getOperand(0));
13331333 return &CI;
13341334 }
1335
1335
13361336 // If the GEP has a single use, and the base pointer is a bitcast, and the
13371337 // GEP computes a constant offset, see if we can convert these three
13381338 // instructions into fewer. This typically happens with unions and other
13471347 Type *GEPIdxTy =
13481348 cast(OrigBase->getType())->getElementType();
13491349 SmallVector NewIndices;
1350 Type *IntPtrTy = TD->getIntPtrType(OrigBase->getType());
1351 if (FindElementAtOffset(GEPIdxTy, Offset, IntPtrTy, NewIndices)) {
1350 if (FindElementAtOffset(GEPIdxTy, Offset, NewIndices)) {
13521351 // If we were able to index down into an element, create the GEP
13531352 // and bitcast the result. This eliminates one bitcast, potentially
13541353 // two.
13561355 Builder->CreateInBoundsGEP(OrigBase, NewIndices) :
13571356 Builder->CreateGEP(OrigBase, NewIndices);
13581357 NGEP->takeName(GEP);
1359
1358
13601359 if (isa(CI))
13611360 return new BitCastInst(NGEP, CI.getType());
13621361 assert(isa(CI));
13631362 return new PtrToIntInst(NGEP, CI.getType());
1364 }
1365 }
1366 }
1367
1363 }
1364 }
1365 }
1366
13681367 return commonCastTransforms(CI);
13691368 }
13701369
13761375 if (TD) {
13771376 if (CI.getType()->getScalarSizeInBits() < TD->getPointerSizeInBits(AS)) {
13781377 Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
1379 TD->getIntPtrType(CI.getContext(), AS));
1378 TD->getIntPtrType(CI.getContext()));
13801379 return new TruncInst(P, CI.getType());
13811380 }
13821381 if (CI.getType()->getScalarSizeInBits() > TD->getPointerSizeInBits(AS)) {
13831382 Value *P = Builder->CreatePtrToInt(CI.getOperand(0),
1384 TD->getIntPtrType(CI.getContext(), AS));
1383 TD->getIntPtrType(CI.getContext()));
13851384 return new ZExtInst(P, CI.getType());
13861385 }
13871386 }
1388
1387
13891388 return commonPointerCastTransforms(CI);
13901389 }
13911390
14001399 // element size, or the input is a multiple of the output element size.
14011400 // Convert the input type to have the same element type as the output.
14021401 VectorType *SrcTy = cast(InVal->getType());
1403
1402
14041403 if (SrcTy->getElementType() != DestTy->getElementType()) {
14051404 // The input types don't need to be identical, but for now they must be the
14061405 // same size. There is no specific reason we couldn't handle things like
14071406 // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
1408 // there yet.
1407 // there yet.
14091408 if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
14101409 DestTy->getElementType()->getPrimitiveSizeInBits())
14111410 return 0;
1412
1411
14131412 SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
14141413 InVal = IC.Builder->CreateBitCast(InVal, SrcTy);
14151414 }
1416
1415
14171416 // Now that the element types match, get the shuffle mask and RHS of the
14181417 // shuffle to use, which depends on whether we're increasing or decreasing the
14191418 // size of the input.
14201419 SmallVector ShuffleMask;
14211420 Value *V2;
1422
1421
14231422 if (SrcTy->getNumElements() > DestTy->getNumElements()) {
14241423 // If we're shrinking the number of elements, just shuffle in the low
14251424 // elements from the input and use undef as the second shuffle input.
14261425 V2 = UndefValue::get(SrcTy);
14271426 for (unsigned i = 0, e = DestTy->getNumElements(); i != e; ++i)
14281427 ShuffleMask.push_back(i);
1429
1428
14301429 } else {
14311430 // If we're increasing the number of elements, shuffle in all of the
14321431 // elements from InVal and fill the rest of the result elements with zeros
14401439 for (unsigned i = 0, e = DestTy->getNumElements()-SrcElts; i != e; ++i)
14411440 ShuffleMask.push_back(SrcElts);
14421441 }
1443
1442
14441443 return new ShuffleVectorInst(InVal, V2,
14451444 ConstantDataVector::get(V2->getContext(),
14461445 ShuffleMask));
14671466 Type *VecEltTy) {
14681467 // Undef values never contribute useful bits to the result.
14691468 if (isa(V)) return true;
1470
1469
14711470 // If we got down to a value of the right type, we win, try inserting into the
14721471 // right element.
14731472 if (V->getType() == VecEltTy) {
14751474 if (Constant *C = dyn_cast(V))
14761475 if (C->isNullValue())
14771476 return true;
1478
1477
14791478 // Fail if multiple elements are inserted into this slot.
14801479 if (ElementIndex >= Elements.size() || Elements[ElementIndex] != 0)
14811480 return false;
1482
1481
14831482 Elements[ElementIndex] = V;
14841483 return true;
14851484 }
1486
1485
14871486 if (Constant *C = dyn_cast(V)) {
14881487 // Figure out the # elements this provides, and bitcast it or slice it up
14891488 // as required.
14941493 if (NumElts == 1)
14951494 return CollectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
14961495 ElementIndex, Elements, VecEltTy);
1497
1496
14981497 // Okay, this is a constant that covers multiple elements. Slice it up into
14991498 // pieces and insert each element-sized piece into the vector.
15001499 if (!isa(C->getType()))
15021501 C->getType()->getPrimitiveSizeInBits()));
15031502 unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
15041503 Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);
1505
1504
15061505 for (unsigned i = 0; i != NumElts; ++i) {
15071506 Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
15081507 i*ElementSize));
15121511 }
15131512 return true;
15141513 }
1515
1514
15161515 if (!V->hasOneUse()) return false;
1517
1516
15181517 Instruction *I = dyn_cast(V);
15191518 if (I == 0) return false;
15201519 switch (I->getOpcode()) {
15211520 default: return false; // Unhandled case.
15221521 case Instruction::BitCast:
15231522 return CollectInsertionElements(I->getOperand(0), ElementIndex,
1524 Elements, VecEltTy);
1523 Elements, VecEltTy);
15251524 case Instruction::ZExt:
15261525 if (!isMultipleOfTypeSize(
15271526 I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
15281527 VecEltTy))
15291528 return false;
15301529 return CollectInsertionElements(I->getOperand(0), ElementIndex,
1531 Elements, VecEltTy);
1530 Elements, VecEltTy);
15321531 case Instruction::Or:
15331532 return CollectInsertionElements(I->getOperand(0), ElementIndex,
15341533 Elements, VecEltTy) &&
15401539 if (CI == 0) return false;
15411540 if (!isMultipleOfTypeSize(CI->getZExtValue(), VecEltTy)) return false;
15421541 unsigned IndexShift = getTypeSizeIndex(CI->getZExtValue(), VecEltTy);
1543
1542
15441543 return CollectInsertionElements(I->getOperand(0), ElementIndex+IndexShift,
15451544 Elements, VecEltTy);
15461545 }
1547
1546
15481547 }
15491548 }
15501549
15791578 Value *Result = Constant::getNullValue(CI.getType());
15801579 for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
15811580 if (Elements[i] == 0) continue; // Unset element.
1582
1581
15831582 Result = IC.Builder->CreateInsertElement(Result, Elements[i],
15841583 IC.Builder->getInt32(i));
15851584 }
1586
1585
15871586 return Result;
15881587 }
15891588
16111610 VecTy->getPrimitiveSizeInBits() / DestWidth);
16121611 VecInput = IC.Builder->CreateBitCast(VecInput, VecTy);
16131612 }
1614
1613
16151614 return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(0));
16161615 }
16171616 }
1618
1617
16191618 // bitcast(trunc(lshr(bitcast(somevector), cst))
16201619 ConstantInt *ShAmt = 0;
16211620 if (match(Src, m_Trunc(m_LShr(m_BitCast(m_Value(VecInput)),
16321631 VecTy->getPrimitiveSizeInBits() / DestWidth);
16331632 VecInput = IC.Builder->CreateBitCast(VecInput, VecTy);
16341633 }
1635
1634
16361635 unsigned Elt = ShAmt->getZExtValue() / DestWidth;
16371636 return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(Elt));
16381637 }
16561655 PointerType *SrcPTy = cast(SrcTy);
16571656 Type *DstElTy = DstPTy->getElementType();
16581657 Type *SrcElTy = SrcPTy->getElementType();
1659
1658
16601659 // If the address spaces don't match, don't eliminate the bitcast, which is
16611660 // required for changing types.
16621661 if (SrcPTy->getAddressSpace() != DstPTy->getAddressSpace())
16631662 return 0;
1664
1663
16651664 // If we are casting a alloca to a pointer to a type of the same
16661665 // size, rewrite the allocation instruction to allocate the "right" type.
16671666 // There is no need to modify malloc calls because it is their bitcast that
16691668 if (AllocaInst *AI = dyn_cast(Src))
16701669 if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
16711670 return V;
1672
1671
16731672 // If the source and destination are pointers, and this cast is equivalent
16741673 // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
16751674 // This can enhance SROA and other transforms that want type-safe pointers.
16761675 Constant *ZeroUInt =
16771676 Constant::getNullValue(Type::getInt32Ty(CI.getContext()));
16781677 unsigned NumZeros = 0;
1679 while (SrcElTy != DstElTy &&
1678 while (SrcElTy != DstElTy &&
16801679 isa(SrcElTy) && !SrcElTy->isPointerTy() &&
16811680 SrcElTy->getNumContainedTypes() /* not "{}" */) {
16821681 SrcElTy = cast(SrcElTy)->getTypeAtIndex(ZeroUInt);
16891688 return GetElementPtrInst::CreateInBounds(Src, Idxs);
16901689 }
16911690 }
1692
1691
16931692 // Try to optimize int -> float bitcasts.
16941693 if ((DestTy->isFloatTy() || DestTy->isDoubleTy()) && isa(SrcTy))
16951694 if (Instruction *I = OptimizeIntToFloatBitCast(CI, *this))
17021701 Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
17031702 // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
17041703 }
1705
1704
17061705 if (isa(SrcTy)) {
17071706 // If this is a cast from an integer to vector, check to see if the input
17081707 // is a trunc or zext of a bitcast from vector. If so, we can replace all
17151714 cast(DestTy), *this))
17161715 return I;
17171716 }
1718
1717
17191718 // If the input is an 'or' instruction, we may be doing shifts and ors to
17201719 // assemble the elements of the vector manually. Try to rip the code out
17211720 // and replace it with insertelements.
17261725
17271726 if (VectorType *SrcVTy = dyn_cast(SrcTy)) {
17281727 if (SrcVTy->getNumElements() == 1 && !DestTy->isVectorTy()) {
1729 Value *Elem =
1728 Value *Elem =
17301729 Builder->CreateExtractElement(Src,
17311730 Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
17321731 return CastInst::Create(Instruction::BitCast, Elem, DestTy);
17361735 if (ShuffleVectorInst *SVI = dyn_cast(Src)) {
17371736 // Okay, we have (bitcast (shuffle ..)). Check to see if this is
17381737 // a bitcast to a vector with the same # elts.
1739 if (SVI->hasOneUse() && DestTy->isVectorTy() &&
1738 if (SVI->hasOneUse() && DestTy->isVectorTy() &&
17401739 cast(DestTy)->getNumElements() ==
17411740 SVI->getType()->getNumElements() &&
17421741 SVI->getType()->getNumElements() ==
17451744 // If either of the operands is a cast from CI.getType(), then
17461745 // evaluating the shuffle in the casted destination's type will allow
17471746 // us to eliminate at least one cast.
1748 if (((Tmp = dyn_cast(SVI->getOperand(0))) &&
1747 if (((Tmp = dyn_cast(SVI->getOperand(0))) &&
17491748 Tmp->getOperand(0)->getType() == DestTy) ||
1750 ((Tmp = dyn_cast(SVI->getOperand(1))) &&
1749 ((Tmp = dyn_cast(SVI->getOperand(1))) &&
17511750 Tmp->getOperand(0)->getType() == DestTy)) {
17521751 Value *LHS = Builder->CreateBitCast(SVI->getOperand(0), DestTy);
17531752 Value *RHS = Builder->CreateBitCast(SVI->getOperand(1), DestTy);
17571756 }
17581757 }
17591758 }
1760
1759
17611760 if (SrcTy->isPointerTy())
17621761 return commonPointerCastTransforms(CI);
17631762 return commonCastTransforms(CI);
370370 // an inbounds GEP because the index can't be out of range.
371371 if (!GEP->isInBounds() &&
372372 Idx->getType()->getPrimitiveSizeInBits() > TD->getPointerSizeInBits(AS))
373 Idx = Builder->CreateTrunc(Idx, TD->getIntPtrType(Idx->getContext(), AS));
373 Idx = Builder->CreateTrunc(Idx, TD->getIntPtrType(Idx->getContext()));
374374
375375 // If the comparison is only true for one or two elements, emit direct
376376 // comparisons.
538538 // we don't need to bother extending: the extension won't affect where the
539539 // computation crosses zero.
540540 if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
541 Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext(), AS);
541 Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
542542 VariableIdx = IC.Builder->CreateTrunc(VariableIdx, IntPtrTy);
543543 }
544544 return VariableIdx;
560560 return 0;
561561
562562 // Okay, we can do this evaluation. Start by converting the index to intptr.
563 Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext(), AS);
563 Type *IntPtrTy = TD.getIntPtrType(VariableIdx->getContext());
564564 if (VariableIdx->getType() != IntPtrTy)
565565 VariableIdx = IC.Builder->CreateIntCast(VariableIdx, IntPtrTy,
566566 true /*Signed*/);
15531553 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
15541554 // integer type is the same size as the pointer type.
15551555 if (TD && LHSCI->getOpcode() == Instruction::PtrToInt &&
1556 TD->getTypeSizeInBits(DestTy) ==
1556 TD->getPointerSizeInBits(
1557 cast(LHSCI)->getPointerAddressSpace()) ==
15571558 cast(DestTy)->getBitWidth()) {
15581559 Value *RHSOp = 0;
15591560 if (Constant *RHSC = dyn_cast(ICI.getOperand(1))) {
22492250 case Instruction::IntToPtr:
22502251 // icmp pred inttoptr(X), null -> icmp pred X, 0
22512252 if (RHSC->isNullValue() && TD &&
2252 TD->getIntPtrType(LHSI->getType()) ==
2253 TD->getIntPtrType(RHSC->getContext()) ==
22532254 LHSI->getOperand(0)->getType())
22542255 return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
22552256 Constant::getNullValue(LHSI->getOperand(0)->getType()));
172172 // Ensure that the alloca array size argument has type intptr_t, so that
173173 // any casting is exposed early.
174174 if (TD) {
175 Type *IntPtrTy = TD->getIntPtrType(AI.getType());
175 Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
176176 if (AI.getArraySize()->getType() != IntPtrTy) {
177177 Value *V = Builder->CreateIntCast(AI.getArraySize(),
178178 IntPtrTy, false);
184184 // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
185185 if (AI.isArrayAllocation()) { // Check C != 1
186186 if (const ConstantInt *C = dyn_cast(AI.getArraySize())) {
187 Type *NewTy =
187 Type *NewTy =
188188 ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
189189 AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
190190 New->setAlignment(AI.getAlignment());
310310
311311 Type *SrcPTy = SrcTy->getElementType();
312312
313 if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
313 if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
314314 DestPTy->isVectorTy()) {
315315 // If the source is an array, the code below will not succeed. Check to
316316 // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
327327 }
328328
329329 if (IC.getDataLayout() &&
330 (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
330 (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
331331 SrcPTy->isVectorTy()) &&
332332 // Do not allow turning this into a load of an integer, which is then
333333 // casted to a pointer, this pessimizes pointer analysis a lot.
338338 // Okay, we are casting from one integer or pointer type to another of
339339 // the same size. Instead of casting the pointer before the load, cast
340340 // the result of the loaded value.
341 LoadInst *NewLoad =
341 LoadInst *NewLoad =
342342 IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
343343 NewLoad->setAlignment(LI.getAlignment());
344344 NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
375375 // None of the following transforms are legal for volatile/atomic loads.
376376 // FIXME: Some of it is okay for atomic loads; needs refactoring.
377377 if (!LI.isSimple()) return 0;
378
378
379379 // Do really simple store-to-load forwarding and load CSE, to catch cases
380380 // where there are several consecutive memory accesses to the same location,
381381 // separated by a few arithmetic operations.
396396 Constant::getNullValue(Op->getType()), &LI);
397397 return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
398398 }
399 }
399 }
400400
401401 // load null/undef -> unreachable
402402 // TODO: Consider a target hook for valid address spaces for this xform.
415415 if (CE->isCast())
416416 if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
417417 return Res;
418
418
419419 if (Op->hasOneUse()) {
420420 // Change select and PHI nodes to select values instead of addresses: this
421421 // helps alias analysis out a lot, allows many others simplifications, and
469469 Type *DestPTy = cast(CI->getType())->getElementType();
470470 PointerType *SrcTy = dyn_cast(CastOp->getType());
471471 if (SrcTy == 0) return 0;
472
472
473473 Type *SrcPTy = SrcTy->getElementType();
474474
475475 if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
476476 return 0;
477
477
478478 /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
479479 /// to its first element. This allows us to handle things like:
480480 /// store i32 xxx, (bitcast {foo*, float}* %P to i32*)
481481 /// on 32-bit hosts.
482482 SmallVector NewGEPIndices;
483
483
484484 // If the source is an array, the code below will not succeed. Check to
485485 // see if a trivial 'gep P, 0, 0' will help matters. Only do this for
486486 // constants.
488488 // Index through pointer.
489489 Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
490490 NewGEPIndices.push_back(Zero);
491
491
492492 while (1) {
493493 if (StructType *STy = dyn_cast(SrcPTy)) {
494494 if (!STy->getNumElements()) /* Struct can be empty {} */
502502 break;
503503 }
504504 }
505
505
506506 SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
507507 }
508508
509509 if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
510510 return 0;
511
511
512512 // If the pointers point into different address spaces or if they point to
513513 // values with different sizes, we can't do the transformation.
514514 if (!IC.getDataLayout() ||
515 SrcTy->getAddressSpace() != CI->getType()->getPointerAddressSpace() ||
515 SrcTy->getAddressSpace() !=
516 cast(CI->getType())->getAddressSpace() ||
516517 IC.getDataLayout()->getTypeSizeInBits(SrcPTy) !=
517518 IC.getDataLayout()->getTypeSizeInBits(DestPTy))
518519 return 0;
519520
520521 // Okay, we are casting from one integer or pointer type to another of
521 // the same size. Instead of casting the pointer before
522 // the same size. Instead of casting the pointer before
522523 // the store, cast the value to be stored.
523524 Value *NewCast;
524525 Value *SIOp0 = SI.getOperand(0);
532533 if (SIOp0->getType()->isPointerTy())
533534 opcode = Instruction::PtrToInt;
534535 }
535
536
536537 // SIOp0 is a pointer to aggregate and this is a store to the first field,
537538 // emit a GEP to index into its first field.
538539 if (!NewGEPIndices.empty())
539540 CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices);
540
541
541542 NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
542543 SIOp0->getName()+".c");
543544 SI.setOperand(0, NewCast);
556557 static bool equivalentAddressValues(Value *A, Value *B) {
557558 // Test if the values are trivially equivalent.
558559 if (A == B) return true;
559
560
560561 // Test if the values come form identical arithmetic instructions.
561562 // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
562563 // its only used to compare two uses within the same basic block, which
569570 if (Instruction *BI = dyn_cast(B))
570571 if (cast(A)->isIdenticalToWhenDefined(BI))
571572 return true;
572
573
573574 // Otherwise they may not be equivalent.
574575 return false;
575576 }
600601 // If the RHS is an alloca with a single use, zapify the store, making the
601602 // alloca dead.
602603 if (Ptr->hasOneUse()) {
603 if (isa(Ptr))
604 if (isa(Ptr))
604605 return EraseInstFromFunction(SI);
605606 if (GetElementPtrInst *GEP = dyn_cast(Ptr)) {
606607 if (isa(GEP->getOperand(0))) {
623624 (isa(BBI) && BBI->getType()->isPointerTy())) {
624625 ScanInsts++;
625626 continue;
626 }
627
627 }
628
628629 if (StoreInst *PrevSI = dyn_cast(BBI)) {
629630 // Prev store isn't volatile, and stores to the same location?
630631 if (PrevSI->isSimple() && equivalentAddressValues(PrevSI->getOperand(1),
636637 }
637638 break;
638639 }
639
640
640641 // If this is a load, we have to stop. However, if the loaded value is from
641642 // the pointer we're loading and is producing the pointer we're storing,
642643 // then *this* store is dead (X = load P; store X -> P).
644645 if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
645646 LI->isSimple())
646647 return EraseInstFromFunction(SI);
647
648
648649 // Otherwise, this is a load from some other location. Stores before it
649650 // may not be dead.
650651 break;
651652 }
652
653
653654 // Don't skip over loads or things that can modify memory.
654655 if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
655656 break;
679680 if (Instruction *Res = InstCombineStoreToCast(*this, SI))
680681 return Res;
681682
682
683
683684 // If this store is the last instruction in the basic block (possibly
684685 // excepting debug info instructions), and if the block ends with an
685686 // unconditional branch, try to move it to the successor block.
686 BBI = &SI;
687 BBI = &SI;
687688 do {
688689 ++BBI;
689690 } while (isa(BBI) ||
692693 if (BI->isUnconditional())
693694 if (SimplifyStoreAtEndOfBlock(SI))
694695 return 0; // xform done!
695
696
696697 return 0;
697698 }
698699
706707 ///
707708 bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
708709 BasicBlock *StoreBB = SI.getParent();
709
710
710711 // Check to see if the successor block has exactly two incoming edges. If
711712 // so, see if the other predecessor contains a store to the same location.
712713 // if so, insert a PHI node (if needed) and move the stores down.
713714 BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
714
715
715716 // Determine whether Dest has exactly two predecessors and, if so, compute
716717 // the other predecessor.
717718 pred_iterator PI = pred_begin(DestBB);
723724
724725 if (++PI == pred_end(DestBB))
725726 return false;
726
727
727728 P = *PI;
728729 if (P != StoreBB) {
729730 if (OtherBB)
743744 BranchInst *OtherBr = dyn_cast(BBI);
744745 if (!OtherBr || BBI == OtherBB->begin())
745746 return false;
746
747
747748 // If the other block ends in an unconditional branch, check for the 'if then
748749 // else' case. there is an instruction before the branch.
749750 StoreInst *OtherStore = 0;
765766 } else {
766767 // Otherwise, the other block ended with a conditional branch. If one of the
767768 // destinations is StoreBB, then we have the if/then case.
768 if (OtherBr->getSuccessor(0) != StoreBB &&
769 if (OtherBr->getSuccessor(0) != StoreBB &&
769770 OtherBr->getSuccessor(1) != StoreBB)
770771 return false;
771
772
772773 // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
773774 // if/then triangle. See if there is a store to the same ptr as SI that
774775 // lives in OtherBB.
786787 BBI == OtherBB->begin())
787788 return false;
788789 }
789
790
790791 // In order to eliminate the store in OtherBr, we have to
791792 // make sure nothing reads or overwrites the stored value in
792793 // StoreBB.
796797 return false;
797798 }
798799 }
799
800
800801 // Insert a PHI node now if we need it.
801802 Value *MergedVal = OtherStore->getOperand(0);
802803 if (MergedVal != SI.getOperand(0)) {
805806 PN->addIncoming(OtherStore->getOperand(0), OtherBB);
806807 MergedVal = InsertNewInstBefore(PN, DestBB->front());
807808 }
808
809
809810 // Advance to a place where it is safe to insert the new store and
810811 // insert it.
811812 BBI = DestBB->getFirstInsertionPt();
815816 SI.getOrdering(),
816817 SI.getSynchScope());
817818 InsertNewInstBefore(NewSI, *BBI);
818 NewSI->setDebugLoc(OtherStore->getDebugLoc());
819 NewSI->setDebugLoc(OtherStore->getDebugLoc());
819820
820821 // Nuke the old stores.
821822 EraseInstFromFunction(SI);
737737 /// or not there is a sequence of GEP indices into the type that will land us at
738738 /// the specified offset. If so, fill them into NewIndices and return the
739739 /// resultant element type, otherwise return null.
740 Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset, Type *IntPtrTy,
740 Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
741741 SmallVectorImpl &NewIndices) {
742742 if (!TD) return 0;
743743 if (!Ty->isSized()) return 0;
745745 // Start with the index over the outer type. Note that the type size
746746 // might be zero (even if the offset isn't zero) if the indexed type
747747 // is something like [0 x {int, int}]
748 Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
748749 int64_t FirstIdx = 0;
749750 if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
750751 FirstIdx = Offset/TySize;
10531054 // by multiples of a zero size type with zero.
10541055 if (TD) {
10551056 bool MadeChange = false;
1056 Type *IntPtrTy = TD->getIntPtrType(PtrOp->getType());
1057 Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());
10571058
10581059 gep_type_iterator GTI = gep_type_begin(GEP);
10591060 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
12381239
12391240 // Earlier transforms ensure that the index has type IntPtrType, which
12401241 // considerably simplifies the logic by eliminating implicit casts.
1241 assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&
1242 assert(Idx->getType() == TD->getIntPtrType(GEP.getContext()) &&
12421243 "Index not cast to pointer width?");
12431244
12441245 bool NSW;
12731274
12741275 // Earlier transforms ensure that the index has type IntPtrType, which
12751276 // considerably simplifies the logic by eliminating implicit casts.
1276 assert(Idx->getType() == TD->getIntPtrType(GEP.getType()) &&
1277 assert(Idx->getType() == TD->getIntPtrType(GEP.getContext()) &&
12771278 "Index not cast to pointer width?");
12781279
12791280 bool NSW;
13351336 SmallVector NewIndices;
13361337 Type *InTy =
13371338 cast(BCI->getOperand(0)->getType())->getElementType();
1338 Type *IntPtrTy = TD->getIntPtrType(BCI->getOperand(0)->getType());
1339 if (FindElementAtOffset(InTy, Offset, IntPtrTy, NewIndices)) {
1339 if (FindElementAtOffset(InTy, Offset, NewIndices)) {
13401340 Value *NGEP = GEP.isInBounds() ?
13411341 Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices) :
13421342 Builder->CreateGEP(BCI->getOperand(0), NewIndices);
932932 DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
933933 << *MemoryInst);
934934 Type *IntPtrTy =
935 TLI->getDataLayout()->getIntPtrType(Addr->getType());
935 TLI->getDataLayout()->getIntPtrType(AccessTy->getContext());
936936
937937 Value *Result = 0;
938938
14271427 /// genLoopLimit - Help LinearFunctionTestReplace by generating a value that
14281428 /// holds the RHS of the new loop test.
14291429 static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
1430 SCEVExpander &Rewriter, ScalarEvolution *SE,
1431 Type *IntPtrTy) {
1430 SCEVExpander &Rewriter, ScalarEvolution *SE) {
14321431 const SCEVAddRecExpr *AR = dyn_cast(SE->getSCEV(IndVar));
14331432 assert(AR && AR->getLoop() == L && AR->isAffine() && "bad loop counter");
14341433 const SCEV *IVInit = AR->getStart();
14541453 // We could handle pointer IVs other than i8*, but we need to compensate for
14551454 // gep index scaling. See canExpandBackedgeTakenCount comments.
14561455 assert(SE->getSizeOfExpr(
1457 cast(GEPBase->getType())->getElementType(),
1458 IntPtrTy)->isOne()
1456 cast(GEPBase->getType())->getElementType())->isOne()
14591457 && "unit stride pointer IV must be i8*");
14601458
14611459 IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
15541552 CmpIndVar = IndVar;
15551553 }
15561554
1557 Type *IntPtrTy = TD ? TD->getIntPtrType(IndVar->getType()) :
1558 IntegerType::getInt64Ty(IndVar->getContext());
1559 Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE, IntPtrTy);
1555 Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE);
15601556 assert(ExitCnt->getType()->isPointerTy() == IndVar->getType()->isPointerTy()
15611557 && "genLoopLimit missed a cast");
15621558
457457 // Okay, we have a strided store "p[i]" of a splattable value. We can turn
458458 // this into a memset in the loop preheader now if we want. However, this
459459 // would be unsafe to do if there is anything else in the loop that may read
460 // or write to the aliased location.
461 assert(DestPtr->getType()->isPointerTy()
462 && "Must be a pointer type.");
463 unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();
460 // or write to the aliased location. Check for any overlap by generating the
461 // base pointer and checking the region.
462 unsigned AddrSpace = cast(DestPtr->getType())->getAddressSpace();
464463 Value *BasePtr =
465464 Expander.expandCodeFor(Ev->getStart(), Builder.getInt8PtrTy(AddrSpace),
466465 Preheader->getTerminator());
470469
471470 // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
472471 // pointer size if it isn't already.
473 Type *IntPtr = TD->getIntPtrType(DestPtr->getType());
472 Type *IntPtr = TD->getIntPtrType(DestPtr->getContext());
474473 BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
475474
476475 const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
586585
587586 // The # stored bytes is (BECount+1)*Size. Expand the trip count out to
588587 // pointer size if it isn't already.
589 Type *IntPtr = TD->getIntPtrType(SI->getType());
588 Type *IntPtr = TD->getIntPtrType(SI->getContext());
590589 BECount = SE->getTruncateOrZeroExtend(BECount, IntPtr);
591590
592591 const SCEV *NumBytesS = SE->getAddExpr(BECount, SE->getConstant(IntPtr, 1),
23942394
23952395 Value *getAdjustedAllocaPtr(IRBuilder<> &IRB, Type *PointerTy) {
23962396 assert(BeginOffset >= NewAllocaBeginOffset);
2397 assert(PointerTy->isPointerTy() &&
2398 "Type must be pointer type!");
2399 APInt Offset(TD.getTypeSizeInBits(PointerTy), BeginOffset - NewAllocaBeginOffset);
2397 unsigned AS = cast(PointerTy)->getAddressSpace();
2398 APInt Offset(TD.getPointerSizeInBits(AS), BeginOffset - NewAllocaBeginOffset);
24002399 return getAdjustedPtr(IRB, TD, &NewAI, Offset, PointerTy, getName(""));
24012400 }
24022401
27942793 = P.getMemTransferOffsets(II);
27952794
27962795 assert(OldPtr->getType()->isPointerTy() && "Must be a pointer type!");
2796 unsigned AS = cast(OldPtr->getType())->getAddressSpace();
27972797 // Compute the relative offset within the transfer.
2798 unsigned IntPtrWidth = TD.getTypeSizeInBits(OldPtr->getType());
2798 unsigned IntPtrWidth = TD.getPointerSizeInBits(AS);
27992799 APInt RelOffset(IntPtrWidth, BeginOffset - (IsDest ? MTO.DestBegin
28002800 : MTO.SourceBegin));
28012801
962962 if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy())
963963 SV = Builder.CreateBitCast(SV, IntegerType::get(SV->getContext(),SrcWidth));
964964 else if (SV->getType()->isPointerTy())
965 SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getType()));
965 SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getContext()));
966966
967967 // Zero extend or truncate the value if needed.
968968 if (SV->getType() != AllocaType) {
310310 if (!TD) return 0;
311311
312312 FunctionType *FT = Callee->getFunctionType();
313 Type *PT = FT->getParamType(0);
314313 if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
315314 !FT->getParamType(0)->isPointerTy() ||
316315 !FT->getParamType(1)->isPointerTy() ||
317 FT->getParamType(2) != TD->getIntPtrType(PT))
316 FT->getParamType(2) != TD->getIntPtrType(*Context))
318317 return 0;
319318
320319 // memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
333332 if (!TD) return 0;
334333
335334 FunctionType *FT = Callee->getFunctionType();
336 Type *PT = FT->getParamType(0);
337335 if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
338336 !FT->getParamType(0)->isPointerTy() ||
339337 !FT->getParamType(1)->isPointerTy() ||
340 FT->getParamType(2) != TD->getIntPtrType(PT))
338 FT->getParamType(2) != TD->getIntPtrType(*Context))
341339 return 0;
342340
343341 // memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
356354 if (!TD) return 0;
357355
358356 FunctionType *FT = Callee->getFunctionType();
359 Type *PT = FT->getParamType(0);
360357 if (FT->getNumParams() != 3 || FT->getReturnType() != FT->getParamType(0) ||
361358 !FT->getParamType(0)->isPointerTy() ||
362359 !FT->getParamType(1)->isIntegerTy() ||
363 FT->getParamType(2) != TD->getIntPtrType(PT))
360 FT->getParamType(2) != TD->getIntPtrType(*Context))
364361 return 0;
365362
366363 // memset(p, v, n) -> llvm.memset(p, v, n, 1)
785782 if (!TD) return 0;
786783
787784 // sprintf(str, fmt) -> llvm.memcpy(str, fmt, strlen(fmt)+1, 1)
788 Type *AT = CI->getArgOperand(0)->getType();
789785 B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
790 ConstantInt::get(TD->getIntPtrType(AT), // Copy the
786 ConstantInt::get(TD->getIntPtrType(*Context), // Copy the
791787 FormatStr.size() + 1), 1); // nul byte.
792788 return ConstantInt::get(CI->getType(), FormatStr.size());
793789 }
914910 uint64_t Len = GetStringLength(CI->getArgOperand(0));
915911 if (!Len) return 0;
916912 // Known to have no uses (see above).
917 Type *PT = FT->getParamType(0);
918913 return EmitFWrite(CI->getArgOperand(0),
919 ConstantInt::get(TD->getIntPtrType(PT), Len-1),
914 ConstantInt::get(TD->getIntPtrType(*Context), Len-1),
920915 CI->getArgOperand(1), B, TD, TLI);
921916 }
922917 };
941936 // These optimizations require DataLayout.
942937 if (!TD) return 0;
943938
944 Type *AT = CI->getArgOperand(1)->getType();
945939 Value *NewCI = EmitFWrite(CI->getArgOperand(1),
946 ConstantInt::get(TD->getIntPtrType(AT),
940 ConstantInt::get(TD->getIntPtrType(*Context),
947941 FormatStr.size()),
948942 CI->getArgOperand(0), B, TD, TLI);
949943 return NewCI ? ConstantInt::get(CI->getType(), FormatStr.size()) : 0;
4545 AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
4646 ArrayRef(AVs, 2));
4747
48 LLVMContext &Context = B.GetInsertBlock()->getContext();
4849 Constant *StrLen = M->getOrInsertFunction("strlen", AttrListPtr::get(AWI),
49 TD->getIntPtrType(Ptr->getType()),
50 TD->getIntPtrType(Context),
5051 B.getInt8PtrTy(),
5152 NULL);
5253 CallInst *CI = B.CreateCall(StrLen, CastToCStr(Ptr, B), "strlen");
7172 AWI[1] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
7273 ArrayRef(AVs, 2));
7374
75 LLVMContext &Context = B.GetInsertBlock()->getContext();
7476 Constant *StrNLen = M->getOrInsertFunction("strnlen", AttrListPtr::get(AWI),
75 TD->getIntPtrType(Ptr->getType()),
77 TD->getIntPtrType(Context),
7678 B.getInt8PtrTy(),
77 TD->getIntPtrType(Ptr->getType()),
79 TD->getIntPtrType(Context),
7880 NULL);
7981 CallInst *CI = B.CreateCall2(StrNLen, CastToCStr(Ptr, B), MaxLen, "strnlen");
8082 if (const Function *F = dyn_cast(StrNLen->stripPointerCasts()))
123125 AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
124126 ArrayRef(AVs, 2));
125127
128 LLVMContext &Context = B.GetInsertBlock()->getContext();
126129 Value *StrNCmp = M->getOrInsertFunction("strncmp", AttrListPtr::get(AWI),
127130 B.getInt32Ty(),
128131 B.getInt8PtrTy(),
129132 B.getInt8PtrTy(),
130 TD->getIntPtrType(Ptr1->getType()),
131 NULL);
133 TD->getIntPtrType(Context), NULL);
132134 CallInst *CI = B.CreateCall3(StrNCmp, CastToCStr(Ptr1, B),
133135 CastToCStr(Ptr2, B), Len, "strncmp");
134136
198200 AttributeWithIndex AWI;
199201 AWI = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
200202 Attributes::NoUnwind);
203 LLVMContext &Context = B.GetInsertBlock()->getContext();
201204 Value *MemCpy = M->getOrInsertFunction("__memcpy_chk",
202205 AttrListPtr::get(AWI),
203206 B.getInt8PtrTy(),
204207 B.getInt8PtrTy(),
205208 B.getInt8PtrTy(),
206 TD->getIntPtrType(Dst->getType()),
207 TD->getIntPtrType(Src->getType()),
208 NULL);
209 TD->getIntPtrType(Context),
210 TD->getIntPtrType(Context), NULL);
209211 Dst = CastToCStr(Dst, B);
210212 Src = CastToCStr(Src, B);
211213 CallInst *CI = B.CreateCall4(MemCpy, Dst, Src, Len, ObjSize);
227229 Attributes::AttrVal AVs[2] = { Attributes::ReadOnly, Attributes::NoUnwind };
228230 AWI = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
229231 ArrayRef(AVs, 2));
232 LLVMContext &Context = B.GetInsertBlock()->getContext();
230233 Value *MemChr = M->getOrInsertFunction("memchr", AttrListPtr::get(AWI),
231234 B.getInt8PtrTy(),
232235 B.getInt8PtrTy(),
233236 B.getInt32Ty(),
234 TD->getIntPtrType(Ptr->getType()),
237 TD->getIntPtrType(Context),
235238 NULL);
236239 CallInst *CI = B.CreateCall3(MemChr, CastToCStr(Ptr, B), Val, Len, "memchr");
237240
256259 AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
257260 ArrayRef(AVs, 2));
258261
262 LLVMContext &Context = B.GetInsertBlock()->getContext();
259263 Value *MemCmp = M->getOrInsertFunction("memcmp", AttrListPtr::get(AWI),
260264 B.getInt32Ty(),
261265 B.getInt8PtrTy(),
262266 B.getInt8PtrTy(),
263 TD->getIntPtrType(Ptr1->getType()),
264 NULL);
267 TD->getIntPtrType(Context), NULL);
265268 CallInst *CI = B.CreateCall3(MemCmp, CastToCStr(Ptr1, B), CastToCStr(Ptr2, B),
266269 Len, "memcmp");
267270
421424 AWI[1] = AttributeWithIndex::get(M->getContext(), 4, Attributes::NoCapture);
422425 AWI[2] = AttributeWithIndex::get(M->getContext(), AttrListPtr::FunctionIndex,
423426 Attributes::NoUnwind);
427 LLVMContext &Context = B.GetInsertBlock()->getContext();
424428 StringRef FWriteName = TLI->getName(LibFunc::fwrite);
425429 Constant *F;
426 Type *PtrTy = Ptr->getType();
427430 if (File->getType()->isPointerTy())
428431 F = M->getOrInsertFunction(FWriteName, AttrListPtr::get(AWI),
429 TD->getIntPtrType(PtrTy),
432 TD->getIntPtrType(Context),
430433 B.getInt8PtrTy(),
431 TD->getIntPtrType(PtrTy),
432 TD->getIntPtrType(PtrTy),
434 TD->getIntPtrType(Context),
435 TD->getIntPtrType(Context),
433436 File->getType(), NULL);
434437 else
435 F = M->getOrInsertFunction(FWriteName, TD->getIntPtrType(PtrTy),
438 F = M->getOrInsertFunction(FWriteName, TD->getIntPtrType(Context),
436439 B.getInt8PtrTy(),
437 TD->getIntPtrType(PtrTy),
438 TD->getIntPtrType(PtrTy),
440 TD->getIntPtrType(Context),
441 TD->getIntPtrType(Context),
439442 File->getType(), NULL);
440443 CallInst *CI = B.CreateCall4(F, CastToCStr(Ptr, B), Size,
441 ConstantInt::get(TD->getIntPtrType(PtrTy), 1), File);
444 ConstantInt::get(TD->getIntPtrType(Context), 1), File);
442445
443446 if (const Function *Fn = dyn_cast(F->stripPointerCasts()))
444447 CI->setCallingConv(Fn->getCallingConv());
460463 IRBuilder<> B(CI);
461464
462465 if (Name == "__memcpy_chk") {
463 Type *PT = FT->getParamType(0);
464466 // Check if this has the right signature.
465467 if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
466468 !FT->getParamType(0)->isPointerTy() ||
467469 !FT->getParamType(1)->isPointerTy() ||
468 FT->getParamType(2) != TD->getIntPtrType(PT) ||
469 FT->getParamType(3) != TD->getIntPtrType(PT))
470 FT->getParamType(2) != TD->getIntPtrType(Context) ||
471 FT->getParamType(3) != TD->getIntPtrType(Context))
470472 return false;
471473
472474 if (isFoldable(3, 2, false)) {
485487
486488 if (Name == "__memmove_chk") {
487489 // Check if this has the right signature.
488 Type *PT = FT->getParamType(0);
489490 if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
490491 !FT->getParamType(0)->isPointerTy() ||
491492 !FT->getParamType(1)->isPointerTy() ||
492 FT->getParamType(2) != TD->getIntPtrType(PT) ||
493 FT->getParamType(3) != TD->getIntPtrType(PT))
493 FT->getParamType(2) != TD->getIntPtrType(Context) ||
494 FT->getParamType(3) != TD->getIntPtrType(Context))
494495 return false;
495496
496497 if (isFoldable(3, 2, false)) {
504505
505506 if (Name == "__memset_chk") {
506507 // Check if this has the right signature.
507 Type *PT = FT->getParamType(0);
508508 if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
509509 !FT->getParamType(0)->isPointerTy() ||
510510 !FT->getParamType(1)->isIntegerTy() ||
511 FT->getParamType(2) != TD->getIntPtrType(PT) ||
512 FT->getParamType(3) != TD->getIntPtrType(PT))
511 FT->getParamType(2) != TD->getIntPtrType(Context) ||
512 FT->getParamType(3) != TD->getIntPtrType(Context))
513513 return false;
514514
515515 if (isFoldable(3, 2, false)) {
524524
525525 if (Name == "__strcpy_chk" || Name == "__stpcpy_chk") {
526526 // Check if this has the right signature.
527 Type *PT = FT->getParamType(0);
528527 if (FT->getNumParams() != 3 ||
529528 FT->getReturnType() != FT->getParamType(0) ||
530529 FT->getParamType(0) != FT->getParamType(1) ||
531530 FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
532 FT->getParamType(2) != TD->getIntPtrType(PT))
531 FT->getParamType(2) != TD->getIntPtrType(Context))
533532 return 0;
534533
535534
551550
552551 if (Name == "__strncpy_chk" || Name == "__stpncpy_chk") {
553552 // Check if this has the right signature.
554 Type *PT = FT->getParamType(0);
555553 if (FT->getNumParams() != 4 || FT->getReturnType() != FT->getParamType(0) ||
556554 FT->getParamType(0) != FT->getParamType(1) ||
557555 FT->getParamType(0) != Type::getInt8PtrTy(Context) ||
558556 !FT->getParamType(2)->isIntegerTy() ||
559 FT->getParamType(3) != TD->getIntPtrType(PT))
557 FT->getParamType(3) != TD->getIntPtrType(Context))
560558 return false;
561559
562560 if (isFoldable(3, 2, false)) {
805805 const DataLayout *TD) {
806806 assert(V->getType()->isPointerTy() &&
807807 "getOrEnforceKnownAlignment expects a pointer!");
808 unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) : 64;
808 unsigned AS = cast(V->getType())->getAddressSpace();
809 unsigned BitWidth = TD ? TD->getPointerSizeInBits(AS) : 64;
809810 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
810811 ComputeMaskedBits(V, KnownZero, KnownOne, TD);
811812 unsigned TrailZ = KnownZero.countTrailingOnes();
534534 CV = ICI->getOperand(0);
535535
536536 // Unwrap any lossless ptrtoint cast.
537 if (TD && CV) {
538 PtrToIntInst *PTII = NULL;
539 if ((PTII = dyn_cast(CV)) &&
540 CV->getType() == TD->getIntPtrType(CV->getContext(),
541 PTII->getPointerAddressSpace()))
537 if (TD && CV && CV->getType() == TD->getIntPtrType(CV->getContext()))
538 if (PtrToIntInst *PTII = dyn_cast(CV))
542539 CV = PTII->getOperand(0);
543 }
544540 return CV;
545541 }
546542
987983 // Convert pointer to int before we switch.
988984 if (CV->getType()->isPointerTy()) {
989985 assert(TD && "Cannot switch on pointer without DataLayout");
990 CV = Builder.CreatePtrToInt(CV, TD->getIntPtrType(CV->getType()),
986 CV = Builder.CreatePtrToInt(CV, TD->getIntPtrType(CV->getContext()),
991987 "magicptr");
992988 }
993989
27152711 if (CompVal->getType()->isPointerTy()) {
27162712 assert(TD && "Cannot switch on pointer without DataLayout");
27172713 CompVal = Builder.CreatePtrToInt(CompVal,
2718 TD->getIntPtrType(CompVal->getType()),
2714 TD->getIntPtrType(CompVal->getContext()),
27192715 "magicptr");
27202716 }
27212717