IR: Change the gep_type_iterator API to avoid always exposing the "current" type.

Instead, expose whether the current type is an array or a struct, if an array what the upper bound is, and if a struct the struct type itself. This is in preparation for a later change which will make PointerType derive from Type rather than SequentialType.

Differential Revision: https://reviews.llvm.org/D26594

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@288458 91177308-0d34-0410-b5e6-96231b3b80d8

Author: Peter Collingbourne
31 changed files with 142 additions and 145 deletions.
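Every call site in this diff follows the same mechanical migration: instead of dereferencing the iterator (*GTI) to obtain the "outer" type and dispatching on it with dyn_cast, callers now query the iterator itself via getStructTypeOrNull() and getIndexedType(). A minimal sketch of the resulting idiom, assuming all GEP indices are constant; walkGEPOffsets and its signature are hypothetical and not part of this commit:

#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Operator.h"
using namespace llvm;

// Hypothetical helper, not part of the commit: accumulate the constant byte
// offset of a GEP whose indices are all ConstantInts.
static int64_t walkGEPOffsets(const GEPOperator *GEP, const DataLayout &DL) {
  int64_t Offset = 0;
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    auto *CI = cast<ConstantInt>(GEP->getOperand(I));
    // New API: ask the iterator, instead of dyn_cast<StructType>(*GTI).
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      // Struct level: add the field offset from the struct layout.
      Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      // Sequential level: scale the index by the element size;
      // getIndexedType() replaces the old *GTI dereference.
      Offset += CI->getSExtValue() *
                int64_t(DL.getTypeAllocSize(GTI.getIndexedType()));
    }
  }
  return Offset;
}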
481481 int64_t BaseOffset = 0;
482482 int64_t Scale = 0;
483483
484 // Assumes the address space is 0 when Ptr is nullptr.
485 unsigned AS =
486 (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
487 auto GTI = gep_type_begin(PointeeType, AS, Operands);
484 auto GTI = gep_type_begin(PointeeType, Operands);
488485 for (auto I = Operands.begin(); I != Operands.end(); ++I, ++GTI) {
489486 // We assume that the cost of Scalar GEP with constant index and the
490487 // cost of Vector GEP with splat constant index are the same.
491488 const ConstantInt *ConstIdx = dyn_cast<ConstantInt>(*I);
492489 if (!ConstIdx)
493490 if (auto Splat = getSplatValue(*I))
494491 ConstIdx = dyn_cast<ConstantInt>(Splat);
495 if (isa<SequentialType>(*GTI)) {
492 if (StructType *STy = GTI.getStructTypeOrNull()) {
493 // For structures the index is always splat or scalar constant
494 assert(ConstIdx && "Unexpected GEP index");
495 uint64_t Field = ConstIdx->getZExtValue();
496 BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
497 } else {
496498 int64_t ElementSize = DL.getTypeAllocSize(GTI.getIndexedType());
497499 if (ConstIdx)
498500 BaseOffset += ConstIdx->getSExtValue() * ElementSize;
499501 else {
500502 // Needs scale register.
501503 if (Scale != 0)
502504 // No addressing mode takes two scale registers.
503505 return TTI::TCC_Basic;
504506 Scale = ElementSize;
505507 }
506 } else {
507 StructType *STy = cast<StructType>(*GTI);
508 // For structures the index is always splat or scalar constant
509 assert(ConstIdx && "Unexpected GEP index");
510 uint64_t Field = ConstIdx->getZExtValue();
511 BaseOffset += DL.getStructLayout(STy)->getElementOffset(Field);
512508 }
513509 }
514510
511 // Assumes the address space is 0 when Ptr is nullptr.
512 unsigned AS =
513 (Ptr == nullptr ? 0 : Ptr->getType()->getPointerAddressSpace());
515514 if (static_cast<T *>(this)->isLegalAddressingMode(
516 PointerType::get(*GTI, AS), const_cast<GlobalValue *>(BaseGV),
515 PointerType::get(Type::getInt8Ty(PointeeType->getContext()), AS),
516 const_cast<GlobalValue *>(BaseGV),
517517 BaseOffset, HasBaseReg, Scale, AS)) {
518518 return TTI::TCC_Free;
519519 }
1515 #define LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
1616
1717 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/PointerIntPair.h"
18 #include "llvm/ADT/PointerUnion.h"
1919 #include "llvm/IR/DerivedTypes.h"
2020 #include "llvm/IR/Operator.h"
2121 #include "llvm/IR/User.h"
3232 Type *, ptrdiff_t> super;
3333
3434 ItTy OpIt;
35 PointerIntPair<Type *, 1> CurTy;
36 unsigned AddrSpace;
37
35 PointerUnion<StructType *, Type *> CurTy;
36 enum : uint64_t { Unbounded = -1ull };
37 uint64_t NumElements = Unbounded;
3838 generic_gep_type_iterator() = default;
3939
4040 public:
41 static generic_gep_type_iterator begin(Type *Ty, unsigned AddrSpace,
42 ItTy It) {
41 static generic_gep_type_iterator begin(Type *Ty, ItTy It) {
4342 generic_gep_type_iterator I;
44 I.CurTy.setPointer(Ty);
45 I.CurTy.setInt(true);
46 I.AddrSpace = AddrSpace;
43 I.CurTy = Ty;
4744 I.OpIt = It;
4845 return I;
4946 }
6259 return !operator==(x);
6360 }
6461
65 Type *operator*() const {
66 if (CurTy.getInt())
67 return CurTy.getPointer()->getPointerTo(AddrSpace);
68 return CurTy.getPointer();
62 // FIXME: Make this the iterator's operator*() after the 4.0 release.
63 // operator*() had a different meaning in earlier releases, so we're
64 // temporarily not giving this iterator an operator*() to avoid a subtle
65 // semantics break.
66 Type *getIndexedType() const {
67 if (auto *T = CurTy.dyn_cast<Type *>())
68 return T;
69 return CurTy.get<StructType *>()->getTypeAtIndex(getOperand());
6970 }
70
71 Type *getIndexedType() const {
72 if (CurTy.getInt())
73 return CurTy.getPointer();
74 CompositeType *CT = cast<CompositeType>(CurTy.getPointer());
75 return CT->getTypeAtIndex(getOperand());
76 }
77
78 // This is a non-standard operator->. It allows you to call methods on the
79 // current type directly.
80 Type *operator->() const { return operator*(); }
8171
8272 Value *getOperand() const { return const_cast<Value *>(&**OpIt); }
8373
8474 generic_gep_type_iterator& operator++() { // Preincrement
85 if (CurTy.getInt()) {
86 CurTy.setInt(false);
87 } else if (CompositeType *CT =
88 dyn_cast<CompositeType>(CurTy.getPointer())) {
89 CurTy.setPointer(CT->getTypeAtIndex(getOperand()));
90 } else {
91 CurTy.setPointer(nullptr);
92 }
75 Type *Ty = getIndexedType();
76 if (auto *ATy = dyn_cast<ArrayType>(Ty)) {
77 CurTy = ATy->getElementType();
78 NumElements = ATy->getNumElements();
79 } else if (auto *VTy = dyn_cast<VectorType>(Ty)) {
80 CurTy = VTy->getElementType();
81 NumElements = VTy->getNumElements();
82 } else
83 CurTy = dyn_cast<StructType>(Ty);
9384 ++OpIt;
9485 return *this;
9586 }
9687
9788 generic_gep_type_iterator operator++(int) { // Postincrement
9889 generic_gep_type_iterator tmp = *this; ++*this; return tmp;
90 }
91
92 // All of the below API is for querying properties of the "outer type", i.e.
93 // the type that contains the indexed type. Most of the time this is just
94 // the type that was visited immediately prior to the indexed type, but for
95 // the first element this is an unbounded array of the GEP's source element
96 // type, for which there is no clearly corresponding IR type (we've
97 // historically used a pointer type as the outer type in this case, but
98 // pointers will soon lose their element type).
99 //
100 // FIXME: Most current users of this class are just interested in byte
101 // offsets (a few need to know whether the outer type is a struct because
102 // they are trying to replace a constant with a variable, which is only
103 // legal for arrays, e.g. canReplaceOperandWithVariable in SimplifyCFG.cpp);
104 // we should provide a more minimal API here that exposes not much more than
105 // that.
106
107 bool isStruct() const { return CurTy.is<StructType *>(); }
108 bool isSequential() const { return CurTy.is<Type *>(); }
109
110 StructType *getStructType() const { return CurTy.get<StructType *>(); }
111
112 StructType *getStructTypeOrNull() const {
113 return CurTy.dyn_cast<StructType *>();
114 }
115
116 bool isBoundedSequential() const {
117 return isSequential() && NumElements != Unbounded;
118 }
119
120 uint64_t getSequentialNumElements() const {
121 assert(isBoundedSequential());
122 return NumElements;
99123 }
100124 };
101125
105129 auto *GEPOp = cast<GEPOperator>(GEP);
106130 return gep_type_iterator::begin(
107131 GEPOp->getSourceElementType(),
108 cast<PointerType>(GEPOp->getPointerOperandType()->getScalarType())
109 ->getAddressSpace(),
110132 GEP->op_begin() + 1);
111133 }
112134
118140 auto &GEPOp = cast<GEPOperator>(GEP);
119141 return gep_type_iterator::begin(
120142 GEPOp.getSourceElementType(),
121 cast<PointerType>(GEPOp.getPointerOperandType()->getScalarType())
122 ->getAddressSpace(),
123143 GEP.op_begin() + 1);
124144 }
125145
129149
130150 template<typename T>
131151 inline generic_gep_type_iterator<const T *>
132 gep_type_begin(Type *Op0, unsigned AS, ArrayRef<T *> A) {
133 return generic_gep_type_iterator<const T *>::begin(Op0, AS, A.begin());
152 gep_type_begin(Type *Op0, ArrayRef<T *> A) {
153 return generic_gep_type_iterator<const T *>::begin(Op0, A.begin());
134154 }
135155
136156 template<typename T>
137157 inline generic_gep_type_iterator<const T *>
138 gep_type_end(Type * /*Op0*/, unsigned /*AS*/, ArrayRef<T *> A) {
158 gep_type_end(Type * /*Op0*/, ArrayRef<T *> A) {
139159 return generic_gep_type_iterator<const T *>::end(A.end());
140160 }
141161
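The other half of the new surface is the bounds query API added above (isStruct, isSequential, isBoundedSequential, getSequentialNumElements). A hedged sketch of how a caller checks static index bounds with it, in the style of the Constants.cpp and GlobalOpt hunks later in this diff; allIndicesInBounds is a hypothetical helper, and note that the first indexed level reports an unbounded array of the source element type, so it never fails the bound check:

#include "llvm/IR/Constants.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Hypothetical helper, not part of the commit: true if every constant index
// stays inside its notional static array bounds.
static bool allIndicesInBounds(const GetElementPtrInst *GEP) {
  for (gep_type_iterator GTI = gep_type_begin(GEP), E = gep_type_end(GEP);
       GTI != E; ++GTI) {
    // Struct indices are always in-range ConstantInts (the verifier enforces
    // this), so only sequential levels need checking.
    if (GTI.isStruct())
      continue;
    // Only array and vector levels carry a static bound; the first level and
    // any pointer-typed level are unbounded.
    auto *CI = dyn_cast<ConstantInt>(GTI.getOperand());
    if (GTI.isBoundedSequential() &&
        (!CI || CI->getValue().getActiveBits() > 64 ||
         CI->getZExtValue() >= GTI.getSequentialNumElements()))
      return false;
  }
  return true;
}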
216216 continue;
217217
218218 // Handle a struct index, which adds its field offset to the pointer.
219 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
219 if (StructType *STy = GTI.getStructTypeOrNull()) {
220220 if (OpC->getType()->isVectorTy())
221221 OpC = OpC->getSplatValue();
222222
411411 // Assume all GEP operands are constants until proven otherwise.
412412 bool GepHasConstantOffset = true;
413413 for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
414 I != E; ++I) {
414 I != E; ++I, ++GTI) {
415415 const Value *Index = *I;
416416 // Compute the (potentially symbolic) offset in bytes for this index.
417 if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
417 if (StructType *STy = GTI.getStructTypeOrNull()) {
418418 // For a struct, add the member offset.
419419 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
420420 if (FieldNo == 0)
430430 if (CIdx->isZero())
431431 continue;
432432 Decomposed.OtherOffset +=
433 DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
433 DL.getTypeAllocSize(GTI.getIndexedType()) * CIdx->getSExtValue();
434434 continue;
435435 }
436436
437437 GepHasConstantOffset = false;
438438
439 uint64_t Scale = DL.getTypeAllocSize(*GTI);
439 uint64_t Scale = DL.getTypeAllocSize(GTI.getIndexedType());
440440 unsigned ZExtBits = 0, SExtBits = 0;
441441
442442 // If the integer type is smaller than the pointer size, it is implicitly
317317 continue;
318318
319319 // Handle a struct index, which adds its field offset to the pointer.
320 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
320 if (StructType *STy = GTI.getStructTypeOrNull()) {
321321 unsigned ElementIdx = OpC->getZExtValue();
322322 const StructLayout *SL = DL.getStructLayout(STy);
323323 Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
12301230 gep_type_iterator GTI = gep_type_begin(I);
12311231 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
12321232 Value *Index = I->getOperand(i);
1233 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1233 if (StructType *STy = GTI.getStructTypeOrNull()) {
12341234 // Handle struct member offset arithmetic.
12351235
12361236 // Handle case when index is vector zeroinitializer
17291729 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
17301730 GTI != GTE; ++GTI) {
17311731 // Struct types are easy -- they must always be indexed by a constant.
1732 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
1732 if (StructType *STy = GTI.getStructTypeOrNull()) {
17331733 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
17341734 unsigned ElementIdx = OpC->getZExtValue();
17351735 const StructLayout *SL = Q.DL.getStructLayout(STy);
106106 while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
107107 // Find the type we're currently indexing into.
108108 gep_type_iterator GEPTI = gep_type_begin(Gep);
109 std::advance(GEPTI, LastOperand - 1);
109 std::advance(GEPTI, LastOperand - 2);
110110
111111 // If it's a type with the same allocation size as the result of the GEP we
112112 // can peel off the zero index.
113 if (DL.getTypeAllocSize(*GEPTI) != GEPAllocSize)
113 if (DL.getTypeAllocSize(GEPTI.getIndexedType()) != GEPAllocSize)
114114 break;
115115 --LastOperand;
116116 }
32603260 int64_t ConstantOffset = 0;
32613261 gep_type_iterator GTI = gep_type_begin(AddrInst);
32623262 for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
3263 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
3263 if (StructType *STy = GTI.getStructTypeOrNull()) {
32643264 const StructLayout *SL = DL.getStructLayout(STy);
32653265 unsigned Idx =
32663266 cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
487487 for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
488488 GTI != E; ++GTI) {
489489 const Value *Idx = GTI.getOperand();
490 if (auto *StTy = dyn_cast<StructType>(*GTI)) {
490 if (StructType *StTy = GTI.getStructTypeOrNull()) {
491491 uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
492492 if (Field) {
493493 // N = N + Offset
32733273 for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
32743274 GTI != E; ++GTI) {
32753275 const Value *Idx = GTI.getOperand();
3276 if (StructType *StTy = dyn_cast<StructType>(*GTI)) {
3276 if (StructType *StTy = GTI.getStructTypeOrNull()) {
32773277 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
32783278 if (Field) {
32793279 // N = N + Offset
998998 uint64_t Total = 0;
999999
10001000 for (; I != E; ++I) {
1001 if (StructType *STy = dyn_cast<StructType>(*I)) {
1001 if (StructType *STy = I.getStructTypeOrNull()) {
10021002 const StructLayout *SLO = getDataLayout().getStructLayout(STy);
10031003
10041004 const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
10051005 unsigned Index = unsigned(CPU->getZExtValue());
10061006
10071007 Total += SLO->getElementOffset(Index);
10081008 } else {
1009 SequentialType *ST = cast<SequentialType>(*I);
10101009 // Get the index number for the array... which must be long type...
10111010 GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
10121011
10191018 assert(BitWidth == 64 && "Invalid index type for getelementptr");
10201019 Idx = (int64_t)IdxGV.IntVal.getZExtValue();
10211020 }
1022 Total += getDataLayout().getTypeAllocSize(ST->getElementType()) * Idx;
1021 Total += getDataLayout().getTypeAllocSize(I.getIndexedType()) * Idx;
10231022 }
10241023 }
10251024
20182018 }
20192019
20202020 /// Test whether a given ConstantInt is in-range for a SequentialType.
2021 static bool isIndexInRangeOfSequentialType(SequentialType *STy,
2022 const ConstantInt *CI) {
2023 // And indices are valid when indexing along a pointer
2024 if (isa<PointerType>(STy))
2025 return true;
2026
2027 uint64_t NumElements = 0;
2028 // Determine the number of elements in our sequential type.
2029 if (auto *ATy = dyn_cast<ArrayType>(STy))
2030 NumElements = ATy->getNumElements();
2031 else if (auto *VTy = dyn_cast<VectorType>(STy))
2032 NumElements = VTy->getNumElements();
2033
2034 assert((isa<ArrayType>(STy) || NumElements > 0) &&
2035 "didn't expect non-array type to have zero elements!");
2036
2021 static bool isIndexInRangeOfArrayType(uint64_t NumElements,
2022 const ConstantInt *CI) {
20372023 // We cannot bounds check the index if it doesn't fit in an int64_t.
20382024 if (CI->getValue().getActiveBits() > 64)
20392025 return false;
20882074 // getelementptr instructions into a single instruction.
20892075 //
20902076 if (CE->getOpcode() == Instruction::GetElementPtr) {
2091 Type *LastTy = nullptr;
2077 gep_type_iterator LastI = gep_type_end(CE);
20922078 for (gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
20932079 I != E; ++I)
2094 LastTy = *I;
2080 LastI = I;
20952081
20962082 // We cannot combine indices if doing so would take us outside of an
20972083 // array or vector. Doing otherwise could trick us if we evaluated such a
21142100 bool PerformFold = false;
21152101 if (Idx0->isNullValue())
21162102 PerformFold = true;
2117 else if (SequentialType *STy = dyn_cast_or_null<SequentialType>(LastTy))
2103 else if (LastI.isSequential())
21182104 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx0))
2119 PerformFold = isIndexInRangeOfSequentialType(STy, CI);
2105 PerformFold =
2106 !LastI.isBoundedSequential() ||
2107 isIndexInRangeOfArrayType(LastI.getSequentialNumElements(), CI);
21202108
21212109 if (PerformFold) {
21222110 SmallVector<Value *, 16> NewIndices;
22272215 Unknown = true;
22282216 continue;
22292217 }
2230 if (isIndexInRangeOfSequentialType(STy, CI))
2218 if (isIndexInRangeOfArrayType(isa<ArrayType>(STy)
2219 ? cast<ArrayType>(STy)->getNumElements()
2220 : cast<VectorType>(STy)->getNumElements(),
2221 CI))
22312222 // It's in range, skip to the next index.
22322223 continue;
22332224 if (!isa<SequentialType>(Prev)) {
10721072 gep_type_iterator GEPI = gep_type_begin(this), E = gep_type_end(this);
10731073 User::const_op_iterator OI = std::next(this->op_begin());
10741074
1075 // Skip the first index, as it has no static limit.
1076 ++GEPI;
1077 ++OI;
1078
10791075 // The remaining indices must be compile-time known integers within the
10801076 // bounds of the corresponding notional static array types.
10811077 for (; GEPI != E; ++GEPI, ++OI) {
10821078 ConstantInt *CI = dyn_cast<ConstantInt>(*OI);
1083 if (!CI) return false;
1084 if (ArrayType *ATy = dyn_cast<ArrayType>(*GEPI))
1085 if (CI->getValue().getActiveBits() > 64 ||
1086 CI->getZExtValue() >= ATy->getNumElements())
1087 return false;
1079 if (GEPI.isBoundedSequential() &&
1080 (CI->getValue().getActiveBits() > 64 ||
1081 CI->getZExtValue() >= GEPI.getSequentialNumElements()))
1082 return false;
10881083 }
10891084
10901085 // All the indices checked out.
736736 ArrayRef<Value *> Indices) const {
737737 int64_t Result = 0;
738738
739 // We can use 0 as the address space as we don't need
740 // to get pointer types back from gep_type_iterator.
741 unsigned AS = 0;
742739 generic_gep_type_iterator<Value *const *>
743 GTI = gep_type_begin(ElemTy, AS, Indices),
744 GTE = gep_type_end(ElemTy, AS, Indices);
740 GTI = gep_type_begin(ElemTy, Indices),
741 GTE = gep_type_end(ElemTy, Indices);
745742 for (; GTI != GTE; ++GTI) {
746743 Value *Idx = GTI.getOperand();
747 if (auto *STy = dyn_cast<StructType>(*GTI)) {
744 if (StructType *STy = GTI.getStructTypeOrNull()) {
748745 assert(Idx->getType()->isIntegerTy(32) && "Illegal struct idx");
750747 unsigned FieldNo = cast<ConstantInt>(Idx)->getZExtValue();
750747
3232 continue;
3333
3434 // Handle a struct index, which adds its field offset to the pointer.
35 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
35 if (StructType *STy = GTI.getStructTypeOrNull()) {
3636 unsigned ElementIdx = OpC->getZExtValue();
3737 const StructLayout *SL = DL.getStructLayout(STy);
3838 Offset += APInt(Offset.getBitWidth(), SL->getElementOffset(ElementIdx));
556556 for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
557557 GTI != E; ++GTI) {
558558 const Value *Op = GTI.getOperand();
559 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
559 if (StructType *STy = GTI.getStructTypeOrNull()) {
560560 const StructLayout *SL = DL.getStructLayout(STy);
561561 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
562562 TmpOffset += SL->getElementOffset(Idx);
48844884 for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
48854885 GTI != E; ++GTI) {
48864886 const Value *Idx = GTI.getOperand();
4887 if (auto *StTy = dyn_cast<StructType>(*GTI)) {
4887 if (auto *StTy = GTI.getStructTypeOrNull()) {
48884888 unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
48894889 // N = N + Offset
48904890 if (Field)
71567156 case Instruction::GetElementPtr: {
71577157 gep_type_iterator GTI = gep_type_begin(Instr);
71587158 auto &DL = Ext->getModule()->getDataLayout();
7159 std::advance(GTI, U.getOperandNo());
7160 Type *IdxTy = *GTI;
7159 std::advance(GTI, U.getOperandNo()-1);
7160 Type *IdxTy = GTI.getIndexedType();
71617161 // This extension will end up with a shift because of the scaling factor.
71627162 // 8-bit sized types have a scaling factor of 1, thus a shift amount of 0.
71637163 // Get the shift amount based on the scaling factor:
732732 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
733733 i != e; ++i, ++GTI) {
734734 const Value *Op = *i;
735 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
735 if (StructType *STy = GTI.getStructTypeOrNull()) {
736736 const StructLayout *SL = DL.getStructLayout(STy);
737737 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
738738 TmpOffset += SL->getElementOffset(Idx);
444444 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
445445 ++i, ++GTI) {
446446 const Value *Op = *i;
447 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
447 if (StructType *STy = GTI.getStructTypeOrNull()) {
448448 const StructLayout *SL = DL.getStructLayout(STy);
449449 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
450450 TmpOffset += SL->getElementOffset(Idx);
357357 for (User::const_op_iterator II = U->op_begin() + 1, IE = U->op_end();
358358 II != IE; ++II, ++GTI) {
359359 const Value *Op = *II;
360 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
360 if (StructType *STy = GTI.getStructTypeOrNull()) {
361361 const StructLayout *SL = DL.getStructLayout(STy);
362362 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
363363 TmpOffset += SL->getElementOffset(Idx);
240240 for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
241241 GTI != E; ++GTI) {
242242 const Value *Op = GTI.getOperand();
243 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
243 if (StructType *STy = GTI.getStructTypeOrNull()) {
244244 const StructLayout *SL = DL.getStructLayout(STy);
245245 unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
246246 TmpOffset += SL->getElementOffset(Idx);
935935 for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
936936 i != e; ++i, ++GTI) {
937937 const Value *Op = *i;
938 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
938 if (StructType *STy = GTI.getStructTypeOrNull()) {
939939 const StructLayout *SL = DL.getStructLayout(STy);
940940 Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
941941 continue;
370370 ++GEPI; // Skip over the pointer index.
371371
372372 // If this is a use of an array allocation, do a bit more checking for sanity.
373 if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
374 uint64_t NumElements = AT->getNumElements();
373 if (GEPI.isSequential()) {
375374 ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));
376375
377376 // Check to make sure that index falls within the array. If not,
378377 // something funny is going on, so we won't do the optimization.
379378 //
380 if (Idx->getZExtValue() >= NumElements)
379 if (GEPI.isBoundedSequential() &&
380 Idx->getZExtValue() >= GEPI.getSequentialNumElements())
381381 return false;
382382
383383 // We cannot scalar repl this level of the array unless any array
390390 for (++GEPI; // Skip array index.
391391 GEPI != E;
392392 ++GEPI) {
393 uint64_t NumElements;
394 if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
395 NumElements = SubArrayTy->getNumElements();
396 else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
397 NumElements = SubVectorTy->getNumElements();
398 else {
399 assert((*GEPI)->isStructTy() &&
400 "Indexed GEP type is not array, vector, or struct!");
393 if (GEPI.isStruct())
401394 continue;
402 }
403395
404396 ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
405 if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
397 if (!IdxVal ||
398 (GEPI.isBoundedSequential() &&
399 IdxVal->getZExtValue() >= GEPI.getSequentialNumElements()))
406400 return false;
407401 }
408402 }
516516 if (CI->isZero()) continue;
517517
518518 // Handle a struct index, which adds its field offset to the pointer.
519 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
519 if (StructType *STy = GTI.getStructTypeOrNull()) {
520520 Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
521521 } else {
522522 uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
546546 if (CI->isZero()) continue;
547547
548548 // Handle a struct index, which adds its field offset to the pointer.
549 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
549 if (StructType *STy = GTI.getStructTypeOrNull()) {
550550 Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
551551 } else {
552552 uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
13881388 for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end(); I != E;
13891389 ++I, ++GTI) {
13901390 // Skip indices into struct types.
1391 if (isa<StructType>(*GTI))
1391 if (GTI.isStruct())
13921392 continue;
13931393
13941394 // Index type should have the same width as IntPtr
15451545 bool EndsWithSequential = false;
15461546 for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
15471547 I != E; ++I)
1548 EndsWithSequential = !(*I)->isStructTy();
1548 EndsWithSequential = I.isSequential();
15491549
15501550 // Can we combine the two pointer arithmetics offsets?
15511551 if (EndsWithSequential) {
5151 if (OpC->isZero()) continue; // No offset.
5252
5353 // Handle struct indices, which add their field offset to the pointer.
54 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
54 if (StructType *STy = GTI.getStructTypeOrNull()) {
5555 Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
5656 continue;
5757 }
280280 return nullptr;
281281
282282 gep_type_iterator GTI = gep_type_begin(*GEP);
283 for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
284 if (isa<SequentialType>(*GTI++)) {
285 if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1, *GTI)) {
283 for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
284 if (GTI.isSequential()) {
285 if (auto *NewGEP = tryReassociateGEPAtIndex(GEP, I - 1,
286 GTI.getIndexedType())) {
286287 return NewGEP;
287288 }
288289 }
691691 break;
692692
693693 // Handle a struct index, which adds its field offset to the pointer.
694 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
694 if (StructType *STy = GTI.getStructTypeOrNull()) {
695695 unsigned ElementIdx = OpC->getZExtValue();
696696 const StructLayout *SL = DL.getStructLayout(STy);
697697 GEPOffset +=
721721 for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
722722 I != E; ++I, ++GTI) {
723723 // Skip struct member indices which must be i32.
724 if (isa<SequentialType>(*GTI)) {
724 if (GTI.isSequential()) {
725725 if ((*I)->getType() != IntPtrTy) {
726726 *I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
727727 Changed = true;
738738 int64_t AccumulativeByteOffset = 0;
739739 gep_type_iterator GTI = gep_type_begin(*GEP);
740740 for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
741 if (isa<SequentialType>(*GTI)) {
741 if (GTI.isSequential()) {
742742 // Tries to extract a constant offset from this GEP index.
743743 int64_t ConstantOffset =
744744 ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT);
751751 ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
752752 }
753753 } else if (LowerGEP) {
754 StructType *StTy = cast<StructType>(*GTI);
754 StructType *StTy = GTI.getStructType();
755755 uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
756756 // Skip field 0 as the offset is always 0.
757757 if (Field != 0) {
786786 // Create an ugly GEP for each sequential index. We don't create GEPs for
787787 // structure indices, as they are accumulated in the constant offset index.
788788 for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
789 if (isa<SequentialType>(*GTI)) {
789 if (GTI.isSequential()) {
790790 Value *Idx = Variadic->getOperand(I);
791791 // Skip zero indices.
792792 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
847847 // don't create arithmetics for structure indices, as they are accumulated
848848 // in the constant offset index.
849849 for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
850 if (isa<SequentialType>(*GTI)) {
850 if (GTI.isSequential()) {
851851 Value *Idx = Variadic->getOperand(I);
852852 // Skip zero indices.
853853 if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
927927 // handle the constant offset and won't need a new structure index.
928928 gep_type_iterator GTI = gep_type_begin(*GEP);
929929 for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
930 if (isa<SequentialType>(*GTI)) {
930 if (GTI.isSequential()) {
931931 // Splits this GEP index into a variadic part and a constant offset, and
932932 // uses the variadic part as the new index.
933933 Value *OldIdx = GEP->getOperand(I);
489489 IndexExprs.push_back(SE->getSCEV(*I));
490490
491491 gep_type_iterator GTI = gep_type_begin(GEP);
492 for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
493 if (!isa<SequentialType>(*GTI++))
492 for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
493 if (GTI.isStruct())
494494 continue;
495495
496496 const SCEV *OrigIndexExpr = IndexExprs[I - 1];
500500 // indices except this current one.
501501 const SCEV *BaseExpr = SE->getGEPExpr(cast<GEPOperator>(GEP), IndexExprs);
502502 Value *ArrayIdx = GEP->getOperand(I);
503 uint64_t ElementSize = DL->getTypeAllocSize(*GTI);
503 uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
504504 if (ArrayIdx->getType()->getIntegerBitWidth() <=
505505 DL->getPointerSizeInBits(GEP->getAddressSpace())) {
506506 // Skip factoring if ArrayIdx is wider than the pointer size, because
14151415 if (OpIdx == 0)
14161416 return true;
14171417 gep_type_iterator It = std::next(gep_type_begin(I), OpIdx - 1);
1418 return !It->isStructTy();
1418 return It.isSequential();
14191419 }
14201420 }
14211421