llvm.org GIT mirror llvm / 92631b8
[BasicAA] Improve precision of alloca vs. inbounds GEP alias queries If we have (a) a GEP and (b) a pointer based on an alloca, and the beginning of the object the GEP points to would have a negative offset with respect to the alloca, then the GEP can not alias pointer (b). For example, consider code like: struct { int f0, int f1, ...} foo; ... foo alloca; foo *random = bar(alloca); int *f0 = &alloca.f0 int *f1 = &random->f1; Which is lowered, approximately, to: %alloca = alloca %struct.foo %random = call %struct.foo* @random(%struct.foo* %alloca) %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0 %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1 Assume %f1 and %f0 alias. Then %f1 would point into the object allocated by %alloca. Since the %f1 GEP is inbounds, that means %random must also point into the same object. But since %f0 points to the beginning of %alloca, the highest %f1 can be is (%alloca + 3). This means %random can not be higher than (%alloca - 1), and so is not inbounds, a contradiction. Differential Revision: http://reviews.llvm.org/D20495 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@270777 91177308-0d34-0410-b5e6-96231b3b80d8 Michael Kuperstein 3 years ago
3 changed file(s) with 228 addition(s) and 87 deletion(s). Raw diff Collapse all Expand all
108108 }
109109 };
110110
111 // Represents the internal structure of a GEP, decomposed into a base pointer,
112 // constant offsets, and variable scaled indices.
113 struct DecomposedGEP {
114 // Base pointer of the GEP
115 const Value *Base;
116 // Total constant offset w.r.t the base from indexing into structs
117 int64_t StructOffset;
118 // Total constant offset w.r.t the base from indexing through
119 // pointers/arrays/vectors
120 int64_t OtherOffset;
121 // Scaled variable (non-constant) indices.
122 SmallVector VarIndices;
123 };
124
111125 /// Track alias queries to guard against recursion.
112126 typedef std::pair LocPair;
113127 typedef SmallDenseMap AliasCacheTy;
138152 const DataLayout &DL, unsigned Depth, AssumptionCache *AC,
139153 DominatorTree *DT, bool &NSW, bool &NUW);
140154
141 static const Value *
142 DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
143 SmallVectorImpl &VarIndices,
144 bool &MaxLookupReached, const DataLayout &DL,
145 AssumptionCache *AC, DominatorTree *DT);
155 static bool DecomposeGEPExpression(const Value *V, DecomposedGEP &Decomposed,
156 const DataLayout &DL, AssumptionCache *AC, DominatorTree *DT);
157
158 static bool isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
159 const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompAlloca,
160 uint64_t AllocaAccessSize);
161
146162 /// \brief A Heuristic for aliasGEP that searches for a constant offset
147163 /// between the variables.
148164 ///
342342 /// GetUnderlyingObject and DecomposeGEPExpression must use the same search
343343 /// depth (MaxLookupSearchDepth). When DataLayout not is around, it just looks
344344 /// through pointer casts.
345 /*static*/ const Value *BasicAAResult::DecomposeGEPExpression(
346 const Value *V, int64_t &BaseOffs,
347 SmallVectorImpl &VarIndices, bool &MaxLookupReached,
348 const DataLayout &DL, AssumptionCache *AC, DominatorTree *DT) {
345 bool BasicAAResult::DecomposeGEPExpression(const Value *V,
346 DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
347 DominatorTree *DT) {
349348 // Limit recursion depth to limit compile time in crazy cases.
350349 unsigned MaxLookup = MaxLookupSearchDepth;
351 MaxLookupReached = false;
352350 SearchTimes++;
353351
354 BaseOffs = 0;
352 Decomposed.StructOffset = 0;
353 Decomposed.OtherOffset = 0;
354 Decomposed.VarIndices.clear();
355355 do {
356356 // See if this is a bitcast or GEP.
357357 const Operator *Op = dyn_cast(V);
363363 continue;
364364 }
365365 }
366 return V;
366 Decomposed.Base = V;
367 return false;
367368 }
368369
369370 if (Op->getOpcode() == Instruction::BitCast ||
387388 continue;
388389 }
389390
390 return V;
391 Decomposed.Base = V;
392 return false;
391393 }
392394
393395 // Don't attempt to analyze GEPs over unsized objects.
394 if (!GEPOp->getSourceElementType()->isSized())
395 return V;
396 if (!GEPOp->getSourceElementType()->isSized()) {
397 Decomposed.Base = V;
398 return false;
399 }
396400
397401 unsigned AS = GEPOp->getPointerAddressSpace();
398402 // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
408412 if (FieldNo == 0)
409413 continue;
410414
411 BaseOffs += DL.getStructLayout(STy)->getElementOffset(FieldNo);
415 Decomposed.StructOffset +=
416 DL.getStructLayout(STy)->getElementOffset(FieldNo);
412417 continue;
413418 }
414419
416421 if (const ConstantInt *CIdx = dyn_cast(Index)) {
417422 if (CIdx->isZero())
418423 continue;
419 BaseOffs += DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
424 Decomposed.OtherOffset +=
425 DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
420426 continue;
421427 }
422428
437443
438444 // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
439445 // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
440 BaseOffs += IndexOffset.getSExtValue() * Scale;
446 Decomposed.OtherOffset += IndexOffset.getSExtValue() * Scale;
441447 Scale *= IndexScale.getSExtValue();
442448
443449 // If we already had an occurrence of this index variable, merge this
444450 // scale into it. For example, we want to handle:
445451 // A[x][x] -> x*16 + x*4 -> x*20
446452 // This also ensures that 'x' only appears in the index list once.
447 for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
448 if (VarIndices[i].V == Index && VarIndices[i].ZExtBits == ZExtBits &&
449 VarIndices[i].SExtBits == SExtBits) {
450 Scale += VarIndices[i].Scale;
451 VarIndices.erase(VarIndices.begin() + i);
453 for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
454 if (Decomposed.VarIndices[i].V == Index &&
455 Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
456 Decomposed.VarIndices[i].SExtBits == SExtBits) {
457 Scale += Decomposed.VarIndices[i].Scale;
458 Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
452459 break;
453460 }
454461 }
460467 if (Scale) {
461468 VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
462469 static_cast(Scale)};
463 VarIndices.push_back(Entry);
470 Decomposed.VarIndices.push_back(Entry);
464471 }
465472 }
466473
467474 // Take care of wrap-arounds
468 BaseOffs = adjustToPointerSize(BaseOffs, PointerSize);
475 Decomposed.StructOffset =
476 adjustToPointerSize(Decomposed.StructOffset, PointerSize);
477 Decomposed.OtherOffset =
478 adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
469479
470480 // Analyze the base pointer next.
471481 V = GEPOp->getOperand(0);
472482 } while (--MaxLookup);
473483
474484 // If the chain of expressions is too deep, just return early.
475 MaxLookupReached = true;
485 Decomposed.Base = V;
476486 SearchLimitReached++;
477 return V;
487 return true;
478488 }
479489
480490 /// Returns whether the given pointer value points to memory that is local to
948958 return MayAlias;
949959 }
950960
961 // If we have (a) a GEP and (b) a pointer based on an alloca, and the
962 // beginning of the object the GEP points to would have a negative offset with
963 // respect to the alloca, that means the GEP can not alias pointer (b).
964 // Note that the pointer based on the alloca may not be a GEP. For
965 // example, it may be the alloca itself.
966 //
967 // For example, consider:
968 //
969 // struct { int f0, int f1, ...} foo;
970 // foo alloca;
971 // foo* random = bar(alloca);
972 // int *f0 = &alloca.f0
973 // int *f1 = &random->f1;
974 //
975 // Which is lowered, approximately, to:
976 //
977 // %alloca = alloca %struct.foo
978 // %random = call %struct.foo* @random(%struct.foo* %alloca)
979 // %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
980 // %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
981 //
982 // Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
983 // by %alloca. Since the %f1 GEP is inbounds, that means %random must also
984 // point into the same object. But since %f0 points to the beginning of %alloca,
985 // the highest %f1 can be is (%alloca + 3). This means %random can not be higher
986 // than (%alloca - 1), and so is not inbounds, a contradiction.
987 bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
988 const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompAlloca,
989 uint64_t AllocaAccessSize) {
990 // If the alloca access size is unknown, or the GEP isn't inbounds, bail.
991 if (AllocaAccessSize == MemoryLocation::UnknownSize || !GEPOp->isInBounds())
992 return false;
993
994 // We need an alloca, and want to know the offset of the pointer
995 // from the alloca precisely, so no variable indices are allowed.
996 if (!isa(DecompAlloca.Base) || !DecompAlloca.VarIndices.empty())
997 return false;
998
999 int64_t AllocaBaseOffset = DecompAlloca.StructOffset +
1000 DecompAlloca.OtherOffset;
1001
1002 // If the GEP has no variable indices, we know the precise offset
1003 // from the base, then use it. If the GEP has variable indices, we're in
1004 // a bit more trouble: we can't count on the constant offsets that come
1005 // from non-struct sources, since these can be "rewound" by a negative
1006 // variable offset. So use only offsets that came from structs.
1007 int64_t GEPBaseOffset = DecompGEP.StructOffset;
1008 if (DecompGEP.VarIndices.empty())
1009 GEPBaseOffset += DecompGEP.OtherOffset;
1010
1011 return (GEPBaseOffset >= AllocaBaseOffset + (int64_t)AllocaAccessSize);
1012 }
1013
9511014 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
9521015 /// another pointer.
9531016 ///
9591022 uint64_t V2Size, const AAMDNodes &V2AAInfo,
9601023 const Value *UnderlyingV1,
9611024 const Value *UnderlyingV2) {
962 int64_t GEP1BaseOffset;
963 bool GEP1MaxLookupReached;
964 SmallVector GEP1VariableIndices;
965
1025 DecomposedGEP DecompGEP1, DecompGEP2;
1026 bool GEP1MaxLookupReached =
1027 DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
1028 bool GEP2MaxLookupReached =
1029 DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);
1030
1031 int64_t GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
1032 int64_t GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;
1033
1034 assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
1035 "DecomposeGEPExpression returned a result different from "
1036 "GetUnderlyingObject");
1037
1038 // If the GEP's offset relative to its base is such that the base would
1039 // fall below the start of the object underlying V2, then the GEP and V2
1040 // cannot alias.
1041 if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
1042 isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
1043 return NoAlias;
9661044 // If we have two gep instructions with must-alias or not-alias'ing base
9671045 // pointers, figure out if the indexes to the GEP tell us anything about the
9681046 // derived pointer.
9691047 if (const GEPOperator *GEP2 = dyn_cast(V2)) {
1048 // Check for the GEP base being at a negative offset, this time in the other
1049 // direction.
1050 if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
1051 isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
1052 return NoAlias;
9701053 // Do the base pointers alias?
9711054 AliasResult BaseAlias =
9721055 aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
9811064 if (PreciseBaseAlias == NoAlias) {
9821065 // See if the computed offset from the common pointer tells us about the
9831066 // relation of the resulting pointer.
984 int64_t GEP2BaseOffset;
985 bool GEP2MaxLookupReached;
986 SmallVector GEP2VariableIndices;
987 const Value *GEP2BasePtr =
988 DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
989 GEP2MaxLookupReached, DL, &AC, DT);
990 const Value *GEP1BasePtr =
991 DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
992 GEP1MaxLookupReached, DL, &AC, DT);
993 // DecomposeGEPExpression and GetUnderlyingObject should return the
994 // same result except when DecomposeGEPExpression has no DataLayout.
995 // FIXME: They always have a DataLayout, so this should become an
996 // assert.
997 if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
998 return MayAlias;
999 }
10001067 // If the max search depth is reached the result is undefined
10011068 if (GEP2MaxLookupReached || GEP1MaxLookupReached)
10021069 return MayAlias;
10031070
10041071 // Same offsets.
10051072 if (GEP1BaseOffset == GEP2BaseOffset &&
1006 GEP1VariableIndices == GEP2VariableIndices)
1073 DecompGEP1.VarIndices == DecompGEP2.VarIndices)
10071074 return NoAlias;
1008 GEP1VariableIndices.clear();
10091075 }
10101076 }
10111077
10171083 // Otherwise, we have a MustAlias. Since the base pointers alias each other
10181084 // exactly, see if the computed offset from the common pointer tells us
10191085 // about the relation of the resulting pointer.
1020 const Value *GEP1BasePtr =
1021 DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
1022 GEP1MaxLookupReached, DL, &AC, DT);
1023
1024 int64_t GEP2BaseOffset;
1025 bool GEP2MaxLookupReached;
1026 SmallVector GEP2VariableIndices;
1027 const Value *GEP2BasePtr =
1028 DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
1029 GEP2MaxLookupReached, DL, &AC, DT);
1030
1031 // DecomposeGEPExpression and GetUnderlyingObject should return the
1032 // same result except when DecomposeGEPExpression has no DataLayout.
1033 // FIXME: They always have a DataLayout, so this should become an assert.
1034 if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
1035 return MayAlias;
1036 }
1037
10381086 // If we know the two GEPs are based off of the exact same pointer (and not
10391087 // just the same underlying object), see if that tells us anything about
10401088 // the resulting pointers.
10521100 // Subtract the GEP2 pointer from the GEP1 pointer to find out their
10531101 // symbolic difference.
10541102 GEP1BaseOffset -= GEP2BaseOffset;
1055 GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);
1103 GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
10561104
10571105 } else {
10581106 // Check to see if these two pointers are related by the getelementptr
10741122 // with the first operand of the getelementptr".
10751123 return R;
10761124
1077 const Value *GEP1BasePtr =
1078 DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
1079 GEP1MaxLookupReached, DL, &AC, DT);
1080
1081 // DecomposeGEPExpression and GetUnderlyingObject should return the
1082 // same result except when DecomposeGEPExpression has no DataLayout.
1083 // FIXME: They always have a DataLayout, so this should become an assert.
1084 if (GEP1BasePtr != UnderlyingV1) {
1085 return MayAlias;
1086 }
10871125 // If the max search depth is reached the result is undefined
10881126 if (GEP1MaxLookupReached)
10891127 return MayAlias;
10951133 //
10961134 // In the other case, if we have getelementptr , 0, 0, 0, 0, ... and V2
10971135 // must aliases the GEP, the end result is a must alias also.
1098 if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty())
1136 if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
10991137 return MustAlias;
11001138
11011139 // If there is a constant difference between the pointers, but the difference
11021140 // is less than the size of the associated memory object, then we know
11031141 // that the objects are partially overlapping. If the difference is
11041142 // greater, we know they do not overlap.
1105 if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
1143 if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
11061144 if (GEP1BaseOffset >= 0) {
11071145 if (V2Size != MemoryLocation::UnknownSize) {
11081146 if ((uint64_t)GEP1BaseOffset < V2Size)
11271165 }
11281166 }
11291167
1130 if (!GEP1VariableIndices.empty()) {
1168 if (!DecompGEP1.VarIndices.empty()) {
11311169 uint64_t Modulo = 0;
11321170 bool AllPositive = true;
1133 for (unsigned i = 0, e = GEP1VariableIndices.size(); i != e; ++i) {
1171 for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
11341172
11351173 // Try to distinguish something like &A[i][1] against &A[42][0].
11361174 // Grab the least significant bit set in any of the scales. We
11371175 // don't need std::abs here (even if the scale's negative) as we'll
11381176 // be ^'ing Modulo with itself later.
1139 Modulo |= (uint64_t)GEP1VariableIndices[i].Scale;
1177 Modulo |= (uint64_t)DecompGEP1.VarIndices[i].Scale;
11401178
11411179 if (AllPositive) {
11421180 // If the Value could change between cycles, then any reasoning about
11431181 // the Value this cycle may not hold in the next cycle. We'll just
11441182 // give up if we can't determine conditions that hold for every cycle:
1145 const Value *V = GEP1VariableIndices[i].V;
1183 const Value *V = DecompGEP1.VarIndices[i].V;
11461184
11471185 bool SignKnownZero, SignKnownOne;
11481186 ComputeSignBit(const_cast(V), SignKnownZero, SignKnownOne, DL,
11501188
11511189 // Zero-extension widens the variable, and so forces the sign
11521190 // bit to zero.
1153 bool IsZExt = GEP1VariableIndices[i].ZExtBits > 0 || isa(V);
1191 bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa(V);
11541192 SignKnownZero |= IsZExt;
11551193 SignKnownOne &= !IsZExt;
11561194
11571195 // If the variable begins with a zero then we know it's
11581196 // positive, regardless of whether the value is signed or
11591197 // unsigned.
1160 int64_t Scale = GEP1VariableIndices[i].Scale;
1198 int64_t Scale = DecompGEP1.VarIndices[i].Scale;
11611199 AllPositive =
11621200 (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
11631201 }
11801218 if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t)GEP1BaseOffset)
11811219 return NoAlias;
11821220
1183 if (constantOffsetHeuristic(GEP1VariableIndices, V1Size, V2Size,
1221 if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
11841222 GEP1BaseOffset, &AC, DT))
11851223 return NoAlias;
11861224 }
0 ; RUN: opt < %s -basicaa -aa-eval -print-all-alias-modref-info -disable-output 2>&1 | FileCheck %s
1
2 target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
3 target triple = "i386-unknown-linux-gnu"
4
5 declare i32* @random.i32(i32* %ptr)
6
7 ; CHECK-LABEL: Function: arr:
8 ; CHECK-DAG: MayAlias: i32* %alloca, i32* %p0
9 ; CHECK-DAG: NoAlias: i32* %alloca, i32* %p1
10 define void @arr() {
11 %alloca = alloca i32, i32 4
12 %random = call i32* @random.i32(i32* %alloca)
13 %p0 = getelementptr inbounds i32, i32* %random, i32 0
14 %p1 = getelementptr inbounds i32, i32* %random, i32 1
15 ret void
16 }
17
18 ; CHECK-LABEL: Function: arg:
19 ; CHECK-DAG: MayAlias: i32* %arg, i32* %p0
20 ; CHECK-DAG: MayAlias: i32* %arg, i32* %p1
21 define void @arg(i32* %arg) {
22 %random = call i32* @random.i32(i32* %arg)
23 %p0 = getelementptr inbounds i32, i32* %random, i32 0
24 %p1 = getelementptr inbounds i32, i32* %random, i32 1
25 ret void
26 }
27
28 ; CHECK-LABEL: Function: struct:
29 ; CHECK-DAG: MayAlias: i32* %f0, i32* %p0
30 ; CHECK-DAG: MayAlias: i32* %f1, i32* %p0
31 ; CHECK-DAG: NoAlias: i32* %f0, i32* %p1
32 ; CHECK-DAG: MayAlias: i32* %f1, i32* %p1
33 %struct = type { i32, i32, i32 }
34 define void @struct() {
35 %alloca = alloca %struct
36 %alloca.i32 = bitcast %struct* %alloca to i32*
37 %random = call i32* @random.i32(i32* %alloca.i32)
38 %f0 = getelementptr inbounds %struct, %struct* %alloca, i32 0, i32 0
39 %f1 = getelementptr inbounds %struct, %struct* %alloca, i32 0, i32 1
40 %p0 = getelementptr inbounds i32, i32* %random, i32 0
41 %p1 = getelementptr inbounds i32, i32* %random, i32 1
42 ret void
43 }
44
45 ; CHECK-LABEL: Function: complex1:
46 ; CHECK-DAG: MayAlias: i32* %a2.0, i32* %r2.0
47 ; CHECK-DAG: NoAlias: i32* %a2.0, i32* %r2.1
48 ; CHECK-DAG: MayAlias: i32* %a2.0, i32* %r2.i
49 ; CHECK-DAG: MayAlias: i32* %a2.0, i32* %r2.1i
50 ; CHECK-DAG: NoAlias: i32* %a1, i32* %r2.0
51 ; CHECK-DAG: NoAlias: i32* %a1, i32* %r2.1
52 ; CHECK-DAG: NoAlias: i32* %a1, i32* %r2.i
53 ; CHECK-DAG: NoAlias: i32* %a1, i32* %r2.1i
54 %complex = type { i32, i32, [4 x i32] }
55 define void @complex1(i32 %i) {
56 %alloca = alloca %complex
57 %alloca.i32 = bitcast %complex* %alloca to i32*
58 %r.i32 = call i32* @random.i32(i32* %alloca.i32)
59 %random = bitcast i32* %r.i32 to %complex*
60 %a1 = getelementptr inbounds %complex, %complex* %alloca, i32 0, i32 1
61 %a2.0 = getelementptr inbounds %complex, %complex* %alloca, i32 0, i32 2, i32 0
62 %r2.0 = getelementptr inbounds %complex, %complex* %random, i32 0, i32 2, i32 0
63 %r2.1 = getelementptr inbounds %complex, %complex* %random, i32 0, i32 2, i32 1
64 %r2.i = getelementptr inbounds %complex, %complex* %random, i32 0, i32 2, i32 %i
65 %r2.1i = getelementptr inbounds i32, i32* %r2.1, i32 %i
66 ret void
67 }
68
69 ; CHECK-LABEL: Function: complex2:
70 ; CHECK-DAG: NoAlias: i32* %alloca, i32* %p120
71 ; CHECK-DAG: NoAlias: i32* %alloca, i32* %pi20
72 ; CHECK-DAG: NoAlias: i32* %alloca, i32* %pij1
73 ; CHECK-DAG: MayAlias: i32* %a3, i32* %pij1
74 %inner = type { i32, i32 }
75 %outer = type { i32, i32, [10 x %inner] }
76 declare %outer* @rand_outer(i32* %p)
77 define void @complex2(i32 %i, i32 %j) {
78 %alloca = alloca i32, i32 128
79 %a3 = getelementptr inbounds i32, i32* %alloca, i32 3
80 %random = call %outer* @rand_outer(i32* %alloca)
81 %p120 = getelementptr inbounds %outer, %outer* %random, i32 1, i32 2, i32 2, i32 0
82 %pi20 = getelementptr inbounds %outer, %outer* %random, i32 %i, i32 2, i32 2, i32 0
83 %pij1 = getelementptr inbounds %outer, %outer* %random, i32 %i, i32 2, i32 %j, i32 1
84 ret void
85 }
86