1//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The implementation of the loop memory dependence analysis that was
10// originally developed for the loop vectorizer.
11//
12//===----------------------------------------------------------------------===//
13
15#include "llvm/ADT/APInt.h"
16#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/STLExtras.h"
20#include "llvm/ADT/SetVector.h"
22#include "llvm/ADT/SmallSet.h"
37#include "llvm/IR/BasicBlock.h"
38#include "llvm/IR/Constants.h"
39#include "llvm/IR/DataLayout.h"
40#include "llvm/IR/DebugLoc.h"
43#include "llvm/IR/Dominators.h"
44#include "llvm/IR/Function.h"
46#include "llvm/IR/InstrTypes.h"
47#include "llvm/IR/Instruction.h"
49#include "llvm/IR/Operator.h"
50#include "llvm/IR/PassManager.h"
52#include "llvm/IR/Type.h"
53#include "llvm/IR/Value.h"
54#include "llvm/IR/ValueHandle.h"
57#include "llvm/Support/Debug.h"
60#include <algorithm>
61#include <cassert>
62#include <cstdint>
63#include <iterator>
64#include <utility>
65#include <variant>
66#include <vector>
67
68using namespace llvm;
69using namespace llvm::PatternMatch;
70
71#define DEBUG_TYPE "loop-accesses"
72
73static cl::opt<unsigned, true>
74VectorizationFactor("force-vector-width", cl::Hidden,
75 cl::desc("Sets the SIMD width. Zero is autoselect."),
76 cl::location(VectorizerParams::VectorizationFactor));
77unsigned VectorizerParams::VectorizationFactor;
78
79static cl::opt<unsigned, true>
80VectorizationInterleave("force-vector-interleave", cl::Hidden,
81 cl::desc("Sets the vectorization interleave count. "
82 "Zero is autoselect."),
83 cl::location(
84 VectorizerParams::VectorizationInterleave));
85unsigned VectorizerParams::VectorizationInterleave;
86
88 "runtime-memory-check-threshold", cl::Hidden,
89 cl::desc("When performing memory disambiguation checks at runtime do not "
90 "generate more than this number of comparisons (default = 8)."),
93
94/// The maximum number of iterations used to merge memory checks.
95static cl::opt<unsigned> MemoryCheckMergeThreshold(
96 "memory-check-merge-threshold", cl::Hidden,
97 cl::desc("Maximum number of comparisons done when trying to merge "
98 "runtime memory checks. (default = 100)"),
99 cl::init(100));
100
101/// Maximum SIMD width.
102const unsigned VectorizerParams::MaxVectorWidth = 64;
103
104/// We collect dependences up to this threshold.
105static cl::opt<unsigned>
106 MaxDependences("max-dependences", cl::Hidden,
107 cl::desc("Maximum number of dependences collected by "
108 "loop-access analysis (default = 100)"),
109 cl::init(100));
110
111/// This enables versioning on the strides of symbolically striding memory
112/// accesses in code like the following.
113/// for (i = 0; i < N; ++i)
114/// A[i * Stride1] += B[i * Stride2] ...
115///
116/// Will be roughly translated to
117/// if (Stride1 == 1 && Stride2 == 1) {
118/// for (i = 0; i < N; i+=4)
119/// A[i:i+3] += ...
120/// } else
121/// ...
123 "enable-mem-access-versioning", cl::init(true), cl::Hidden,
124 cl::desc("Enable symbolic stride memory access versioning"));
125
126/// Enable store-to-load forwarding conflict detection. This option can
127/// be disabled for correctness testing.
129 "store-to-load-forwarding-conflict-detection", cl::Hidden,
130 cl::desc("Enable conflict detection in loop-access analysis"),
131 cl::init(true));
132
134 "max-forked-scev-depth", cl::Hidden,
135 cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"),
136 cl::init(5));
137
139 "laa-speculate-unit-stride", cl::Hidden,
140 cl::desc("Speculate that non-constant strides are unit in LAA"),
141 cl::init(true));
142
144 "hoist-runtime-checks", cl::Hidden,
145 cl::desc(
146 "Hoist inner loop runtime memory checks to outer loop if possible"),
149
150bool VectorizerParams::isInterleaveForced() {
151 return ::VectorizationInterleave.getNumOccurrences() > 0;
152}
153
154const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
155 const DenseMap<Value *, const SCEV *> &PtrToStride,
156 Value *Ptr) {
157 const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
158
159 // If there is an entry in the map return the SCEV of the pointer with the
160 // symbolic stride replaced by one.
161 DenseMap<Value *, const SCEV *>::const_iterator SI = PtrToStride.find(Ptr);
162 if (SI == PtrToStride.end())
163 // For a non-symbolic stride, just return the original expression.
164 return OrigSCEV;
165
166 const SCEV *StrideSCEV = SI->second;
167 // Note: This assert is both overly strong and overly weak. The actual
168 // invariant here is that StrideSCEV should be loop invariant. The only
169 // such invariant strides we happen to speculate right now are unknowns
170 // and thus this is a reasonable proxy of the actual invariant.
171 assert(isa<SCEVUnknown>(StrideSCEV) && "shouldn't be in map");
172
173 ScalarEvolution *SE = PSE.getSE();
174 const auto *CT = SE->getOne(StrideSCEV->getType());
175 PSE.addPredicate(*SE->getEqualPredicate(StrideSCEV, CT));
176 auto *Expr = PSE.getSCEV(Ptr);
177
178 LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
179 << " by: " << *Expr << "\n");
180 return Expr;
181}
182
183RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
184 unsigned Index, RuntimePointerChecking &RtCheck)
185 : High(RtCheck.Pointers[Index].End), Low(RtCheck.Pointers[Index].Start),
186 AddressSpace(RtCheck.Pointers[Index]
187 .PointerValue->getType()
188 ->getPointerAddressSpace()),
189 NeedsFreeze(RtCheck.Pointers[Index].NeedsFreeze) {
190 Members.push_back(Index);
191}
192
193/// Calculate Start and End points of memory access.
194/// Let's assume A is the first access and B is a memory access on N-th loop
195/// iteration. Then B is calculated as:
196/// B = A + Step*N .
197/// Step value may be positive or negative.
198/// N is a calculated back-edge taken count:
199/// N = (TripCount > 0) ? RoundDown(TripCount -1 , VF) : 0
200/// Start and End points are calculated in the following way:
201/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
202/// where SizeOfElt is the size of single memory access in bytes.
203///
204/// There is no conflict when the intervals are disjoint:
205/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
206void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
207 Type *AccessTy, bool WritePtr,
208 unsigned DepSetId, unsigned ASId,
209 PredicatedScalarEvolution &PSE,
210 bool NeedsFreeze) {
211 ScalarEvolution *SE = PSE.getSE();
212
213 const SCEV *ScStart;
214 const SCEV *ScEnd;
215
216 if (SE->isLoopInvariant(PtrExpr, Lp)) {
217 ScStart = ScEnd = PtrExpr;
218 } else {
219 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr);
220 assert(AR && "Invalid addrec expression");
221 const SCEV *Ex = PSE.getBackedgeTakenCount();
222
223 ScStart = AR->getStart();
224 ScEnd = AR->evaluateAtIteration(Ex, *SE);
225 const SCEV *Step = AR->getStepRecurrence(*SE);
226
227 // For expressions with negative step, the upper bound is ScStart and the
228 // lower bound is ScEnd.
229 if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
230 if (CStep->getValue()->isNegative())
231 std::swap(ScStart, ScEnd);
232 } else {
233 // Fallback case: the step is not constant, but we can still
234 // get the upper and lower bounds of the interval by using min/max
235 // expressions.
236 ScStart = SE->getUMinExpr(ScStart, ScEnd);
237 ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
238 }
239 }
240 assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
241 assert(SE->isLoopInvariant(ScEnd, Lp) && "ScEnd needs to be invariant");
242
243 // Add the size of the pointed element to ScEnd.
244 auto &DL = Lp->getHeader()->getModule()->getDataLayout();
245 Type *IdxTy = DL.getIndexType(Ptr->getType());
246 const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
247 ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
248
249 Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
250 NeedsFreeze);
251}
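// Illustrative example (annotation): for an access whose pointer SCEV is
// {%base,+,4}<%loop> (4-byte elements, positive step) and a backedge-taken
// count of 99, the code above yields ScStart = %base and
// ScEnd = %base + 4*99 + 4 = %base + 400, so [Start, End) covers every byte
// the access may touch across all iterations; for a negative step the two
// bounds are swapped first.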
252
253void RuntimePointerChecking::tryToCreateDiffCheck(
254 const RuntimeCheckingPtrGroup &CGI, const RuntimeCheckingPtrGroup &CGJ) {
255 if (!CanUseDiffCheck)
256 return;
257
258 // If either group contains multiple different pointers, bail out.
259 // TODO: Support multiple pointers by using the minimum or maximum pointer,
260 // depending on src & sink.
261 if (CGI.Members.size() != 1 || CGJ.Members.size() != 1) {
262 CanUseDiffCheck = false;
263 return;
264 }
265
266 PointerInfo *Src = &Pointers[CGI.Members[0]];
267 PointerInfo *Sink = &Pointers[CGJ.Members[0]];
268
269 // If either pointer is read and written, multiple checks may be needed. Bail
270 // out.
271 if (!DC.getOrderForAccess(Src->PointerValue, !Src->IsWritePtr).empty() ||
272 !DC.getOrderForAccess(Sink->PointerValue, !Sink->IsWritePtr).empty()) {
273 CanUseDiffCheck = false;
274 return;
275 }
276
277 ArrayRef<unsigned> AccSrc =
278 DC.getOrderForAccess(Src->PointerValue, Src->IsWritePtr);
279 ArrayRef<unsigned> AccSink =
280 DC.getOrderForAccess(Sink->PointerValue, Sink->IsWritePtr);
281 // If either pointer is accessed multiple times, there may not be a clear
282 // src/sink relation. Bail out for now.
283 if (AccSrc.size() != 1 || AccSink.size() != 1) {
284 CanUseDiffCheck = false;
285 return;
286 }
287 // If the sink is accessed before src, swap src/sink.
288 if (AccSink[0] < AccSrc[0])
289 std::swap(Src, Sink);
290
291 auto *SrcAR = dyn_cast<SCEVAddRecExpr>(Src->Expr);
292 auto *SinkAR = dyn_cast<SCEVAddRecExpr>(Sink->Expr);
293 if (!SrcAR || !SinkAR || SrcAR->getLoop() != DC.getInnermostLoop() ||
294 SinkAR->getLoop() != DC.getInnermostLoop()) {
295 CanUseDiffCheck = false;
296 return;
297 }
298
299 SmallVector<Instruction *, 4> SrcInsts =
300 DC.getInstructionsForAccess(Src->PointerValue, Src->IsWritePtr);
301 SmallVector<Instruction *, 4> SinkInsts =
302 DC.getInstructionsForAccess(Sink->PointerValue, Sink->IsWritePtr);
303 Type *SrcTy = getLoadStoreType(SrcInsts[0]);
304 Type *DstTy = getLoadStoreType(SinkInsts[0]);
305 if (isa<ScalableVectorType>(SrcTy) || isa<ScalableVectorType>(DstTy)) {
306 CanUseDiffCheck = false;
307 return;
308 }
309 const DataLayout &DL =
310 SinkAR->getLoop()->getHeader()->getModule()->getDataLayout();
311 unsigned AllocSize =
312 std::max(DL.getTypeAllocSize(SrcTy), DL.getTypeAllocSize(DstTy));
313
314 // Only matching constant steps matching the AllocSize are supported at the
315 // moment. This simplifies the difference computation. Can be extended in the
316 // future.
317 auto *Step = dyn_cast<SCEVConstant>(SinkAR->getStepRecurrence(*SE));
318 if (!Step || Step != SrcAR->getStepRecurrence(*SE) ||
319 Step->getAPInt().abs() != AllocSize) {
320 CanUseDiffCheck = false;
321 return;
322 }
323
324 IntegerType *IntTy =
325 IntegerType::get(Src->PointerValue->getContext(),
326 DL.getPointerSizeInBits(CGI.AddressSpace));
327
328 // When counting down, the dependence distance needs to be swapped.
329 if (Step->getValue()->isNegative())
330 std::swap(SinkAR, SrcAR);
331
332 const SCEV *SinkStartInt = SE->getPtrToIntExpr(SinkAR->getStart(), IntTy);
333 const SCEV *SrcStartInt = SE->getPtrToIntExpr(SrcAR->getStart(), IntTy);
334 if (isa<SCEVCouldNotCompute>(SinkStartInt) ||
335 isa<SCEVCouldNotCompute>(SrcStartInt)) {
336 CanUseDiffCheck = false;
337 return;
338 }
339
340 const Loop *InnerLoop = SrcAR->getLoop();
341 // If the start values for both Src and Sink also vary according to an outer
342 // loop, then it's probably better to avoid creating diff checks because
343 // they may not be hoisted. We should instead let llvm::addRuntimeChecks
344 // do the expanded full range overlap checks, which can be hoisted.
345 if (HoistRuntimeChecks && InnerLoop->getParentLoop() &&
346 isa<SCEVAddRecExpr>(SinkStartInt) && isa<SCEVAddRecExpr>(SrcStartInt)) {
347 auto *SrcStartAR = cast<SCEVAddRecExpr>(SrcStartInt);
348 auto *SinkStartAR = cast<SCEVAddRecExpr>(SinkStartInt);
349 const Loop *StartARLoop = SrcStartAR->getLoop();
350 if (StartARLoop == SinkStartAR->getLoop() &&
351 StartARLoop == InnerLoop->getParentLoop() &&
352 // If the diff check would already be loop invariant (due to the
353 // recurrences being the same), then we prefer to keep the diff checks
354 // because they are cheaper.
355 SrcStartAR->getStepRecurrence(*SE) !=
356 SinkStartAR->getStepRecurrence(*SE)) {
357 LLVM_DEBUG(dbgs() << "LAA: Not creating diff runtime check, since these "
358 "cannot be hoisted out of the outer loop\n");
359 CanUseDiffCheck = false;
360 return;
361 }
362 }
363
364 LLVM_DEBUG(dbgs() << "LAA: Creating diff runtime check for:\n"
365 << "SrcStart: " << *SrcStartInt << '\n'
366 << "SinkStartInt: " << *SinkStartInt << '\n');
367 DiffChecks.emplace_back(SrcStartInt, SinkStartInt, AllocSize,
368 Src->NeedsFreeze || Sink->NeedsFreeze);
369}
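// Note (annotation): each DiffChecks entry records (SrcStart, SinkStart,
// AccessSize) for a single src/sink pair; the runtime-check expansion outside
// this file can then emit one pointer subtraction and compare per pair instead
// of the full interval-overlap check, which is why diff checks are preferred
// whenever the preconditions above hold.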
370
371SmallVector<RuntimePointerCheck, 4> RuntimePointerChecking::generateChecks() {
372 SmallVector<RuntimePointerCheck, 4> Checks;
373
374 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
375 for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
376 const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
377 const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];
378
379 if (needsChecking(CGI, CGJ)) {
380 tryToCreateDiffCheck(CGI, CGJ);
381 Checks.push_back(std::make_pair(&CGI, &CGJ));
382 }
383 }
384 }
385 return Checks;
386}
387
388void RuntimePointerChecking::generateChecks(
389 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
390 assert(Checks.empty() && "Checks is not empty");
391 groupChecks(DepCands, UseDependencies);
392 Checks = generateChecks();
393}
394
395bool RuntimePointerChecking::needsChecking(
396 const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
397 for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
398 for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
399 if (needsChecking(M.Members[I], N.Members[J]))
400 return true;
401 return false;
402}
403
404/// Compare \p I and \p J and return the minimum.
405/// Return nullptr in case we couldn't find an answer.
406static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
407 ScalarEvolution *SE) {
408 const SCEV *Diff = SE->getMinusSCEV(J, I);
409 const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);
410
411 if (!C)
412 return nullptr;
413 if (C->getValue()->isNegative())
414 return J;
415 return I;
416}
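// Illustrative example (annotation): with I = %a and J = (%a + 16), Diff is
// the non-negative constant 16 and I is returned as the minimum; if the
// difference is not a SCEVConstant the helper returns nullptr and the caller
// gives up on merging.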
417
418bool RuntimeCheckingPtrGroup::addPointer(unsigned Index,
419 RuntimePointerChecking &RtCheck) {
420 return addPointer(
421 Index, RtCheck.Pointers[Index].Start, RtCheck.Pointers[Index].End,
422 RtCheck.Pointers[Index].PointerValue->getType()->getPointerAddressSpace(),
423 RtCheck.Pointers[Index].NeedsFreeze, *RtCheck.SE);
424}
425
426bool RuntimeCheckingPtrGroup::addPointer(unsigned Index, const SCEV *Start,
427 const SCEV *End, unsigned AS,
428 bool NeedsFreeze,
429 ScalarEvolution &SE) {
430 assert(AddressSpace == AS &&
431 "all pointers in a checking group must be in the same address space");
432
433 // Compare the starts and ends with the known minimum and maximum
434 // of this set. We need to know how we compare against the min/max
435 // of the set in order to be able to emit memchecks.
436 const SCEV *Min0 = getMinFromExprs(Start, Low, &SE);
437 if (!Min0)
438 return false;
439
440 const SCEV *Min1 = getMinFromExprs(End, High, &SE);
441 if (!Min1)
442 return false;
443
444 // Update the low bound expression if we've found a new min value.
445 if (Min0 == Start)
446 Low = Start;
447
448 // Update the high bound expression if we've found a new max value.
449 if (Min1 != End)
450 High = End;
451
453 this->NeedsFreeze |= NeedsFreeze;
454 return true;
455}
456
457void RuntimePointerChecking::groupChecks(
458 MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
459 // We build the groups from dependency candidates equivalence classes
460 // because:
461 // - We know that pointers in the same equivalence class share
462 // the same underlying object and therefore there is a chance
463 // that we can compare pointers
464 // - We wouldn't be able to merge two pointers for which we need
465 // to emit a memcheck. The classes in DepCands are already
466 // conveniently built such that no two pointers in the same
467 // class need checking against each other.
468
469 // We use the following (greedy) algorithm to construct the groups
470 // For every pointer in the equivalence class:
471 // For each existing group:
472 // - if the difference between this pointer and the min/max bounds
473 // of the group is a constant, then make the pointer part of the
474 // group and update the min/max bounds of that group as required.
475
476 CheckingGroups.clear();
477
478 // If we need to check two pointers to the same underlying object
479 // with a non-constant difference, we shouldn't perform any pointer
480 // grouping with those pointers. This is because we can easily get
481 // into cases where the resulting check would return false, even when
482 // the accesses are safe.
483 //
484 // The following example shows this:
485 // for (i = 0; i < 1000; ++i)
486 // a[5000 + i * m] = a[i] + a[i + 9000]
487 //
488 // Here grouping gives a check of (5000, 5000 + 1000 * m) against
489 // (0, 10000) which is always false. However, if m is 1, there is no
490 // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
491 // us to perform an accurate check in this case.
492 //
493 // The above case requires that we have an UnknownDependence between
494 // accesses to the same underlying object. This cannot happen unless
495 // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
496 // is also false. In this case we will use the fallback path and create
497 // separate checking groups for all pointers.
498
499 // If we don't have the dependency partitions, construct a new
500 // checking pointer group for each pointer. This is also required
501 // for correctness, because in this case we can have checking between
502 // pointers to the same underlying object.
503 if (!UseDependencies) {
504 for (unsigned I = 0; I < Pointers.size(); ++I)
505 CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
506 return;
507 }
508
509 unsigned TotalComparisons = 0;
510
511 DenseMap<Value *, SmallVector<unsigned>> PositionMap;
512 for (unsigned Index = 0; Index < Pointers.size(); ++Index) {
513 auto Iter = PositionMap.insert({Pointers[Index].PointerValue, {}});
514 Iter.first->second.push_back(Index);
515 }
516
517 // We need to keep track of what pointers we've already seen so we
518 // don't process them twice.
519 SmallSet<unsigned, 2> Seen;
520
521 // Go through all equivalence classes, get the "pointer check groups"
522 // and add them to the overall solution. We use the order in which accesses
523 // appear in 'Pointers' to enforce determinism.
524 for (unsigned I = 0; I < Pointers.size(); ++I) {
525 // We've seen this pointer before, and therefore already processed
526 // its equivalence class.
527 if (Seen.count(I))
528 continue;
529
530 MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
531 Pointers[I].IsWritePtr);
532
533 SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
534 auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));
535
536 // Because DepCands is constructed by visiting accesses in the order in
537 // which they appear in alias sets (which is deterministic) and the
538 // iteration order within an equivalence class member is only dependent on
539 // the order in which unions and insertions are performed on the
540 // equivalence class, the iteration order is deterministic.
541 for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
542 MI != ME; ++MI) {
543 auto PointerI = PositionMap.find(MI->getPointer());
544 assert(PointerI != PositionMap.end() &&
545 "pointer in equivalence class not found in PositionMap");
546 for (unsigned Pointer : PointerI->second) {
547 bool Merged = false;
548 // Mark this pointer as seen.
549 Seen.insert(Pointer);
550
551 // Go through all the existing sets and see if we can find one
552 // which can include this pointer.
553 for (RuntimeCheckingPtrGroup &Group : Groups) {
554 // Don't perform more than a certain amount of comparisons.
555 // This should limit the cost of grouping the pointers to something
556 // reasonable. If we do end up hitting this threshold, the algorithm
557 // will create separate groups for all remaining pointers.
558 if (TotalComparisons > MemoryCheckMergeThreshold)
559 break;
560
561 TotalComparisons++;
562
563 if (Group.addPointer(Pointer, *this)) {
564 Merged = true;
565 break;
566 }
567 }
568
569 if (!Merged)
570 // We couldn't add this pointer to any existing set or the threshold
571 // for the number of comparisons has been reached. Create a new group
572 // to hold the current pointer.
573 Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
574 }
575 }
576
577 // We've computed the grouped checks for this partition.
578 // Save the results and continue with the next one.
579 llvm::copy(Groups, std::back_inserter(CheckingGroups));
580 }
581}
582
583bool RuntimePointerChecking::arePointersInSamePartition(
584 const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
585 unsigned PtrIdx2) {
586 return (PtrToPartition[PtrIdx1] != -1 &&
587 PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
588}
589
590bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
591 const PointerInfo &PointerI = Pointers[I];
592 const PointerInfo &PointerJ = Pointers[J];
593
594 // No need to check if two readonly pointers intersect.
595 if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
596 return false;
597
598 // Only need to check pointers between two different dependency sets.
599 if (PointerI.DependencySetId == PointerJ.DependencySetId)
600 return false;
601
602 // Only need to check pointers in the same alias set.
603 if (PointerI.AliasSetId != PointerJ.AliasSetId)
604 return false;
605
606 return true;
607}
608
609void RuntimePointerChecking::printChecks(
610 raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
611 unsigned Depth) const {
612 unsigned N = 0;
613 for (const auto &Check : Checks) {
614 const auto &First = Check.first->Members, &Second = Check.second->Members;
615
616 OS.indent(Depth) << "Check " << N++ << ":\n";
617
618 OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
619 for (unsigned K = 0; K < First.size(); ++K)
620 OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";
621
622 OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
623 for (unsigned K = 0; K < Second.size(); ++K)
624 OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
625 }
626}
627
628void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {
629
630 OS.indent(Depth) << "Run-time memory checks:\n";
631 printChecks(OS, Checks, Depth);
632
633 OS.indent(Depth) << "Grouped accesses:\n";
634 for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
635 const auto &CG = CheckingGroups[I];
636
637 OS.indent(Depth + 2) << "Group " << &CG << ":\n";
638 OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
639 << ")\n";
640 for (unsigned J = 0; J < CG.Members.size(); ++J) {
641 OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
642 << "\n";
643 }
644 }
645}
646
647namespace {
648
649/// Analyses memory accesses in a loop.
650///
651/// Checks whether run time pointer checks are needed and builds sets for data
652/// dependence checking.
653class AccessAnalysis {
654public:
655 /// Read or write access location.
656 typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
657 typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
658
659 AccessAnalysis(Loop *TheLoop, AAResults *AA, LoopInfo *LI,
660 MemoryDepChecker::DepCandidates &DA,
661 PredicatedScalarEvolution &PSE,
662 SmallPtrSetImpl<MDNode *> &LoopAliasScopes)
663 : TheLoop(TheLoop), BAA(*AA), AST(BAA), LI(LI), DepCands(DA), PSE(PSE),
664 LoopAliasScopes(LoopAliasScopes) {
665 // We're analyzing dependences across loop iterations.
666 BAA.enableCrossIterationMode();
667 }
668
669 /// Register a load and whether it is only read from.
670 void addLoad(MemoryLocation &Loc, Type *AccessTy, bool IsReadOnly) {
671 Value *Ptr = const_cast<Value *>(Loc.Ptr);
672 AST.add(adjustLoc(Loc));
673 Accesses[MemAccessInfo(Ptr, false)].insert(AccessTy);
674 if (IsReadOnly)
675 ReadOnlyPtr.insert(Ptr);
676 }
677
678 /// Register a store.
679 void addStore(MemoryLocation &Loc, Type *AccessTy) {
680 Value *Ptr = const_cast<Value *>(Loc.Ptr);
681 AST.add(adjustLoc(Loc));
682 Accesses[MemAccessInfo(Ptr, true)].insert(AccessTy);
683 }
684
685 /// Check if we can emit a run-time no-alias check for \p Access.
686 ///
687 /// Returns true if we can emit a run-time no alias check for \p Access.
688 /// If we can check this access, this also adds it to a dependence set and
689 /// adds a run-time to check for it to \p RtCheck. If \p Assume is true,
690 /// we will attempt to use additional run-time checks in order to get
691 /// the bounds of the pointer.
692 bool createCheckForAccess(RuntimePointerChecking &RtCheck,
693 MemAccessInfo Access, Type *AccessTy,
694 const DenseMap<Value *, const SCEV *> &Strides,
695 DenseMap<Value *, unsigned> &DepSetId,
696 Loop *TheLoop, unsigned &RunningDepId,
697 unsigned ASId, bool ShouldCheckStride, bool Assume);
698
699 /// Check whether we can check the pointers at runtime for
700 /// non-intersection.
701 ///
702 /// Returns true if we need no check or if we do and we can generate them
703 /// (i.e. the pointers have computable bounds).
704 bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
705 Loop *TheLoop, const DenseMap<Value *, const SCEV *> &Strides,
706 Value *&UncomputablePtr, bool ShouldCheckWrap = false);
707
708 /// Goes over all memory accesses, checks whether a RT check is needed
709 /// and builds sets of dependent accesses.
710 void buildDependenceSets() {
711 processMemAccesses();
712 }
713
714 /// Initial processing of memory accesses determined that we need to
715 /// perform dependency checking.
716 ///
717 /// Note that this can later be cleared if we retry memcheck analysis without
718 /// dependency checking (i.e. FoundNonConstantDistanceDependence).
719 bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }
720
721 /// We decided that no dependence analysis would be used. Reset the state.
722 void resetDepChecks(MemoryDepChecker &DepChecker) {
723 CheckDeps.clear();
724 DepChecker.clearDependences();
725 }
726
727 MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }
728
729 const DenseMap<Value *, SmallVector<const Value *, 16>> &
730 getUnderlyingObjects() {
731 return UnderlyingObjects;
732 }
733
734private:
736
737 /// Adjust the MemoryLocation so that it represents accesses to this
738 /// location across all iterations, rather than a single one.
739 MemoryLocation adjustLoc(MemoryLocation Loc) const {
740 // The accessed location varies within the loop, but remains within the
741 // underlying object.
742 Loc.Size = LocationSize::beforeOrAfterPointer();
743 Loc.AATags.Scope = adjustAliasScopeList(Loc.AATags.Scope);
744 Loc.AATags.NoAlias = adjustAliasScopeList(Loc.AATags.NoAlias);
745 return Loc;
746 }
747
748 /// Drop alias scopes that are only valid within a single loop iteration.
749 MDNode *adjustAliasScopeList(MDNode *ScopeList) const {
750 if (!ScopeList)
751 return nullptr;
752
753 // For the sake of simplicity, drop the whole scope list if any scope is
754 // iteration-local.
755 if (any_of(ScopeList->operands(), [&](Metadata *Scope) {
756 return LoopAliasScopes.contains(cast<MDNode>(Scope));
757 }))
758 return nullptr;
759
760 return ScopeList;
761 }
762
763 /// Go over all memory access and check whether runtime pointer checks
764 /// are needed and build sets of dependency check candidates.
765 void processMemAccesses();
766
767 /// Map of all accesses. Values are the types used to access memory pointed to
768 /// by the pointer.
769 PtrAccessMap Accesses;
770
771 /// The loop being checked.
772 const Loop *TheLoop;
773
774 /// List of accesses that need a further dependence check.
775 MemAccessInfoList CheckDeps;
776
777 /// Set of pointers that are read only.
778 SmallPtrSet<Value*, 16> ReadOnlyPtr;
779
780 /// Batched alias analysis results.
781 BatchAAResults BAA;
782
783 /// An alias set tracker to partition the access set by underlying object and
784 /// intrinsic property (such as TBAA metadata).
785 AliasSetTracker AST;
786
787 LoopInfo *LI;
788
789 /// Sets of potentially dependent accesses - members of one set share an
790 /// underlying pointer. The set "CheckDeps" identfies which sets really need a
791 /// dependence check.
793
794 /// Initial processing of memory accesses determined that we may need
795 /// to add memchecks. Perform the analysis to determine the necessary checks.
796 ///
797 /// Note that, this is different from isDependencyCheckNeeded. When we retry
798 /// memcheck analysis without dependency checking
799 /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
800 /// cleared while this remains set if we have potentially dependent accesses.
801 bool IsRTCheckAnalysisNeeded = false;
802
803 /// The SCEV predicate containing all the SCEV-related assumptions.
804 PredicatedScalarEvolution &PSE;
805
806 DenseMap<Value *, SmallVector<const Value *, 16>> UnderlyingObjects;
807
808 /// Alias scopes that are declared inside the loop, and as such not valid
809 /// across iterations.
810 SmallPtrSetImpl<MDNode *> &LoopAliasScopes;
811};
812
813} // end anonymous namespace
814
815/// Check whether a pointer can participate in a runtime bounds check.
816/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
817/// by adding run-time checks (overflow checks) if necessary.
818static bool hasComputableBounds(PredicatedScalarEvolution &PSE, Value *Ptr,
819 const SCEV *PtrScev, Loop *L, bool Assume) {
820 // The bounds for loop-invariant pointer is trivial.
821 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
822 return true;
823
824 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
825
826 if (!AR && Assume)
827 AR = PSE.getAsAddRec(Ptr);
828
829 if (!AR)
830 return false;
831
832 return AR->isAffine();
833}
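// Illustrative example (annotation): a pointer whose SCEV is the affine AddRec
// {%base,+,4}<%loop> has computable bounds, whereas a quadratic recurrence
// such as {%base,+,4,+,4}<%loop> does not; with Assume set, PSE may still be
// able to turn an otherwise opaque pointer expression into an affine AddRec by
// adding runtime predicates.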
834
835/// Check whether a pointer address cannot wrap.
836static bool isNoWrap(PredicatedScalarEvolution &PSE,
837 const DenseMap<Value *, const SCEV *> &Strides, Value *Ptr, Type *AccessTy,
838 Loop *L) {
839 const SCEV *PtrScev = PSE.getSCEV(Ptr);
840 if (PSE.getSE()->isLoopInvariant(PtrScev, L))
841 return true;
842
843 int64_t Stride = getPtrStride(PSE, AccessTy, Ptr, L, Strides).value_or(0);
844 if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
845 return true;
846
847 return false;
848}
849
850static void visitPointers(Value *StartPtr, const Loop &InnermostLoop,
851 function_ref<void(Value *)> AddPointer) {
852 SmallPtrSet<Value *, 8> Visited;
853 SmallVector<Value *> WorkList;
854 WorkList.push_back(StartPtr);
855
856 while (!WorkList.empty()) {
857 Value *Ptr = WorkList.pop_back_val();
858 if (!Visited.insert(Ptr).second)
859 continue;
860 auto *PN = dyn_cast<PHINode>(Ptr);
861 // SCEV does not look through non-header PHIs inside the loop. Such phis
862 // can be analyzed by adding separate accesses for each incoming pointer
863 // value.
864 if (PN && InnermostLoop.contains(PN->getParent()) &&
865 PN->getParent() != InnermostLoop.getHeader()) {
866 for (const Use &Inc : PN->incoming_values())
867 WorkList.push_back(Inc);
868 } else
869 AddPointer(Ptr);
870 }
871}
872
873// Walk back through the IR for a pointer, looking for a select like the
874// following:
875//
876// %offset = select i1 %cmp, i64 %a, i64 %b
877// %addr = getelementptr double, double* %base, i64 %offset
878// %ld = load double, double* %addr, align 8
879//
880// We won't be able to form a single SCEVAddRecExpr from this since the
881// address for each loop iteration depends on %cmp. We could potentially
882// produce multiple valid SCEVAddRecExprs, though, and check all of them for
883// memory safety/aliasing if needed.
884//
885// If we encounter some IR we don't yet handle, or something obviously fine
886// like a constant, then we just add the SCEV for that term to the list passed
887// in by the caller. If we have a node that may potentially yield a valid
888// SCEVAddRecExpr then we decompose it into parts and build the SCEV terms
889// ourselves before adding to the list.
890static void findForkedSCEVs(
891 ScalarEvolution *SE, const Loop *L, Value *Ptr,
892 SmallVectorImpl<PointerIntPair<const SCEV *, 1, bool>> &ScevList,
893 unsigned Depth) {
894 // If our Value is a SCEVAddRecExpr, loop invariant, not an instruction, or
895 // we've exceeded our limit on recursion, just return whatever we have
896 // regardless of whether it can be used for a forked pointer or not, along
897 // with an indication of whether it might be a poison or undef value.
898 const SCEV *Scev = SE->getSCEV(Ptr);
899 if (isa<SCEVAddRecExpr>(Scev) || L->isLoopInvariant(Ptr) ||
900 !isa<Instruction>(Ptr) || Depth == 0) {
901 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
902 return;
903 }
904
905 Depth--;
906
907 auto UndefPoisonCheck = [](PointerIntPair<const SCEV *, 1, bool> S) {
908 return get<1>(S);
909 };
910
911 auto GetBinOpExpr = [&SE](unsigned Opcode, const SCEV *L, const SCEV *R) {
912 switch (Opcode) {
913 case Instruction::Add:
914 return SE->getAddExpr(L, R);
915 case Instruction::Sub:
916 return SE->getMinusSCEV(L, R);
917 default:
918 llvm_unreachable("Unexpected binary operator when walking ForkedPtrs");
919 }
920 };
921
922 Instruction *I = cast<Instruction>(Ptr);
923 unsigned Opcode = I->getOpcode();
924 switch (Opcode) {
925 case Instruction::GetElementPtr: {
926 GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
927 Type *SourceTy = GEP->getSourceElementType();
928 // We only handle base + single offset GEPs here for now.
929 // Not dealing with preexisting gathers yet, so no vectors.
930 if (I->getNumOperands() != 2 || SourceTy->isVectorTy()) {
931 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(GEP));
932 break;
933 }
934 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> BaseScevs;
935 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> OffsetScevs;
936 findForkedSCEVs(SE, L, I->getOperand(0), BaseScevs, Depth);
937 findForkedSCEVs(SE, L, I->getOperand(1), OffsetScevs, Depth);
938
939 // See if we need to freeze our fork...
940 bool NeedsFreeze = any_of(BaseScevs, UndefPoisonCheck) ||
941 any_of(OffsetScevs, UndefPoisonCheck);
942
943 // Check that we only have a single fork, on either the base or the offset.
944 // Copy the SCEV across for the one without a fork in order to generate
945 // the full SCEV for both sides of the GEP.
946 if (OffsetScevs.size() == 2 && BaseScevs.size() == 1)
947 BaseScevs.push_back(BaseScevs[0]);
948 else if (BaseScevs.size() == 2 && OffsetScevs.size() == 1)
949 OffsetScevs.push_back(OffsetScevs[0]);
950 else {
951 ScevList.emplace_back(Scev, NeedsFreeze);
952 break;
953 }
954
955 // Find the pointer type we need to extend to.
956 Type *IntPtrTy = SE->getEffectiveSCEVType(
957 SE->getSCEV(GEP->getPointerOperand())->getType());
958
959 // Find the size of the type being pointed to. We only have a single
960 // index term (guarded above) so we don't need to index into arrays or
961 // structures, just get the size of the scalar value.
962 const SCEV *Size = SE->getSizeOfExpr(IntPtrTy, SourceTy);
963
964 // Scale up the offsets by the size of the type, then add to the bases.
965 const SCEV *Scaled1 = SE->getMulExpr(
966 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[0]), IntPtrTy));
967 const SCEV *Scaled2 = SE->getMulExpr(
968 Size, SE->getTruncateOrSignExtend(get<0>(OffsetScevs[1]), IntPtrTy));
969 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[0]), Scaled1),
970 NeedsFreeze);
971 ScevList.emplace_back(SE->getAddExpr(get<0>(BaseScevs[1]), Scaled2),
972 NeedsFreeze);
973 break;
974 }
975 case Instruction::Select: {
976 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
977 // A select means we've found a forked pointer, but we currently only
978 // support a single select per pointer so if there's another behind this
979 // then we just bail out and return the generic SCEV.
980 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
981 findForkedSCEVs(SE, L, I->getOperand(2), ChildScevs, Depth);
982 if (ChildScevs.size() == 2) {
983 ScevList.push_back(ChildScevs[0]);
984 ScevList.push_back(ChildScevs[1]);
985 } else
986 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
987 break;
988 }
989 case Instruction::PHI: {
990 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> ChildScevs;
991 // A phi means we've found a forked pointer, but we currently only
992 // support a single phi per pointer so if there's another behind this
993 // then we just bail out and return the generic SCEV.
994 if (I->getNumOperands() == 2) {
995 findForkedSCEVs(SE, L, I->getOperand(0), ChildScevs, Depth);
996 findForkedSCEVs(SE, L, I->getOperand(1), ChildScevs, Depth);
997 }
998 if (ChildScevs.size() == 2) {
999 ScevList.push_back(ChildScevs[0]);
1000 ScevList.push_back(ChildScevs[1]);
1001 } else
1002 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1003 break;
1004 }
1005 case Instruction::Add:
1006 case Instruction::Sub: {
1007 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> LScevs;
1008 SmallVector<PointerIntPair<const SCEV *, 1, bool>, 2> RScevs;
1009 findForkedSCEVs(SE, L, I->getOperand(0), LScevs, Depth);
1010 findForkedSCEVs(SE, L, I->getOperand(1), RScevs, Depth);
1011
1012 // See if we need to freeze our fork...
1013 bool NeedsFreeze =
1014 any_of(LScevs, UndefPoisonCheck) || any_of(RScevs, UndefPoisonCheck);
1015
1016 // Check that we only have a single fork, on either the left or right side.
1017 // Copy the SCEV across for the one without a fork in order to generate
1018 // the full SCEV for both sides of the BinOp.
1019 if (LScevs.size() == 2 && RScevs.size() == 1)
1020 RScevs.push_back(RScevs[0]);
1021 else if (RScevs.size() == 2 && LScevs.size() == 1)
1022 LScevs.push_back(LScevs[0]);
1023 else {
1024 ScevList.emplace_back(Scev, NeedsFreeze);
1025 break;
1026 }
1027
1028 ScevList.emplace_back(
1029 GetBinOpExpr(Opcode, get<0>(LScevs[0]), get<0>(RScevs[0])),
1030 NeedsFreeze);
1031 ScevList.emplace_back(
1032 GetBinOpExpr(Opcode, get<0>(LScevs[1]), get<0>(RScevs[1])),
1033 NeedsFreeze);
1034 break;
1035 }
1036 default:
1037 // Just return the current SCEV if we haven't handled the instruction yet.
1038 LLVM_DEBUG(dbgs() << "ForkedPtr unhandled instruction: " << *I << "\n");
1039 ScevList.emplace_back(Scev, !isGuaranteedNotToBeUndefOrPoison(Ptr));
1040 break;
1041 }
1042}
1043
1044static SmallVector<PointerIntPair<const SCEV *, 1, bool>>
1045findForkedPointer(PredicatedScalarEvolution &PSE,
1046 const DenseMap<Value *, const SCEV *> &StridesMap, Value *Ptr,
1047 const Loop *L) {
1048 ScalarEvolution *SE = PSE.getSE();
1049 assert(SE->isSCEVable(Ptr->getType()) && "Value is not SCEVable!");
1050 SmallVector<PointerIntPair<const SCEV *, 1, bool>> Scevs;
1051 findForkedSCEVs(SE, L, Ptr, Scevs, MaxForkedSCEVDepth);
1052
1053 // For now, we will only accept a forked pointer with two possible SCEVs
1054 // that are either SCEVAddRecExprs or loop invariant.
1055 if (Scevs.size() == 2 &&
1056 (isa<SCEVAddRecExpr>(get<0>(Scevs[0])) ||
1057 SE->isLoopInvariant(get<0>(Scevs[0]), L)) &&
1058 (isa<SCEVAddRecExpr>(get<0>(Scevs[1])) ||
1059 SE->isLoopInvariant(get<0>(Scevs[1]), L))) {
1060 LLVM_DEBUG(dbgs() << "LAA: Found forked pointer: " << *Ptr << "\n");
1061 LLVM_DEBUG(dbgs() << "\t(1) " << *get<0>(Scevs[0]) << "\n");
1062 LLVM_DEBUG(dbgs() << "\t(2) " << *get<0>(Scevs[1]) << "\n");
1063 return Scevs;
1064 }
1065
1066 return {{replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr), false}};
1067}
1068
1069bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
1070 MemAccessInfo Access, Type *AccessTy,
1071 const DenseMap<Value *, const SCEV *> &StridesMap,
1072 DenseMap<Value *, unsigned> &DepSetId,
1073 Loop *TheLoop, unsigned &RunningDepId,
1074 unsigned ASId, bool ShouldCheckWrap,
1075 bool Assume) {
1076 Value *Ptr = Access.getPointer();
1077
1078 SmallVector<PointerIntPair<const SCEV *, 1, bool>> TranslatedPtrs =
1079 findForkedPointer(PSE, StridesMap, Ptr, TheLoop);
1080
1081 for (auto &P : TranslatedPtrs) {
1082 const SCEV *PtrExpr = get<0>(P);
1083 if (!hasComputableBounds(PSE, Ptr, PtrExpr, TheLoop, Assume))
1084 return false;
1085
1086 // When we run after a failing dependency check we have to make sure
1087 // we don't have wrapping pointers.
1088 if (ShouldCheckWrap) {
1089 // Skip wrap checking when translating pointers.
1090 if (TranslatedPtrs.size() > 1)
1091 return false;
1092
1093 if (!isNoWrap(PSE, StridesMap, Ptr, AccessTy, TheLoop)) {
1094 auto *Expr = PSE.getSCEV(Ptr);
1095 if (!Assume || !isa<SCEVAddRecExpr>(Expr))
1096 return false;
1097 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1098 }
1099 }
1100 // If there's only one option for Ptr, look it up after bounds and wrap
1101 // checking, because assumptions might have been added to PSE.
1102 if (TranslatedPtrs.size() == 1)
1103 TranslatedPtrs[0] = {replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr),
1104 false};
1105 }
1106
1107 for (auto [PtrExpr, NeedsFreeze] : TranslatedPtrs) {
1108 // The id of the dependence set.
1109 unsigned DepId;
1110
1111 if (isDependencyCheckNeeded()) {
1112 Value *Leader = DepCands.getLeaderValue(Access).getPointer();
1113 unsigned &LeaderId = DepSetId[Leader];
1114 if (!LeaderId)
1115 LeaderId = RunningDepId++;
1116 DepId = LeaderId;
1117 } else
1118 // Each access has its own dependence set.
1119 DepId = RunningDepId++;
1120
1121 bool IsWrite = Access.getInt();
1122 RtCheck.insert(TheLoop, Ptr, PtrExpr, AccessTy, IsWrite, DepId, ASId, PSE,
1123 NeedsFreeze);
1124 LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
1125 }
1126
1127 return true;
1128}
1129
1130bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
1131 ScalarEvolution *SE, Loop *TheLoop,
1132 const DenseMap<Value *, const SCEV *> &StridesMap,
1133 Value *&UncomputablePtr, bool ShouldCheckWrap) {
1134 // Find pointers with computable bounds. We are going to use this information
1135 // to place a runtime bound check.
1136 bool CanDoRT = true;
1137
1138 bool MayNeedRTCheck = false;
1139 if (!IsRTCheckAnalysisNeeded) return true;
1140
1141 bool IsDepCheckNeeded = isDependencyCheckNeeded();
1142
1143 // We assign a consecutive id to access from different alias sets.
1144 // Accesses between different groups doesn't need to be checked.
1145 unsigned ASId = 0;
1146 for (auto &AS : AST) {
1147 int NumReadPtrChecks = 0;
1148 int NumWritePtrChecks = 0;
1149 bool CanDoAliasSetRT = true;
1150 ++ASId;
1151 auto ASPointers = AS.getPointers();
1152
1153 // We assign consecutive id to access from different dependence sets.
1154 // Accesses within the same set don't need a runtime check.
1155 unsigned RunningDepId = 1;
1156 DenseMap<Value *, unsigned> DepSetId;
1157
1158 SmallVector<std::pair<MemAccessInfo, Type *>, 4> Retries;
1159
1160 // First, count how many write and read accesses are in the alias set. Also
1161 // collect MemAccessInfos for later.
1162 SmallVector<MemAccessInfo, 4> AccessInfos;
1163 for (const Value *Ptr_ : ASPointers) {
1164 Value *Ptr = const_cast<Value *>(Ptr_);
1165 bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
1166 if (IsWrite)
1167 ++NumWritePtrChecks;
1168 else
1169 ++NumReadPtrChecks;
1170 AccessInfos.emplace_back(Ptr, IsWrite);
1171 }
1172
1173 // We do not need runtime checks for this alias set, if there are no writes
1174 // or a single write and no reads.
1175 if (NumWritePtrChecks == 0 ||
1176 (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
1177 assert((ASPointers.size() <= 1 ||
1178 all_of(ASPointers,
1179 [this](const Value *Ptr) {
1180 MemAccessInfo AccessWrite(const_cast<Value *>(Ptr),
1181 true);
1182 return DepCands.findValue(AccessWrite) == DepCands.end();
1183 })) &&
1184 "Can only skip updating CanDoRT below, if all entries in AS "
1185 "are reads or there is at most 1 entry");
1186 continue;
1187 }
1188
1189 for (auto &Access : AccessInfos) {
1190 for (const auto &AccessTy : Accesses[Access]) {
1191 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1192 DepSetId, TheLoop, RunningDepId, ASId,
1193 ShouldCheckWrap, false)) {
1194 LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
1195 << *Access.getPointer() << '\n');
1196 Retries.push_back({Access, AccessTy});
1197 CanDoAliasSetRT = false;
1198 }
1199 }
1200 }
1201
1202 // Note that this function computes CanDoRT and MayNeedRTCheck
1203 // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
1204 // we have a pointer for which we couldn't find the bounds but we don't
1205 // actually need to emit any checks so it does not matter.
1206 //
1207 // We need runtime checks for this alias set, if there are at least 2
1208 // dependence sets (in which case RunningDepId > 2) or if we need to re-try
1209 // any bound checks (because in that case the number of dependence sets is
1210 // incomplete).
1211 bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();
1212
1213 // We need to perform run-time alias checks, but some pointers had bounds
1214 // that couldn't be checked.
1215 if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
1216 // Reset the CanDoSetRt flag and retry all accesses that have failed.
1217 // We know that we need these checks, so we can now be more aggressive
1218 // and add further checks if required (overflow checks).
1219 CanDoAliasSetRT = true;
1220 for (auto Retry : Retries) {
1221 MemAccessInfo Access = Retry.first;
1222 Type *AccessTy = Retry.second;
1223 if (!createCheckForAccess(RtCheck, Access, AccessTy, StridesMap,
1224 DepSetId, TheLoop, RunningDepId, ASId,
1225 ShouldCheckWrap, /*Assume=*/true)) {
1226 CanDoAliasSetRT = false;
1227 UncomputablePtr = Access.getPointer();
1228 break;
1229 }
1230 }
1231 }
1232
1233 CanDoRT &= CanDoAliasSetRT;
1234 MayNeedRTCheck |= NeedsAliasSetRTCheck;
1235 ++ASId;
1236 }
1237
1238 // If the pointers that we would use for the bounds comparison have different
1239 // address spaces, assume the values aren't directly comparable, so we can't
1240 // use them for the runtime check. We also have to assume they could
1241 // overlap. In the future there should be metadata for whether address spaces
1242 // are disjoint.
1243 unsigned NumPointers = RtCheck.Pointers.size();
1244 for (unsigned i = 0; i < NumPointers; ++i) {
1245 for (unsigned j = i + 1; j < NumPointers; ++j) {
1246 // Only need to check pointers between two different dependency sets.
1247 if (RtCheck.Pointers[i].DependencySetId ==
1248 RtCheck.Pointers[j].DependencySetId)
1249 continue;
1250 // Only need to check pointers in the same alias set.
1251 if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
1252 continue;
1253
1254 Value *PtrI = RtCheck.Pointers[i].PointerValue;
1255 Value *PtrJ = RtCheck.Pointers[j].PointerValue;
1256
1257 unsigned ASi = PtrI->getType()->getPointerAddressSpace();
1258 unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
1259 if (ASi != ASj) {
1260 LLVM_DEBUG(
1261 dbgs() << "LAA: Runtime check would require comparison between"
1262 " different address spaces\n");
1263 return false;
1264 }
1265 }
1266 }
1267
1268 if (MayNeedRTCheck && CanDoRT)
1269 RtCheck.generateChecks(DepCands, IsDepCheckNeeded);
1270
1271 LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
1272 << " pointer comparisons.\n");
1273
1274 // If we can do run-time checks, but there are no checks, no runtime checks
1275 // are needed. This can happen when all pointers point to the same underlying
1276 // object for example.
1277 RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;
1278
1279 bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
1280 if (!CanDoRTIfNeeded)
1281 RtCheck.reset();
1282 return CanDoRTIfNeeded;
1283}
1284
1285void AccessAnalysis::processMemAccesses() {
1286 // We process the set twice: first we process read-write pointers, last we
1287 // process read-only pointers. This allows us to skip dependence tests for
1288 // read-only pointers.
1289
1290 LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
1291 LLVM_DEBUG(dbgs() << " AST: "; AST.dump());
1292 LLVM_DEBUG(dbgs() << "LAA: Accesses(" << Accesses.size() << "):\n");
1293 LLVM_DEBUG({
1294 for (auto A : Accesses)
1295 dbgs() << "\t" << *A.first.getPointer() << " ("
1296 << (A.first.getInt()
1297 ? "write"
1298 : (ReadOnlyPtr.count(A.first.getPointer()) ? "read-only"
1299 : "read"))
1300 << ")\n";
1301 });
1302
1303 // The AliasSetTracker has nicely partitioned our pointers by metadata
1304 // compatibility and potential for underlying-object overlap. As a result, we
1305 // only need to check for potential pointer dependencies within each alias
1306 // set.
1307 for (const auto &AS : AST) {
1308 // Note that both the alias-set tracker and the alias sets themselves use
1309 // ordered collections internally and so the iteration order here is
1310 // deterministic.
1311 auto ASPointers = AS.getPointers();
1312
1313 bool SetHasWrite = false;
1314
1315 // Map of pointers to last access encountered.
1316 typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
1317 UnderlyingObjToAccessMap ObjToLastAccess;
1318
1319 // Set of access to check after all writes have been processed.
1320 PtrAccessMap DeferredAccesses;
1321
1322 // Iterate over each alias set twice, once to process read/write pointers,
1323 // and then to process read-only pointers.
1324 for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
1325 bool UseDeferred = SetIteration > 0;
1326 PtrAccessMap &S = UseDeferred ? DeferredAccesses : Accesses;
1327
1328 for (const Value *Ptr_ : ASPointers) {
1329 Value *Ptr = const_cast<Value *>(Ptr_);
1330
1331 // For a single memory access in AliasSetTracker, Accesses may contain
1332 // both read and write, and they both need to be handled for CheckDeps.
1333 for (const auto &AC : S) {
1334 if (AC.first.getPointer() != Ptr)
1335 continue;
1336
1337 bool IsWrite = AC.first.getInt();
1338
1339 // If we're using the deferred access set, then it contains only
1340 // reads.
1341 bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
1342 if (UseDeferred && !IsReadOnlyPtr)
1343 continue;
1344 // Otherwise, the pointer must be in the PtrAccessSet, either as a
1345 // read or a write.
1346 assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
1347 S.count(MemAccessInfo(Ptr, false))) &&
1348 "Alias-set pointer not in the access set?");
1349
1350 MemAccessInfo Access(Ptr, IsWrite);
1351 DepCands.insert(Access);
1352
1353 // Memorize read-only pointers for later processing and skip them in
1354 // the first round (they need to be checked after we have seen all
1355 // write pointers). Note: we also mark pointer that are not
1356 // consecutive as "read-only" pointers (so that we check
1357 // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
1358 if (!UseDeferred && IsReadOnlyPtr) {
1359 // We only use the pointer keys, the types vector values don't
1360 // matter.
1361 DeferredAccesses.insert({Access, {}});
1362 continue;
1363 }
1364
1365 // If this is a write - check other reads and writes for conflicts. If
1366 // this is a read only check other writes for conflicts (but only if
1367 // there is no other write to the ptr - this is an optimization to
1368 // catch "a[i] = a[i] + " without having to do a dependence check).
1369 if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
1370 CheckDeps.push_back(Access);
1371 IsRTCheckAnalysisNeeded = true;
1372 }
1373
1374 if (IsWrite)
1375 SetHasWrite = true;
1376
1377 // Create sets of pointers connected by a shared alias set and
1378 // underlying object.
1379 typedef SmallVector<const Value *, 16> ValueVector;
1380 ValueVector TempObjects;
1381
1382 UnderlyingObjects[Ptr] = {};
1383 SmallVector<const Value *, 16> &UOs = UnderlyingObjects[Ptr];
1384 ::getUnderlyingObjects(Ptr, UOs, LI);
1386 << "Underlying objects for pointer " << *Ptr << "\n");
1387 for (const Value *UnderlyingObj : UOs) {
1388 // nullptr never alias, don't join sets for pointer that have "null"
1389 // in their UnderlyingObjects list.
1390 if (isa<ConstantPointerNull>(UnderlyingObj) &&
1391 !NullPointerIsDefined(
1392 TheLoop->getHeader()->getParent(),
1393 UnderlyingObj->getType()->getPointerAddressSpace()))
1394 continue;
1395
1396 UnderlyingObjToAccessMap::iterator Prev =
1397 ObjToLastAccess.find(UnderlyingObj);
1398 if (Prev != ObjToLastAccess.end())
1399 DepCands.unionSets(Access, Prev->second);
1400
1401 ObjToLastAccess[UnderlyingObj] = Access;
1402 LLVM_DEBUG(dbgs() << " " << *UnderlyingObj << "\n");
1403 }
1404 }
1405 }
1406 }
1407 }
1408}
1409
1410/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
1411/// i.e. monotonically increasing/decreasing.
1412static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
1413 PredicatedScalarEvolution &PSE, const Loop *L) {
1414
1415 // FIXME: This should probably only return true for NUW.
1416 if (AR->getNoWrapFlags(SCEV::NoWrapMask))
1417 return true;
1418
1419 if (PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
1420 return true;
1421
1422 // Scalar evolution does not propagate the non-wrapping flags to values that
1423 // are derived from a non-wrapping induction variable because non-wrapping
1424 // could be flow-sensitive.
1425 //
1426 // Look through the potentially overflowing instruction to try to prove
1427 // non-wrapping for the *specific* value of Ptr.
1428
1429 // The arithmetic implied by an inbounds GEP can't overflow.
1430 auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1431 if (!GEP || !GEP->isInBounds())
1432 return false;
1433
1434 // Make sure there is only one non-const index and analyze that.
1435 Value *NonConstIndex = nullptr;
1436 for (Value *Index : GEP->indices())
1437 if (!isa<ConstantInt>(Index)) {
1438 if (NonConstIndex)
1439 return false;
1440 NonConstIndex = Index;
1441 }
1442 if (!NonConstIndex)
1443 // The recurrence is on the pointer, ignore for now.
1444 return false;
1445
1446 // The index in GEP is signed. It is non-wrapping if it's derived from a NSW
1447 // AddRec using a NSW operation.
1448 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
1449 if (OBO->hasNoSignedWrap() &&
1450 // Assume constant for the other operand so that the AddRec can be
1451 // easily found.
1452 isa<ConstantInt>(OBO->getOperand(1))) {
1453 auto *OpScev = PSE.getSCEV(OBO->getOperand(0));
1454
1455 if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
1456 return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
1457 }
1458
1459 return false;
1460}
1461
1462/// Check whether the access through \p Ptr has a constant stride.
1463std::optional<int64_t> llvm::getPtrStride(PredicatedScalarEvolution &PSE,
1464 Type *AccessTy, Value *Ptr,
1465 const Loop *Lp,
1466 const DenseMap<Value *, const SCEV *> &StridesMap,
1467 bool Assume, bool ShouldCheckWrap) {
1468 Type *Ty = Ptr->getType();
1469 assert(Ty->isPointerTy() && "Unexpected non-ptr");
1470
1471 if (isa<ScalableVectorType>(AccessTy)) {
1472 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Scalable object: " << *AccessTy
1473 << "\n");
1474 return std::nullopt;
1475 }
1476
1477 const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
1478
1479 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
1480 if (Assume && !AR)
1481 AR = PSE.getAsAddRec(Ptr);
1482
1483 if (!AR) {
1484 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
1485 << " SCEV: " << *PtrScev << "\n");
1486 return std::nullopt;
1487 }
1488
1489 // The access function must stride over the innermost loop.
1490 if (Lp != AR->getLoop()) {
1491 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
1492 << *Ptr << " SCEV: " << *AR << "\n");
1493 return std::nullopt;
1494 }
1495
1496 // Check the step is constant.
1497 const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
1498
1499 // Calculate the pointer stride and check if it is constant.
1500 const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
1501 if (!C) {
1502 LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
1503 << " SCEV: " << *AR << "\n");
1504 return std::nullopt;
1505 }
1506
1507 auto &DL = Lp->getHeader()->getModule()->getDataLayout();
1508 TypeSize AllocSize = DL.getTypeAllocSize(AccessTy);
1509 int64_t Size = AllocSize.getFixedValue();
1510 const APInt &APStepVal = C->getAPInt();
1511
1512 // Huge step value - give up.
1513 if (APStepVal.getBitWidth() > 64)
1514 return std::nullopt;
1515
1516 int64_t StepVal = APStepVal.getSExtValue();
1517
1518 // Strided access.
1519 int64_t Stride = StepVal / Size;
1520 int64_t Rem = StepVal % Size;
1521 if (Rem)
1522 return std::nullopt;
1523
1524 if (!ShouldCheckWrap)
1525 return Stride;
1526
1527 // The address calculation must not wrap. Otherwise, a dependence could be
1528 // inverted.
1529 if (isNoWrapAddRec(Ptr, AR, PSE, Lp))
1530 return Stride;
1531
1532 // An inbounds getelementptr that is a AddRec with a unit stride
1533 // cannot wrap per definition. If it did, the result would be poison
1534 // and any memory access dependent on it would be immediate UB
1535 // when executed.
1536 if (auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
1537 GEP && GEP->isInBounds() && (Stride == 1 || Stride == -1))
1538 return Stride;
1539
1540 // If the null pointer is undefined, then an access sequence which would
1541 // otherwise access it can be assumed not to unsigned wrap. Note that this
1542 // assumes the object in memory is aligned to the natural alignment.
1543 unsigned AddrSpace = Ty->getPointerAddressSpace();
1544 if (!NullPointerIsDefined(Lp->getHeader()->getParent(), AddrSpace) &&
1545 (Stride == 1 || Stride == -1))
1546 return Stride;
1547
1548 if (Assume) {
1549 PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
1550 LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap:\n"
1551 << "LAA: Pointer: " << *Ptr << "\n"
1552 << "LAA: SCEV: " << *AR << "\n"
1553 << "LAA: Added an overflow assumption\n");
1554 return Stride;
1555 }
1556 LLVM_DEBUG(
1557 dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
1558 << *Ptr << " SCEV: " << *AR << "\n");
1559 return std::nullopt;
1560}
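// Illustrative example (annotation): for i32 accesses (AllocSize = 4) through
// a pointer with SCEV {%A,+,8}<%loop>, StepVal = 8 and the returned stride is
// 2, while a step of 6 leaves a remainder and yields std::nullopt; the sign of
// the result follows the step, e.g. {%A,+,-4}<%loop> gives a stride of -1.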
1561
1562std::optional<int> llvm::getPointersDiff(Type *ElemTyA, Value *PtrA,
1563 Type *ElemTyB, Value *PtrB,
1564 const DataLayout &DL,
1565 ScalarEvolution &SE, bool StrictCheck,
1566 bool CheckType) {
1567 assert(PtrA && PtrB && "Expected non-nullptr pointers.");
1568
1569 // Make sure that A and B are different pointers.
1570 if (PtrA == PtrB)
1571 return 0;
1572
1573 // Make sure that the element types are the same if required.
1574 if (CheckType && ElemTyA != ElemTyB)
1575 return std::nullopt;
1576
1577 unsigned ASA = PtrA->getType()->getPointerAddressSpace();
1578 unsigned ASB = PtrB->getType()->getPointerAddressSpace();
1579
1580 // Check that the address spaces match.
1581 if (ASA != ASB)
1582 return std::nullopt;
1583 unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
1584
1585 APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
1586 Value *PtrA1 = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
1587 Value *PtrB1 = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
1588
1589 int Val;
1590 if (PtrA1 == PtrB1) {
1591 // Retrieve the address space again as pointer stripping now tracks through
1592 // `addrspacecast`.
1593 ASA = cast<PointerType>(PtrA1->getType())->getAddressSpace();
1594 ASB = cast<PointerType>(PtrB1->getType())->getAddressSpace();
1595 // Check that the address spaces match and that the pointers are valid.
1596 if (ASA != ASB)
1597 return std::nullopt;
1598
1599 IdxWidth = DL.getIndexSizeInBits(ASA);
1600 OffsetA = OffsetA.sextOrTrunc(IdxWidth);
1601 OffsetB = OffsetB.sextOrTrunc(IdxWidth);
1602
1603 OffsetB -= OffsetA;
1604 Val = OffsetB.getSExtValue();
1605 } else {
1606 // Otherwise compute the distance with SCEV between the base pointers.
1607 const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
1608 const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
1609 const auto *Diff =
1610 dyn_cast<SCEVConstant>(SE.getMinusSCEV(PtrSCEVB, PtrSCEVA));
1611 if (!Diff)
1612 return std::nullopt;
1613 Val = Diff->getAPInt().getSExtValue();
1614 }
1615 int Size = DL.getTypeStoreSize(ElemTyA);
1616 int Dist = Val / Size;
1617
1618 // Ensure that the calculated distance matches the type-based one after all
1619 // bitcasts have been stripped from the provided pointers.
1620 if (!StrictCheck || Dist * Size == Val)
1621 return Dist;
1622 return std::nullopt;
1623}
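// A minimal usage sketch for getPointersDiff (hypothetical values): with
// ElemTy = i32 and PtrA = &A[1], PtrB = &A[3], the byte offset between the
// two pointers is 8 and Size = 4, so the returned element distance is 2.
// With StrictCheck, a byte offset of 6 (not a multiple of 4) would instead
// yield std::nullopt.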
1624
1626 const DataLayout &DL, ScalarEvolution &SE,
1627 SmallVectorImpl<unsigned> &SortedIndices) {
1629 VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
1630 "Expected list of pointer operands.");
1631 // Walk over the pointers, and map each of them to an offset relative to
1632 // the first pointer in the array.
1633 Value *Ptr0 = VL[0];
1634
1635 using DistOrdPair = std::pair<int64_t, int>;
1636 auto Compare = llvm::less_first();
1637 std::set<DistOrdPair, decltype(Compare)> Offsets(Compare);
1638 Offsets.emplace(0, 0);
1639 int Cnt = 1;
1640 bool IsConsecutive = true;
1641 for (auto *Ptr : VL.drop_front()) {
1642 std::optional<int> Diff = getPointersDiff(ElemTy, Ptr0, ElemTy, Ptr, DL, SE,
1643 /*StrictCheck=*/true);
1644 if (!Diff)
1645 return false;
1646
1647 // Bail out if a pointer with the same offset has already been seen.
1648 int64_t Offset = *Diff;
1649 auto Res = Offsets.emplace(Offset, Cnt);
1650 if (!Res.second)
1651 return false;
1652 // Consecutive order if the inserted element is the last one.
1653 IsConsecutive = IsConsecutive && std::next(Res.first) == Offsets.end();
1654 ++Cnt;
1655 }
1656 SortedIndices.clear();
1657 if (!IsConsecutive) {
1658 // Fill SortedIndices array only if it is non-consecutive.
1659 SortedIndices.resize(VL.size());
1660 Cnt = 0;
1661 for (const std::pair<int64_t, int> &Pair : Offsets) {
1662 SortedIndices[Cnt] = Pair.second;
1663 ++Cnt;
1664 }
1665 }
1666 return true;
1667}
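// Sketch of sortPtrAccesses on hypothetical operands: for pointers to A[2],
// A[0] and A[1] (ElemTy = i32), the element offsets relative to the first
// pointer are {0, -2, -1}. The sorted order differs from the original one,
// so SortedIndices becomes {1, 2, 0} and the function returns true.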
1668
1669/// Returns true if the memory operations \p A and \p B are consecutive.
1671 ScalarEvolution &SE, bool CheckType) {
1674 if (!PtrA || !PtrB)
1675 return false;
1676 Type *ElemTyA = getLoadStoreType(A);
1677 Type *ElemTyB = getLoadStoreType(B);
1678 std::optional<int> Diff =
1679 getPointersDiff(ElemTyA, PtrA, ElemTyB, PtrB, DL, SE,
1680 /*StrictCheck=*/true, CheckType);
1681 return Diff && *Diff == 1;
1682}
1683
1685 visitPointers(SI->getPointerOperand(), *InnermostLoop,
1686 [this, SI](Value *Ptr) {
1687 Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
1688 InstMap.push_back(SI);
1689 ++AccessIdx;
1690 });
1691}
1692
1694 visitPointers(LI->getPointerOperand(), *InnermostLoop,
1695 [this, LI](Value *Ptr) {
1696 Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
1697 InstMap.push_back(LI);
1698 ++AccessIdx;
1699 });
1700}
1701
1704 switch (Type) {
1705 case NoDep:
1706 case Forward:
1709
1710 case Unknown:
1713 case Backward:
1715 case IndirectUnsafe:
1717 }
1718 llvm_unreachable("unexpected DepType!");
1719}
1720
1722 switch (Type) {
1723 case NoDep:
1724 case Forward:
1725 case ForwardButPreventsForwarding:
1726 case Unknown:
1727 case IndirectUnsafe:
1728 return false;
1729
1730 case BackwardVectorizable:
1731 case Backward:
1732 case BackwardVectorizableButPreventsForwarding:
1733 return true;
1734 }
1735 llvm_unreachable("unexpected DepType!");
1736}
1737
1739 return isBackward() || Type == Unknown;
1740}
1741
1743 switch (Type) {
1744 case Forward:
1745 case ForwardButPreventsForwarding:
1746 return true;
1747
1748 case NoDep:
1749 case Unknown:
1750 case BackwardVectorizable:
1751 case Backward:
1752 case BackwardVectorizableButPreventsForwarding:
1753 case IndirectUnsafe:
1754 return false;
1755 }
1756 llvm_unreachable("unexpected DepType!");
1757}
1758
1759bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1760 uint64_t TypeByteSize) {
1761 // If loads occur at a distance that is not a multiple of a feasible vector
1762 // factor, store-to-load forwarding does not take place.
1763 // Positive dependences might cause trouble because vectorizing them might
1764 // prevent store-to-load forwarding, making vectorized code run a lot slower.
1765 // a[i] = a[i-3] ^ a[i-8];
1766 // The stores to a[i:i+1] don't line up with the loads from a[i-3:i-2], and
1767 // hence on a typical architecture store-to-load forwarding does not take
1768 // place. Vectorizing such cases does not make sense.
1769 // Store-load forwarding distance.
1770
1771 // After this many iterations store-to-load forwarding conflicts should not
1772 // cause any slowdowns.
1773 const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1774 // Maximum vector factor.
1775 uint64_t MaxVFWithoutSLForwardIssues = std::min(
1776 VectorizerParams::MaxVectorWidth * TypeByteSize, MinDepDistBytes);
1777
1778 // Compute the smallest VF at which the store and load would be misaligned.
1779 for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1780 VF *= 2) {
1781 // If the number of vector iterations between the store and the load is
1782 // small, we could incur conflicts.
1783 if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1784 MaxVFWithoutSLForwardIssues = (VF >> 1);
1785 break;
1786 }
1787 }
1788
1789 if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1790 LLVM_DEBUG(
1791 dbgs() << "LAA: Distance " << Distance
1792 << " that could cause a store-load forwarding conflict\n");
1793 return true;
1794 }
1795
1796 if (MaxVFWithoutSLForwardIssues < MinDepDistBytes &&
1797 MaxVFWithoutSLForwardIssues !=
1798 VectorizerParams::MaxVectorWidth * TypeByteSize)
1799 MinDepDistBytes = MaxVFWithoutSLForwardIssues;
1800 return false;
1801}
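// Worked example for couldPreventStoreLoadForward (illustrative numbers,
// assuming MinDepDistBytes is still at its initial, very large value): with
// TypeByteSize = 4 and Distance = 12, the first candidate VF is 8 bytes;
// 12 % 8 != 0 and 12 / 8 = 1 < 32 iterations, so the maximum VF without
// forwarding issues shrinks to 4 bytes, which is below 2 * TypeByteSize and
// the function reports a potential store-to-load forwarding conflict.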
1802
1803void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
1804 if (Status < S)
1805 Status = S;
1806}
1807
1808/// Given a dependence-distance \p Dist between two
1809/// memory accesses that have strides in the same direction, whose maximum
1810/// absolute stride is given in \p MaxStride, and that have the same
1811/// type size \p TypeByteSize, in a loop whose backedge-taken count is \p
1812/// BackedgeTakenCount, check whether it is possible to prove statically that
1813/// the dependence distance is larger than the range that the accesses will
1814/// travel through the execution of the loop. If so, return true; otherwise
1815/// return false. This is useful, for example, in loops such as the following (PR31098):
1816/// for (i = 0; i < D; ++i) {
1817/// = out[i];
1818/// out[i+D] =
1819/// }
1820static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1821 const SCEV &BackedgeTakenCount,
1822 const SCEV &Dist, uint64_t MaxStride,
1823 uint64_t TypeByteSize) {
1824
1825 // If we can prove that
1826 // (**) |Dist| > BackedgeTakenCount * Step
1827 // where Step is the absolute stride of the memory accesses in bytes,
1828 // then there is no dependence.
1829 //
1830 // Rationale:
1831 // We basically want to check if the absolute distance (|Dist/Step|)
1832 // is >= the loop iteration count (or > BackedgeTakenCount).
1833 // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1834 // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1835 // that the dependence distance is >= VF; This is checked elsewhere.
1836 // But in some cases we can prune dependence distances early, and
1837 // even before selecting the VF, and without a runtime test, by comparing
1838 // the distance against the loop iteration count. Since the vectorized code
1839 // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1840 // also guarantees that distance >= VF.
1841 //
1842 const uint64_t ByteStride = MaxStride * TypeByteSize;
1843 const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
1844 const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);
1845
1846 const SCEV *CastedDist = &Dist;
1847 const SCEV *CastedProduct = Product;
1848 uint64_t DistTypeSizeBits = DL.getTypeSizeInBits(Dist.getType());
1849 uint64_t ProductTypeSizeBits = DL.getTypeSizeInBits(Product->getType());
1850
1851 // The dependence distance can be positive/negative, so we sign extend Dist;
1852 // The multiplication of the absolute stride in bytes and the
1853 // backedgeTakenCount is non-negative, so we zero extend Product.
1854 if (DistTypeSizeBits > ProductTypeSizeBits)
1855 CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1856 else
1857 CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1858
1859 // Is Dist - (BackedgeTakenCount * Step) > 0 ?
1860 // (If so, then we have proven (**) because |Dist| >= Dist)
1861 const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1862 if (SE.isKnownPositive(Minus))
1863 return true;
1864
1865 // Second try: Is -Dist - (BackedgeTakenCount * Step) > 0 ?
1866 // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1867 const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1868 Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1869 if (SE.isKnownPositive(Minus))
1870 return true;
1871
1872 return false;
1873}
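// Numeric sketch for isSafeDependenceDistance (illustrative values): with
// BackedgeTakenCount = 99, MaxStride = 1 and TypeByteSize = 4, the accesses
// sweep at most 99 * 4 = 396 bytes, so a known dependence distance of 400
// bytes (or -400) proves independence, while a distance of 200 does not.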
1874
1875/// Check the dependence for two accesses with the same stride \p Stride.
1876/// \p Distance is the positive distance and \p TypeByteSize is the type size
1877/// in bytes.
1878///
1879/// \returns true if they are independent.
1881 uint64_t TypeByteSize) {
1882 assert(Stride > 1 && "The stride must be greater than 1");
1883 assert(TypeByteSize > 0 && "The type size in byte must be non-zero");
1884 assert(Distance > 0 && "The distance must be non-zero");
1885
1886 // Skip if the distance is not a multiple of the type byte size.
1887 if (Distance % TypeByteSize)
1888 return false;
1889
1890 uint64_t ScaledDist = Distance / TypeByteSize;
1891
1892 // No dependence if the scaled distance is not a multiple of the stride.
1893 // E.g.
1894 // for (i = 0; i < 1024 ; i += 4)
1895 // A[i+2] = A[i] + 1;
1896 //
1897 // Two accesses in memory (scaled distance is 2, stride is 4):
1898 // | A[0] | | | | A[4] | | | |
1899 // | | | A[2] | | | | A[6] | |
1900 //
1901 // E.g.
1902 // for (i = 0; i < 1024 ; i += 3)
1903 // A[i+4] = A[i] + 1;
1904 //
1905 // Two accesses in memory (scaled distance is 4, stride is 3):
1906 // | A[0] | | | A[3] | | | A[6] | | |
1907 // | | | | | A[4] | | | A[7] | |
1908 return ScaledDist % Stride;
1909}
1910
1911/// Returns true if any of the underlying objects has a loop varying address,
1912/// i.e. may change in \p L.
1913static bool
1915 ScalarEvolution &SE, const Loop *L) {
1916 return any_of(UnderlyingObjects, [&SE, L](const Value *UO) {
1917 return !SE.isLoopInvariant(SE.getSCEV(const_cast<Value *>(UO)), L);
1918 });
1919}
1920
1921namespace {
1922struct DepDistanceStrideAndSizeInfo {
1923 const SCEV *Dist;
1924 uint64_t StrideA;
1925 uint64_t StrideB;
1926 uint64_t TypeByteSize;
1927 bool AIsWrite;
1928 bool BIsWrite;
1929
1930 DepDistanceStrideAndSizeInfo(const SCEV *Dist, uint64_t StrideA,
1931 uint64_t StrideB, uint64_t TypeByteSize,
1932 bool AIsWrite, bool BIsWrite)
1933 : Dist(Dist), StrideA(StrideA), StrideB(StrideB),
1934 TypeByteSize(TypeByteSize), AIsWrite(AIsWrite), BIsWrite(BIsWrite) {}
1935};
1936} // namespace
1937
1938// Get the dependence distance, strides, type size and whether each access is a
1939// write for the dependence between A and B. Returns a DepType if we can prove
1940// there is no dependence or the analysis fails. Outlined to limit the scope
1941// of various temporary variables, like A/BPtr, StrideA/BPtr and others.
1942// Returns either the dependence result, if it could already be determined, or a
1943// struct containing (Distance, Stride, TypeSize, AIsWrite, BIsWrite).
1944static std::variant<MemoryDepChecker::Dependence::DepType,
1945 DepDistanceStrideAndSizeInfo>
1946getDependenceDistanceStrideAndSize(
1947 const AccessAnalysis::MemAccessInfo &A, Instruction *AInst,
1948 const AccessAnalysis::MemAccessInfo &B, Instruction *BInst,
1949 const DenseMap<Value *, const SCEV *> &Strides,
1950 const DenseMap<Value *, SmallVector<const Value *, 16>> &UnderlyingObjects,
1951 PredicatedScalarEvolution &PSE, const Loop *InnermostLoop) {
1952 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
1953 auto &SE = *PSE.getSE();
1954 auto [APtr, AIsWrite] = A;
1955 auto [BPtr, BIsWrite] = B;
1956
1957 // Two reads are independent.
1958 if (!AIsWrite && !BIsWrite)
1960
1961 Type *ATy = getLoadStoreType(AInst);
1962 Type *BTy = getLoadStoreType(BInst);
1963
1964 // We cannot check pointers in different address spaces.
1965 if (APtr->getType()->getPointerAddressSpace() !=
1966 BPtr->getType()->getPointerAddressSpace())
1968
1969 int64_t StrideAPtr =
1970 getPtrStride(PSE, ATy, APtr, InnermostLoop, Strides, true).value_or(0);
1971 int64_t StrideBPtr =
1972 getPtrStride(PSE, BTy, BPtr, InnermostLoop, Strides, true).value_or(0);
1973
1974 const SCEV *Src = PSE.getSCEV(APtr);
1975 const SCEV *Sink = PSE.getSCEV(BPtr);
1976
1977 // If the induction step is negative we have to invert source and sink of the
1978 // dependence when measuring the distance between them. We should not swap
1979 // AIsWrite with BIsWrite, as their uses expect them in program order.
1980 if (StrideAPtr < 0) {
1981 std::swap(Src, Sink);
1982 std::swap(AInst, BInst);
1983 }
1984
1985 const SCEV *Dist = SE.getMinusSCEV(Sink, Src);
1986
1987 LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1988 << "(Induction step: " << StrideAPtr << ")\n");
1989 LLVM_DEBUG(dbgs() << "LAA: Distance for " << *AInst << " to " << *BInst
1990 << ": " << *Dist << "\n");
1991
1992 // Needs accesses where the addresses of the accessed underlying objects do
1993 // not change within the loop.
1994 if (isLoopVariantIndirectAddress(UnderlyingObjects.find(APtr)->second, SE,
1995 InnermostLoop) ||
1996 isLoopVariantIndirectAddress(UnderlyingObjects.find(BPtr)->second, SE,
1997 InnermostLoop))
1999
2000 // Need accesses with constant strides and the same direction. We don't want
2001 // to vectorize "A[B[i]] += ..." and similar code or pointer arithmetic that
2002 // could wrap in the address space.
2003 if (!StrideAPtr || !StrideBPtr || (StrideAPtr > 0 && StrideBPtr < 0) ||
2004 (StrideAPtr < 0 && StrideBPtr > 0)) {
2005 LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
2007 }
2008
2009 if (!isa<SCEVConstant, SCEVCouldNotCompute>(Dist))
2010 Dist = SE.applyLoopGuards(Dist, InnermostLoop);
2011
2012 uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
2013 bool HasSameSize =
2014 DL.getTypeStoreSizeInBits(ATy) == DL.getTypeStoreSizeInBits(BTy);
2015 if (!HasSameSize)
2016 TypeByteSize = 0;
2017 return DepDistanceStrideAndSizeInfo(Dist, std::abs(StrideAPtr),
2018 std::abs(StrideBPtr), TypeByteSize,
2019 AIsWrite, BIsWrite);
2020}
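// Sketch of the common case (hypothetical accesses): for a store to A[i] and
// a load of A[i + 4] with i32 elements, where both pointers advance by one
// element per iteration, Dist is the constant SCEV 16, both strides are 1,
// and TypeByteSize is 4, so a DepDistanceStrideAndSizeInfo is returned rather
// than an early DepType.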
2021
2022MemoryDepChecker::Dependence::DepType MemoryDepChecker::isDependent(
2023 const MemAccessInfo &A, unsigned AIdx, const MemAccessInfo &B,
2024 unsigned BIdx, const DenseMap<Value *, const SCEV *> &Strides,
2026 &UnderlyingObjects) {
2027 assert(AIdx < BIdx && "Must pass arguments in program order");
2028
2029 // Get the dependence distance, strides, type size and whether each access
2030 // is a write for the dependence between A and B.
2032 A, InstMap[AIdx], B, InstMap[BIdx], Strides, UnderlyingObjects, PSE,
2033 InnermostLoop);
2034 if (std::holds_alternative<Dependence::DepType>(Res))
2035 return std::get<Dependence::DepType>(Res);
2036
2037 const auto &[Dist, StrideA, StrideB, TypeByteSize, AIsWrite, BIsWrite] =
2038 std::get<DepDistanceStrideAndSizeInfo>(Res);
2039 bool HasSameSize = TypeByteSize > 0;
2040
2041 std::optional<uint64_t> CommonStride =
2042 StrideA == StrideB ? std::make_optional(StrideA) : std::nullopt;
2043 if (isa<SCEVCouldNotCompute>(Dist)) {
2044 // TODO: Relax requirement that there is a common stride to retry with
2045 // non-constant distance dependencies.
2046 FoundNonConstantDistanceDependence |= !!CommonStride;
2047 LLVM_DEBUG(dbgs() << "LAA: Dependence because of uncomputable distance.\n");
2048 return Dependence::Unknown;
2049 }
2050
2051 ScalarEvolution &SE = *PSE.getSE();
2052 auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
2053 uint64_t MaxStride = std::max(StrideA, StrideB);
2054
2055 // If the distance between the accesses is larger than their maximum absolute
2056 // stride multiplied by the backedge-taken count, the accesses are independent,
2057 // i.e. they are far enough apart that they won't access the same
2058 // location across all loop iterations.
2059 if (HasSameSize &&
2061 MaxStride, TypeByteSize))
2062 return Dependence::NoDep;
2063
2064 const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
2065
2066 // Attempt to prove strided accesses independent.
2067 if (C) {
2068 const APInt &Val = C->getAPInt();
2069 int64_t Distance = Val.getSExtValue();
2070
2071 // If the distance between accesses and their strides are known constants,
2072 // check whether the accesses interlace each other.
2073 if (std::abs(Distance) > 0 && CommonStride && *CommonStride > 1 &&
2074 HasSameSize &&
2075 areStridedAccessesIndependent(std::abs(Distance), *CommonStride,
2076 TypeByteSize)) {
2077 LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
2078 return Dependence::NoDep;
2079 }
2080 }
2081
2082 // Negative distances are not plausible dependencies.
2083 if (SE.isKnownNonPositive(Dist)) {
2084 if (SE.isKnownNonNegative(Dist)) {
2085 if (HasSameSize) {
2086 // Write to the same location with the same size.
2087 return Dependence::Forward;
2088 } else {
2089 LLVM_DEBUG(dbgs() << "LAA: possibly zero dependence difference but "
2090 "different type sizes\n");
2091 return Dependence::Unknown;
2092 }
2093 }
2094
2095 bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
2096 // Check if the first access writes to a location that is read in a later
2097 // iteration, where the distance between them is not a multiple of a vector
2098 // factor and relatively small.
2099 //
2100 // NOTE: There is no need to update MaxSafeVectorWidthInBits after call to
2101 // couldPreventStoreLoadForward, even if it changed MinDepDistBytes, since a
2102 // forward dependency will allow vectorization using any width.
2103
2104 if (IsTrueDataDependence && EnableForwardingConflictDetection) {
2105 if (!C) {
2106 // TODO: FoundNonConstantDistanceDependence is used as a necessary
2107 // condition to consider retrying with runtime checks. Historically, we
2108 // did not set it when strides were different but there is no inherent
2109 // reason to.
2110 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2111 return Dependence::Unknown;
2112 }
2113 if (!HasSameSize ||
2114 couldPreventStoreLoadForward(C->getAPInt().abs().getZExtValue(),
2115 TypeByteSize)) {
2116 LLVM_DEBUG(
2117 dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
2119 }
2120 }
2121
2122 LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
2123 return Dependence::Forward;
2124 }
2125
2126 int64_t MinDistance = SE.getSignedRangeMin(Dist).getSExtValue();
2127 // Below we only handle strictly positive distances.
2128 if (MinDistance <= 0) {
2129 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2130 return Dependence::Unknown;
2131 }
2132
2133 if (!isa<SCEVConstant>(Dist)) {
2134 // Previously this case would be treated as Unknown, possibly setting
2135 // FoundNonConstantDistanceDependence to force re-trying with runtime
2136 // checks. Until the TODO below is addressed, set it here to preserve
2137 // original behavior w.r.t. re-trying with runtime checks.
2138 // TODO: FoundNonConstantDistanceDependence is used as a necessary
2139 // condition to consider retrying with runtime checks. Historically, we
2140 // did not set it when strides were different but there is no inherent
2141 // reason to.
2142 FoundNonConstantDistanceDependence |= CommonStride.has_value();
2143 }
2144
2145 if (!HasSameSize) {
2146 LLVM_DEBUG(dbgs() << "LAA: ReadWrite-Write positive dependency with "
2147 "different type sizes\n");
2148 return Dependence::Unknown;
2149 }
2150
2151 if (!CommonStride)
2152 return Dependence::Unknown;
2153
2154 // Bail out early if passed-in parameters make vectorization not feasible.
2155 unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
2157 unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
2159 // The minimum number of iterations for a vectorized/unrolled version.
2160 unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
2161
2162 // It's not vectorizable if the distance is smaller than the minimum distance
2163 // needed for a vectorized/unrolled version. Vectorizing one iteration in
2164 // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
2165 // TypeByteSize (no need to add the last gap distance).
2166 //
2167 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2168 // foo(int *A) {
2169 // int *B = (int *)((char *)A + 14);
2170 // for (i = 0 ; i < 1024 ; i += 2)
2171 // B[i] = A[i] + 1;
2172 // }
2173 //
2174 // Two accesses in memory (stride is 2):
2175 // | A[0] | | A[2] | | A[4] | | A[6] | |
2176 // | B[0] | | B[2] | | B[4] |
2177 //
2178 // The minimum distance needed to vectorize all iterations except the last
2179 // one is 4 * 2 * (MinNumIter - 1); the last iteration needs 4 more.
2180 // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
2181 //
2182 // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
2183 // 12, which is less than the distance of 14.
2184 //
2185 // If MinNumIter is 4 (say, if a user forces the vectorization factor to be 4),
2186 // the minimum distance needed is 28, which is greater than the distance of
2187 // 14. It is not safe to vectorize in that case.
2188
2189 // We know that Dist is positive, but it may not be constant. Use the signed
2190 // minimum for computations below, as this ensures we compute the closest
2191 // possible dependence distance.
2192 uint64_t MinDistanceNeeded =
2193 TypeByteSize * *CommonStride * (MinNumIter - 1) + TypeByteSize;
2194 if (MinDistanceNeeded > static_cast<uint64_t>(MinDistance)) {
2195 if (!isa<SCEVConstant>(Dist)) {
2196 // For non-constant distances, we checked the lower bound of the
2197 // dependence distance and the distance may be larger at runtime (and safe
2198 // for vectorization). Classify it as Unknown, so we re-try with runtime
2199 // checks.
2200 return Dependence::Unknown;
2201 }
2202 LLVM_DEBUG(dbgs() << "LAA: Failure because of positive minimum distance "
2203 << MinDistance << '\n');
2204 return Dependence::Backward;
2205 }
2206
2207 // Unsafe if the minimum distance needed is greater than the smallest
2208 // dependence distance.
2209 if (MinDistanceNeeded > MinDepDistBytes) {
2210 LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
2211 << MinDistanceNeeded << " size in bytes\n");
2212 return Dependence::Backward;
2213 }
2214
2215 // Positive distance bigger than max vectorization factor.
2216 // FIXME: Should use max factor instead of max distance in bytes, which could
2217 // not handle different types.
2218 // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
2219 // void foo (int *A, char *B) {
2220 // for (unsigned i = 0; i < 1024; i++) {
2221 // A[i+2] = A[i] + 1;
2222 // B[i+2] = B[i] + 1;
2223 // }
2224 // }
2225 //
2226 // This case is currently unsafe according to the max safe distance. If we
2227 // analyze the two accesses on array B, the max safe dependence distance
2228 // is 2. Then when we analyze the accesses on array A, the minimum distance
2229 // needed is 8, which is greater than 2, so vectorization is forbidden. But
2230 // actually both A and B could be vectorized with a factor of 2.
2231 MinDepDistBytes =
2232 std::min(static_cast<uint64_t>(MinDistance), MinDepDistBytes);
2233
2234 bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
2235 uint64_t MinDepDistBytesOld = MinDepDistBytes;
2236 if (IsTrueDataDependence && EnableForwardingConflictDetection &&
2237 isa<SCEVConstant>(Dist) &&
2238 couldPreventStoreLoadForward(MinDistance, TypeByteSize)) {
2239 // Sanity check that we didn't update MinDepDistBytes when calling
2240 // couldPreventStoreLoadForward
2241 assert(MinDepDistBytes == MinDepDistBytesOld &&
2242 "An update to MinDepDistBytes requires an update to "
2243 "MaxSafeVectorWidthInBits");
2244 (void)MinDepDistBytesOld;
2246 }
2247
2248 // An update to MinDepDistBytes requires an update to MaxSafeVectorWidthInBits
2249 // since there is a backwards dependency.
2250 uint64_t MaxVF = MinDepDistBytes / (TypeByteSize * *CommonStride);
2251 LLVM_DEBUG(dbgs() << "LAA: Positive min distance " << MinDistance
2252 << " with max VF = " << MaxVF << '\n');
2253
2254 uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
2255 if (!isa<SCEVConstant>(Dist) && MaxVFInBits < MaxTargetVectorWidthInBits) {
2256 // For non-constant distances, we checked the lower bound of the dependence
2257 // distance and the distance may be larger at runtime (and safe for
2258 // vectorization). Classify it as Unknown, so we re-try with runtime checks.
2259 return Dependence::Unknown;
2260 }
2261
2262 MaxSafeVectorWidthInBits = std::min(MaxSafeVectorWidthInBits, MaxVFInBits);
2264}
2265
2267 DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
2268 const DenseMap<Value *, const SCEV *> &Strides,
2270 &UnderlyingObjects) {
2271
2272 MinDepDistBytes = -1;
2274 for (MemAccessInfo CurAccess : CheckDeps) {
2275 if (Visited.count(CurAccess))
2276 continue;
2277
2278 // Get the relevant memory access set.
2280 AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
2281
2282 // Check accesses within this set.
2284 AccessSets.member_begin(I);
2286 AccessSets.member_end();
2287
2288 // Check every access pair.
2289 while (AI != AE) {
2290 Visited.insert(*AI);
2291 bool AIIsWrite = AI->getInt();
2292 // Check loads only against the next equivalence class, but stores also
2293 // against other stores in the same equivalence class - to the same address.
2295 (AIIsWrite ? AI : std::next(AI));
2296 while (OI != AE) {
2297 // Check every accessing instruction pair in program order.
2298 for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
2299 I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
2300 // Scan all accesses of another equivalence class, but only the
2301 // subsequent accesses within the same equivalence class.
2302 for (std::vector<unsigned>::iterator
2303 I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
2304 I2E = (OI == AI ? I1E : Accesses[*OI].end());
2305 I2 != I2E; ++I2) {
2306 auto A = std::make_pair(&*AI, *I1);
2307 auto B = std::make_pair(&*OI, *I2);
2308
2309 assert(*I1 != *I2);
2310 if (*I1 > *I2)
2311 std::swap(A, B);
2312
2314 isDependent(*A.first, A.second, *B.first, B.second, Strides,
2315 UnderlyingObjects);
2317
2318 // Gather dependences unless we accumulated MaxDependences
2319 // dependences. In that case return as soon as we find the first
2320 // unsafe dependence. This puts a limit on this quadratic
2321 // algorithm.
2322 if (RecordDependences) {
2323 if (Type != Dependence::NoDep)
2324 Dependences.push_back(Dependence(A.second, B.second, Type));
2325
2326 if (Dependences.size() >= MaxDependences) {
2327 RecordDependences = false;
2328 Dependences.clear();
2330 << "Too many dependences, stopped recording\n");
2331 }
2332 }
2333 if (!RecordDependences && !isSafeForVectorization())
2334 return false;
2335 }
2336 ++OI;
2337 }
2338 AI++;
2339 }
2340 }
2341
2342 LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
2343 return isSafeForVectorization();
2344}
2345
2348 MemAccessInfo Access(Ptr, isWrite);
2349 auto &IndexVector = Accesses.find(Access)->second;
2350
2352 transform(IndexVector,
2353 std::back_inserter(Insts),
2354 [&](unsigned Idx) { return this->InstMap[Idx]; });
2355 return Insts;
2356}
2357
2359 "NoDep",
2360 "Unknown",
2361 "IndirectUnsafe",
2362 "Forward",
2363 "ForwardButPreventsForwarding",
2364 "Backward",
2365 "BackwardVectorizable",
2366 "BackwardVectorizableButPreventsForwarding"};
2367
2369 raw_ostream &OS, unsigned Depth,
2370 const SmallVectorImpl<Instruction *> &Instrs) const {
2371 OS.indent(Depth) << DepName[Type] << ":\n";
2372 OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
2373 OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
2374}
2375
2376bool LoopAccessInfo::canAnalyzeLoop() {
2377 // We need to have a loop header.
2378 LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
2379 << TheLoop->getHeader()->getParent()->getName() << ": "
2380 << TheLoop->getHeader()->getName() << '\n');
2381
2382 // We can only analyze innermost loops.
2383 if (!TheLoop->isInnermost()) {
2384 LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
2385 recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
2386 return false;
2387 }
2388
2389 // We must have a single backedge.
2390 if (TheLoop->getNumBackEdges() != 1) {
2391 LLVM_DEBUG(
2392 dbgs() << "LAA: loop control flow is not understood by analyzer\n");
2393 recordAnalysis("CFGNotUnderstood")
2394 << "loop control flow is not understood by analyzer";
2395 return false;
2396 }
2397
2398 // ScalarEvolution needs to be able to find the exit count.
2399 const SCEV *ExitCount = PSE->getBackedgeTakenCount();
2400 if (isa<SCEVCouldNotCompute>(ExitCount)) {
2401 recordAnalysis("CantComputeNumberOfIterations")
2402 << "could not determine number of loop iterations";
2403 LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
2404 return false;
2405 }
2406
2407 return true;
2408}
2409
2410void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
2411 const TargetLibraryInfo *TLI,
2412 DominatorTree *DT) {
2413 // Holds the Load and Store instructions.
2416 SmallPtrSet<MDNode *, 8> LoopAliasScopes;
2417
2418 // Holds all the different accesses in the loop.
2419 unsigned NumReads = 0;
2420 unsigned NumReadWrites = 0;
2421
2422 bool HasComplexMemInst = false;
2423
2424 // A runtime check is only legal to insert if there are no convergent calls.
2425 HasConvergentOp = false;
2426
2427 PtrRtChecking->Pointers.clear();
2428 PtrRtChecking->Need = false;
2429
2430 const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
2431
2432 const bool EnableMemAccessVersioningOfLoop =
2434 !TheLoop->getHeader()->getParent()->hasOptSize();
2435
2436 // Traverse blocks in fixed RPOT order, regardless of their storage in the
2437 // loop info, as it may be arbitrary.
2438 LoopBlocksRPO RPOT(TheLoop);
2439 RPOT.perform(LI);
2440 for (BasicBlock *BB : RPOT) {
2441 // Scan the BB and collect legal loads and stores. Also detect any
2442 // convergent instructions.
2443 for (Instruction &I : *BB) {
2444 if (auto *Call = dyn_cast<CallBase>(&I)) {
2445 if (Call->isConvergent())
2446 HasConvergentOp = true;
2447 }
2448
2449 // If we have found both a non-vectorizable memory instruction and a
2450 // convergent operation in this loop, there is no reason to continue the search.
2451 if (HasComplexMemInst && HasConvergentOp) {
2452 CanVecMem = false;
2453 return;
2454 }
2455
2456 // Avoid hitting recordAnalysis multiple times.
2457 if (HasComplexMemInst)
2458 continue;
2459
2460 // Record alias scopes defined inside the loop.
2461 if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
2462 for (Metadata *Op : Decl->getScopeList()->operands())
2463 LoopAliasScopes.insert(cast<MDNode>(Op));
2464
2465 // Many math library functions read the rounding mode. We will only
2466 // vectorize a loop if it contains known function calls that don't set
2467 // the flag. Therefore, it is safe to ignore this read from memory.
2468 auto *Call = dyn_cast<CallInst>(&I);
2469 if (Call && getVectorIntrinsicIDForCall(Call, TLI))
2470 continue;
2471
2472 // If this is a load, save it. If this instruction can read from memory
2473 // but is not a load, then we quit. Notice that we don't handle function
2474 // calls that read or write.
2475 if (I.mayReadFromMemory()) {
2476 // If the function has an explicit vectorized counterpart, we can safely
2477 // assume that it can be vectorized.
2478 if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
2479 !VFDatabase::getMappings(*Call).empty())
2480 continue;
2481
2482 auto *Ld = dyn_cast<LoadInst>(&I);
2483 if (!Ld) {
2484 recordAnalysis("CantVectorizeInstruction", Ld)
2485 << "instruction cannot be vectorized";
2486 HasComplexMemInst = true;
2487 continue;
2488 }
2489 if (!Ld->isSimple() && !IsAnnotatedParallel) {
2490 recordAnalysis("NonSimpleLoad", Ld)
2491 << "read with atomic ordering or volatile read";
2492 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
2493 HasComplexMemInst = true;
2494 continue;
2495 }
2496 NumLoads++;
2497 Loads.push_back(Ld);
2498 DepChecker->addAccess(Ld);
2499 if (EnableMemAccessVersioningOfLoop)
2500 collectStridedAccess(Ld);
2501 continue;
2502 }
2503
2504 // Save 'store' instructions. Abort if other instructions write to memory.
2505 if (I.mayWriteToMemory()) {
2506 auto *St = dyn_cast<StoreInst>(&I);
2507 if (!St) {
2508 recordAnalysis("CantVectorizeInstruction", St)
2509 << "instruction cannot be vectorized";
2510 HasComplexMemInst = true;
2511 continue;
2512 }
2513 if (!St->isSimple() && !IsAnnotatedParallel) {
2514 recordAnalysis("NonSimpleStore", St)
2515 << "write with atomic ordering or volatile write";
2516 LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
2517 HasComplexMemInst = true;
2518 continue;
2519 }
2520 NumStores++;
2521 Stores.push_back(St);
2522 DepChecker->addAccess(St);
2523 if (EnableMemAccessVersioningOfLoop)
2524 collectStridedAccess(St);
2525 }
2526 } // Next instr.
2527 } // Next block.
2528
2529 if (HasComplexMemInst) {
2530 CanVecMem = false;
2531 return;
2532 }
2533
2534 // Now we have two lists that hold the loads and the stores.
2535 // Next, we find the pointers that they use.
2536
2537 // Check if we see any stores. If there are no stores, then we don't
2538 // care if the pointers are *restrict*.
2539 if (!Stores.size()) {
2540 LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
2541 CanVecMem = true;
2542 return;
2543 }
2544
2545 MemoryDepChecker::DepCandidates DependentAccesses;
2546 AccessAnalysis Accesses(TheLoop, AA, LI, DependentAccesses, *PSE,
2547 LoopAliasScopes);
2548
2549 // Holds the analyzed pointers. We don't want to call getUnderlyingObjects
2550 // multiple times on the same object. If the ptr is accessed twice, once
2551 // for read and once for write, it will only appear once (on the write
2552 // list). This is okay, since we are going to check for conflicts between
2553 // writes and between reads and writes, but not between reads and reads.
2555
2556 // Record uniform store addresses to identify if we have multiple stores
2557 // to the same address.
2558 SmallPtrSet<Value *, 16> UniformStores;
2559
2560 for (StoreInst *ST : Stores) {
2561 Value *Ptr = ST->getPointerOperand();
2562
2563 if (isInvariant(Ptr)) {
2564 // Record store instructions to loop invariant addresses
2565 StoresToInvariantAddresses.push_back(ST);
2566 HasStoreStoreDependenceInvolvingLoopInvariantAddress |=
2567 !UniformStores.insert(Ptr).second;
2568 }
2569
2570 // If we did *not* see this pointer before, insert it to the read-write
2571 // list. At this phase it is only a 'write' list.
2572 Type *AccessTy = getLoadStoreType(ST);
2573 if (Seen.insert({Ptr, AccessTy}).second) {
2574 ++NumReadWrites;
2575
2577 // The TBAA metadata could have a control dependency on the predication
2578 // condition, so we cannot rely on it when determining whether or not we
2579 // need runtime pointer checks.
2580 if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
2581 Loc.AATags.TBAA = nullptr;
2582
2583 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2584 [&Accesses, AccessTy, Loc](Value *Ptr) {
2585 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2586 Accesses.addStore(NewLoc, AccessTy);
2587 });
2588 }
2589 }
2590
2591 if (IsAnnotatedParallel) {
2592 LLVM_DEBUG(
2593 dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
2594 << "checks.\n");
2595 CanVecMem = true;
2596 return;
2597 }
2598
2599 for (LoadInst *LD : Loads) {
2600 Value *Ptr = LD->getPointerOperand();
2601 // If we did *not* see this pointer before, insert it to the
2602 // read list. If we *did* see it before, then it is already in
2603 // the read-write list. This allows us to vectorize expressions
2604 // such as A[i] += x; Because the address of A[i] is a read-write
2605 // pointer. This only works if the index of A[i] is consecutive.
2606 // If the address of i is unknown (for example A[B[i]]) then we may
2607 // read a few words, modify, and write a few words, and some of the
2608 // words may be written to the same address.
2609 bool IsReadOnlyPtr = false;
2610 Type *AccessTy = getLoadStoreType(LD);
2611 if (Seen.insert({Ptr, AccessTy}).second ||
2612 !getPtrStride(*PSE, LD->getType(), Ptr, TheLoop, SymbolicStrides).value_or(0)) {
2613 ++NumReads;
2614 IsReadOnlyPtr = true;
2615 }
2616
2617 // See if there is an unsafe dependency between a load from a uniform address
2618 // and a store to the same uniform address.
2619 if (UniformStores.count(Ptr)) {
2620 LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2621 "load and uniform store to the same address!\n");
2622 HasLoadStoreDependenceInvolvingLoopInvariantAddress = true;
2623 }
2624
2626 // The TBAA metadata could have a control dependency on the predication
2627 // condition, so we cannot rely on it when determining whether or not we
2628 // need runtime pointer checks.
2629 if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2630 Loc.AATags.TBAA = nullptr;
2631
2632 visitPointers(const_cast<Value *>(Loc.Ptr), *TheLoop,
2633 [&Accesses, AccessTy, Loc, IsReadOnlyPtr](Value *Ptr) {
2634 MemoryLocation NewLoc = Loc.getWithNewPtr(Ptr);
2635 Accesses.addLoad(NewLoc, AccessTy, IsReadOnlyPtr);
2636 });
2637 }
2638
2639 // If we write (or read-write) to a single destination and there are no
2640 // other reads in this loop then it is safe to vectorize.
2641 if (NumReadWrites == 1 && NumReads == 0) {
2642 LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2643 CanVecMem = true;
2644 return;
2645 }
2646
2647 // Build dependence sets and check whether we need a runtime pointer bounds
2648 // check.
2649 Accesses.buildDependenceSets();
2650
2651 // Find pointers with computable bounds. We are going to use this information
2652 // to place a runtime bound check.
2653 Value *UncomputablePtr = nullptr;
2654 bool CanDoRTIfNeeded =
2655 Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(), TheLoop,
2656 SymbolicStrides, UncomputablePtr, false);
2657 if (!CanDoRTIfNeeded) {
2658 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2659 recordAnalysis("CantIdentifyArrayBounds", I)
2660 << "cannot identify array bounds";
2661 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2662 << "the array bounds.\n");
2663 CanVecMem = false;
2664 return;
2665 }
2666
2667 LLVM_DEBUG(
2668 dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2669
2670 CanVecMem = true;
2671 if (Accesses.isDependencyCheckNeeded()) {
2672 LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2673 CanVecMem = DepChecker->areDepsSafe(
2674 DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides,
2675 Accesses.getUnderlyingObjects());
2676
2677 if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
2678 LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2679
2680 // Clear the dependency checks. We assume they are not needed.
2681 Accesses.resetDepChecks(*DepChecker);
2682
2683 PtrRtChecking->reset();
2684 PtrRtChecking->Need = true;
2685
2686 auto *SE = PSE->getSE();
2687 UncomputablePtr = nullptr;
2688 CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(
2689 *PtrRtChecking, SE, TheLoop, SymbolicStrides, UncomputablePtr, true);
2690
2691 // Check that we found the bounds for the pointer.
2692 if (!CanDoRTIfNeeded) {
2693 auto *I = dyn_cast_or_null<Instruction>(UncomputablePtr);
2694 recordAnalysis("CantCheckMemDepsAtRunTime", I)
2695 << "cannot check memory dependencies at runtime";
2696 LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2697 CanVecMem = false;
2698 return;
2699 }
2700
2701 CanVecMem = true;
2702 }
2703 }
2704
2705 if (HasConvergentOp) {
2706 recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2707 << "cannot add control dependency to convergent operation";
2708 LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2709 "would be needed with a convergent operation\n");
2710 CanVecMem = false;
2711 return;
2712 }
2713
2714 if (CanVecMem)
2715 LLVM_DEBUG(
2716 dbgs() << "LAA: No unsafe dependent memory operations in loop. We"
2717 << (PtrRtChecking->Need ? "" : " don't")
2718 << " need runtime memory checks.\n");
2719 else
2720 emitUnsafeDependenceRemark();
2721}
2722
2723void LoopAccessInfo::emitUnsafeDependenceRemark() {
2724 auto Deps = getDepChecker().getDependences();
2725 if (!Deps)
2726 return;
2727 auto Found = llvm::find_if(*Deps, [](const MemoryDepChecker::Dependence &D) {
2730 });
2731 if (Found == Deps->end())
2732 return;
2733 MemoryDepChecker::Dependence Dep = *Found;
2734
2735 LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2736
2737 // Emit remark for first unsafe dependence
2738 bool HasForcedDistribution = false;
2739 std::optional<const MDOperand *> Value =
2740 findStringMetadataForLoop(TheLoop, "llvm.loop.distribute.enable");
2741 if (Value) {
2742 const MDOperand *Op = *Value;
2743 assert(Op && mdconst::hasa<ConstantInt>(*Op) && "invalid metadata");
2744 HasForcedDistribution = mdconst::extract<ConstantInt>(*Op)->getZExtValue();
2745 }
2746
2747 const std::string Info =
2748 HasForcedDistribution
2749 ? "unsafe dependent memory operations in loop."
2750 : "unsafe dependent memory operations in loop. Use "
2751 "#pragma clang loop distribute(enable) to allow loop distribution "
2752 "to attempt to isolate the offending operations into a separate "
2753 "loop";
2755 recordAnalysis("UnsafeDep", Dep.getDestination(getDepChecker())) << Info;
2756
2757 switch (Dep.Type) {
2761 llvm_unreachable("Unexpected dependence");
2763 R << "\nBackward loop carried data dependence.";
2764 break;
2766 R << "\nForward loop carried data dependence that prevents "
2767 "store-to-load forwarding.";
2768 break;
2770 R << "\nBackward loop carried data dependence that prevents "
2771 "store-to-load forwarding.";
2772 break;
2774 R << "\nUnsafe indirect dependence.";
2775 break;
2777 R << "\nUnknown data dependence.";
2778 break;
2779 }
2780
2781 if (Instruction *I = Dep.getSource(getDepChecker())) {
2782 DebugLoc SourceLoc = I->getDebugLoc();
2783 if (auto *DD = dyn_cast_or_null<Instruction>(getPointerOperand(I)))
2784 SourceLoc = DD->getDebugLoc();
2785 if (SourceLoc)
2786 R << " Memory location is the same as accessed at "
2787 << ore::NV("Location", SourceLoc);
2788 }
2789}
2790
2792 DominatorTree *DT) {
2793 assert(TheLoop->contains(BB) && "Unknown block used");
2794
2795 // Blocks that do not dominate the latch need predication.
2796 BasicBlock* Latch = TheLoop->getLoopLatch();
2797 return !DT->dominates(BB, Latch);
2798}
2799
2800OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
2801 Instruction *I) {
2802 assert(!Report && "Multiple reports generated");
2803
2804 Value *CodeRegion = TheLoop->getHeader();
2805 DebugLoc DL = TheLoop->getStartLoc();
2806
2807 if (I) {
2808 CodeRegion = I->getParent();
2809 // If there is no debug location attached to the instruction, fall back to
2810 // using the loop's.
2811 if (I->getDebugLoc())
2812 DL = I->getDebugLoc();
2813 }
2814
2815 Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName, DL,
2816 CodeRegion);
2817 return *Report;
2818}
2819
2821 auto *SE = PSE->getSE();
2822 // TODO: Is this really what we want? Even without FP SCEV, we may want some
2823 // trivially loop-invariant FP values to be considered invariant.
2824 if (!SE->isSCEVable(V->getType()))
2825 return false;
2826 const SCEV *S = SE->getSCEV(V);
2827 return SE->isLoopInvariant(S, TheLoop);
2828}
2829
2830/// Find the operand of the GEP that should be checked for consecutive
2831/// stores. This ignores trailing indices that have no effect on the final
2832/// pointer.
2833static unsigned getGEPInductionOperand(const GetElementPtrInst *Gep) {
2834 const DataLayout &DL = Gep->getModule()->getDataLayout();
2835 unsigned LastOperand = Gep->getNumOperands() - 1;
2836 TypeSize GEPAllocSize = DL.getTypeAllocSize(Gep->getResultElementType());
2837
2838 // Walk backwards and try to peel off zeros.
2839 while (LastOperand > 1 && match(Gep->getOperand(LastOperand), m_Zero())) {
2840 // Find the type we're currently indexing into.
2841 gep_type_iterator GEPTI = gep_type_begin(Gep);
2842 std::advance(GEPTI, LastOperand - 2);
2843
2844 // If it's a type with the same allocation size as the result of the GEP we
2845 // can peel off the zero index.
2846 TypeSize ElemSize = GEPTI.isStruct()
2847 ? DL.getTypeAllocSize(GEPTI.getIndexedType())
2849 if (ElemSize != GEPAllocSize)
2850 break;
2851 --LastOperand;
2852 }
2853
2854 return LastOperand;
2855}
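// Simple illustration for getGEPInductionOperand (hypothetical IR): for
//   %p = getelementptr i32, ptr %A, i64 %i
// there is no trailing zero index to peel off, so the returned operand
// index is 1, i.e. the %i operand.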
2856
2857/// If the argument is a GEP, then returns the operand identified by
2858/// getGEPInductionOperand. However, if there is some other non-loop-invariant
2859/// operand, it returns that instead.
2861 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2862 if (!GEP)
2863 return Ptr;
2864
2865 unsigned InductionOperand = getGEPInductionOperand(GEP);
2866
2867 // Check that all of the gep indices are uniform except for our induction
2868 // operand.
2869 for (unsigned i = 0, e = GEP->getNumOperands(); i != e; ++i)
2870 if (i != InductionOperand &&
2871 !SE->isLoopInvariant(SE->getSCEV(GEP->getOperand(i)), Lp))
2872 return Ptr;
2873 return GEP->getOperand(InductionOperand);
2874}
2875
2876/// If a value has only one user that is a CastInst, return it.
2878 Value *UniqueCast = nullptr;
2879 for (User *U : Ptr->users()) {
2880 CastInst *CI = dyn_cast<CastInst>(U);
2881 if (CI && CI->getType() == Ty) {
2882 if (!UniqueCast)
2883 UniqueCast = CI;
2884 else
2885 return nullptr;
2886 }
2887 }
2888 return UniqueCast;
2889}
2890
2891/// Get the stride of a pointer access in a loop. Looks for symbolic
2892/// strides "a[i*stride]". Returns the symbolic stride, or null otherwise.
2894 auto *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2895 if (!PtrTy || PtrTy->isAggregateType())
2896 return nullptr;
2897
2898 // Try to remove a gep instruction to make the pointer (actually the index at
2899 // this point) easier to analyze. If OrigPtr is equal to Ptr we are analyzing
2900 // the pointer, otherwise we are analyzing the index.
2901 Value *OrigPtr = Ptr;
2902
2903 // The size of the pointer access.
2904 int64_t PtrAccessSize = 1;
2905
2906 Ptr = stripGetElementPtr(Ptr, SE, Lp);
2907 const SCEV *V = SE->getSCEV(Ptr);
2908
2909 if (Ptr != OrigPtr)
2910 // Strip off casts.
2911 while (const SCEVIntegralCastExpr *C = dyn_cast<SCEVIntegralCastExpr>(V))
2912 V = C->getOperand();
2913
2914 const SCEVAddRecExpr *S = dyn_cast<SCEVAddRecExpr>(V);
2915 if (!S)
2916 return nullptr;
2917
2918 // If the pointer is invariant then there is no stride and it makes no
2919 // sense to add it here.
2920 if (Lp != S->getLoop())
2921 return nullptr;
2922
2923 V = S->getStepRecurrence(*SE);
2924 if (!V)
2925 return nullptr;
2926
2927 // Strip off the multiplication by the access size if we are still analyzing
2928 // the pointer.
2929 if (OrigPtr == Ptr) {
2930 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(V)) {
2931 if (M->getOperand(0)->getSCEVType() != scConstant)
2932 return nullptr;
2933
2934 const APInt &APStepVal = cast<SCEVConstant>(M->getOperand(0))->getAPInt();
2935
2936 // Huge step value - give up.
2937 if (APStepVal.getBitWidth() > 64)
2938 return nullptr;
2939
2940 int64_t StepVal = APStepVal.getSExtValue();
2941 if (PtrAccessSize != StepVal)
2942 return nullptr;
2943 V = M->getOperand(1);
2944 }
2945 }
2946
2947 // Note that the restrictions after this loop-invariant check are only
2948 // profitability restrictions.
2949 if (!SE->isLoopInvariant(V, Lp))
2950 return nullptr;
2951
2952 // Look for the loop invariant symbolic value.
2953 const SCEVUnknown *U = dyn_cast<SCEVUnknown>(V);
2954 if (!U) {
2955 const auto *C = dyn_cast<SCEVIntegralCastExpr>(V);
2956 if (!C)
2957 return nullptr;
2958 U = dyn_cast<SCEVUnknown>(C->getOperand());
2959 if (!U)
2960 return nullptr;
2961
2962 // Match legacy behavior - this is not needed for correctness
2963 if (!getUniqueCastUse(U->getValue(), Lp, V->getType()))
2964 return nullptr;
2965 }
2966
2967 return V;
2968}
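// Rough usage sketch for getStrideFromPointer (hypothetical IR): for
//   %mul = mul i64 %i, %s
//   %gep = getelementptr i32, ptr %A, i64 %mul
// stripGetElementPtr reduces the pointer to the index %mul, whose SCEV is the
// add recurrence {0,+,%s}; the step %s is loop-invariant, so the returned
// symbolic stride is the SCEVUnknown for %s.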
2969
2970void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2971 Value *Ptr = getLoadStorePointerOperand(MemAccess);
2972 if (!Ptr)
2973 return;
2974
2975 // Note: getStrideFromPointer is a *profitability* heuristic. We
2976 // could broaden the scope of values returned here - to anything
2977 // which happens to be loop invariant and contributes to the
2978 // computation of an interesting IV - but we chose not to as we
2979 // don't have a cost model here, and broadening the scope exposes
2980 // far too many unprofitable cases.
2981 const SCEV *StrideExpr = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2982 if (!StrideExpr)
2983 return;
2984
2985 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2986 "versioning:");
2987 LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *StrideExpr << "\n");
2988
2989 if (!SpeculateUnitStride) {
2990 LLVM_DEBUG(dbgs() << " Chose not to due to -laa-speculate-unit-stride\n");
2991 return;
2992 }
2993
2994 // Avoid adding the "Stride == 1" predicate when we know that
2995 // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2996 // or zero iteration loop, as Trip-Count <= Stride == 1.
2997 //
2998 // TODO: We are currently not making a very informed decision on when it is
2999 // beneficial to apply stride versioning. It might make more sense that the
3000 // users of this analysis (such as the vectorizer) will trigger it, based on
3001 // their specific cost considerations; For example, in cases where stride
3002 // versioning does not help resolving memory accesses/dependences, the
3003 // vectorizer should evaluate the cost of the runtime test, and the benefit
3004 // of various possible stride specializations, considering the alternatives
3005 // of using gather/scatters (if available).
3006
3007 const SCEV *BETakenCount = PSE->getBackedgeTakenCount();
3008
3009 // Match the types so we can compare the stride and the BETakenCount.
3010 // The Stride can be positive/negative, so we sign extend Stride;
3011 // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
3012 const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
3013 uint64_t StrideTypeSizeBits = DL.getTypeSizeInBits(StrideExpr->getType());
3014 uint64_t BETypeSizeBits = DL.getTypeSizeInBits(BETakenCount->getType());
3015 const SCEV *CastedStride = StrideExpr;
3016 const SCEV *CastedBECount = BETakenCount;
3017 ScalarEvolution *SE = PSE->getSE();
3018 if (BETypeSizeBits >= StrideTypeSizeBits)
3019 CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
3020 else
3021 CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
3022 const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
3023 // Since TripCount == BackEdgeTakenCount + 1, checking:
3024 // "Stride >= TripCount" is equivalent to checking:
3025 // Stride - BETakenCount > 0
3026 if (SE->isKnownPositive(StrideMinusBETaken)) {
3027 LLVM_DEBUG(
3028 dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
3029 "Stride==1 predicate will imply that the loop executes "
3030 "at most once.\n");
3031 return;
3032 }
3033 LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.\n");
3034
3035 // Strip back off the integer cast, and check that our result is a
3036 // SCEVUnknown as we expect.
3037 const SCEV *StrideBase = StrideExpr;
3038 if (const auto *C = dyn_cast<SCEVIntegralCastExpr>(StrideBase))
3039 StrideBase = C->getOperand();
3040 SymbolicStrides[Ptr] = cast<SCEVUnknown>(StrideBase);
3041}
3042
3044 const TargetTransformInfo *TTI,
3045 const TargetLibraryInfo *TLI, AAResults *AA,
3046 DominatorTree *DT, LoopInfo *LI)
3047 : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
3048 PtrRtChecking(nullptr), TheLoop(L) {
3049 unsigned MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3050 if (TTI) {
3051 TypeSize FixedWidth =
3053 if (FixedWidth.isNonZero()) {
3054 // Scale the vector width by 2 as a rough estimate to also consider
3055 // interleaving.
3056 MaxTargetVectorWidthInBits = FixedWidth.getFixedValue() * 2;
3057 }
3058
3059 TypeSize ScalableWidth =
3061 if (ScalableWidth.isNonZero())
3062 MaxTargetVectorWidthInBits = std::numeric_limits<unsigned>::max();
3063 }
3064 DepChecker =
3065 std::make_unique<MemoryDepChecker>(*PSE, L, MaxTargetVectorWidthInBits);
3066 PtrRtChecking = std::make_unique<RuntimePointerChecking>(*DepChecker, SE);
3067 if (canAnalyzeLoop()) {
3068 analyzeLoop(AA, LI, TLI, DT);
3069 }
3070}
3071
3073 if (CanVecMem) {
3074 OS.indent(Depth) << "Memory dependences are safe";
3075 const MemoryDepChecker &DC = getDepChecker();
3076 if (!DC.isSafeForAnyVectorWidth())
3077 OS << " with a maximum safe vector width of "
3078 << DC.getMaxSafeVectorWidthInBits() << " bits";
3079 if (PtrRtChecking->Need)
3080 OS << " with run-time checks";
3081 OS << "\n";
3082 }
3083
3084 if (HasConvergentOp)
3085 OS.indent(Depth) << "Has convergent operation in loop\n";
3086
3087 if (Report)
3088 OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
3089
3090 if (auto *Dependences = DepChecker->getDependences()) {
3091 OS.indent(Depth) << "Dependences:\n";
3092 for (const auto &Dep : *Dependences) {
3093 Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
3094 OS << "\n";
3095 }
3096 } else
3097 OS.indent(Depth) << "Too many dependences, not recorded\n";
3098
3099 // List the pairs of accesses that need run-time checks to prove independence.
3100 PtrRtChecking->print(OS, Depth);
3101 OS << "\n";
3102
3103 OS.indent(Depth)
3104 << "Non vectorizable stores to invariant address were "
3105 << (HasStoreStoreDependenceInvolvingLoopInvariantAddress ||
3106 HasLoadStoreDependenceInvolvingLoopInvariantAddress
3107 ? ""
3108 : "not ")
3109 << "found in loop.\n";
3110
3111 OS.indent(Depth) << "SCEV assumptions:\n";
3112 PSE->getPredicate().print(OS, Depth);
3113
3114 OS << "\n";
3115
3116 OS.indent(Depth) << "Expressions re-written:\n";
3117 PSE->print(OS, Depth);
3118}
3119
3121 auto I = LoopAccessInfoMap.insert({&L, nullptr});
3122
3123 if (I.second)
3124 I.first->second =
3125 std::make_unique<LoopAccessInfo>(&L, &SE, TTI, TLI, &AA, &DT, &LI);
3126
3127 return *I.first->second;
3128}
3129
3131 Function &F, const PreservedAnalyses &PA,
3133 // Check whether our analysis is preserved.
3134 auto PAC = PA.getChecker<LoopAccessAnalysis>();
3135 if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
3136 // If not, give up now.
3137 return true;
3138
3139 // Check whether the analyses we depend on became invalid for any reason.
3140 // Skip checking TargetLibraryAnalysis as it is immutable and can't become
3141 // invalid.
3142 return Inv.invalidate<AAManager>(F, PA) ||
3144 Inv.invalidate<LoopAnalysis>(F, PA) ||
3146}
3147
3151 auto &AA = FAM.getResult<AAManager>(F);
3152 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
3153 auto &LI = FAM.getResult<LoopAnalysis>(F);
3155 auto &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
3156 return LoopAccessInfoManager(SE, AA, DT, LI, &TTI, &TLI);
3157}
3158
3159AnalysisKey LoopAccessAnalysis::Key;
static void visitPointers(Value *StartPtr, const Loop &InnermostLoop, function_ref< void(Value *)> AddPointer)
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR, PredicatedScalarEvolution &PSE, const Loop *L)
Return true if an AddRec pointer Ptr is unsigned non-wrapping, i.e.
static Value * stripGetElementPtr(Value *Ptr, ScalarEvolution *SE, Loop *Lp)
If the argument is a GEP, then returns the operand identified by getGEPInductionOperand.
static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride, uint64_t TypeByteSize)
Check the dependence for two accesses with the same stride Stride.
static const SCEV * getMinFromExprs(const SCEV *I, const SCEV *J, ScalarEvolution *SE)
Compare I and J and return the minimum.
static cl::opt< unsigned > MaxForkedSCEVDepth("max-forked-scev-depth", cl::Hidden, cl::desc("Maximum recursion depth when finding forked SCEVs (default = 5)"), cl::init(5))
static cl::opt< bool > SpeculateUnitStride("laa-speculate-unit-stride", cl::Hidden, cl::desc("Speculate that non-constant strides are unit in LAA"), cl::init(true))
static SmallVector< PointerIntPair< const SCEV *, 1, bool > > findForkedPointer(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &StridesMap, Value *Ptr, const Loop *L)
static cl::opt< bool > EnableMemAccessVersioning("enable-mem-access-versioning", cl::init(true), cl::Hidden, cl::desc("Enable symbolic stride memory access versioning"))
This enables versioning on the strides of symbolically striding memory accesses in code like the foll...
This header provides classes for managing per-loop analyses.
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
This file provides utility analysis objects describing memory locations.
uint64_t High
#define P(N)
FunctionAnalysisManager FAM
This header defines various interfaces for pass management in LLVM.
This file defines the PointerIntPair class.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some templates that are useful if you are working with the STL at all.
raw_pwrite_stream & OS
static LLVM_ATTRIBUTE_ALWAYS_INLINE bool CheckType(MVT::SimpleValueType VT, SDValue N, const TargetLowering *TLI, const DataLayout &DL)
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This pass exposes codegen information to IR-level passes.
static const X86InstrFMA3Group Groups[]
A manager for alias analyses.
Class for arbitrary precision integers.
Definition: APInt.h:76
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition: APInt.h:1439
APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition: APInt.cpp:1010
int64_t getSExtValue() const
Get sign extended value.
Definition: APInt.h:1513
This templated class represents "all analyses that operate over <a particular IR unit>" (e....
Definition: Analysis.h:47
API to communicate dependencies between analyses during invalidation.
Definition: PassManager.h:360
bool invalidate(IRUnitT &IR, const PreservedAnalyses &PA)
Trigger the invalidation of some other analysis pass if not already handled and return whether it was...
Definition: PassManager.h:378
A container for analyses that lazily runs them and caches their results.
Definition: PassManager.h:321
PassT::Result & getResult(IRUnitT &IR, ExtraArgTs... ExtraArgs)
Get the result of an analysis pass for a given IR unit.
Definition: PassManager.h:473
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
ArrayRef< T > drop_front(size_t N=1) const
Drop the first N elements of the array.
Definition: ArrayRef.h:204
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
LLVM Basic Block Representation.
Definition: BasicBlock.h:60
const Function * getParent() const
Return the enclosing method, or null if none.
Definition: BasicBlock.h:206
const Module * getModule() const
Return the module owning the function this basic block belongs to, or nullptr if the function does no...
Definition: BasicBlock.cpp:289
This class is a wrapper over an AAResults, and it is intended to be used only when there are no IR ch...
This is the base class for all instructions that perform data casts.
Definition: InstrTypes.h:601
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
A debug info location.
Definition: DebugLoc.h:33
iterator find(const_arg_type_t< KeyT > Val)
Definition: DenseMap.h:155
iterator end()
Definition: DenseMap.h:84
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
Definition: DenseMap.h:220
Analysis pass which computes a DominatorTree.
Definition: Dominators.h:279
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition: Dominators.h:162
bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
Definition: Dominators.cpp:122
EquivalenceClasses - This represents a collection of equivalence classes and supports three efficient...
iterator findValue(const ElemTy &V) const
findValue - Return an iterator to the specified value.
iterator insert(const ElemTy &Data)
insert - Insert a new value into the union/find set, ignoring the request if the value already exists...
member_iterator member_end() const
typename std::set< ECValue, ECValueComparator >::const_iterator iterator
iterator* - Provides a way to iterate over all values in the set.
member_iterator member_begin(iterator I) const
member_iterator unionSets(const ElemTy &V1, const ElemTy &V2)
union - Merge the two equivalence sets for the specified values, inserting them if they do not alread...
const ElemTy & getLeaderValue(const ElemTy &V) const
getLeaderValue - Return the leader for the specified value that is in the set.
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Definition: Function.h:685
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
Definition: Instructions.h:973
Type * getResultElementType() const
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
const Module * getModule() const
Return the module owning the function this instruction belongs to or nullptr it the function does not...
Definition: Instruction.cpp:83
Class to represent integer types.
Definition: DerivedTypes.h:40
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
An instruction for reading from memory.
Definition: Instructions.h:184
Value * getPointerOperand()
Definition: Instructions.h:280
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
This analysis provides dependence information for the memory accesses of a loop.
Result run(Function &F, FunctionAnalysisManager &AM)
bool invalidate(Function &F, const PreservedAnalyses &PA, FunctionAnalysisManager::Invalidator &Inv)
const LoopAccessInfo & getInfo(Loop &L)
Drive the analysis of memory accesses in the loop.
const MemoryDepChecker & getDepChecker() const
the Memory Dependence Checker which can determine the loop-independent and loop-carried dependences b...
bool isInvariant(Value *V) const
Returns true if value V is loop invariant.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the information about the memory accesses in the loop.
static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop, DominatorTree *DT)
Return true if the block BB needs to be predicated in order for the loop to be vectorized.
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetTransformInfo *TTI, const TargetLibraryInfo *TLI, AAResults *AA, DominatorTree *DT, LoopInfo *LI)
Analysis pass that exposes the LoopInfo for a function.
Definition: LoopInfo.h:566
bool contains(const LoopT *L) const
Return true if the specified loop is contained within in this loop.
BlockT * getLoopLatch() const
If there is a single latch block for this loop, return it.
bool isInnermost() const
Return true if the loop does not contain any (natural) loops.
unsigned getNumBackEdges() const
Calculate the number of back edges to the loop header.
BlockT * getHeader() const
LoopT * getParentLoop() const
Return the parent loop if it exists or nullptr for top level loops.
Wrapper class to LoopBlocksDFS that provides a standard begin()/end() interface for the DFS reverse p...
Definition: LoopIterator.h:172
Represents a single loop in the control flow graph.
Definition: LoopInfo.h:44
bool isAnnotatedParallel() const
Returns true if the loop is annotated parallel.
Definition: LoopInfo.cpp:564
DebugLoc getStartLoc() const
Return the debug location of the start of this loop.
Definition: LoopInfo.cpp:631
Metadata node.
Definition: Metadata.h:1067
ArrayRef< MDOperand > operands() const
Definition: Metadata.h:1426
Tracking metadata reference owned by Metadata.
Definition: Metadata.h:889
This class implements a map that also provides access to all stored values in a deterministic order.
Definition: MapVector.h:36
Checks memory dependences among accesses to the same underlying object to determine whether there vec...
ArrayRef< unsigned > getOrderForAccess(Value *Ptr, bool IsWrite) const
Return the program order indices for the access location (Ptr, IsWrite).
bool isSafeForAnyVectorWidth() const
Return true if the number of elements that are safe to operate on simultaneously is not bounded.
bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps, const DenseMap< Value *, const SCEV * > &Strides, const DenseMap< Value *, SmallVector< const Value *, 16 > > &UnderlyingObjects)
Check whether the dependencies between the accesses are safe.
const SmallVectorImpl< Instruction * > & getMemoryInstructions() const
The vector of memory access instructions.
const Loop * getInnermostLoop() const
uint64_t getMaxSafeVectorWidthInBits() const
Return the number of elements that are safe to operate on simultaneously, multiplied by the size of t...
bool isSafeForVectorization() const
No memory dependence was encountered that would inhibit vectorization.
const SmallVectorImpl< Dependence > * getDependences() const
Returns the memory dependences.
SmallVector< Instruction *, 4 > getInstructionsForAccess(Value *Ptr, bool isWrite) const
Find the set of instructions that read or write via Ptr.
VectorizationSafetyStatus
Type to keep track of the status of the dependence check.
bool shouldRetryWithRuntimeCheck() const
In same cases when the dependency check fails we can still vectorize the loop with a dynamic array ac...
void addAccess(StoreInst *SI)
Register the location (instructions are given increasing numbers) of a write access.
PointerIntPair< Value *, 1, bool > MemAccessInfo
Representation for a specific memory location.
static MemoryLocation get(const LoadInst *LI)
Return a location with information about the memory reference by the given instruction.
LocationSize Size
The maximum size of the location, in address-units, or UnknownSize if the size is not known.
AAMDNodes AATags
The metadata nodes which describes the aliasing of the location (each member is null if that kind of ...
const Value * Ptr
The address of the start of the location.
Root of the metadata hierarchy.
Definition: Metadata.h:62
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
Definition: Module.h:293
Diagnostic information for optimization analysis remarks.
An interface layer with SCEV used to manage how we see SCEV expressions for values in the context of ...
void addPredicate(const SCEVPredicate &Pred)
Adds a new predicate.
ScalarEvolution * getSE() const
Returns the ScalarEvolution analysis used.
const SCEVPredicate & getPredicate() const
bool hasNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Returns true if we've proved that V doesn't wrap by means of a SCEV predicate.
void setNoOverflow(Value *V, SCEVWrapPredicate::IncrementWrapFlags Flags)
Proves that V doesn't overflow by adding SCEV predicate.
void print(raw_ostream &OS, unsigned Depth) const
Print the SCEV mappings done by the Predicated Scalar Evolution.
const SCEVAddRecExpr * getAsAddRec(Value *V)
Attempts to produce an AddRecExpr for V by adding additional SCEV predicates.
const SCEV * getBackedgeTakenCount()
Get the (predicated) backedge count for the analyzed loop.
const SCEV * getSCEV(Value *V)
Returns the SCEV expression of V, in the context of the current SCEV predicate.
A set of analyses that are preserved following a run of a transformation pass.
Definition: Analysis.h:109
PreservedAnalysisChecker getChecker() const
Build a checker for this PreservedAnalyses and the specified analysis type.
Definition: Analysis.h:264
Holds information about the memory runtime legality checks to verify that a group of pointers do not ...
bool Need
This flag indicates if we need to add the runtime check.
void reset()
Reset the state of the pointer runtime information.
unsigned getNumberOfChecks() const
Returns the number of run-time checks required according to needsChecking.
void printChecks(raw_ostream &OS, const SmallVectorImpl< RuntimePointerCheck > &Checks, unsigned Depth=0) const
Print Checks.
bool needsChecking(const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const
Decide if we need to add a check between two groups of pointers, according to needsChecking.
void print(raw_ostream &OS, unsigned Depth=0) const
Print the list run-time memory checks necessary.
SmallVector< RuntimeCheckingPtrGroup, 2 > CheckingGroups
Holds a partitioning of pointers into "check groups".
void generateChecks(MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies)
Generate the checks and store it.
static bool arePointersInSamePartition(const SmallVectorImpl< int > &PtrToPartition, unsigned PtrIdx1, unsigned PtrIdx2)
Check if pointers are in the same partition.
SmallVector< PointerInfo, 2 > Pointers
Information about the pointers that may require checking.
void insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr, Type *AccessTy, bool WritePtr, unsigned DepSetId, unsigned ASId, PredicatedScalarEvolution &PSE, bool NeedsFreeze)
Insert a pointer and calculate the start and end SCEVs.
This node represents a polynomial recurrence on the trip count of the specified loop.
const SCEV * evaluateAtIteration(const SCEV *It, ScalarEvolution &SE) const
Return the value of this chain of recurrences at the specified iteration number.
const SCEV * getStepRecurrence(ScalarEvolution &SE) const
Constructs and returns the recurrence indicating how much this expression steps by.
bool isAffine() const
Return true if this represents an expression A + B*x where A and B are loop invariant values.
This class represents a constant integer value.
This is the base class for unary integral cast operator classes.
This node represents multiplication of some number of SCEVs.
NoWrapFlags getNoWrapFlags(NoWrapFlags Mask=NoWrapMask) const
virtual void print(raw_ostream &OS, unsigned Depth=0) const =0
Prints a textual representation of this predicate with an indentation of Depth.
This means that we are dealing with an entirely unknown SCEV value, and only represent it as its LLVM...
This class represents an analyzed expression in the program.
Type * getType() const
Return the LLVM type of this SCEV expression.
Analysis pass that exposes the ScalarEvolution for a function.
The main scalar evolution driver.
bool isKnownNonNegative(const SCEV *S)
Test if the given expression is known to be non-negative.
const SCEV * getNegativeSCEV(const SCEV *V, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap)
Return the SCEV object corresponding to -V.
bool isKnownNonPositive(const SCEV *S)
Test if the given expression is known to be non-positive.
const SCEV * getUMaxExpr(const SCEV *LHS, const SCEV *RHS)
const SCEVPredicate * getEqualPredicate(const SCEV *LHS, const SCEV *RHS)
const SCEV * getConstant(ConstantInt *V)
const SCEV * getSCEV(Value *V)
Return a SCEV expression for the full generality of the specified expression.
const SCEV * getNoopOrSignExtend(const SCEV *V, Type *Ty)
Return a SCEV corresponding to a conversion of the input value to the specified type.
const SCEV * getOne(Type *Ty)
Return a SCEV for the constant 1 of a specific type.
const SCEV * getPtrToIntExpr(const SCEV *Op, Type *Ty)
bool isLoopInvariant(const SCEV *S, const Loop *L)
Return true if the value of the given SCEV is unchanging in the specified loop.
bool isKnownPositive(const SCEV *S)
Test if the given expression is known to be positive.
const SCEV * getZeroExtendExpr(const SCEV *Op, Type *Ty, unsigned Depth=0)
bool isSCEVable(Type *Ty) const
Test if values of the given type are analyzable within the SCEV framework.
Type * getEffectiveSCEVType(Type *Ty) const
Return a type with the same bitwidth as the given type and which represents how SCEV will treat the g...
const SCEV * getUMinExpr(const SCEV *LHS, const SCEV *RHS, bool Sequential=false)
APInt getSignedRangeMin(const SCEV *S)
Determine the min of the signed range for a particular SCEV.
const SCEV * getStoreSizeOfExpr(Type *IntTy, Type *StoreTy)
Return an expression for the store size of StoreTy that is type IntTy.
const SCEV * getMinusSCEV(const SCEV *LHS, const SCEV *RHS, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Return LHS-RHS.
const SCEV * applyLoopGuards(const SCEV *Expr, const Loop *L)
Try to apply information from loop guards for L to Expr.
const SCEV * getMulExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical multiply expression, or something simpler if possible.
const SCEV * getSizeOfExpr(Type *IntTy, TypeSize Size)
Return an expression for a TypeSize.
const SCEV * getAddExpr(SmallVectorImpl< const SCEV * > &Ops, SCEV::NoWrapFlags Flags=SCEV::FlagAnyWrap, unsigned Depth=0)
Get a canonical add expression, or something simpler if possible.
const SCEV * getTruncateOrSignExtend(const SCEV *V, Type *Ty, unsigned Depth=0)
Return a SCEV corresponding to a conversion of the input value to the specified type.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
Definition: SmallPtrSet.h:321
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
Definition: SmallPtrSet.h:360
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
Definition: SmallPtrSet.h:342
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
Definition: SmallPtrSet.h:427
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:135
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:166
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:179
bool empty() const
Definition: SmallVector.h:94
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
reference emplace_back(ArgTypes &&... Args)
Definition: SmallVector.h:950
void resize(size_type N)
Definition: SmallVector.h:651
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
An instruction for storing to memory.
Definition: Instructions.h:317
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
Analysis pass providing the TargetTransformInfo.
Analysis pass providing the TargetLibraryInfo.
Provides information about what library functions are available for the current target.
This pass provides access to the codegen interfaces that are needed for IR-level transformations.
TypeSize getRegisterBitWidth(RegisterKind K) const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition: Type.h:265
bool isPointerTy() const
True if this is an instance of PointerType.
Definition: Type.h:255
unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
A Use represents the edge between a Value definition and its users.
Definition: Use.h:43
Value * getOperand(unsigned i) const
Definition: User.h:169
unsigned getNumOperands() const
Definition: User.h:191
static SmallVector< VFInfo, 8 > getMappings(const CallInst &CI)
Retrieve all the VFInfo instances associated to the CallInst CI.
Definition: VectorUtils.h:70
LLVM Value Representation.
Definition: Value.h:74
Type * getType() const
All values are typed, get the type of this value.
Definition: Value.h:255
const Value * stripAndAccumulateInBoundsConstantOffsets(const DataLayout &DL, APInt &Offset) const
This is a wrapper around stripAndAccumulateConstantOffsets with the in-bounds requirement set to fals...
Definition: Value.h:736
StringRef getName() const
Return a constant reference to the value's name.
Definition: Value.cpp:309
constexpr ScalarTy getFixedValue() const
Definition: TypeSize.h:199
constexpr bool isNonZero() const
Definition: TypeSize.h:158
An efficient, type-erasing, non-owning reference to a callable.
TypeSize getSequentialElementStride(const DataLayout &DL) const
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:52
raw_ostream & indent(unsigned NumSpaces)
indent - Insert 'NumSpaces' spaces.
friend const_iterator end(StringRef path)
Get end iterator over path.
Definition: Path.cpp:236
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
bool match(Val *V, const Pattern &P)
Definition: PatternMatch.h:49
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
Definition: PatternMatch.h:612
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:450
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:470
DiagnosticInfoOptimizationBase::Argument NV
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
std::optional< int > getPointersDiff(Type *ElemTyA, Value *PtrA, Type *ElemTyB, Value *PtrB, const DataLayout &DL, ScalarEvolution &SE, bool StrictCheck=false, bool CheckType=true)
Returns the distance between the pointers PtrA and PtrB iff they are compatible and it is possible to...
@ Low
Lower the current thread's priority such that it does not affect foreground tasks significantly.
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
Intrinsic::ID getVectorIntrinsicIDForCall(const CallInst *CI, const TargetLibraryInfo *TLI)
Returns intrinsic ID for call.
unsigned getPointerAddressSpace(const Type *T)
Definition: SPIRVUtils.h:122
std::optional< const MDOperand * > findStringMetadataForLoop(const Loop *TheLoop, StringRef Name)
Find string metadata for loop.
Definition: LoopInfo.cpp:1053
const Value * getLoadStorePointerOperand(const Value *V)
A helper function that returns the pointer operand of a load or store instruction.
AddressSpace
Definition: NVPTXBaseInfo.h:21
const Value * getPointerOperand(const Value *V)
A helper function that returns the pointer operand of a load, store or GEP instruction.
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition: STLExtras.h:1928
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1729
bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an ...
Definition: Function.cpp:2060
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
bool isPointerTy(const Type *T)
Definition: SPIRVUtils.h:116
std::optional< int64_t > getPtrStride(PredicatedScalarEvolution &PSE, Type *AccessTy, Value *Ptr, const Loop *Lp, const DenseMap< Value *, const SCEV * > &StridesMap=DenseMap< Value *, const SCEV * >(), bool Assume=false, bool ShouldCheckWrap=true)
If the pointer has a constant stride return it in units of the access type size.
bool sortPtrAccesses(ArrayRef< Value * > VL, Type *ElemTy, const DataLayout &DL, ScalarEvolution &SE, SmallVectorImpl< unsigned > &SortedIndices)
Attempt to sort the pointers in VL and return the sorted indices in SortedIndices,...
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, LoopInfo *LI=nullptr, unsigned MaxLookup=6)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
const SCEV * replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE, const DenseMap< Value *, const SCEV * > &PtrToStride, Value *Ptr)
Return the SCEV corresponding to a pointer with the symbolic stride replaced with constant one,...
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL, ScalarEvolution &SE, bool CheckType=true)
Returns true if the memory operations A and B are consecutive.
bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
OutputIt copy(R &&Range, OutputIt Out)
Definition: STLExtras.h:1824
auto find_if(R &&Range, UnaryPredicate P)
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1749
gep_type_iterator gep_type_begin(const User *GEP)
Type * getLoadStoreType(Value *I)
A helper function that returns the type of a load or store instruction.
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:858
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition: BitVector.h:860
#define N
MDNode * Scope
The tag for alias scope specification (used with noalias).
Definition: Metadata.h:783
MDNode * TBAA
The tag for type-based alias analysis.
Definition: Metadata.h:777
MDNode * NoAlias
The tag specifying the noalias scope.
Definition: Metadata.h:786
A special type used by analysis passes to provide an address that identifies that particular analysis...
Definition: Analysis.h:26
Dependece between memory access instructions.
Instruction * getDestination(const MemoryDepChecker &DepChecker) const
Return the destination instruction of the dependence.
DepType Type
The type of the dependence.
bool isPossiblyBackward() const
May be a lexically backward dependence type (includes Unknown).
Instruction * getSource(const MemoryDepChecker &DepChecker) const
Return the source instruction of the dependence.
bool isForward() const
Lexically forward dependence.
bool isBackward() const
Lexically backward dependence.
void print(raw_ostream &OS, unsigned Depth, const SmallVectorImpl< Instruction * > &Instrs) const
Print the dependence.
DepType
The type of the dependence.
static const char * DepName[]
String version of the types.
static VectorizationSafetyStatus isSafeForVectorization(DepType Type)
Dependence types that don't prevent vectorization.
unsigned AddressSpace
Address space of the involved pointers.
bool addPointer(unsigned Index, RuntimePointerChecking &RtCheck)
Tries to add the pointer recorded in RtCheck at index Index to this pointer checking group.
bool NeedsFreeze
Whether the pointer needs to be frozen after expansion, e.g.
const SCEV * High
The SCEV expression which represents the upper bound of all the pointers in this group.
SmallVector< unsigned, 2 > Members
Indices of all the pointers that constitute this grouping.
RuntimeCheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
Create a new pointer checking group containing a single pointer, with index Index in RtCheck.
const SCEV * Low
The SCEV expression which represents the lower bound of all the pointers in this group.
bool IsWritePtr
Holds the information if this pointer is used for writing to memory.
unsigned DependencySetId
Holds the id of the set of pointers that could be dependent because of a shared underlying object.
unsigned AliasSetId
Holds the id of the disjoint alias set to which this pointer belongs.
static const unsigned MaxVectorWidth
Maximum SIMD width.
static unsigned VectorizationFactor
VF as overridden by the user.
static unsigned RuntimeMemoryCheckThreshold
\When performing memory disambiguation checks at runtime do not make more than this number of compari...
static bool isInterleaveForced()
True if force-vector-interleave was specified by the user.
static unsigned VectorizationInterleave
Interleave factor as overridden by the user.
Function object to check whether the first component of a container supported by std::get (like std::...
Definition: STLExtras.h:1450