llvm.org GIT mirror llvm / commit 60eee05

[EarlyCSE] Fold invariant loads

Redundant invariant loads can be CSE'ed with very little extra effort over
what early-cse already tracks, so it looks reasonable to make early-cse
handle this case.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@272954 91177308-0d34-0410-b5e6-96231b3b80d8

Author: Sanjoy Das (3 years ago)

2 changed files with 121 additions and 8 deletions.
     unsigned Generation;
     int MatchingId;
     bool IsAtomic;
+    bool IsInvariant;
     LoadValue()
-        : DefInst(nullptr), Generation(0), MatchingId(-1), IsAtomic(false) {}
+        : DefInst(nullptr), Generation(0), MatchingId(-1), IsAtomic(false),
+          IsInvariant(false) {}
     LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
-              bool IsAtomic)
-        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
-          IsAtomic(IsAtomic) {}
+              bool IsAtomic, bool IsInvariant)
+        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
+          IsAtomic(IsAtomic), IsInvariant(IsInvariant) {}
   };
   typedef RecyclingAllocator<BumpPtrAllocator,
                              ScopedHashTableVal<Value *, LoadValue>>
     return true;
   }
 
+  bool isInvariantLoad() const {
+    if (auto *LI = dyn_cast<LoadInst>(Inst))
+      return LI->getMetadata(LLVMContext::MD_invariant_load);
+    return false;
+  }
 
   bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
     return (getPointerOperand() == Inst.getPointerOperand() &&
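For context on what the new isInvariantLoad() helper looks for: a load counts as invariant when an !invariant.load metadata attachment (an empty metadata node) is present on it. The following is a minimal C++ sketch, not part of this commit, of how a producer could emit such a load; the helper name emitInvariantLoad and its parameters are hypothetical.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"

// Hedged sketch: emit a load and tag it with !invariant.load, so that
// ParseMemoryInst::isInvariantLoad() above would return true for it.
llvm::LoadInst *emitInvariantLoad(llvm::IRBuilder<> &Builder, llvm::Type *Ty,
                                  llvm::Value *Ptr) {
  llvm::LoadInst *LI = Builder.CreateLoad(Ty, Ptr);
  // !invariant.load carries an empty MDNode; only its presence matters.
  LI->setMetadata(llvm::LLVMContext::MD_invariant_load,
                  llvm::MDNode::get(LI->getContext(), {}));
  return LI;
}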
   }
 
   // If we have an available version of this load, and if it is the right
-  // generation, replace this instruction.
+  // generation or the load is known to be from an invariant location,
+  // replace this instruction.
+  //
+  // A dominating invariant load implies that the location loaded from is
+  // unchanging beginning at the point of the invariant load, so the load
+  // we're CSE'ing _away_ does not need to be invariant, only the available
+  // load we're CSE'ing _to_ does.
   LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
-  if (InVal.DefInst != nullptr && InVal.Generation == CurrentGeneration &&
+  if (InVal.DefInst != nullptr &&
+      (InVal.Generation == CurrentGeneration || InVal.IsInvariant) &&
       InVal.MatchingId == MemInst.getMatchingId() &&
       // We don't yet handle removing loads with ordering of any kind.
       !MemInst.isVolatile() && MemInst.isUnordered() &&
       AvailableLoads.insert(
           MemInst.getPointerOperand(),
           LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
-                    MemInst.isAtomic()));
+                    MemInst.isAtomic(), MemInst.isInvariantLoad()));
       LastStore = nullptr;
       continue;
     }
         AvailableLoads.insert(
             MemInst.getPointerOperand(),
             LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
-                      MemInst.isAtomic()));
+                      MemInst.isAtomic(), false));
 
         // Remember that this was the last unordered store we saw for DSE. We
         // don't yet handle DSE on ordered or volatile stores since we don't
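The second changed file, shown below, is the new regression test; it drives the pass with "opt -early-cse" and uses FileCheck to verify which loads get folded. For reference, the same pass can be scheduled from C++ roughly as in the following sketch (legacy pass manager, not part of this commit):

#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Scalar.h"

// Hedged sketch: run EarlyCSE over every function in a module, roughly what
// the "opt -early-cse" RUN line in the test below does.
void runEarlyCSE(llvm::Module &M) {
  llvm::legacy::FunctionPassManager FPM(&M);
  FPM.add(llvm::createEarlyCSEPass());
  FPM.doInitialization();
  for (llvm::Function &F : M)
    if (!F.isDeclaration())
      FPM.run(F);
  FPM.doFinalization();
}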
; RUN: opt -S -early-cse < %s | FileCheck %s

declare void @clobber_and_use(i32)

define void @f_0(i32* %ptr) {
; CHECK-LABEL: @f_0(
; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
; CHECK: call void @clobber_and_use(i32 %val0)
; CHECK: call void @clobber_and_use(i32 %val0)
; CHECK: call void @clobber_and_use(i32 %val0)
; CHECK: ret void

  %val0 = load i32, i32* %ptr, !invariant.load !{}
  call void @clobber_and_use(i32 %val0)
  %val1 = load i32, i32* %ptr, !invariant.load !{}
  call void @clobber_and_use(i32 %val1)
  %val2 = load i32, i32* %ptr, !invariant.load !{}
  call void @clobber_and_use(i32 %val2)
  ret void
}

define void @f_1(i32* %ptr) {
; We can forward invariant loads to non-invariant loads, since once an
; invariant load has executed, the location loaded from is known to be
; unchanging.

; CHECK-LABEL: @f_1(
; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
; CHECK: call void @clobber_and_use(i32 %val0)
; CHECK: call void @clobber_and_use(i32 %val0)

  %val0 = load i32, i32* %ptr, !invariant.load !{}
  call void @clobber_and_use(i32 %val0)
  %val1 = load i32, i32* %ptr
  call void @clobber_and_use(i32 %val1)
  ret void
}

define void @f_2(i32* %ptr) {
; Negative test -- we can't forward a non-invariant load into an
; invariant load.

; CHECK-LABEL: @f_2(
; CHECK: %val0 = load i32, i32* %ptr
; CHECK: call void @clobber_and_use(i32 %val0)
; CHECK: %val1 = load i32, i32* %ptr, !invariant.load !0
; CHECK: call void @clobber_and_use(i32 %val1)

  %val0 = load i32, i32* %ptr
  call void @clobber_and_use(i32 %val0)
  %val1 = load i32, i32* %ptr, !invariant.load !{}
  call void @clobber_and_use(i32 %val1)
  ret void
}

define void @f_3(i1 %cond, i32* %ptr) {
; CHECK-LABEL: @f_3(
  %val0 = load i32, i32* %ptr, !invariant.load !{}
  call void @clobber_and_use(i32 %val0)
  br i1 %cond, label %left, label %right

; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
; CHECK: left:
; CHECK-NEXT: call void @clobber_and_use(i32 %val0)

left:
  %val1 = load i32, i32* %ptr
  call void @clobber_and_use(i32 %val1)
  ret void

right:
  ret void
}

define void @f_4(i1 %cond, i32* %ptr) {
; Negative test -- can't forward %val0 to %val1 because that'll break
; def-dominates-use.

; CHECK-LABEL: @f_4(
  br i1 %cond, label %left, label %merge

left:
; CHECK: left:
; CHECK-NEXT: %val0 = load i32, i32* %ptr, !invariant.load !
; CHECK-NEXT: call void @clobber_and_use(i32 %val0)

  %val0 = load i32, i32* %ptr, !invariant.load !{}
  call void @clobber_and_use(i32 %val0)
  br label %merge

merge:
; CHECK: merge:
; CHECK-NEXT: %val1 = load i32, i32* %ptr
; CHECK-NEXT: call void @clobber_and_use(i32 %val1)

  %val1 = load i32, i32* %ptr
  call void @clobber_and_use(i32 %val1)
  ret void
}