llvm.org GIT mirror, llvm commit 9511e49
[EarlyCSE] Make MemorySSA memory dependency check more aggressive.

Now that MemorySSA keeps track of whether MemoryUses are optimized, use
getClobberingMemoryAccess() to check MemoryUse memory dependencies since it
should no longer be so expensive.

This is a follow-up change to https://reviews.llvm.org/D25881

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@285080 91177308-0d34-0410-b5e6-96231b3b80d8

Geoff Berry, 2 years ago
2 changed files with 45 additions and 16 deletions.
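The functional change is in how EarlyCSE asks MemorySSA for a load's memory
dependency: rather than settling for a MemoryUse's recorded defining access, it
now asks the walker for the actual clobbering access, which is no longer
expensive now that MemorySSA caches whether a use has been optimized. Below is
a minimal sketch contrasting the two query styles; clobberOfLoad, MSSA and LI
are illustrative names rather than identifiers from the patch, and the header
path shown is the current one (at the time of this commit MemorySSA.h lived
under llvm/Transforms/Utils/).

// Sketch only, not code from this commit.
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static MemoryAccess *clobberOfLoad(MemorySSA &MSSA, LoadInst *LI) {
  MemoryUseOrDef *MA = MSSA.getMemoryAccess(LI);

  // Pre-patch style for loads: take whatever defining access MemorySSA
  // recorded, which may be a MemoryDef or MemoryPhi that does not actually
  // clobber LI.
  MemoryAccess *Defining = MA->getDefiningAccess();
  (void)Defining;

  // Post-patch style: ask the walker for the true clobber. With optimized-use
  // tracking, uses already optimized during MemorySSA construction answer this
  // without redoing alias queries.
  return MSSA.getWalker()->getClobberingMemoryAccess(LI);
}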
@@ -495,12 +495,11 @@
   void removeMSSA(Instruction *Inst) {
     if (!MSSA)
       return;
-    // FIXME: Removing a store here can leave MemorySSA in an unoptimized state
-    // by creating MemoryPhis that have identical arguments and by creating
+    // Removing a store here can leave MemorySSA in an unoptimized state by
+    // creating MemoryPhis that have identical arguments and by creating
     // MemoryUses whose defining access is not an actual clobber. We handle the
-    // phi case here, but the non-optimized MemoryUse case is not handled. Once
-    // MemorySSA tracks whether uses are optimized this will be taken care of on
-    // the MemorySSA side.
+    // phi case eagerly here. The non-optimized MemoryUse case is lazily
+    // updated by MemorySSA getClobberingMemoryAccess.
     if (MemoryAccess *MA = MSSA->getMemoryAccess(Inst)) {
       // Optimize MemoryPhi nodes that may become redundant by having all the
       // same input values once MA is removed.
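The rewritten comment above still relies on removeMSSA cleaning up MemoryPhis
that end up with identical arguments once an access is deleted. As a hedged
illustration of that condition (this helper is not part of the patch; the
actual cleanup loop is outside the lines shown here), such a phi is redundant
exactly when every incoming value is the same access, and it can then be
replaced by that access:

// Sketch only: the "identical arguments" condition referred to above.
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/MemorySSA.h"
using namespace llvm;

static bool hasIdenticalIncomingValues(MemoryPhi *MP) {
  MemoryAccess *FirstIn = MP->getIncomingValue(0);
  // A MemoryPhi whose incoming accesses are all FirstIn adds no information;
  // its users can be rewritten to use FirstIn and the phi removed.
  return all_of(MP->incoming_values(),
                [FirstIn](const Use &In) { return In == FirstIn; });
}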
@@ -563,17 +562,8 @@
     // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
     // EarlierInst and LaterInst and neither can any other write that potentially
     // clobbers LaterInst.
-    // FIXME: Use getClobberingMemoryAccess only for stores since it is currently
-    // fairly expensive to call on MemoryUses since it does an AA check even for
-    // MemoryUses that were already optimized by MemorySSA construction. Once
-    // MemorySSA optimized use tracking change has been committed we can use
-    // getClobberingMemoryAccess for MemoryUses as well.
-    MemoryAccess *LaterMA = MSSA->getMemoryAccess(LaterInst);
-    MemoryAccess *LaterDef;
-    if (auto *LaterUse = dyn_cast<MemoryUse>(LaterMA))
-      LaterDef = LaterUse->getDefiningAccess();
-    else
-      LaterDef = MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
+    MemoryAccess *LaterDef =
+        MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
     return MSSA->dominates(LaterDef, MSSA->getMemoryAccess(EarlierInst));
   }

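For context, the dominance argument in the comment above is what lets EarlyCSE
reuse an earlier load's value: if the access that clobbers the later load
dominates the earlier instruction, then no write to that location can sit
between the two, so both observe the same memory state. A hedged sketch of how
such a check can drive load forwarding follows; the helper names are
illustrative, and a real transform would also have to keep MemorySSA itself up
to date (e.g. via removeMemoryAccess) after deleting the load. The test file
diff below exercises exactly this kind of CSE.

// Sketch only: using the same-generation test for load-to-load forwarding.
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

static bool seesSameMemory(MemorySSA &MSSA, Instruction *Earlier,
                           Instruction *Later) {
  // Same reasoning as the code above: the clobber of Later dominating Earlier
  // means nothing writes the location between the two instructions.
  MemoryAccess *LaterDef = MSSA.getWalker()->getClobberingMemoryAccess(Later);
  return MSSA.dominates(LaterDef, MSSA.getMemoryAccess(Earlier));
}

static bool tryForwardLoad(MemorySSA &MSSA, LoadInst *Earlier, LoadInst *Later) {
  // Caller is assumed to have checked that both loads read the same pointer
  // with the same type; only the memory-generation test is shown here.
  if (!seesSameMemory(MSSA, Earlier, Later))
    return false;
  Later->replaceAllUsesWith(Earlier);
  Later->eraseFromParent();  // NB: real code must also update MemorySSA.
  return true;
}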
@@ -4,6 +4,7 @@

 @G1 = global i32 zeroinitializer
 @G2 = global i32 zeroinitializer
+@G3 = global i32 zeroinitializer

 ;; Simple load value numbering across non-clobbering store.
 ; CHECK-LABEL: @test1(
@@ -66,3 +67,41 @@
   store i32 %sum, i32* @G2
   ret void
 }
+
+
+;; Check that MemoryPhi optimization and MemoryUse re-optimization
+;; happens during EarlyCSE, enabling more load CSE opportunities.
+; CHECK-LABEL: @test_memphiopt2(
+; CHECK-NOMEMSSA-LABEL: @test_memphiopt2(
+define void @test_memphiopt2(i1 %c, i32* %p) {
+; CHECK-LABEL: entry:
+; CHECK-NOMEMSSA-LABEL: entry:
+entry:
+; CHECK: load
+; CHECK-NOMEMSSA: load
+  %v1 = load i32, i32* @G1
+; CHECK: store
+; CHECK-NOMEMSSA: store
+  store i32 %v1, i32* @G2
+  br i1 %c, label %then, label %end
+
+; CHECK-LABEL: then:
+; CHECK-NOMEMSSA-LABEL: then:
+then:
+; CHECK: load
+; CHECK-NOMEMSSA: load
+  %pv = load i32, i32* %p
+; CHECK-NOT: store
+; CHECK-NOMEMSSA-NOT: store
+  store i32 %pv, i32* %p
+  br label %end
+
+; CHECK-LABEL: end:
+; CHECK-NOMEMSSA-LABEL: end:
+end:
+; CHECK-NOT: load
+; CHECK-NOMEMSSA: load
+  %v2 = load i32, i32* @G1
+  store i32 %v2, i32* @G3
+  ret void
+}