llvm.org GIT mirror llvm / d94715e
MergedLoadStoreMotion pass. Merges equivalent loads on both sides of a hammock/diamond and hoists them into the header. Merges equivalent stores on both sides of a hammock/diamond and sinks them to the footer. Can enable if-conversion and better tolerate load misses and store operand latencies. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@213396 91177308-0d34-0410-b5e6-96231b3b80d8 Gerolf Hoflehner 5 years ago
11 changed file(s) with 736 addition(s) and 1 deletion(s). Raw diff Collapse all Expand all
134134 }
135135
136136 /* [ unit */
137 CAMLprim value llvm_add_merged_load_store_motion(LLVMPassManagerRef PM) {
138 LLVMAddMergedLoadStoreMotionPass(PM);
139 return Val_unit;
140 }
141
142 /* [ unit */
137143 CAMLprim value llvm_add_gvn(LLVMPassManagerRef PM) {
138144 LLVMAddGVNPass(PM);
139145 return Val_unit;
195195 void initializeMemCpyOptPass(PassRegistry&);
196196 void initializeMemDepPrinterPass(PassRegistry&);
197197 void initializeMemoryDependenceAnalysisPass(PassRegistry&);
198 void initializeMergedLoadStoreMotionPass(PassRegistry &);
198199 void initializeMetaRenamerPass(PassRegistry&);
199200 void initializeMergeFunctionsPass(PassRegistry&);
200201 void initializeModuleDebugInfoPrinterPass(PassRegistry&);
133133 (void) llvm::createConstantHoistingPass();
134134 (void) llvm::createCodeGenPreparePass();
135135 (void) llvm::createEarlyCSEPass();
136 (void)llvm::createMergedLoadStoreMotionPass();
136137 (void) llvm::createGVNPass();
137138 (void) llvm::createMemCpyOptPass();
138139 (void) llvm::createLoopDeletionPass();
287287
288288 //===----------------------------------------------------------------------===//
289289 //
290 // MergedLoadStoreMotion - This pass merges loads and stores in diamonds. Loads
291 // are hoisted into the header, while stores sink into the footer.
292 //
293 FunctionPass *createMergedLoadStoreMotionPass();
294
295 //===----------------------------------------------------------------------===//
296 //
290297 // GVN - This pass performs global value numbering and redundant load
291298 // elimination cotemporaneously.
292299 //
4242
4343 /** See llvm::createScalarizerPass function. */
4444 void LLVMAddScalarizerPass(LLVMPassManagerRef PM);
45
46 /** See llvm::createMergedLoadStoreMotionPass function. */
47 void LLVMAddMergedLoadStoreMotionPass(LLVMPassManagerRef PM);
4548
4649 /** See llvm::createGVNPass function. */
4750 void LLVMAddGVNPass(LLVMPassManagerRef PM);
106106 initializeFunctionAttrsPass(R);
107107 initializeGlobalsModRefPass(R);
108108 initializeLICMPass(R);
109 initializeMergedLoadStoreMotionPass(R);
109110 initializeGVNPass(R);
110111 initializeMemCpyOptPass(R);
111112 initializeDCEPass(R);
206206 MPM.add(createSimpleLoopUnrollPass()); // Unroll small loops
207207 addExtensionsToPM(EP_LoopOptimizerEnd, MPM);
208208
209 if (OptLevel > 1)
209 if (OptLevel > 1) {
210 MPM.add(createMergedLoadStoreMotionPass()); // Merge load/stores in diamond
210211 MPM.add(createGVNPass()); // Remove redundancies
212 }
211213 MPM.add(createMemCpyOptPass()); // Remove memcpy / form memset
212214 MPM.add(createSCCPPass()); // Constant prop with SCCP
213215
345347 PM.add(createGlobalsModRefPass()); // IP alias analysis.
346348
347349 PM.add(createLICMPass()); // Hoist loop invariants.
350 PM.add(createMergedLoadStoreMotionPass()); // Merge load/stores in diamonds
348351 PM.add(createGVNPass(DisableGVNLoadPRE)); // Remove redundancies.
349352 PM.add(createMemCpyOptPass()); // Remove dead memcpys.
350353
2121 LoopUnswitch.cpp
2222 LowerAtomic.cpp
2323 MemCpyOptimizer.cpp
24 MergedLoadStoreMotion.cpp
2425 PartiallyInlineLibCalls.cpp
2526 Reassociate.cpp
2627 Reg2Mem.cpp
0 //===- MergedLoadStoreMotion.cpp - merge and hoist/sink load/stores -------===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 //! \file
10 //! \brief This pass performs merges of loads and stores on both sides of a
11 // diamond (hammock). It hoists the loads and sinks the stores.
12 //
13 // The algorithm iteratively hoists two loads to the same address out of a
14 // diamond (hammock) and merges them into a single load in the header. Similar
15 // it sinks and merges two stores to the tail block (footer). The algorithm
16 // iterates over the instructions of one side of the diamond and attempts to
17 // find a matching load/store on the other side. It hoists / sinks when it
18 // thinks it safe to do so. This optimization helps with eg. hiding load
19 // latencies, triggering if-conversion, and reducing static code size.
20 //
21 //===----------------------------------------------------------------------===//
22 //
23 //
24 // Example:
25 // Diamond shaped code before merge:
26 //
27 // header:
28 // br %cond, label %if.then, label %if.else
29 // / \
30 // / \
31 // / \
32 // if.then: if.else:
33 // %lt = load %addr_l %le = load %addr_l
34 //
35 // <...> <...>
36 // store %st, %addr_s store %se, %addr_s
37 // br label %if.end br label %if.end
38 // \ /
39 // \ /
40 // \ /
41 // if.end ("footer"):
42 // <...>
43 //
44 // Diamond shaped code after merge:
45 //
46 // header:
47 // %l = load %addr_l
48 // br %cond, label %if.then, label %if.else
49 // / \
50 // / \
51 // / \
52 // if.then: if.else:
53 //
54 // <...> <...>
55 // br label %if.end br label %if.end
56 // \ /
57 // \ /
58 // \ /
59 // if.end ("footer"):
60 // %s.sink = phi [%st, if.then], [%se, if.else]
61 // <...>
62 // store %s.sink, %addr_s
63 // <...>
64 //
65 //
66 //===----------------------- TODO -----------------------------------------===//
67 //
68 // 1) Generalize to regions other than diamonds
69 // 2) Be more aggressive merging memory operations
70 // Note that both changes require register pressure control
71 //
72 //===----------------------------------------------------------------------===//
73
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <vector>
92 using namespace llvm;
93
94 #define DEBUG_TYPE "mldst-motion"
95
96 //===----------------------------------------------------------------------===//
97 // MergedLoadStoreMotion Pass
98 //===----------------------------------------------------------------------===//
99 static cl::opt
100 EnableMLSM("mlsm", cl::desc("Enable motion of merged load and store"),
101 cl::init(true));
102
103 namespace {
104 class MergedLoadStoreMotion : public FunctionPass {
105 AliasAnalysis *AA;
106 MemoryDependenceAnalysis *MD;
107
108 public:
109 static char ID; // Pass identification, replacement for typeid
110 explicit MergedLoadStoreMotion(void) : FunctionPass(ID), MD(nullptr) {
111 initializeMergedLoadStoreMotionPass(*PassRegistry::getPassRegistry());
112 }
113
114 bool runOnFunction(Function &F) override;
115
116 private:
117 // This transformation requires dominator postdominator info
118 void getAnalysisUsage(AnalysisUsage &AU) const override {
119 AU.addRequired();
120 AU.addRequired();
121 AU.addRequired();
122 AU.addPreserved();
123 }
124
125 // Helper routines
126
127 ///
128 /// \brief Remove instruction from parent and update memory dependence
129 /// analysis.
130 ///
131 void removeInstruction(Instruction *Inst);
132 BasicBlock *getDiamondTail(BasicBlock *BB);
133 bool isDiamondHead(BasicBlock *BB);
134 // Routines for hoisting loads
135 bool isLoadHoistBarrier(Instruction *Inst);
136 LoadInst *canHoistFromBlock(BasicBlock *BB, LoadInst *LI);
137 void hoistInstruction(BasicBlock *BB, Instruction *HoistCand,
138 Instruction *ElseInst);
139 bool isSafeToHoist(Instruction *I) const;
140 bool hoistLoad(BasicBlock *BB, LoadInst *HoistCand, LoadInst *ElseInst);
141 bool mergeLoads(BasicBlock *BB);
142 // Routines for sinking stores
143 StoreInst *canSinkFromBlock(BasicBlock *BB, StoreInst *SI);
144 PHINode *getPHIOperand(BasicBlock *BB, StoreInst *S0, StoreInst *S1);
145 bool isStoreSinkBarrier(Instruction *Inst);
146 bool sinkStore(BasicBlock *BB, StoreInst *SinkCand, StoreInst *ElseInst);
147 bool mergeStores(BasicBlock *BB);
148 // The mergeLoad/Store algorithms could have Size0 * Size1 complexity,
149 // where Size0 and Size1 are the #instructions on the two sides of
150 // the diamond. The constant chosen here is arbitrary. Compiler Time
151 // Control is enforced by the check Size0 * Size1 < MagicCompileTimeControl.
152 const int MagicCompileTimeControl = 250;
153 };
154
155 char MergedLoadStoreMotion::ID = 0;
156 }
157
158 ///
159 /// \brief createMergedLoadStoreMotionPass - The public interface to this file.
160 ///
161 FunctionPass *llvm::createMergedLoadStoreMotionPass() {
162 return new MergedLoadStoreMotion();
163 }
164
165 INITIALIZE_PASS_BEGIN(MergedLoadStoreMotion, "mldst-motion",
166 "MergedLoadStoreMotion", false, false)
167 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
168 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
169 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
170 INITIALIZE_PASS_END(MergedLoadStoreMotion, "mldst-motion",
171 "MergedLoadStoreMotion", false, false)
172
173 ///
174 /// \brief Remove instruction from parent and update memory dependence analysis.
175 ///
176 void MergedLoadStoreMotion::removeInstruction(Instruction *Inst) {
177 // Notify the memory dependence analysis.
178 if (MD) {
179 MD->removeInstruction(Inst);
180 if (LoadInst *LI = dyn_cast(Inst))
181 MD->invalidateCachedPointerInfo(LI->getPointerOperand());
182 if (Inst->getType()->getScalarType()->isPointerTy()) {
183 MD->invalidateCachedPointerInfo(Inst);
184 }
185 }
186 Inst->eraseFromParent();
187 }
188
189 ///
190 /// \brief Return tail block of a diamond.
191 ///
192 BasicBlock *MergedLoadStoreMotion::getDiamondTail(BasicBlock *BB) {
193 assert(isDiamondHead(BB) && "Basic block is not head of a diamond");
194 BranchInst *BI = (BranchInst *)(BB->getTerminator());
195 BasicBlock *Succ0 = BI->getSuccessor(0);
196 BasicBlock *Tail = Succ0->getTerminator()->getSuccessor(0);
197 return Tail;
198 }
199
200 ///
201 /// \brief True when BB is the head of a diamond (hammock)
202 ///
203 bool MergedLoadStoreMotion::isDiamondHead(BasicBlock *BB) {
204 if (!BB)
205 return false;
206 if (!isa(BB->getTerminator()))
207 return false;
208 if (BB->getTerminator()->getNumSuccessors() != 2)
209 return false;
210
211 BranchInst *BI = (BranchInst *)(BB->getTerminator());
212 BasicBlock *Succ0 = BI->getSuccessor(0);
213 BasicBlock *Succ1 = BI->getSuccessor(1);
214
215 if (!Succ0->getSinglePredecessor() ||
216 Succ0->getTerminator()->getNumSuccessors() != 1)
217 return false;
218 if (!Succ1->getSinglePredecessor() ||
219 Succ1->getTerminator()->getNumSuccessors() != 1)
220 return false;
221
222 BasicBlock *Tail = Succ0->getTerminator()->getSuccessor(0);
223 // Ignore triangles.
224 if (Succ1->getTerminator()->getSuccessor(0) != Tail)
225 return false;
226 return true;
227 }
228
229 ///
230 /// \brief True when instruction is a hoist barrier for a load
231 ///
232 /// Whenever an instruction could possibly modify the value
233 /// being loaded or protect against the load from happening
234 /// it is considered a hoist barrier.
235 ///
236 bool MergedLoadStoreMotion::isLoadHoistBarrier(Instruction *Inst) {
237 // FIXME: A call with no side effects should not be a barrier.
238 // Aren't all such calls covered by mayHaveSideEffects() below?
239 // Then this check can be removed.
240 if (isa(Inst))
241 return true;
242 if (isa(Inst))
243 return true;
244 // Note: mayHaveSideEffects covers all instructions that could
245 // trigger a change to state. Eg. in-flight stores have to be executed
246 // before ordered loads or fences, calls could invoke functions that store
247 // data to memory etc.
248 if (Inst->mayHaveSideEffects()) {
249 return true;
250 }
251 DEBUG(dbgs() << "No Hoist Barrier\n");
252 return false;
253 }
254
255 ///
256 /// \brief Decide if a load can be hoisted
257 ///
258 /// When there is a load in \p BB to the same address as \p LI
259 /// and it can be hoisted from \p BB, return that load.
260 /// Otherwise return Null.
261 ///
262 LoadInst *MergedLoadStoreMotion::canHoistFromBlock(BasicBlock *BB,
263 LoadInst *LI) {
264 LoadInst *I = nullptr;
265 assert(isa(LI));
266 if (LI->isUsedOutsideOfBlock(LI->getParent()))
267 return nullptr;
268
269 for (BasicBlock::iterator BBI = BB->begin(), BBE = BB->end(); BBI != BBE;
270 ++BBI) {
271 Instruction *Inst = BBI;
272
273 // Only merge and hoist loads when their result in used only in BB
274 if (isLoadHoistBarrier(Inst))
275 break;
276 if (!isa(Inst))
277 continue;
278 if (Inst->isUsedOutsideOfBlock(Inst->getParent()))
279 continue;
280
281 AliasAnalysis::Location LocLI = AA->getLocation(LI);
282 AliasAnalysis::Location LocInst = AA->getLocation((LoadInst *)Inst);
283 if (AA->isMustAlias(LocLI, LocInst) && LI->getType() == Inst->getType()) {
284 I = (LoadInst *)Inst;
285 break;
286 }
287 }
288 return I;
289 }
290
291 ///
292 /// \brief Merge two equivalent instructions \p HoistCand and \p ElseInst into
293 /// \p BB
294 ///
295 /// BB is the head of a diamond
296 ///
297 void MergedLoadStoreMotion::hoistInstruction(BasicBlock *BB,
298 Instruction *HoistCand,
299 Instruction *ElseInst) {
300 DEBUG(dbgs() << " Hoist Instruction into BB \n"; BB->dump();
301 dbgs() << "Instruction Left\n"; HoistCand->dump(); dbgs() << "\n";
302 dbgs() << "Instruction Right\n"; ElseInst->dump(); dbgs() << "\n");
303 // Hoist the instruction.
304 assert(HoistCand->getParent() != BB);
305
306 // Intersect optional metadata.
307 HoistCand->intersectOptionalDataWith(ElseInst);
308 HoistCand->dropUnknownMetadata();
309
310 // Prepend point for instruction insert
311 Instruction *HoistPt = BB->getTerminator();
312
313 // Merged instruction
314 Instruction *HoistedInst = HoistCand->clone();
315
316 // Notify AA of the new value.
317 if (isa(HoistCand))
318 AA->copyValue(HoistCand, HoistedInst);
319
320 // Hoist instruction.
321 HoistedInst->insertBefore(HoistPt);
322
323 HoistCand->replaceAllUsesWith(HoistedInst);
324 removeInstruction(HoistCand);
325 // Replace the else block instruction.
326 ElseInst->replaceAllUsesWith(HoistedInst);
327 removeInstruction(ElseInst);
328 }
329
330 ///
331 /// \brief Return true if no operand of \p I is defined in I's parent block
332 ///
333 bool MergedLoadStoreMotion::isSafeToHoist(Instruction *I) const {
334 BasicBlock *Parent = I->getParent();
335 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
336 Instruction *Instr = dyn_cast(I->getOperand(i));
337 if (Instr && Instr->getParent() == Parent)
338 return false;
339 }
340 return true;
341 }
342
343 ///
344 /// \brief Merge two equivalent loads and GEPs and hoist into diamond head
345 ///
346 bool MergedLoadStoreMotion::hoistLoad(BasicBlock *BB, LoadInst *L0,
347 LoadInst *L1) {
348 // Only one definition?
349 Instruction *A0 = dyn_cast(L0->getPointerOperand());
350 Instruction *A1 = dyn_cast(L1->getPointerOperand());
351 if (A0 && A1 && A0->isIdenticalTo(A1) && isSafeToHoist(A0) &&
352 A0->hasOneUse() && (A0->getParent() == L0->getParent()) &&
353 A1->hasOneUse() && (A1->getParent() == L1->getParent()) &&
354 isa(A0)) {
355 DEBUG(dbgs() << "Hoist Instruction into BB \n"; BB->dump();
356 dbgs() << "Instruction Left\n"; L0->dump(); dbgs() << "\n";
357 dbgs() << "Instruction Right\n"; L1->dump(); dbgs() << "\n");
358 hoistInstruction(BB, A0, A1);
359 hoistInstruction(BB, L0, L1);
360 return true;
361 } else
362 return false;
363 }
364
365 ///
366 /// \brief Try to hoist two loads to same address into diamond header
367 ///
368 /// Starting from a diamond head block, iterate over the instructions in one
369 /// successor block and try to match a load in the second successor.
370 ///
371 bool MergedLoadStoreMotion::mergeLoads(BasicBlock *BB) {
372 bool MergedLoads = false;
373 assert(isDiamondHead(BB));
374 BranchInst *BI = dyn_cast(BB->getTerminator());
375 BasicBlock *Succ0 = BI->getSuccessor(0);
376 BasicBlock *Succ1 = BI->getSuccessor(1);
377 // #Instructions in Succ1 for Compile Time Control
378 int Size1 = Succ1->size();
379 int NLoads = 0;
380 for (BasicBlock::iterator BBI = Succ0->begin(), BBE = Succ0->end();
381 BBI != BBE;) {
382
383 Instruction *I = BBI;
384 ++BBI;
385 if (isLoadHoistBarrier(I))
386 break;
387
388 // Only move non-simple (atomic, volatile) loads.
389 if (!isa(I))
390 continue;
391
392 LoadInst *L0 = (LoadInst *)I;
393 if (!L0->isSimple())
394 continue;
395
396 ++NLoads;
397 if (NLoads * Size1 >= MagicCompileTimeControl)
398 break;
399 if (LoadInst *L1 = canHoistFromBlock(Succ1, L0)) {
400 bool Res = hoistLoad(BB, L0, L1);
401 MergedLoads |= Res;
402 // Don't attempt to hoist above loads that had not been hoisted.
403 if (!Res)
404 break;
405 }
406 }
407 return MergedLoads;
408 }
409
410 ///
411 /// \brief True when instruction is sink barrier for a store
412 ///
413 bool MergedLoadStoreMotion::isStoreSinkBarrier(Instruction *Inst) {
414 if (isa(Inst))
415 return true;
416 if (isa(Inst) && !isa(Inst))
417 return true;
418 // Note: mayHaveSideEffects covers all instructions that could
419 // trigger a change to state. Eg. in-flight stores have to be executed
420 // before ordered loads or fences, calls could invoke functions that store
421 // data to memory etc.
422 if (!isa(Inst) && Inst->mayHaveSideEffects()) {
423 return true;
424 }
425 DEBUG(dbgs() << "No Sink Barrier\n");
426 return false;
427 }
428
429 ///
430 /// \brief Check if \p BB contains a store to the same address as \p SI
431 ///
432 /// \return The store in \p when it is safe to sink. Otherwise return Null.
433 ///
434 StoreInst *MergedLoadStoreMotion::canSinkFromBlock(BasicBlock *BB,
435 StoreInst *SI) {
436 StoreInst *I = 0;
437 DEBUG(dbgs() << "can Sink? : "; SI->dump(); dbgs() << "\n");
438 for (BasicBlock::reverse_iterator RBI = BB->rbegin(), RBE = BB->rend();
439 RBI != RBE; ++RBI) {
440 Instruction *Inst = &*RBI;
441
442 // Only move loads if they are used in the block.
443 if (isStoreSinkBarrier(Inst))
444 break;
445 if (isa(Inst)) {
446 AliasAnalysis::Location LocSI = AA->getLocation(SI);
447 AliasAnalysis::Location LocInst = AA->getLocation((StoreInst *)Inst);
448 if (AA->isMustAlias(LocSI, LocInst)) {
449 I = (StoreInst *)Inst;
450 break;
451 }
452 }
453 }
454 return I;
455 }
456
457 ///
458 /// \brief Create a PHI node in BB for the operands of S0 and S1
459 ///
460 PHINode *MergedLoadStoreMotion::getPHIOperand(BasicBlock *BB, StoreInst *S0,
461 StoreInst *S1) {
462 // Create a phi if the values mismatch.
463 PHINode *NewPN = 0;
464 Value *Opd1 = S0->getValueOperand();
465 Value *Opd2 = S1->getValueOperand();
466 if (Opd1 != Opd2) {
467 NewPN = PHINode::Create(Opd1->getType(), 2, Opd2->getName() + ".sink",
468 BB->begin());
469 NewPN->addIncoming(Opd1, S0->getParent());
470 NewPN->addIncoming(Opd2, S1->getParent());
471 if (NewPN->getType()->getScalarType()->isPointerTy()) {
472 // Notify AA of the new value.
473 AA->copyValue(Opd1, NewPN);
474 AA->copyValue(Opd2, NewPN);
475 // AA needs to be informed when a PHI-use of the pointer value is added
476 for (unsigned I = 0, E = NewPN->getNumIncomingValues(); I != E; ++I) {
477 unsigned J = PHINode::getOperandNumForIncomingValue(I);
478 AA->addEscapingUse(NewPN->getOperandUse(J));
479 }
480 if (MD)
481 MD->invalidateCachedPointerInfo(NewPN);
482 }
483 }
484 return NewPN;
485 }
486
487 ///
488 /// \brief Merge two stores to same address and sink into \p BB
489 ///
490 /// Also sinks GEP instruction computing the store address
491 ///
492 bool MergedLoadStoreMotion::sinkStore(BasicBlock *BB, StoreInst *S0,
493 StoreInst *S1) {
494 // Only one definition?
495 Instruction *A0 = dyn_cast(S0->getPointerOperand());
496 Instruction *A1 = dyn_cast(S1->getPointerOperand());
497 if (A0 && A1 && A0->isIdenticalTo(A1) && A0->hasOneUse() &&
498 (A0->getParent() == S0->getParent()) && A1->hasOneUse() &&
499 (A1->getParent() == S1->getParent()) && isa(A0)) {
500 DEBUG(dbgs() << "Sink Instruction into BB \n"; BB->dump();
501 dbgs() << "Instruction Left\n"; S0->dump(); dbgs() << "\n";
502 dbgs() << "Instruction Right\n"; S1->dump(); dbgs() << "\n");
503 // Hoist the instruction.
504 BasicBlock::iterator InsertPt = BB->getFirstInsertionPt();
505 // Intersect optional metadata.
506 S0->intersectOptionalDataWith(S1);
507 S0->dropUnknownMetadata();
508
509 // Create the new store to be inserted at the join point.
510 StoreInst *SNew = (StoreInst *)(S0->clone());
511 Instruction *ANew = A0->clone();
512 AA->copyValue(S0, SNew);
513 SNew->insertBefore(InsertPt);
514 ANew->insertBefore(SNew);
515
516 assert(S0->getParent() == A0->getParent());
517 assert(S1->getParent() == A1->getParent());
518
519 PHINode *NewPN = getPHIOperand(BB, S0, S1);
520 // New PHI operand? Use it.
521 if (NewPN)
522 SNew->setOperand(0, NewPN);
523 removeInstruction(S0);
524 removeInstruction(S1);
525 A0->replaceAllUsesWith(ANew);
526 removeInstruction(A0);
527 A1->replaceAllUsesWith(ANew);
528 removeInstruction(A1);
529 return true;
530 }
531 return false;
532 }
533
534 ///
535 /// \brief True when two stores are equivalent and can sink into the footer
536 ///
537 /// Starting from a diamond tail block, iterate over the instructions in one
538 /// predecessor block and try to match a store in the second predecessor.
539 ///
540 bool MergedLoadStoreMotion::mergeStores(BasicBlock *T) {
541
542 bool MergedStores = false;
543 assert(T && "Footer of a diamond cannot be empty");
544
545 pred_iterator PI = pred_begin(T), E = pred_end(T);
546 assert(PI != E);
547 BasicBlock *Pred0 = *PI;
548 ++PI;
549 BasicBlock *Pred1 = *PI;
550 ++PI;
551 // tail block of a diamond/hammock?
552 if (Pred0 == Pred1)
553 return false; // No.
554 if (PI != E)
555 return false; // No. More than 2 predecessors.
556
557 // #Instructions in Succ1 for Compile Time Control
558 int Size1 = Pred1->size();
559 int NStores = 0;
560
561 for (BasicBlock::reverse_iterator RBI = Pred0->rbegin(), RBE = Pred0->rend();
562 RBI != RBE;) {
563
564 Instruction *I = &*RBI;
565 ++RBI;
566 if (isStoreSinkBarrier(I))
567 break;
568 // Sink move non-simple (atomic, volatile) stores
569 if (!isa(I))
570 continue;
571 StoreInst *S0 = (StoreInst *)I;
572 if (!S0->isSimple())
573 continue;
574
575 ++NStores;
576 if (NStores * Size1 >= MagicCompileTimeControl)
577 break;
578 if (StoreInst *S1 = canSinkFromBlock(Pred1, S0)) {
579 bool Res = sinkStore(T, S0, S1);
580 MergedStores |= Res;
581 // Don't attempt to sink below stores that had to stick around
582 // But after removal of a store and some of its feeding
583 // instruction search again from the beginning since the iterator
584 // is likely stale at this point.
585 if (!Res)
586 break;
587 else {
588 RBI = Pred0->rbegin();
589 RBE = Pred0->rend();
590 DEBUG(dbgs() << "Search again\n"; Instruction *I = &*RBI; I->dump());
591 }
592 }
593 }
594 return MergedStores;
595 }
596 ///
597 /// \brief Run the transformation for each function
598 ///
599 bool MergedLoadStoreMotion::runOnFunction(Function &F) {
600 MD = &getAnalysis();
601 AA = &getAnalysis();
602
603 bool Changed = false;
604 if (!EnableMLSM)
605 return false;
606 DEBUG(dbgs() << "Instruction Merger\n");
607
608 // Merge unconditional branches, allowing PRE to catch more
609 // optimization opportunities.
610 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE;) {
611 BasicBlock *BB = FI++;
612
613 // Hoist equivalent loads and sink stores
614 // outside diamonds when possible
615 // Run outside core GVN
616 if (isDiamondHead(BB)) {
617 Changed |= mergeLoads(BB);
618 Changed |= mergeStores(getDiamondTail(BB));
619 }
620 }
621 return Changed;
622 }
5151 initializeLowerAtomicPass(Registry);
5252 initializeLowerExpectIntrinsicPass(Registry);
5353 initializeMemCpyOptPass(Registry);
54 initializeMergedLoadStoreMotionPass(Registry);
5455 initializePartiallyInlineLibCallsPass(Registry);
5556 initializeReassociatePass(Registry);
5657 initializeRegToMemPass(Registry);
9192 unwrap(PM)->add(createGVNPass());
9293 }
9394
95 void LLVMAddMergedLoadStoreMotionPass(LLVMPassManagerRef PM) {
96 unwrap(PM)->add(createMergedLoadStoreMotionPass());
97 }
98
9499 void LLVMAddIndVarSimplifyPass(LLVMPassManagerRef PM) {
95100 unwrap(PM)->add(createIndVarSimplifyPass());
96101 }
0 ; Tests to make sure that loads and stores in a diamond get merged
1 ; Loads are hoisted into the header. Stores sunks into the footer.
2 ; RUN: opt -basicaa -memdep -mldst-motion -S < %s | FileCheck %s
3 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
4
5 %struct.node = type { i64, %struct.node*, %struct.node*, %struct.node*, i64, %struct.arc*, i64, i64, i64 }
6 %struct.arc = type { i64, i64, i64 }
7
8 define i64 @foo(%struct.node* nocapture readonly %r) nounwind {
9 entry:
10 %node.0.in16 = getelementptr inbounds %struct.node* %r, i64 0, i32 2
11 %node.017 = load %struct.node** %node.0.in16, align 8
12 %tobool18 = icmp eq %struct.node* %node.017, null
13 br i1 %tobool18, label %while.end, label %while.body.preheader
14
15 ; CHECK-LABEL: while.body.preheader
16 while.body.preheader: ; preds = %entry
17 ; CHECK: load
18 br label %while.body
19
20 while.body: ; preds = %while.body.preheader, %if.end
21 %node.020 = phi %struct.node* [ %node.0, %if.end ], [ %node.017, %while.body.preheader ]
22 %sum.019 = phi i64 [ %inc, %if.end ], [ 0, %while.body.preheader ]
23 %orientation = getelementptr inbounds %struct.node* %node.020, i64 0, i32 4
24 %0 = load i64* %orientation, align 8
25 %cmp = icmp eq i64 %0, 1
26 br i1 %cmp, label %if.then, label %if.else
27 ; CHECK: if.then
28 if.then: ; preds = %while.body
29 %a = getelementptr inbounds %struct.node* %node.020, i64 0, i32 5
30 ; CHECK-NOT: load %struct.arc
31 %1 = load %struct.arc** %a, align 8
32 %cost = getelementptr inbounds %struct.arc* %1, i64 0, i32 0
33 ; CHECK-NOT: load i64*
34 %2 = load i64* %cost, align 8
35 %pred = getelementptr inbounds %struct.node* %node.020, i64 0, i32 1
36 ; CHECK-NOT: load %struct.node**
37 %3 = load %struct.node** %pred, align 8
38 %p = getelementptr inbounds %struct.node* %3, i64 0, i32 6
39 ; CHECK-NOT: load i64*
40 %4 = load i64* %p, align 8
41 %add = add nsw i64 %4, %2
42 %p1 = getelementptr inbounds %struct.node* %node.020, i64 0, i32 6
43 ; CHECK-NOT: store i64
44 store i64 %add, i64* %p1, align 8
45 br label %if.end
46
47 ; CHECK: if.else
48 if.else: ; preds = %while.body
49 %pred2 = getelementptr inbounds %struct.node* %node.020, i64 0, i32 1
50 ; CHECK-NOT: load %struct.node**
51 %5 = load %struct.node** %pred2, align 8
52 %p3 = getelementptr inbounds %struct.node* %5, i64 0, i32 6
53 ; CHECK-NOT: load i64*
54 %6 = load i64* %p3, align 8
55 %a4 = getelementptr inbounds %struct.node* %node.020, i64 0, i32 5
56 ; CHECK-NOT: load %struct.arc**
57 %7 = load %struct.arc** %a4, align 8
58 %cost5 = getelementptr inbounds %struct.arc* %7, i64 0, i32 0
59 ; CHECK-NOT: load i64*
60 %8 = load i64* %cost5, align 8
61 %sub = sub nsw i64 %6, %8
62 %p6 = getelementptr inbounds %struct.node* %node.020, i64 0, i32 6
63 ; CHECK-NOT: store i64
64 store i64 %sub, i64* %p6, align 8
65 br label %if.end
66
67 ; CHECK: if.end
68 if.end: ; preds = %if.else, %if.then
69 ; CHECK: store
70 %inc = add nsw i64 %sum.019, 1
71 %node.0.in = getelementptr inbounds %struct.node* %node.020, i64 0, i32 2
72 %node.0 = load %struct.node** %node.0.in, align 8
73 %tobool = icmp eq %struct.node* %node.0, null
74 br i1 %tobool, label %while.end.loopexit, label %while.body
75
76 while.end.loopexit: ; preds = %if.end
77 %inc.lcssa = phi i64 [ %inc, %if.end ]
78 br label %while.end
79
80 while.end: ; preds = %while.end.loopexit, %entry
81 %sum.0.lcssa = phi i64 [ 0, %entry ], [ %inc.lcssa, %while.end.loopexit ]
82 ret i64 %sum.0.lcssa
83 }