llvm.org GIT mirror llvm / 74a4533
Remove the old CodePlacementOpt pass. It was superseded by MachineBlockPlacement and disabled by default since LLVM 3.1.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@178349 91177308-0d34-0410-b5e6-96231b3b80d8

Benjamin Kramer, 7 years ago
16 changed file(s) with 10 addition(s) and 471 deletion(s).
443443 /// information.
444444 extern char &MachineBlockPlacementStatsID;
445445
446 /// Code Placement - This pass optimizes code placement and aligns loop
447 /// headers to a target-specific alignment boundary.
448 extern char &CodePlacementOptID;
449
450446 /// GCLowering Pass - Performs target-independent LLVM IR transformations for
451447 /// highly portable strategies.
452448 ///
9090 void initializeCalculateSpillWeightsPass(PassRegistry&);
9191 void initializeCallGraphAnalysisGroup(PassRegistry&);
9292 void initializeCodeGenPreparePass(PassRegistry&);
93 void initializeCodePlacementOptPass(PassRegistry&);
9493 void initializeConstantMergePass(PassRegistry&);
9594 void initializeConstantPropagationPass(PassRegistry&);
9695 void initializeMachineCopyPropagationPass(PassRegistry&);
693693 return false;
694694 }
695695
696 /// This function returns true if the target would benefit from code placement
697 /// optimization.
698 /// @brief Determine if the target should perform code placement optimization.
699 bool shouldOptimizeCodePlacement() const {
700 return BenefitFromCodePlacementOpt;
701 }
702
703696 /// getOptimalMemOpType - Returns the target specific optimal type for load
704697 /// and store operations as a result of memset, memcpy, and memmove
705698 /// lowering. If DstAlign is zero that means it's safe to destination
16431636 /// to memmove, used for functions with OpSize attribute.
16441637 unsigned MaxStoresPerMemmoveOptSize;
16451638
1646 /// This field specifies whether the target can benefit from code placement
1647 /// optimization.
1648 bool BenefitFromCodePlacementOpt;
1649
16501639 /// PredictableSelectIsExpensive - Tells the code generator that select is
16511640 /// more expensive than a branch if the branch is usually predicted right.
16521641 bool PredictableSelectIsExpensive;
66 CalcSpillWeights.cpp
77 CallingConvLower.cpp
88 CodeGen.cpp
9 CodePlacementOpt.cpp
109 CriticalAntiDepBreaker.cpp
1110 DFAPacketizer.cpp
1211 DeadMachineInstructionElim.cpp
2121 initializeBasicTTIPass(Registry);
2222 initializeBranchFolderPassPass(Registry);
2323 initializeCalculateSpillWeightsPass(Registry);
24 initializeCodePlacementOptPass(Registry);
2524 initializeDeadMachineInstructionElimPass(Registry);
2625 initializeEarlyIfConverterPass(Registry);
2726 initializeExpandPostRAPass(Registry);
+0 -423 lib/CodeGen/CodePlacementOpt.cpp
None //===-- CodePlacementOpt.cpp - Code Placement pass. -----------------------===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the pass that optimizes code placement and aligns loop
10 // headers to target-specific alignment boundaries.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #define DEBUG_TYPE "code-placement"
15 #include "llvm/CodeGen/Passes.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/CodeGen/MachineFunctionPass.h"
18 #include "llvm/CodeGen/MachineLoopInfo.h"
19 #include "llvm/Support/Compiler.h"
20 #include "llvm/Support/Debug.h"
21 #include "llvm/Target/TargetInstrInfo.h"
22 #include "llvm/Target/TargetLowering.h"
23 #include "llvm/Target/TargetMachine.h"
24 using namespace llvm;
25
26 STATISTIC(NumLoopsAligned, "Number of loops aligned");
27 STATISTIC(NumIntraElim, "Number of intra loop branches eliminated");
28 STATISTIC(NumIntraMoved, "Number of intra loop branches moved");
29
30 namespace {
31 class CodePlacementOpt : public MachineFunctionPass {
32 const MachineLoopInfo *MLI;
33 const TargetInstrInfo *TII;
34 const TargetLowering *TLI;
35
36 public:
37 static char ID;
38 CodePlacementOpt() : MachineFunctionPass(ID) {}
39
40 virtual bool runOnMachineFunction(MachineFunction &MF);
41
42 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
43 AU.addRequired<MachineLoopInfo>();
44 AU.addPreservedID(MachineDominatorsID);
45 MachineFunctionPass::getAnalysisUsage(AU);
46 }
47
48 private:
49 bool HasFallthrough(MachineBasicBlock *MBB);
50 bool HasAnalyzableTerminator(MachineBasicBlock *MBB);
51 void Splice(MachineFunction &MF,
52 MachineFunction::iterator InsertPt,
53 MachineFunction::iterator Begin,
54 MachineFunction::iterator End);
55 bool EliminateUnconditionalJumpsToTop(MachineFunction &MF,
56 MachineLoop *L);
57 bool MoveDiscontiguousLoopBlocks(MachineFunction &MF,
58 MachineLoop *L);
59 bool OptimizeIntraLoopEdgesInLoopNest(MachineFunction &MF, MachineLoop *L);
60 bool OptimizeIntraLoopEdges(MachineFunction &MF);
61 bool AlignLoops(MachineFunction &MF);
62 bool AlignLoop(MachineFunction &MF, MachineLoop *L, unsigned Align);
63 };
64
65 char CodePlacementOpt::ID = 0;
66 } // end anonymous namespace
67
68 char &llvm::CodePlacementOptID = CodePlacementOpt::ID;
69 INITIALIZE_PASS(CodePlacementOpt, "code-placement",
70 "Code Placement Optimizer", false, false)
71
72 /// HasFallthrough - Test whether the given branch has a fallthrough, either as
73 /// a plain fallthrough or as a fallthrough case of a conditional branch.
74 ///
75 bool CodePlacementOpt::HasFallthrough(MachineBasicBlock *MBB) {
76 MachineBasicBlock *TBB = 0, *FBB = 0;
77 SmallVector<MachineOperand, 4> Cond;
78 if (TII->AnalyzeBranch(*MBB, TBB, FBB, Cond))
79 return false;
80 // This conditional branch has no fallthrough.
81 if (FBB)
82 return false;
83 // An unconditional branch has no fallthrough.
84 if (Cond.empty() && TBB)
85 return false;
86 // It has a fallthrough.
87 return true;
88 }
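For readers who don't have the AnalyzeBranch contract in their head: the (TBB, FBB, Cond) triple encodes the terminator's shape, and the decision tree above falls out of it. The following standalone sketch is a toy model (the BranchInfo struct and names are hypothetical, not LLVM API) that mirrors HasFallthrough:

#include <cassert>
#include <vector>

// Toy stand-in for the (TBB, FBB, Cond) triple AnalyzeBranch fills in.
struct BranchInfo {
  bool Analyzable;       // false: AnalyzeBranch failed on the block
  bool HasTBB;           // models "TBB != 0" (taken target known)
  bool HasFBB;           // models "FBB != 0" (explicit false-side target)
  std::vector<int> Cond; // non-empty: the terminator is conditional
};

// Mirrors CodePlacementOpt::HasFallthrough's decision tree.
bool hasFallthrough(const BranchInfo &BI) {
  if (!BI.Analyzable) return false;               // unknown: assume no fallthrough
  if (BI.HasFBB) return false;                    // both branch targets explicit
  if (BI.Cond.empty() && BI.HasTBB) return false; // unconditional jump
  return true;                                    // layout successor is reached
}

int main() {
  assert(hasFallthrough({true, false, false, {}}));  // no terminator: plain fallthrough
  assert(!hasFallthrough({true, true, false, {}}));  // "jmp L": no fallthrough
  assert(hasFallthrough({true, true, false, {1}}));  // "jcc L": false side falls through
  assert(!hasFallthrough({true, true, true, {1}}));  // "jcc L1; jmp L2": none
  return 0;
}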
89
90 /// HasAnalyzableTerminator - Test whether AnalyzeBranch will succeed on MBB.
91 /// This is called before major changes are begun to test whether it will be
92 /// possible to complete the changes.
93 ///
94 /// Target-specific code is hereby encouraged to make AnalyzeBranch succeed
95 /// whenever possible.
96 ///
97 bool CodePlacementOpt::HasAnalyzableTerminator(MachineBasicBlock *MBB) {
98 // Conservatively ignore EH landing pads.
99 if (MBB->isLandingPad()) return false;
100
101 // Aggressively handle return blocks and similar constructs.
102 if (MBB->succ_empty()) return true;
103
104 // Ask the target's AnalyzeBranch if it can handle this block.
105 MachineBasicBlock *TBB = 0, *FBB = 0;
106 SmallVector<MachineOperand, 4> Cond;
107 // Make sure the terminator is understood.
108 if (TII->AnalyzeBranch(*MBB, TBB, FBB, Cond))
109 return false;
110 // Ignore blocks which look like they might have EH-related control flow.
111 // AnalyzeBranch thinks it knows how to analyze such things, but it doesn't
112 // recognize the possibility of a control transfer through an unwind.
113 // Such blocks contain EH_LABEL instructions, however they may be in the
114 // middle of the block. Instead of searching for them, just check to see
115 // if the CFG disagrees with AnalyzeBranch.
116 if (1u + !Cond.empty() != MBB->succ_size())
117 return false;
118 // Make sure we have the option of reversing the condition.
119 if (!Cond.empty() && TII->ReverseBranchCondition(Cond))
120 return false;
121 return true;
122 }
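The 1u + !Cond.empty() != MBB->succ_size() guard encodes a simple invariant: under AnalyzeBranch's model, an analyzable block has exactly one successor for the taken-or-fallthrough path, plus one more if the terminator is conditional. Any extra CFG successor must come from something AnalyzeBranch cannot see, such as an unwind edge. A minimal worked example (toy code, not the LLVM API):

#include <cassert>
#include <cstddef>

// Successor count AnalyzeBranch's model predicts for an analyzable block.
std::size_t expectedSuccessors(bool IsConditional) {
  return 1u + (IsConditional ? 1u : 0u);
}

int main() {
  assert(expectedSuccessors(false) == 1); // fallthrough or "jmp L"
  assert(expectedSuccessors(true) == 2);  // "jcc L" plus fallthrough
  // A hidden EH (unwind) successor is invisible to the terminator, so the
  // CFG reports one more edge than the model predicts and the block is
  // conservatively rejected.
  std::size_t SuccSizeWithUnwindEdge = 3;
  assert(expectedSuccessors(true) != SuccSizeWithUnwindEdge);
  return 0;
}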
123
124 /// Splice - Move the sequence of instructions [Begin,End) to just before
125 /// InsertPt. Update branch instructions as needed to account for broken
126 /// fallthrough edges and to take advantage of newly exposed fallthrough
127 /// opportunities.
128 ///
129 void CodePlacementOpt::Splice(MachineFunction &MF,
130 MachineFunction::iterator InsertPt,
131 MachineFunction::iterator Begin,
132 MachineFunction::iterator End) {
133 assert(Begin != MF.begin() && End != MF.begin() && InsertPt != MF.begin() &&
134 "Splice can't change the entry block!");
135 MachineFunction::iterator OldBeginPrior = prior(Begin);
136 MachineFunction::iterator OldEndPrior = prior(End);
137
138 MF.splice(InsertPt, Begin, End);
139
140 prior(Begin)->updateTerminator();
141 OldBeginPrior->updateTerminator();
142 OldEndPrior->updateTerminator();
143 }
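Why exactly three updateTerminator() calls? Splicing [Begin, End) in front of InsertPt changes the layout successor of exactly three blocks: the old predecessor of Begin, the last moved block (the old predecessor of End), and the block that ends up immediately before the moved range. A standalone std::list sketch (toy block names, not LLVM code) makes this concrete:

#include <cassert>
#include <iterator>
#include <list>

int main() {
  // Layout before the splice: A B C D E F.
  std::list<char> Layout = {'A', 'B', 'C', 'D', 'E', 'F'};
  auto Begin = std::next(Layout.begin());      // B
  auto End = std::next(Layout.begin(), 3);     // D, the first block NOT moved
  auto InsertPt = std::prev(Layout.end());     // F
  Layout.splice(InsertPt, Layout, Begin, End); // move {B, C} in front of F
  assert((Layout == std::list<char>{'A', 'D', 'E', 'B', 'C', 'F'}));
  // Layout successors that changed, one per updateTerminator() call above:
  //   A (OldBeginPrior): used to precede B, now precedes D.
  //   C (OldEndPrior):   used to precede D, now precedes F.
  //   E (prior(Begin) after the move): used to precede F, now precedes B.
  return 0;
}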
144
145 /// EliminateUnconditionalJumpsToTop - Move blocks which unconditionally jump
146 /// to the loop top to the top of the loop so that they have a fall through.
147 /// This can introduce a branch on entry to the loop, but it can eliminate a
148 /// branch within the loop. See the @simple case in
149 /// test/CodeGen/X86/loop_blocks.ll for an example of this.
150 bool CodePlacementOpt::EliminateUnconditionalJumpsToTop(MachineFunction &MF,
151 MachineLoop *L) {
152 bool Changed = false;
153 MachineBasicBlock *TopMBB = L->getTopBlock();
154
155 bool BotHasFallthrough = HasFallthrough(L->getBottomBlock());
156
157 if (TopMBB == MF.begin() ||
158 HasAnalyzableTerminator(prior(MachineFunction::iterator(TopMBB)))) {
159 new_top:
160 for (MachineBasicBlock::pred_iterator PI = TopMBB->pred_begin(),
161 PE = TopMBB->pred_end(); PI != PE; ++PI) {
162 MachineBasicBlock *Pred = *PI;
163 if (Pred == TopMBB) continue;
164 if (HasFallthrough(Pred)) continue;
165 if (!L->contains(Pred)) continue;
166
167 // Verify that we can analyze all the loop entry edges before beginning
168 // any changes which will require us to be able to analyze them.
169 if (Pred == MF.begin())
170 continue;
171 if (!HasAnalyzableTerminator(Pred))
172 continue;
173 if (!HasAnalyzableTerminator(prior(MachineFunction::iterator(Pred))))
174 continue;
175
176 // Move the block.
177 DEBUG(dbgs() << "CGP: Moving blocks starting at BB#" << Pred->getNumber()
178 << " to top of loop.\n");
179 Changed = true;
180
181 // Move it and all the blocks that can reach it via fallthrough edges
182 // exclusively, to keep existing fallthrough edges intact.
183 MachineFunction::iterator Begin = Pred;
184 MachineFunction::iterator End = llvm::next(Begin);
185 while (Begin != MF.begin()) {
186 MachineFunction::iterator Prior = prior(Begin);
187 if (Prior == MF.begin())
188 break;
189 // Stop when a non-fallthrough edge is found.
190 if (!HasFallthrough(Prior))
191 break;
192 // Stop if a block which could fall-through out of the loop is found.
193 if (Prior->isSuccessor(End))
194 break;
195 // If we've reached the top, stop scanning.
196 if (Prior == MachineFunction::iterator(TopMBB)) {
197 // We know top currently has a fall through (because we just checked
198 // it) which would be lost if we do the transformation, so it isn't
199 // worthwhile to do the transformation unless it would expose a new
200 // fallthrough edge.
201 if (!Prior->isSuccessor(End))
202 goto next_pred;
203 // Otherwise we can stop scanning and proceed to move the blocks.
204 break;
205 }
206 // If we hit a switch or something complicated, don't move anything
207 // for this predecessor.
208 if (!HasAnalyzableTerminator(prior(MachineFunction::iterator(Prior))))
209 break;
210 // Ok, the block prior to Begin will be moved along with the rest.
211 // Extend the range to include it.
212 Begin = Prior;
213 ++NumIntraMoved;
214 }
215
216 // Move the blocks.
217 Splice(MF, TopMBB, Begin, End);
218
219 // Update TopMBB.
220 TopMBB = L->getTopBlock();
221
222 // We have a new loop top. Iterate on it. We shouldn't have to do this
223 // too many times if BranchFolding has done a reasonable job.
224 goto new_top;
225 next_pred:;
226 }
227 }
228
229 // If the loop previously didn't exit with a fall-through and it now does,
230 // we eliminated a branch.
231 if (Changed &&
232 !BotHasFallthrough &&
233 HasFallthrough(L->getBottomBlock())) {
234 ++NumIntraElim;
235 }
236
237 return Changed;
238 }
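Stripped of its profitability checks, the transformation is just a reordering of the function's block list: the predecessor that jumps to the loop top (plus any chain of blocks that falls through into it) is spliced directly above the old top block, so the backedge jump becomes a fallthrough. A toy sketch of that reordering under hypothetical block names; the guards above exist precisely because the move can also break fallthroughs at the splice points:

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

int main() {
  // "Pred" ends in an unconditional "jmp Top"; the loop is {Top, Body, Pred}.
  std::vector<std::string> Layout = {"Entry", "Top", "Body", "Pred", "Exit"};
  auto Top = std::find(Layout.begin(), Layout.end(), "Top");
  auto Pred = std::find(Layout.begin(), Layout.end(), "Pred");
  // Move [Pred, Pred+1) in front of Top: the backedge jump in Pred becomes
  // a fallthrough into the loop header.
  std::rotate(Top, Pred, Pred + 1);
  assert((Layout ==
          std::vector<std::string>{"Entry", "Pred", "Top", "Body", "Exit"}));
  return 0;
}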
239
240 /// MoveDiscontiguousLoopBlocks - Move any loop blocks that are not in the
241 /// portion of the loop contiguous with the header. This usually makes the loop
242 /// contiguous, provided that AnalyzeBranch can handle all the relevant
243 /// branching. See the @cfg_islands case in test/CodeGen/X86/loop_blocks.ll
244 /// for an example of this.
245 bool CodePlacementOpt::MoveDiscontiguousLoopBlocks(MachineFunction &MF,
246 MachineLoop *L) {
247 bool Changed = false;
248 MachineBasicBlock *TopMBB = L->getTopBlock();
249 MachineBasicBlock *BotMBB = L->getBottomBlock();
250
251 // Determine a position to move orphaned loop blocks to. If TopMBB is not
252 // entered via fallthrough and BotMBB is exited via fallthrough, prepend them
253 // to the top of the loop to avoid losing that fallthrough. Otherwise append
254 // them to the bottom, even if it previously had a fallthrough, on the theory
255 // that it's worth an extra branch to keep the loop contiguous.
256 MachineFunction::iterator InsertPt =
257 llvm::next(MachineFunction::iterator(BotMBB));
258 bool InsertAtTop = false;
259 if (TopMBB != MF.begin() &&
260 !HasFallthrough(prior(MachineFunction::iterator(TopMBB))) &&
261 HasFallthrough(BotMBB)) {
262 InsertPt = TopMBB;
263 InsertAtTop = true;
264 }
265
266 // Keep a record of which blocks are in the portion of the loop contiguous
267 // with the loop header.
268 SmallPtrSet<MachineBasicBlock *, 8> ContiguousBlocks;
269 for (MachineFunction::iterator I = TopMBB,
270 E = llvm::next(MachineFunction::iterator(BotMBB)); I != E; ++I)
271 ContiguousBlocks.insert(I);
272
273 // Find non-contiguous blocks and fix them.
274 if (InsertPt != MF.begin() && HasAnalyzableTerminator(prior(InsertPt)))
275 for (MachineLoop::block_iterator BI = L->block_begin(), BE = L->block_end();
276 BI != BE; ++BI) {
277 MachineBasicBlock *BB = *BI;
278
279 // Verify that we can analyze all the loop entry edges before beginning
280 // any changes which will require us to be able to analyze them.
281 if (!HasAnalyzableTerminator(BB))
282 continue;
283 if (!HasAnalyzableTerminator(prior(MachineFunction::iterator(BB))))
284 continue;
285
286 // If the layout predecessor is part of the loop, this block will be
287 // processed along with it. This keeps them in their relative order.
288 if (BB != MF.begin() &&
289 L->contains(prior(MachineFunction::iterator(BB))))
290 continue;
291
292 // Check to see if this block is already contiguous with the main
293 // portion of the loop.
294 if (!ContiguousBlocks.insert(BB))
295 continue;
296
297 // Move the block.
298 DEBUG(dbgs() << "CGP: Moving blocks starting at BB#" << BB->getNumber()
299 << " to be contiguous with loop.\n");
300 Changed = true;
301
302 // Process this block and all loop blocks contiguous with it, to keep
303 // them in their relative order.
304 MachineFunction::iterator Begin = BB;
305 MachineFunction::iterator End = llvm::next(MachineFunction::iterator(BB));
306 for (; End != MF.end(); ++End) {
307 if (!L->contains(End)) break;
308 if (!HasAnalyzableTerminator(End)) break;
309 ContiguousBlocks.insert(End);
310 ++NumIntraMoved;
311 }
312
313 // If we're inserting at the bottom of the loop, and the code we're
314 // moving originally had fall-through successors, bring the successors
315 // up with the loop blocks to preserve the fall-through edges.
316 if (!InsertAtTop)
317 for (; End != MF.end(); ++End) {
318 if (L->contains(End)) break;
319 if (!HasAnalyzableTerminator(End)) break;
320 if (!HasFallthrough(prior(End))) break;
321 }
322
323 // Move the blocks. This may invalidate TopMBB and/or BotMBB, but
324 // we don't need them anymore at this point.
325 Splice(MF, InsertPt, Begin, End);
326 }
327
328 return Changed;
329 }
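The bookkeeping above boils down to a membership test: every block in the run from getTopBlock() to getBottomBlock() starts out marked contiguous, and any loop block whose set insertion succeeds is an island that gets spliced next to the rest. A condensed standalone sketch (hypothetical block names; std::set stands in for SmallPtrSet, whose insert() returned a bool in this era):

#include <cassert>
#include <set>
#include <string>
#include <vector>

int main() {
  // The loop {H, A, B} is broken up by the non-loop block "Cold", so the
  // run contiguous with the header is only {H, A}.
  std::vector<std::string> Layout = {"H", "A", "Cold", "B", "Exit"};
  std::set<std::string> Loop = {"H", "A", "B"};
  std::set<std::string> Contiguous = {"H", "A"};
  std::vector<std::string> Islands;
  for (const std::string &BB : Layout)
    if (Loop.count(BB) && Contiguous.insert(BB).second)
      Islands.push_back(BB); // would be spliced next to the contiguous run
  assert((Islands == std::vector<std::string>{"B"}));
  return 0;
}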
330
331 /// OptimizeIntraLoopEdgesInLoopNest - Reposition loop blocks to minimize
332 /// intra-loop branching and to form contiguous loops.
333 ///
334 /// This code takes the approach of making minor changes to the existing
335 /// layout to fix specific loop-oriented problems. Also, it depends on
336 /// AnalyzeBranch, which can't understand complex control instructions.
337 ///
338 bool CodePlacementOpt::OptimizeIntraLoopEdgesInLoopNest(MachineFunction &MF,
339 MachineLoop *L) {
340 bool Changed = false;
341
342 // Do optimization for nested loops.
343 for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I)
344 Changed |= OptimizeIntraLoopEdgesInLoopNest(MF, *I);
345
346 // Do optimization for this loop.
347 Changed |= EliminateUnconditionalJumpsToTop(MF, L);
348 Changed |= MoveDiscontiguousLoopBlocks(MF, L);
349
350 return Changed;
351 }
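The recursion order matters: children are laid out before their parent, so by the time a loop is processed its inner loops are already internally contiguous. A toy model of that post-order walk (hypothetical names):

#include <cassert>
#include <string>
#include <vector>

struct Loop {
  std::string Name;
  std::vector<Loop> Children; // nested loops
};

// Mirrors OptimizeIntraLoopEdgesInLoopNest: children first, then this loop.
void visit(const Loop &L, std::vector<std::string> &Order) {
  for (const Loop &C : L.Children)
    visit(C, Order);
  Order.push_back(L.Name);
}

int main() {
  Loop Nest{"outer", {{"inner1", {}}, {"inner2", {{"innermost", {}}}}}};
  std::vector<std::string> Order;
  visit(Nest, Order);
  assert((Order ==
          std::vector<std::string>{"inner1", "innermost", "inner2", "outer"}));
  return 0;
}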
352
353 /// OptimizeIntraLoopEdges - Reposition loop blocks to minimize
354 /// intra-loop branching and to form contiguous loops.
355 ///
356 bool CodePlacementOpt::OptimizeIntraLoopEdges(MachineFunction &MF) {
357 bool Changed = false;
358
359 if (!TLI->shouldOptimizeCodePlacement())
360 return Changed;
361
362 // Do optimization for each loop in the function.
363 for (MachineLoopInfo::iterator I = MLI->begin(), E = MLI->end();
364 I != E; ++I)
365 if (!(*I)->getParentLoop())
366 Changed |= OptimizeIntraLoopEdgesInLoopNest(MF, *I);
367
368 return Changed;
369 }
370
371 /// AlignLoops - Align loop headers to target preferred alignments.
372 ///
373 bool CodePlacementOpt::AlignLoops(MachineFunction &MF) {
374 const Function *F = MF.getFunction();
375 if (F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
376 Attribute::OptimizeForSize))
377 return false;
378
379 unsigned Align = TLI->getPrefLoopAlignment();
380 if (!Align)
381 return false; // Don't care about loop alignment.
382
383 bool Changed = false;
384
385 for (MachineLoopInfo::iterator I = MLI->begin(), E = MLI->end();
386 I != E; ++I)
387 Changed |= AlignLoop(MF, *I, Align);
388
389 return Changed;
390 }
391
392 /// AlignLoop - Align loop headers to target preferred alignments.
393 ///
394 bool CodePlacementOpt::AlignLoop(MachineFunction &MF, MachineLoop *L,
395 unsigned Align) {
396 bool Changed = false;
397
398 // Do alignment for nested loops.
399 for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I)
400 Changed |= AlignLoop(MF, *I, Align);
401
402 L->getTopBlock()->setAlignment(Align);
403 Changed = true;
404 ++NumLoopsAligned;
405
406 return Changed;
407 }
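A note on units: block and loop alignments in this era of the API are expressed as the log2 of the byte boundary, which is why the X86 hunk further down reads "setPrefLoopAlignment(4); // 2^4 bytes." and why AlignLoop passes the raw value straight to setAlignment(). A one-line conversion check (toy code):

#include <cassert>

int main() {
  unsigned Align = 4;           // as returned by getPrefLoopAlignment()
  unsigned Bytes = 1u << Align; // the byte boundary the header is padded to
  assert(Bytes == 16);          // header address % 16 == 0 after layout
  return 0;
}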
408
409 bool CodePlacementOpt::runOnMachineFunction(MachineFunction &MF) {
410 MLI = &getAnalysis<MachineLoopInfo>();
411 if (MLI->empty())
412 return false; // No loops.
413
414 TLI = MF.getTarget().getTargetLowering();
415 TII = MF.getTarget().getInstrInfo();
416
417 bool Changed = OptimizeIntraLoopEdges(MF);
418
419 Changed |= AlignLoops(MF);
420
421 return Changed;
422 }
3838 static cl::opt<bool> DisableEarlyTailDup("disable-early-taildup", cl::Hidden,
3939 cl::desc("Disable pre-register allocation tail duplication"));
4040 static cl::opt<bool> DisableBlockPlacement("disable-block-placement",
41 cl::Hidden, cl::desc("Disable the probability-driven block placement, and "
42 "re-enable the old code placement pass"));
41 cl::Hidden, cl::desc("Disable probability-driven block placement"));
4342 static cl::opt<bool> EnableBlockPlacementStats("enable-block-placement-stats",
4443 cl::Hidden, cl::desc("Collect probability-driven block placement stats"));
45 static cl::opt<bool> DisableCodePlace("disable-code-place", cl::Hidden,
46 cl::desc("Disable code placement"));
4744 static cl::opt<bool> DisableSSC("disable-ssc", cl::Hidden,
4845 cl::desc("Disable Stack Slot Coloring"));
4946 static cl::opt<bool> DisableMachineDCE("disable-machine-dce", cl::Hidden,
148145 return applyDisable(TargetID, DisableEarlyTailDup);
149146
150147 if (StandardID == &MachineBlockPlacementID)
151 return applyDisable(TargetID, DisableCodePlace);
152
153 if (StandardID == &CodePlacementOptID)
154 return applyDisable(TargetID, DisableCodePlace);
148 return applyDisable(TargetID, DisableBlockPlacement);
155149
156150 if (StandardID == &StackSlotColoringID)
157151 return applyDisable(TargetID, DisableSSC);
741735
742736 /// Add standard basic block placement passes.
743737 void TargetPassConfig::addBlockPlacement() {
744 AnalysisID PassID = 0;
745 if (!DisableBlockPlacement) {
746 // MachineBlockPlacement is a new pass which subsumes the functionality of
747 // CodePlacementOpt. The old code placement pass can be restored by
748 // disabling block placement, but eventually it will be removed.
749 PassID = addPass(&MachineBlockPlacementID);
750 } else {
751 PassID = addPass(&CodePlacementOptID);
752 }
753 if (PassID) {
738 if (addPass(&MachineBlockPlacementID)) {
754739 // Run a separate pass to collect block placement statistics.
755740 if (EnableBlockPlacementStats)
756741 addPass(&MachineBlockPlacementStatsID);
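The simplified addBlockPlacement() relies on addPass(AnalysisID) returning a usable pass ID only when the pass survives its -disable-* override; the guard "if (addPass(&MachineBlockPlacementID))" then queues the statistics pass only when block placement actually runs. A toy model of that control flow (the return-zero-when-disabled behavior is an assumption inferred from the guard, and all names except the flags are hypothetical):

#include <cassert>

// Hypothetical stand-in for TargetPassConfig::addPass(AnalysisID): assumed
// to return the pass ID when the pass is scheduled and null when a
// -disable-* override (here, -disable-block-placement) suppressed it.
using AnalysisID = const char *;

AnalysisID addPass(AnalysisID ID, bool Disabled) {
  return Disabled ? nullptr : ID;
}

int main() {
  static const char MachineBlockPlacementID = 0;
  const bool EnableBlockPlacementStats = true;
  for (bool Disabled : {false, true}) {
    bool StatsQueued = false;
    if (addPass(&MachineBlockPlacementID, Disabled) && EnableBlockPlacementStats)
      StatsQueued = true; // stats only follow a real block placement run
    assert(StatsQueued == !Disabled);
  }
  return 0;
}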
709709 MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove = 8;
710710 MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize
711711 = MaxStoresPerMemmoveOptSize = 4;
712 BenefitFromCodePlacementOpt = false;
713712 UseUnderscoreSetJmp = false;
714713 UseUnderscoreLongJmp = false;
715714 SelectIsExpensive = false;
878878 // On ARM arguments smaller than 4 bytes are extended, so all arguments
879879 // are at least 4 bytes aligned.
880880 setMinStackArgumentAlignment(4);
881
882 BenefitFromCodePlacementOpt = true;
883881
884882 // Prefer likely predicted branches to selects on out-of-order cores.
885883 PredictableSelectIsExpensive = Subtarget->isLikeA9();
542542 MaxStoresPerMemmoveOptSize = 8;
543543
544544 setPrefFunctionAlignment(4);
545 BenefitFromCodePlacementOpt = true;
546545 }
547546 }
548547
13421342 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
13431343 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
13441344 setPrefLoopAlignment(4); // 2^4 bytes.
1345 BenefitFromCodePlacementOpt = true;
13461345
13471346 // Predictable cmov don't hurt on atom because it's in-order.
13481347 PredictableSelectIsExpensive = !Subtarget->isAtom();
None ; RUN: llc -mtriple=thumbv7-apple-ios -disable-code-place < %s | FileCheck %s
1 ; RUN: llc -mtriple=armv7-apple-ios -disable-code-place < %s | FileCheck %s
0 ; RUN: llc -mtriple=thumbv7-apple-ios -disable-block-placement < %s | FileCheck %s
1 ; RUN: llc -mtriple=armv7-apple-ios -disable-block-placement < %s | FileCheck %s
22
33 ; LLVM IR optimizers canonicalize icmp+select this way.
44 ; Make sure that TwoAddressInstructionPass can commute the corresponding
None ; RUN: llc -mtriple=thumbv7-apple-ios -disable-code-place < %s | FileCheck %s
1 ; RUN: llc -mtriple=armv7-apple-ios -disable-code-place < %s | FileCheck %s
0 ; RUN: llc -mtriple=thumbv7-apple-ios -disable-block-placement < %s | FileCheck %s
1 ; RUN: llc -mtriple=armv7-apple-ios -disable-block-placement < %s | FileCheck %s
22
33 ; LSR should compare against the post-incremented induction variable.
44 ; In this case, the immediate value is -2 which requires a cmn instruction.
None ; RUN: llc < %s -widen-vmovs -mcpu=cortex-a8 -verify-machineinstrs -disable-code-place | FileCheck %s
0 ; RUN: llc < %s -widen-vmovs -mcpu=cortex-a8 -verify-machineinstrs -disable-block-placement | FileCheck %s
11 target triple = "thumbv7-apple-ios"
22
33 ; The 1.0e+10 constant is loaded from the constant pool and kept in a register.
None ; RUN: llc < %s -march=x86 -disable-code-place | FileCheck %s
0 ; RUN: llc < %s -march=x86 -disable-block-placement | FileCheck %s
11 ;
22 ; Test RegistersDefinedFromSameValue. We have multiple copies of the same vreg:
33 ; while.body85.i:
None ; RUN: llc -asm-verbose=false -disable-branch-fold -disable-code-place -disable-tail-duplicate -march=x86-64 -mcpu=nehalem < %s | FileCheck %s
0 ; RUN: llc -asm-verbose=false -disable-branch-fold -disable-block-placement -disable-tail-duplicate -march=x86-64 -mcpu=nehalem < %s | FileCheck %s
11 ; rdar://7236213
22 ;
33 ; The scheduler's 2-address hack has been disabled, so there is