llvm.org GIT mirror llvm / f4afdfc
Build the Hopfield network incrementally when splitting global live ranges. It is common for large live ranges to have few basic blocks with register uses and many live-through blocks without any uses. This approach grows the Hopfield network incrementally around the use blocks, completely avoiding checking interference for some through blocks. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@129188 91177308-0d34-0410-b5e6-96231b3b80d8 Jakob Stoklund Olesen 9 years ago
7 changed file(s) with 195 addition(s) and 96 deletion(s). Raw diff Collapse all Expand all
1515 #ifndef LLVM_CODEGEN_EDGEBUNDLES_H
1616 #define LLVM_CODEGEN_EDGEBUNDLES_H
1717
18 #include "llvm/ADT/ArrayRef.h"
1819 #include "llvm/ADT/IntEqClasses.h"
1920 #include "llvm/CodeGen/MachineFunctionPass.h"
2021
2829 /// 2*BB->getNumber()+1 -> Outgoing bundle.
2930 IntEqClasses EC;
3031
32 /// Blocks - Map each bundle to a list of basic block numbers.
33 SmallVector<SmallVector<unsigned, 8>, 4> Blocks;
34
3135 public:
3236 static char ID;
3337 EdgeBundles() : MachineFunctionPass(ID) {}
3842
3943 /// getNumBundles - Return the total number of bundles in the CFG.
4044 unsigned getNumBundles() const { return EC.getNumClasses(); }
45
46 /// getBlocks - Return an array of blocks that are connected to Bundle.
47 ArrayRef<unsigned> getBlocks(unsigned Bundle) { return Blocks[Bundle]; }
4148
4249 /// getMachineFunction - Return the last machine function computed.
4350 const MachineFunction *getMachineFunction() const { return MF; }
5252 EC.compress();
5353 if (ViewEdgeBundles)
5454 view();
55
56 // Compute the reverse mapping.
57 Blocks.clear();
58 Blocks.resize(getNumBundles());
59
60 for (unsigned i = 0, e = MF->getNumBlockIDs(); i != e; ++i) {
61 unsigned b0 = getBundle(i, 0);
62 unsigned b1 = getBundle(i, 1);
63 Blocks[b0].push_back(i);
64 if (b1 != b0)
65 Blocks[b1].push_back(i);
66 }
67
5568 return false;
5669 }
5770
8194 O << "}\n";
8295 return O;
8396 }
84
85
2121 #include "SpillPlacement.h"
2222 #include "SplitKit.h"
2323 #include "VirtRegMap.h"
24 #include "llvm/ADT/SparseBitVector.h"
2425 #include "llvm/ADT/Statistic.h"
2526 #include "llvm/Analysis/AliasAnalysis.h"
2627 #include "llvm/Function.h"
125126 /// All basic blocks where the current register has uses.
126127 SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;
127128
128 /// All basic blocks where the current register is live-through and
129 /// interference free.
130 SmallVector<unsigned, 8> TransparentBlocks;
129 /// Live-through blocks that have already been added to SpillPlacer.
130 SparseBitVector<> ActiveThroughBlocks;
131131
132132 /// Global live range splitting candidate info.
133133 struct GlobalSplitCandidate {
172172 void LRE_WillShrinkVirtReg(unsigned);
173173 void LRE_DidCloneVirtReg(unsigned, unsigned);
174174
175 bool addSplitConstraints(unsigned, float&);
175 bool addSplitConstraints(InterferenceCache::Cursor, float&);
176 void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
177 void growRegion(InterferenceCache::Cursor);
176178 float calcGlobalSplitCost(unsigned, const BitVector&);
177179 void splitAroundRegion(LiveInterval&, unsigned, const BitVector&,
178180 SmallVectorImpl<LiveInterval*>&);
416418 /// interference pattern in Physreg and its aliases. Add the constraints to
417419 /// SpillPlacement and return the static cost of this split in Cost, assuming
418420 /// that all preferences in SplitConstraints are met.
419 /// If it is evident that no bundles will be live, abort early and return false.
420 bool RAGreedy::addSplitConstraints(unsigned PhysReg, float &Cost) {
421 InterferenceCache::Cursor Intf(IntfCache, PhysReg);
421 /// Return false if there are no bundles with positive bias.
422 bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
423 float &Cost) {
422424 ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
423425
424426 // Reset interference dependent info.
463465 if (Ins)
464466 StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
465467 }
468 Cost = StaticCost;
466469
467470 // Add constraints for use-blocks. Note that these are the only constraints
468471 // that may add a positive bias, it is downhill from here.
469472 SpillPlacer->addConstraints(SplitConstraints);
470 if (SpillPlacer->getPositiveNodes() == 0)
471 return false;
472
473 Cost = StaticCost;
474
475 // Now handle the live-through blocks without uses. These can only add
476 // negative bias, so we can abort whenever there are no more positive nodes.
477 // Compute constraints for a group of 8 blocks at a time.
473 return SpillPlacer->scanActiveBundles();
474 }
475
476
477 /// addThroughConstraints - Add constraints and links to SpillPlacer from the
478 /// live-through blocks in Blocks.
479 void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
480 ArrayRef<unsigned> Blocks) {
478481 const unsigned GroupSize = 8;
479482 SpillPlacement::BlockConstraint BCS[GroupSize];
480 unsigned B = 0;
481 TransparentBlocks.clear();
482
483 ArrayRef<unsigned> ThroughBlocks = SA->getThroughBlocks();
484 for (unsigned i = 0; i != ThroughBlocks.size(); ++i) {
485 unsigned Number = ThroughBlocks[i];
483 unsigned TBS[GroupSize];
484 unsigned B = 0, T = 0;
485
486 for (unsigned i = 0; i != Blocks.size(); ++i) {
487 unsigned Number = Blocks[i];
488 Intf.moveToBlock(Number);
489
490 if (!Intf.hasInterference()) {
491 assert(T < GroupSize && "Array overflow");
492 TBS[T] = Number;
493 if (++T == GroupSize) {
494 SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
495 T = 0;
496 }
497 continue;
498 }
499
486500 assert(B < GroupSize && "Array overflow");
487501 BCS[B].Number = Number;
488 Intf.moveToBlock(Number);
489
490 if (!Intf.hasInterference()) {
491 TransparentBlocks.push_back(Number);
492 continue;
493 }
494502
495503 // Interference for the live-in value.
496504 if (Intf.first() <= Indexes->getMBBStartIdx(Number))
508516 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
509517 SpillPlacer->addConstraints(Array);
510518 B = 0;
511 // Abort early when all hope is lost.
512 if (SpillPlacer->getPositiveNodes() == 0)
513 return false;
514519 }
515520 }
516521
517522 ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
518523 SpillPlacer->addConstraints(Array);
519 if (SpillPlacer->getPositiveNodes() == 0)
520 return false;
521
522 // There is still some positive bias. Add all the links.
523 SpillPlacer->addLinks(TransparentBlocks);
524 return true;
525 }
526
524 SpillPlacer->addLinks(ArrayRef<unsigned>(TBS, T));
525 }
526
527 void RAGreedy::growRegion(InterferenceCache::Cursor Intf) {
528 // Keep track of through blocks that have already been added to SpillPlacer.
529 SparseBitVector<> Added;
530 SmallVector<unsigned, 8> ThroughBlocks;
531 #ifndef NDEBUG
532 unsigned Visited = 0;
533 #endif
534 for (;;) {
535 ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
536 if (NewBundles.empty())
537 break;
538 // Find new through blocks in the periphery of PrefRegBundles.
539 for (int i = 0, e = NewBundles.size(); i != e; ++i) {
540 unsigned Bundle = NewBundles[i];
541 // Look at all blocks connected to Bundle in the full graph.
542 ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
543 for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
544 I != E; ++I) {
545 unsigned Block = *I;
546 if (!SA->isThroughBlock(Block) || !Added.test_and_set(Block))
547 continue;
548 // This is a new through block. Add it to SpillPlacer later.
549 ThroughBlocks.push_back(Block);
550 #ifndef NDEBUG
551 ++Visited;
552 #endif
553 }
554 }
555 // Any new blocks to add?
556 if (!ThroughBlocks.empty()) {
557 addThroughConstraints(Intf, ThroughBlocks);
558 ThroughBlocks.clear();
559 }
560 // Perhaps iterating can enable more bundles?
561 SpillPlacer->iterate();
562 }
563
564 // Remember the relevant set of through blocks for splitAroundRegion().
565 ActiveThroughBlocks |= Added;
566 DEBUG(dbgs() << ", v=" << Visited);
567 }
527568
528569 /// calcGlobalSplitCost - Return the global split cost of following the split
529570 /// pattern in LiveBundles. This cost should be added to the local cost of the
549590 }
550591
551592 InterferenceCache::Cursor Intf(IntfCache, PhysReg);
552 ArrayRef<unsigned> ThroughBlocks = SA->getThroughBlocks();
553 SplitConstraints.resize(UseBlocks.size() + ThroughBlocks.size());
554 for (unsigned i = 0; i != ThroughBlocks.size(); ++i) {
555 unsigned Number = ThroughBlocks[i];
593 for (SparseBitVector<>::iterator I = ActiveThroughBlocks.begin(),
594 E = ActiveThroughBlocks.end(); I != E; ++I) {
595 unsigned Number = *I;
556596 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
557597 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
558598 if (!RegIn && !RegOut)
765805 }
766806
767807 // Handle live-through blocks.
768 ArrayRef<unsigned> ThroughBlocks = SA->getThroughBlocks();
769 for (unsigned i = 0; i != ThroughBlocks.size(); ++i) {
770 unsigned Number = ThroughBlocks[i];
808 for (SparseBitVector<>::iterator I = ActiveThroughBlocks.begin(),
809 E = ActiveThroughBlocks.end(); I != E; ++I) {
810 unsigned Number = *I;
771811 bool RegIn = LiveBundles[Bundles->getBundle(Number, 0)];
772812 bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
773813 DEBUG(dbgs() << "Live through BB#" << Number << '\n');
803843 BitVector LiveBundles, BestBundles;
804844 float BestCost = 0;
805845 unsigned BestReg = 0;
846 ActiveThroughBlocks.clear();
806847
807848 Order.rewind();
808849 for (unsigned Cand = 0; unsigned PhysReg = Order.next(); ++Cand) {
812853
813854 SpillPlacer->prepare(LiveBundles);
814855 float Cost;
815 if (!addSplitConstraints(PhysReg, Cost)) {
816 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bias\n");
817 continue;
818 }
819 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tbiased = "
820 << SpillPlacer->getPositiveNodes() << ", static = " << Cost);
856 InterferenceCache::Cursor Intf(IntfCache, PhysReg);
857 if (!addSplitConstraints(Intf, Cost)) {
858 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
859 continue;
860 }
861 DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
821862 if (BestReg && Cost >= BestCost) {
822863 DEBUG(dbgs() << " worse than " << PrintReg(BestReg, TRI) << '\n');
823864 continue;
824865 }
866 growRegion(Intf);
825867
826868 SpillPlacer->finish();
827869
134134
135135 /// addBias - Bias this node from an ingoing[0] or outgoing[1] link.
136136 /// Return the change to the total number of positive biases.
137 int addBias(float w, bool out) {
137 void addBias(float w, bool out) {
138138 // Normalize w relative to all connected blocks from that direction.
139139 w *= Scale[out];
140 int Before = Bias > 0;
141140 Bias += w;
142 int After = Bias > 0;
143 return After - Before;
144141 }
145142
146143 /// update - Recompute Value from Bias and Links. Return true when node
229226 if (I->Entry != DontCare) {
230227 unsigned ib = bundles->getBundle(I->Number, 0);
231228 activate(ib);
232 PositiveNodes += nodes[ib].addBias(Freq * Bias[I->Entry], 1);
229 nodes[ib].addBias(Freq * Bias[I->Entry], 1);
233230 }
234231
235232 // Live-out from block?
236233 if (I->Exit != DontCare) {
237234 unsigned ob = bundles->getBundle(I->Number, 1);
238235 activate(ob);
239 PositiveNodes += nodes[ob].addBias(Freq * Bias[I->Exit], 0);
236 nodes[ob].addBias(Freq * Bias[I->Exit], 0);
240237 }
241238 }
242239 }
253250 continue;
254251 activate(ib);
255252 activate(ob);
253 if (nodes[ib].Links.empty() && !nodes[ib].mustSpill())
254 Linked.push_back(ib);
255 if (nodes[ob].Links.empty() && !nodes[ob].mustSpill())
256 Linked.push_back(ob);
256257 float Freq = getBlockFrequency(Number);
257258 nodes[ib].addLink(ob, Freq, 1);
258259 nodes[ob].addLink(ib, Freq, 0);
259260 }
260261 }
261262
263 bool SpillPlacement::scanActiveBundles() {
264 Linked.clear();
265 RecentPositive.clear();
266 for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n)) {
267 nodes[n].update(nodes);
268 // A node that must spill, or a node without any links is not going to
269 // change its value ever again, so exclude it from iterations.
270 if (nodes[n].mustSpill())
271 continue;
272 if (!nodes[n].Links.empty())
273 Linked.push_back(n);
274 if (nodes[n].preferReg())
275 RecentPositive.push_back(n);
276 }
277 return !RecentPositive.empty();
278 }
279
262280 /// iterate - Repeatedly update the Hopfield nodes until stability or the
263281 /// maximum number of iterations is reached.
264282 /// @param Linked - Numbers of linked nodes that need updating.
265 void SpillPlacement::iterate(const SmallVectorImpl<unsigned> &Linked) {
283 void SpillPlacement::iterate() {
284 // First update the recently positive nodes. They have likely received new
285 // negative bias that will turn them off.
286 while (!RecentPositive.empty())
287 nodes[RecentPositive.pop_back_val()].update(nodes);
288
266289 if (Linked.empty())
267290 return;
268291
278301 for (SmallVectorImpl<unsigned>::const_reverse_iterator I =
279302 llvm::next(Linked.rbegin()), E = Linked.rend(); I != E; ++I) {
280303 unsigned n = *I;
281 bool C = nodes[n].update(nodes);
282 Changed |= C;
283 }
284 if (!Changed)
304 if (nodes[n].update(nodes)) {
305 Changed = true;
306 if (nodes[n].preferReg())
307 RecentPositive.push_back(n);
308 }
309 }
310 if (!Changed || !RecentPositive.empty())
285311 return;
286312
287313 // Scan forwards, skipping the first node which was just updated.
289315 for (SmallVectorImpl<unsigned>::const_iterator I =
290316 llvm::next(Linked.begin()), E = Linked.end(); I != E; ++I) {
291317 unsigned n = *I;
292 bool C = nodes[n].update(nodes);
293 Changed |= C;
294 }
295 if (!Changed)
318 if (nodes[n].update(nodes)) {
319 Changed = true;
320 if (nodes[n].preferReg())
321 RecentPositive.push_back(n);
322 }
323 }
324 if (!Changed || !RecentPositive.empty())
296325 return;
297326 }
298327 }
299328
300329 void SpillPlacement::prepare(BitVector &RegBundles) {
330 Linked.clear();
331 RecentPositive.clear();
301332 // Reuse RegBundles as our ActiveNodes vector.
302333 ActiveNodes = &RegBundles;
303334 ActiveNodes->clear();
304335 ActiveNodes->resize(bundles->getNumBundles());
305 PositiveNodes = 0;
306336 }
307337
308338 bool
309339 SpillPlacement::finish() {
310340 assert(ActiveNodes && "Call prepare() first");
311 // Update all active nodes, and find the ones that are actually linked to
312 // something so their value may change when iterating.
313 SmallVector<unsigned, 8> Linked;
314 for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n)) {
315 nodes[n].update(nodes);
316 // A node that must spill, or a node without any links is not going to
317 // change its value ever again, so exclude it from iterations.
318 if (!nodes[n].Links.empty() && !nodes[n].mustSpill())
319 Linked.push_back(n);
320 }
321
322 // Iterate the network to convergence.
323 iterate(Linked);
324341
325342 // Write preferences back to ActiveNodes.
326343 bool Perfect = true;
4848 // caller.
4949 BitVector *ActiveNodes;
5050
51 // The number of active nodes with a positive bias.
52 unsigned PositiveNodes;
51 // Nodes with active links. Populated by scanActiveBundles.
52 SmallVector<unsigned, 8> Linked;
53
54 // Nodes that went positive during the last call to scanActiveBundles or
55 // iterate.
56 SmallVector<unsigned, 8> RecentPositive;
5357
5458 // Block frequencies are computed once. Indexed by block number.
5559 SmallVector<float, 4> BlockFrequency;
9498 /// addLinks - Add transparent blocks with the given numbers.
9599 void addLinks(ArrayRef<unsigned> Links);
96100
97 /// getPositiveNodes - Return the total number of graph nodes with a positive
98 /// bias after adding constraints.
99 unsigned getPositiveNodes() const { return PositiveNodes; }
101 /// scanActiveBundles - Perform an initial scan of all bundles activated by
102 /// addConstraints and addLinks, updating their state. Add all the bundles
103 /// that now prefer a register to RecentPositive.
104 /// Prepare internal data structures for iterate.
105 /// Return true if there are any positive nodes.
106 bool scanActiveBundles();
107
108 /// iterate - Update the network iteratively until convergence, or new bundles
109 /// are found.
110 void iterate();
111
112 /// getRecentPositive - Return an array of bundles that became positive during
113 /// the previous call to scanActiveBundles or iterate.
114 ArrayRef<unsigned> getRecentPositive() { return RecentPositive; }
100115
101116 /// finish - Compute the optimal spill code placement given the
102117 /// constraints. No MustSpill constraints will be violated, and the smallest
119134 virtual void releaseMemory();
120135
121136 void activate(unsigned);
122 void iterate(const SmallVectorImpl<unsigned>&);
123137 };
124138
125139 } // end namespace llvm
131131 DEBUG(dbgs() << "Analyze counted "
132132 << UseSlots.size() << " instrs in "
133133 << UseBlocks.size() << " blocks, through "
134 << ThroughBlocks.size() << " blocks.\n");
134 << NumThroughBlocks << " blocks.\n");
135135 }
136136
137137 /// calcLiveBlockInfo - Fill the LiveBlocks array with information about blocks
138138 /// where CurLI is live.
139139 bool SplitAnalysis::calcLiveBlockInfo() {
140 ThroughBlocks.resize(MF.getNumBlockIDs());
141 NumThroughBlocks = 0;
140142 if (CurLI->empty())
141143 return true;
142144
192194 BI.LiveThrough = !hasGap && BI.LiveIn && BI.LiveOut;
193195 if (Uses)
194196 UseBlocks.push_back(BI);
195 else
196 ThroughBlocks.push_back(BI.MBB->getNumber());
197
197 else {
198 ++NumThroughBlocks;
199 ThroughBlocks.set(BI.MBB->getNumber());
200 }
198201 // FIXME: This should never happen. The live range stops or starts without a
199202 // corresponding use. An earlier pass did something wrong.
200203 if (!BI.LiveThrough && !Uses)
8888 SmallVector<BlockInfo, 8> UseBlocks;
8989
9090 /// ThroughBlocks - Block numbers where CurLI is live through without uses.
91 SmallVector<unsigned, 8> ThroughBlocks;
91 BitVector ThroughBlocks;
92
93 /// NumThroughBlocks - Number of live-through blocks.
94 unsigned NumThroughBlocks;
9295
9396 SlotIndex computeLastSplitPoint(unsigned Num);
9497
134137 /// where CurLI has uses.
135138 ArrayRef<BlockInfo> getUseBlocks() { return UseBlocks; }
136139
137 /// getThroughBlocks - Return an array of block numbers where CurLI is live
138 /// through without uses.
139 ArrayRef<unsigned> getThroughBlocks() { return ThroughBlocks; }
140 /// getNumThroughBlocks - Return the number of through blocks.
141 unsigned getNumThroughBlocks() const { return NumThroughBlocks; }
142
143 /// isThroughBlock - Return true if CurLI is live through MBB without uses.
144 bool isThroughBlock(unsigned MBB) const { return ThroughBlocks.test(MBB); }
140145
141146 typedef SmallPtrSet<const MachineBasicBlock*, 16> BlockPtrSet;
142147