llvm.org GIT mirror llvm / afe77f3
Introduce a new data structure, the SparseMultiSet, and changes to the MI scheduler to use it. A SparseMultiSet adds multiset behavior to SparseSet, while retaining SparseSet's desirable properties. Essentially, SparseMultiSet provides multiset behavior by storing its dense data in doubly linked lists that are inlined into the dense vector. This allows it to provide good data locality as well as vector-like constant-time clear() and fast constant time find(), insert(), and erase(). It also allows SparseMultiSet to have a builtin recycler rather than keeping SparseSet's behavior of always swapping upon removal, which allows it to preserve more iterators. It's often a better alternative to a SparseSet of a growable container or vector-of-vector. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@173064 91177308-0d34-0410-b5e6-96231b3b80d8 Michael Ilseman 6 years ago
5 changed file(s) with 806 addition(s) and 93 deletion(s). Raw diff Collapse all Expand all
0 //===--- llvm/ADT/SparseMultiSet.h - Sparse multiset ------------*- C++ -*-===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the SparseMultiSet class, which adds multiset behavior to
10 // the SparseSet.
11 //
12 // A sparse multiset holds a small number of objects identified by integer keys
13 // from a moderately sized universe. The sparse multiset uses more memory than
14 // other containers in order to provide faster operations. Any key can map to
15 // multiple values. A SparseMultiSetNode class is provided, which serves as a
16 // convenient base class for the contents of a SparseMultiSet.
17 //
18 //===----------------------------------------------------------------------===//
19
20 #ifndef LLVM_ADT_SPARSEMULTISET_H
21 #define LLVM_ADT_SPARSEMULTISET_H
22
23 #include "llvm/ADT/SparseSet.h"
24
25 namespace llvm {
26
27 /// Fast multiset implementation for objects that can be identified by small
28 /// unsigned keys.
29 ///
30 /// SparseMultiSet allocates memory proportional to the size of the key
31 /// universe, so it is not recommended for building composite data structures.
32 /// It is useful for algorithms that require a single set with fast operations.
33 ///
34 /// Compared to DenseSet and DenseMap, SparseMultiSet provides constant-time
35 /// fast clear() as fast as a vector. The find(), insert(), and erase()
36 /// operations are all constant time, and typically faster than a hash table.
37 /// The iteration order doesn't depend on numerical key values, it only depends
38 /// on the order of insert() and erase() operations. Iteration order is the
39 /// insertion order. Iteration is only provided over elements of equivalent
40 /// keys, but iterators are bidirectional.
41 ///
42 /// Compared to BitVector, SparseMultiSet uses 8x-40x more memory, but
43 /// offers constant-time clear() and size() operations as well as fast iteration
44 /// independent on the size of the universe.
45 ///
46 /// SparseMultiSet contains a dense vector holding all the objects and a sparse
47 /// array holding indexes into the dense vector. Most of the memory is used by
48 /// the sparse array which is the size of the key universe. The SparseT template
49 /// parameter provides a space/speed tradeoff for sets holding many elements.
50 ///
51 /// When SparseT is uint32_t, find() only touches up to 3 cache lines, but the
52 /// sparse array uses 4 x Universe bytes.
53 ///
54 /// When SparseT is uint8_t (the default), find() touches up to 3+[N/256] cache
55 /// lines, but the sparse array is 4x smaller. N is the number of elements in
56 /// the set.
57 ///
58 /// For sets that may grow to thousands of elements, SparseT should be set to
59 /// uint16_t or uint32_t.
60 ///
61 /// Multiset behavior is provided by providing doubly linked lists for values
62 /// that are inlined in the dense vector. SparseMultiSet is a good choice when
63 /// one desires a growable number of entries per key, as it will retain the
64 /// SparseSet algorithmic properties despite being growable. Thus, it is often a
65 /// better choice than a SparseSet of growable containers or a vector of
66 /// vectors. SparseMultiSet also keeps iterators valid after erasure (provided
67 /// the iterators don't point to the element erased), allowing for more
68 /// intuitive and fast removal.
69 ///
70 /// @tparam ValueT The type of objects in the set.
71 /// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT.
72 /// @tparam SparseT An unsigned integer type. See above.
73 ///
74 template
75 typename KeyFunctorT = llvm::identity,
76 typename SparseT = uint8_t>
77 class SparseMultiSet {
78 /// The actual data that's stored, as a doubly-linked list implemented via
79 /// indices into the DenseVector. The doubly linked list is implemented
80 /// circular in Prev indices, and INVALID-terminated in Next indices. This
81 /// provides efficient access to list tails. These nodes can also be
82 /// tombstones, in which case they are actually nodes in a single-linked
83 /// freelist of recyclable slots.
84 struct SMSNode {
85 static const unsigned INVALID = ~0U;
86
87 ValueT Data;
88 unsigned Prev;
89 unsigned Next;
90
91 SMSNode(ValueT D, unsigned P, unsigned N) : Data(D), Prev(P), Next(N) { }
92
93 /// List tails have invalid Nexts.
94 bool isTail() const {
95 return Next == INVALID;
96 }
97
98 /// Whether this node is a tombstone node, and thus is in our freelist.
99 bool isTombstone() const {
100 return Prev == INVALID;
101 }
102
103 /// Since the list is circular in Prev, all non-tombstone nodes have a valid
104 /// Prev.
105 bool isValid() const { return Prev != INVALID; }
106 };
107
108 typedef typename KeyFunctorT::argument_type KeyT;
109 typedef SmallVector DenseT;
110 DenseT Dense;
111 SparseT *Sparse;
112 unsigned Universe;
113 KeyFunctorT KeyIndexOf;
114 SparseSetValFunctor ValIndexOf;
115
116 /// We have a built-in recycler for reusing tombstone slots. This recycler
117 /// puts a singly-linked free list into tombstone slots, allowing us quick
118 /// erasure, iterator preservation, and dense size.
119 unsigned FreelistIdx;
120 unsigned NumFree;
121
122 unsigned sparseIndex(const ValueT &Val) const {
123 assert(ValIndexOf(Val) < Universe &&
124 "Invalid key in set. Did object mutate?");
125 return ValIndexOf(Val);
126 }
127 unsigned sparseIndex(const SMSNode &N) const { return sparseIndex(N.Data); }
128
129 // Disable copy construction and assignment.
130 // This data structure is not meant to be used that way.
131 SparseMultiSet(const SparseMultiSet&) LLVM_DELETED_FUNCTION;
132 SparseMultiSet &operator=(const SparseMultiSet&) LLVM_DELETED_FUNCTION;
133
134 /// Whether the given entry is the head of the list. List heads's previous
135 /// pointers are to the tail of the list, allowing for efficient access to the
136 /// list tail. D must be a valid entry node.
137 bool isHead(const SMSNode &D) const {
138 assert(D.isValid() && "Invalid node for head");
139 return Dense[D.Prev].isTail();
140 }
141
142 /// Whether the given entry is a singleton entry, i.e. the only entry with
143 /// that key.
144 bool isSingleton(const SMSNode &N) const {
145 assert(N.isValid() && "Invalid node for singleton");
146 // Is N its own predecessor?
147 return &Dense[N.Prev] == &N;
148 }
149
150 /// Add in the given SMSNode. Uses a free entry in our freelist if
151 /// available. Returns the index of the added node.
152 unsigned addValue(const ValueT& V, unsigned Prev, unsigned Next) {
153 if (NumFree == 0) {
154 Dense.push_back(SMSNode(V, Prev, Next));
155 return Dense.size() - 1;
156 }
157
158 // Peel off a free slot
159 unsigned Idx = FreelistIdx;
160 unsigned NextFree = Dense[Idx].Next;
161 assert(Dense[Idx].isTombstone() && "Non-tombstone free?");
162
163 Dense[Idx] = SMSNode(V, Prev, Next);
164 FreelistIdx = NextFree;
165 --NumFree;
166 return Idx;
167 }
168
169 /// Make the current index a new tombstone. Pushes it onto the freelist.
170 void makeTombstone(unsigned Idx) {
171 Dense[Idx].Prev = SMSNode::INVALID;
172 Dense[Idx].Next = FreelistIdx;
173 FreelistIdx = Idx;
174 ++NumFree;
175 }
176
177 public:
178 typedef ValueT value_type;
179 typedef ValueT &reference;
180 typedef const ValueT &const_reference;
181 typedef ValueT *pointer;
182 typedef const ValueT *const_pointer;
183
184 SparseMultiSet()
185 : Sparse(0), Universe(0), FreelistIdx(SMSNode::INVALID), NumFree(0) { }
186
187 ~SparseMultiSet() { free(Sparse); }
188
189 /// Set the universe size which determines the largest key the set can hold.
190 /// The universe must be sized before any elements can be added.
191 ///
192 /// @param U Universe size. All object keys must be less than U.
193 ///
194 void setUniverse(unsigned U) {
195 // It's not hard to resize the universe on a non-empty set, but it doesn't
196 // seem like a likely use case, so we can add that code when we need it.
197 assert(empty() && "Can only resize universe on an empty map");
198 // Hysteresis prevents needless reallocations.
199 if (U >= Universe/4 && U <= Universe)
200 return;
201 free(Sparse);
202 // The Sparse array doesn't actually need to be initialized, so malloc
203 // would be enough here, but that will cause tools like valgrind to
204 // complain about branching on uninitialized data.
205 Sparse = reinterpret_cast(calloc(U, sizeof(SparseT)));
206 Universe = U;
207 }
208
209 /// Our iterators are iterators over the collection of objects that share a
210 /// key.
211 template
212 class iterator_base : public std::iterator
213 ValueT> {
214 friend class SparseMultiSet;
215 SMSPtrTy SMS;
216 unsigned Idx;
217 unsigned SparseIdx;
218
219 iterator_base(SMSPtrTy P, unsigned I, unsigned SI)
220 : SMS(P), Idx(I), SparseIdx(SI) { }
221
222 /// Whether our iterator has fallen outside our dense vector.
223 bool isEnd() const {
224 if (Idx == SMSNode::INVALID)
225 return true;
226
227 assert(Idx < SMS->Dense.size() && "Out of range, non-INVALID Idx?");
228 return false;
229 }
230
231 /// Whether our iterator is properly keyed, i.e. the SparseIdx is valid
232 bool isKeyed() const { return SparseIdx < SMS->Universe; }
233
234 unsigned Prev() const { return SMS->Dense[Idx].Prev; }
235 unsigned Next() const { return SMS->Dense[Idx].Next; }
236
237 void setPrev(unsigned P) { SMS->Dense[Idx].Prev = P; }
238 void setNext(unsigned N) { SMS->Dense[Idx].Next = N; }
239
240 public:
241 typedef std::iterator super;
242 typedef typename super::value_type value_type;
243 typedef typename super::difference_type difference_type;
244 typedef typename super::pointer pointer;
245 typedef typename super::reference reference;
246
247 iterator_base(const iterator_base &RHS)
248 : SMS(RHS.SMS), Idx(RHS.Idx), SparseIdx(RHS.SparseIdx) { }
249
250 const iterator_base &operator=(const iterator_base &RHS) {
251 SMS = RHS.SMS;
252 Idx = RHS.Idx;
253 SparseIdx = RHS.SparseIdx;
254 return *this;
255 }
256
257 reference operator*() const {
258 assert(isKeyed() && SMS->sparseIndex(SMS->Dense[Idx].Data) == SparseIdx &&
259 "Dereferencing iterator of invalid key or index");
260
261 return SMS->Dense[Idx].Data;
262 }
263 pointer operator->() const { return &operator*(); }
264
265 /// Comparison operators
266 bool operator==(const iterator_base &RHS) const {
267 // end compares equal
268 if (SMS == RHS.SMS && Idx == RHS.Idx) {
269 assert(isEnd() || SparseIdx == RHS.SparseIdx &&
270 "Same dense entry, but different keys?");
271 return true;
272 }
273
274 return false;
275 }
276
277 bool operator!=(const iterator_base &RHS) const {
278 return !operator==(RHS);
279 }
280
281 /// Increment and decrement operators
282 iterator_base &operator--() { // predecrement - Back up
283 assert(isKeyed() && "Decrementing an invalid iterator");
284 assert(isEnd() || !SMS->isHead(SMS->Dense[Idx]) &&
285 "Decrementing head of list");
286
287 // If we're at the end, then issue a new find()
288 if (isEnd())
289 Idx = SMS->findIndex(SparseIdx).Prev();
290 else
291 Idx = Prev();
292
293 return *this;
294 }
295 iterator_base &operator++() { // preincrement - Advance
296 assert(!isEnd() && isKeyed() && "Incrementing an invalid/end iterator");
297 Idx = Next();
298 return *this;
299 }
300 iterator_base operator--(int) { // postdecrement
301 iterator_base I(*this);
302 --*this;
303 return I;
304 }
305 iterator_base operator++(int) { // postincrement
306 iterator_base I(*this);
307 ++*this;
308 return I;
309 }
310 };
311 typedef iterator_base iterator;
312 typedef iterator_base const_iterator;
313
314 // Convenience types
315 typedef std::pair RangePair;
316
317 /// Returns an iterator past this container. Note that such an iterator cannot
318 /// be decremented, but will compare equal to other end iterators.
319 iterator end() { return iterator(this, SMSNode::INVALID, SMSNode::INVALID); }
320 const_iterator end() const {
321 return const_iterator(this, SMSNode::INVALID, SMSNode::INVALID);
322 }
323
324 /// Returns true if the set is empty.
325 ///
326 /// This is not the same as BitVector::empty().
327 ///
328 bool empty() const { return size() == 0; }
329
330 /// Returns the number of elements in the set.
331 ///
332 /// This is not the same as BitVector::size() which returns the size of the
333 /// universe.
334 ///
335 unsigned size() const {
336 assert(NumFree <= Dense.size() && "Out-of-bounds free entries");
337 return Dense.size() - NumFree;
338 }
339
340 /// Clears the set. This is a very fast constant time operation.
341 ///
342 void clear() {
343 // Sparse does not need to be cleared, see find().
344 Dense.clear();
345 NumFree = 0;
346 FreelistIdx = SMSNode::INVALID;
347 }
348
349 /// Find an element by its index.
350 ///
351 /// @param Idx A valid index to find.
352 /// @returns An iterator to the element identified by key, or end().
353 ///
354 iterator findIndex(unsigned Idx) {
355 assert(Idx < Universe && "Key out of range");
356 assert(std::numeric_limits::is_integer &&
357 !std::numeric_limits::is_signed &&
358 "SparseT must be an unsigned integer type");
359 const unsigned Stride = std::numeric_limits::max() + 1u;
360 for (unsigned i = Sparse[Idx], e = Dense.size(); i < e; i += Stride) {
361 const unsigned FoundIdx = sparseIndex(Dense[i]);
362 // Check that we're pointing at the correct entry and that it is the head
363 // of a valid list.
364 if (Idx == FoundIdx && Dense[i].isValid() && isHead(Dense[i]))
365 return iterator(this, i, Idx);
366 // Stride is 0 when SparseT >= unsigned. We don't need to loop.
367 if (!Stride)
368 break;
369 }
370 return end();
371 }
372
373 /// Find an element by its key.
374 ///
375 /// @param Key A valid key to find.
376 /// @returns An iterator to the element identified by key, or end().
377 ///
378 iterator find(const KeyT &Key) {
379 return findIndex(KeyIndexOf(Key));
380 }
381
382 const_iterator find(const KeyT &Key) const {
383 iterator I = const_cast(this)->findIndex(KeyIndexOf(Key));
384 return const_iterator(I.SMS, I.Idx, KeyIndexOf(Key));
385 }
386
387 /// Returns the number of elements identified by Key. This will be linear in
388 /// the number of elements of that key.
389 unsigned count(const KeyT &Key) const {
390 unsigned Ret = 0;
391 for (const_iterator It = find(Key); It != end(); ++It)
392 ++Ret;
393
394 return Ret;
395 }
396
397 /// Returns true if this set contains an element identified by Key.
398 bool contains(const KeyT &Key) const {
399 return find(Key) != end();
400 }
401
402 /// Return the head and tail of the subset's list, otherwise returns end().
403 iterator getHead(const KeyT &Key) { return find(Key); }
404 iterator getTail(const KeyT &Key) {
405 iterator I = find(Key);
406 if (I != end())
407 I = iterator(this, I.Prev(), KeyIndexOf(Key));
408 return I;
409 }
410
411 /// The bounds of the range of items sharing Key K. First member is the head
412 /// of the list, and the second member is a decrementable end iterator for
413 /// that key.
414 RangePair equal_range(const KeyT &K) {
415 iterator B = find(K);
416 iterator E = iterator(this, SMSNode::INVALID, B.SparseIdx);
417 return make_pair(B, E);
418 }
419
420 /// Insert a new element at the tail of the subset list. Returns an iterator
421 /// to the newly added entry.
422 iterator insert(const ValueT &Val) {
423 unsigned Idx = sparseIndex(Val);
424 iterator I = findIndex(Idx);
425
426 unsigned NodeIdx = addValue(Val, SMSNode::INVALID, SMSNode::INVALID);
427
428 if (I == end()) {
429 // Make a singleton list
430 Sparse[Idx] = NodeIdx;
431 Dense[NodeIdx].Prev = NodeIdx;
432 return iterator(this, NodeIdx, Idx);
433 }
434
435 // Stick it at the end.
436 unsigned HeadIdx = I.Idx;
437 unsigned TailIdx = I.Prev();
438 Dense[TailIdx].Next = NodeIdx;
439 Dense[HeadIdx].Prev = NodeIdx;
440 Dense[NodeIdx].Prev = TailIdx;
441
442 return iterator(this, NodeIdx, Idx);
443 }
444
445 /// Erases an existing element identified by a valid iterator.
446 ///
447 /// This invalidates iterators pointing at the same entry, but erase() returns
448 /// an iterator pointing to the next element in the subset's list. This makes
449 /// it possible to erase selected elements while iterating over the subset:
450 ///
451 /// tie(I, E) = Set.equal_range(Key);
452 /// while (I != E)
453 /// if (test(*I))
454 /// I = Set.erase(I);
455 /// else
456 /// ++I;
457 ///
458 /// Note that if the last element in the subset list is erased, this will
459 /// return an end iterator which can be decremented to get the new tail (if it
460 /// exists):
461 ///
462 /// tie(B, I) = Set.equal_range(Key);
463 /// for (bool isBegin = B == I; !isBegin; /* empty */) {
464 /// isBegin = (--I) == B;
465 /// if (test(I))
466 /// break;
467 /// I = erase(I);
468 /// }
469 iterator erase(iterator I) {
470 assert(I.isKeyed() && !I.isEnd() && !Dense[I.Idx].isTombstone() &&
471 "erasing invalid/end/tombstone iterator");
472
473 // First, unlink the node from its list. Then swap the node out with the
474 // dense vector's last entry
475 iterator NextI = unlink(Dense[I.Idx]);
476
477 // Put in a tombstone.
478 makeTombstone(I.Idx);
479
480 return NextI;
481 }
482
483 /// Erase all elements with the given key. This invalidates all
484 /// iterators of that key.
485 void eraseAll(const KeyT &K) {
486 for (iterator I = find(K); I != end(); /* empty */)
487 I = erase(I);
488 }
489
490 private:
491 /// Unlink the node from its list. Returns the next node in the list.
492 iterator unlink(const SMSNode &N) {
493 if (isSingleton(N)) {
494 // Singleton is already unlinked
495 assert(N.Next == SMSNode::INVALID && "Singleton has next?");
496 return iterator(this, SMSNode::INVALID, ValIndexOf(N.Data));
497 }
498
499 if (isHead(N)) {
500 // If we're the head, then update the sparse array and our next.
501 Sparse[sparseIndex(N)] = N.Next;
502 Dense[N.Next].Prev = N.Prev;
503 return iterator(this, N.Next, ValIndexOf(N.Data));
504 }
505
506 if (N.isTail()) {
507 // If we're the tail, then update our head and our previous.
508 findIndex(sparseIndex(N)).setPrev(N.Prev);
509 Dense[N.Prev].Next = N.Next;
510
511 // Give back an end iterator that can be decremented
512 iterator I(this, N.Prev, ValIndexOf(N.Data));
513 return ++I;
514 }
515
516 // Otherwise, just drop us
517 Dense[N.Next].Prev = N.Prev;
518 Dense[N.Prev].Next = N.Next;
519 return iterator(this, N.Next, ValIndexOf(N.Data));
520 }
521 };
522
523 } // end namespace llvm
524
525 #endif
1616
1717 #include "llvm/ADT/SmallSet.h"
1818 #include "llvm/ADT/SparseSet.h"
19 #include "llvm/ADT/SparseMultiSet.h"
1920 #include "llvm/CodeGen/MachineDominators.h"
2021 #include "llvm/CodeGen/MachineLoopInfo.h"
2122 #include "llvm/CodeGen/ScheduleDAG.h"
4748 struct PhysRegSUOper {
4849 SUnit *SU;
4950 int OpIdx;
50
51 PhysRegSUOper(SUnit *su, int op): SU(su), OpIdx(op) {}
51 unsigned Reg;
52
53 PhysRegSUOper(SUnit *su, int op, unsigned R): SU(su), OpIdx(op), Reg(R) {}
54
55 unsigned getSparseSetIndex() const { return Reg; }
5256 };
5357
54 /// Combine a SparseSet with a 1x1 vector to track physical registers.
55 /// The SparseSet allows iterating over the (few) live registers for quickly
56 /// comparing against a regmask or clearing the set.
57 ///
58 /// Storage for the map is allocated once for the pass. The map can be
59 /// cleared between scheduling regions without freeing unused entries.
60 class Reg2SUnitsMap {
61 SparseSet PhysRegSet;
62 std::vector > SUnits;
63 public:
64 typedef SparseSet::const_iterator const_iterator;
65
66 // Allow iteration over register numbers (keys) in the map. If needed, we
67 // can provide an iterator over SUnits (values) as well.
68 const_iterator reg_begin() const { return PhysRegSet.begin(); }
69 const_iterator reg_end() const { return PhysRegSet.end(); }
70
71 /// Initialize the map with the number of registers.
72 /// If the map is already large enough, no allocation occurs.
73 /// For simplicity we expect the map to be empty().
74 void setRegLimit(unsigned Limit);
75
76 /// Returns true if the map is empty.
77 bool empty() const { return PhysRegSet.empty(); }
78
79 /// Clear the map without deallocating storage.
80 void clear();
81
82 bool contains(unsigned Reg) const { return PhysRegSet.count(Reg); }
83
84 /// If this register is mapped, return its existing SUnits vector.
85 /// Otherwise map the register and return an empty SUnits vector.
86 std::vector &operator[](unsigned Reg) {
87 bool New = PhysRegSet.insert(Reg).second;
88 assert((!New || SUnits[Reg].empty()) && "stale SUnits vector");
89 (void)New;
90 return SUnits[Reg];
91 }
92
93 /// Erase an existing element without freeing memory.
94 void erase(unsigned Reg) {
95 PhysRegSet.erase(Reg);
96 SUnits[Reg].clear();
97 }
98 };
58 /// Use a SparseMultiSet to track physical registers. Storage is only
59 /// allocated once for the pass. It can be cleared in constant time and reused
60 /// without any frees.
61 typedef SparseMultiSet, uint16_t> Reg2SUnitsMap;
9962
10063 /// Use SparseSet as a SparseMap by relying on the fact that it never
10164 /// compares ValueT's, only unsigned keys. This allows the set to be cleared
167167 BB = 0;
168168 }
169169
170 /// Initialize the map with the number of registers.
171 void Reg2SUnitsMap::setRegLimit(unsigned Limit) {
172 PhysRegSet.setUniverse(Limit);
173 SUnits.resize(Limit);
174 }
175
176 /// Clear the map without deallocating storage.
177 void Reg2SUnitsMap::clear() {
178 for (const_iterator I = reg_begin(), E = reg_end(); I != E; ++I) {
179 SUnits[*I].clear();
180 }
181 PhysRegSet.clear();
182 }
183
184170 /// Initialize the DAG and common scheduler state for the current scheduling
185171 /// region. This does not actually create the DAG, only clears it. The
186172 /// scheduling driver may call BuildSchedGraph multiple times per scheduling
227213 if (Reg == 0) continue;
228214
229215 if (TRI->isPhysicalRegister(Reg))
230 Uses[Reg].push_back(PhysRegSUOper(&ExitSU, -1));
216 Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
231217 else {
232218 assert(!IsPostRA && "Virtual register encountered after regalloc.");
233219 if (MO.readsReg()) // ignore undef operands
244230 E = (*SI)->livein_end(); I != E; ++I) {
245231 unsigned Reg = *I;
246232 if (!Uses.contains(Reg))
247 Uses[Reg].push_back(PhysRegSUOper(&ExitSU, -1));
233 Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
248234 }
249235 }
250236 }
262248 Alias.isValid(); ++Alias) {
263249 if (!Uses.contains(*Alias))
264250 continue;
265 std::vector &UseList = Uses[*Alias];
266 for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
267 SUnit *UseSU = UseList[i].SU;
251 for (Reg2SUnitsMap::iterator I = Uses.find(*Alias); I != Uses.end(); ++I) {
252 SUnit *UseSU = I->SU;
268253 if (UseSU == SU)
269254 continue;
270255
271256 // Adjust the dependence latency using operand def/use information,
272257 // then allow the target to perform its own adjustments.
273 int UseOp = UseList[i].OpIdx;
258 int UseOp = I->OpIdx;
274259 MachineInstr *RegUse = 0;
275260 SDep Dep;
276261 if (UseOp < 0)
310295 Alias.isValid(); ++Alias) {
311296 if (!Defs.contains(*Alias))
312297 continue;
313 std::vector &DefList = Defs[*Alias];
314 for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
315 SUnit *DefSU = DefList[i].SU;
298 for (Reg2SUnitsMap::iterator I = Defs.find(*Alias); I != Defs.end(); ++I) {
299 SUnit *DefSU = I->SU;
316300 if (DefSU == &ExitSU)
317301 continue;
318302 if (DefSU != SU &&
336320 // Either insert a new Reg2SUnits entry with an empty SUnits list, or
337321 // retrieve the existing SUnits list for this register's uses.
338322 // Push this SUnit on the use list.
339 Uses[MO.getReg()].push_back(PhysRegSUOper(SU, OperIdx));
323 Uses.insert(PhysRegSUOper(SU, OperIdx, MO.getReg()));
340324 }
341325 else {
342326 addPhysRegDataDeps(SU, OperIdx);
343
344 // Either insert a new Reg2SUnits entry with an empty SUnits list, or
345 // retrieve the existing SUnits list for this register's defs.
346 std::vector &DefList = Defs[MO.getReg()];
327 unsigned Reg = MO.getReg();
347328
348329 // clear this register's use list
349 if (Uses.contains(MO.getReg()))
350 Uses[MO.getReg()].clear();
351
352 if (!MO.isDead())
353 DefList.clear();
354
355 // Calls will not be reordered because of chain dependencies (see
356 // below). Since call operands are dead, calls may continue to be added
357 // to the DefList making dependence checking quadratic in the size of
358 // the block. Instead, we leave only one call at the back of the
359 // DefList.
360 if (SU->isCall) {
361 while (!DefList.empty() && DefList.back().SU->isCall)
362 DefList.pop_back();
363 }
330 if (Uses.contains(Reg))
331 Uses.eraseAll(Reg);
332
333 if (!MO.isDead()) {
334 Defs.eraseAll(Reg);
335 } else if (SU->isCall) {
336 // Calls will not be reordered because of chain dependencies (see
337 // below). Since call operands are dead, calls may continue to be added
338 // to the DefList making dependence checking quadratic in the size of
339 // the block. Instead, we leave only one call at the back of the
340 // DefList.
341 Reg2SUnitsMap::RangePair P = Defs.equal_range(Reg);
342 Reg2SUnitsMap::iterator B = P.first;
343 Reg2SUnitsMap::iterator I = P.second;
344 for (bool isBegin = I == B; !isBegin; /* empty */) {
345 isBegin = (--I) == B;
346 if (!I->SU->isCall)
347 break;
348 I = Defs.erase(I);
349 }
350 }
351
364352 // Defs are pushed in the order they are visited and never reordered.
365 DefList.push_back(PhysRegSUOper(SU, OperIdx));
353 Defs.insert(PhysRegSUOper(SU, OperIdx, Reg));
366354 }
367355 }
368356
725713
726714 assert(Defs.empty() && Uses.empty() &&
727715 "Only BuildGraph should update Defs/Uses");
728 Defs.setRegLimit(TRI->getNumRegs());
729 Uses.setRegLimit(TRI->getNumRegs());
716 Defs.setUniverse(TRI->getNumRegs());
717 Uses.setUniverse(TRI->getNumRegs());
730718
731719 assert(VRegDefs.empty() && "Only BuildSchedGraph may access VRegDefs");
732720 // FIXME: Allow SparseSet to reserve space for the creation of virtual
2323 SmallStringTest.cpp
2424 SmallVectorTest.cpp
2525 SparseBitVectorTest.cpp
26 SparseMultiSetTest.cpp
2627 SparseSetTest.cpp
2728 StringMapTest.cpp
2829 StringRefTest.cpp
0 //===---- ADT/SparseMultiSetTest.cpp - SparseMultiSet unit tests ----*- C++ -*-===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "llvm/ADT/SparseMultiSet.h"
10 #include "gtest/gtest.h"
11
12 using namespace llvm;
13
14 namespace {
15
16 typedef SparseMultiSet USet;
17
18 // Empty set tests.
19 TEST(SparseMultiSetTest, EmptySet) {
20 USet Set;
21 EXPECT_TRUE(Set.empty());
22 EXPECT_EQ(0u, Set.size());
23
24 Set.setUniverse(10);
25
26 // Lookups on empty set.
27 EXPECT_TRUE(Set.find(0) == Set.end());
28 EXPECT_TRUE(Set.find(9) == Set.end());
29
30 // Same thing on a const reference.
31 const USet &CSet = Set;
32 EXPECT_TRUE(CSet.empty());
33 EXPECT_EQ(0u, CSet.size());
34 EXPECT_TRUE(CSet.find(0) == CSet.end());
35 USet::const_iterator I = CSet.find(5);
36 EXPECT_TRUE(I == CSet.end());
37 }
38
39 // Single entry set tests.
40 TEST(SparseMultiSetTest, SingleEntrySet) {
41 USet Set;
42 Set.setUniverse(10);
43 USet::iterator I = Set.insert(5);
44 EXPECT_TRUE(I != Set.end());
45 EXPECT_TRUE(*I == 5);
46
47 EXPECT_FALSE(Set.empty());
48 EXPECT_EQ(1u, Set.size());
49
50 EXPECT_TRUE(Set.find(0) == Set.end());
51 EXPECT_TRUE(Set.find(9) == Set.end());
52
53 EXPECT_FALSE(Set.contains(0));
54 EXPECT_TRUE(Set.contains(5));
55
56 // Extra insert.
57 I = Set.insert(5);
58 EXPECT_TRUE(I != Set.end());
59 EXPECT_TRUE(I == ++Set.find(5));
60 I--;
61 EXPECT_TRUE(I == Set.find(5));
62
63 // Erase non-existent element.
64 I = Set.find(1);
65 EXPECT_TRUE(I == Set.end());
66 EXPECT_EQ(2u, Set.size());
67 EXPECT_EQ(5u, *Set.find(5));
68
69 // Erase iterator.
70 I = Set.find(5);
71 EXPECT_TRUE(I != Set.end());
72 I = Set.erase(I);
73 EXPECT_TRUE(I != Set.end());
74 I = Set.erase(I);
75 EXPECT_TRUE(I == Set.end());
76 EXPECT_TRUE(Set.empty());
77 }
78
79 // Multiple entry set tests.
80 TEST(SparseMultiSetTest, MultipleEntrySet) {
81 USet Set;
82 Set.setUniverse(10);
83
84 Set.insert(5);
85 Set.insert(5);
86 Set.insert(5);
87 Set.insert(3);
88 Set.insert(2);
89 Set.insert(1);
90 Set.insert(4);
91 EXPECT_EQ(7u, Set.size());
92
93 // Erase last element by key.
94 EXPECT_TRUE(Set.erase(Set.find(4)) == Set.end());
95 EXPECT_EQ(6u, Set.size());
96 EXPECT_FALSE(Set.contains(4));
97 EXPECT_TRUE(Set.find(4) == Set.end());
98
99 // Erase first element by key.
100 EXPECT_EQ(3u, Set.count(5));
101 EXPECT_TRUE(Set.find(5) != Set.end());
102 EXPECT_TRUE(Set.erase(Set.find(5)) != Set.end());
103 EXPECT_EQ(5u, Set.size());
104 EXPECT_EQ(2u, Set.count(5));
105
106 Set.insert(6);
107 Set.insert(7);
108 EXPECT_EQ(7u, Set.size());
109
110 // Erase tail by iterator.
111 EXPECT_TRUE(Set.getTail(6) == Set.getHead(6));
112 USet::iterator I = Set.erase(Set.find(6));
113 EXPECT_TRUE(I == Set.end());
114 EXPECT_EQ(6u, Set.size());
115
116 // Erase tails by iterator.
117 EXPECT_EQ(2u, Set.count(5));
118 I = Set.getTail(5);
119 I = Set.erase(I);
120 EXPECT_TRUE(I == Set.end());
121 --I;
122 EXPECT_EQ(1u, Set.count(5));
123 EXPECT_EQ(5u, *I);
124 I = Set.erase(I);
125 EXPECT_TRUE(I == Set.end());
126 EXPECT_EQ(0u, Set.count(5));
127
128 Set.insert(8);
129 Set.insert(8);
130 Set.insert(8);
131 Set.insert(8);
132 Set.insert(8);
133
134 // Erase all the 8s
135 EXPECT_EQ(5u, std::distance(Set.getHead(8), Set.end()));
136 Set.eraseAll(8);
137 EXPECT_EQ(0u, std::distance(Set.getHead(8), Set.end()));
138
139 // Clear and resize the universe.
140 Set.clear();
141 EXPECT_EQ(0u, Set.size());
142 EXPECT_FALSE(Set.contains(3));
143 Set.setUniverse(1000);
144
145 // Add more than 256 elements.
146 for (unsigned i = 100; i != 800; ++i)
147 Set.insert(i);
148
149 for (unsigned i = 0; i != 10; ++i)
150 Set.eraseAll(i);
151
152 for (unsigned i = 100; i != 800; ++i)
153 EXPECT_EQ(1u, Set.count(i));
154
155 EXPECT_FALSE(Set.contains(99));
156 EXPECT_FALSE(Set.contains(800));
157 EXPECT_EQ(700u, Set.size());
158 }
159
160 // Test out iterators
161 TEST(SparseMultiSetTest, Iterators) {
162 USet Set;
163 Set.setUniverse(100);
164
165 Set.insert(0);
166 Set.insert(1);
167 Set.insert(2);
168 Set.insert(0);
169 Set.insert(1);
170 Set.insert(0);
171
172 USet::RangePair RangePair = Set.equal_range(0);
173 USet::iterator B = RangePair.first;
174 USet::iterator E = RangePair.second;
175
176 // Move the iterators around, going to end and coming back.
177 EXPECT_EQ(3u, std::distance(B, E));
178 EXPECT_EQ(B, --(--(--E)));
179 EXPECT_EQ(++(++(++E)), Set.end());
180 EXPECT_EQ(B, --(--(--E)));
181 EXPECT_EQ(++(++(++E)), Set.end());
182
183 // Insert into the tail, and move around again
184 Set.insert(0);
185 EXPECT_EQ(B, --(--(--(--E))));
186 EXPECT_EQ(++(++(++(++E))), Set.end());
187 EXPECT_EQ(B, --(--(--(--E))));
188 EXPECT_EQ(++(++(++(++E))), Set.end());
189
190 // Erase a tail, and move around again
191 USet::iterator Erased = Set.erase(Set.getTail(0));
192 EXPECT_EQ(Erased, E);
193 EXPECT_EQ(B, --(--(--E)));
194
195 USet Set2;
196 Set2.setUniverse(11);
197 Set2.insert(3);
198 EXPECT_TRUE(!Set2.contains(0));
199 EXPECT_TRUE(!Set.contains(3));
200
201 EXPECT_EQ(Set2.getHead(3), Set2.getTail(3));
202 EXPECT_EQ(Set2.getHead(0), Set2.getTail(0));
203 B = Set2.find(3);
204 EXPECT_EQ(Set2.find(3), --(++B));
205 }
206
// Value type whose sparse-set index differs from its stored value, used to
// exercise the getSparseSetIndex() customization hook.
struct Alt {
  unsigned Value;
  explicit Alt(unsigned x) : Value(x) {}
  unsigned getSparseSetIndex() const { return Value - 1000; }
};
212
213 TEST(SparseMultiSetTest, AltStructSet) {
214 typedef SparseMultiSet ASet;
215 ASet Set;
216 Set.setUniverse(10);
217 Set.insert(Alt(1005));
218
219 ASet::iterator I = Set.find(5);
220 ASSERT_TRUE(I != Set.end());
221 EXPECT_EQ(1005u, I->Value);
222
223 Set.insert(Alt(1006));
224 Set.insert(Alt(1006));
225 I = Set.erase(Set.find(6));
226 ASSERT_TRUE(I != Set.end());
227 EXPECT_EQ(1006u, I->Value);
228 I = Set.erase(Set.find(6));
229 ASSERT_TRUE(I == Set.end());
230
231 EXPECT_TRUE(Set.contains(5));
232 EXPECT_FALSE(Set.contains(6));
233 }
234 } // namespace