Counter Strike : Global Offensive Source Code
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

1057 lines
32 KiB

  1. //===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
  2. //
  3. // The LLVM Compiler Infrastructure
  4. //
  5. // This file is distributed under the University of Illinois Open Source
  6. // License. See LICENSE.TXT for details.
  7. //
  8. //===----------------------------------------------------------------------===//
  9. //
  10. // This file defines the DenseMap class.
  11. //
  12. //===----------------------------------------------------------------------===//
  13. #ifndef LLVM_ADT_DENSEMAP_H
  14. #define LLVM_ADT_DENSEMAP_H
  15. #include "llvm/ADT/DenseMapInfo.h"
  16. #include "llvm/Support/AlignOf.h"
  17. #include "llvm/Support/Compiler.h"
  18. #include "llvm/Support/MathExtras.h"
  19. #include "llvm/Support/PointerLikeTypeTraits.h"
  20. #include "llvm/Support/type_traits.h"
  21. #include <algorithm>
  22. #include <cassert>
  23. #include <climits>
  24. #include <cstddef>
  25. #include <cstring>
  26. #include <iterator>
  27. #include <new>
  28. #include <utility>
  29. namespace llvm {
  30. template<typename KeyT, typename ValueT,
  31. typename KeyInfoT = DenseMapInfo<KeyT>,
  32. bool IsConst = false>
  33. class DenseMapIterator;
  34. template<typename DerivedT,
  35. typename KeyT, typename ValueT, typename KeyInfoT>
  36. class DenseMapBase {
  37. protected:
  38. typedef std::pair<KeyT, ValueT> BucketT;
  39. public:
  40. typedef KeyT key_type;
  41. typedef ValueT mapped_type;
  42. typedef BucketT value_type;
  43. typedef DenseMapIterator<KeyT, ValueT, KeyInfoT> iterator;
  44. typedef DenseMapIterator<KeyT, ValueT,
  45. KeyInfoT, true> const_iterator;
  46. inline iterator begin() {
  47. // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
  48. return empty() ? end() : iterator(getBuckets(), getBucketsEnd());
  49. }
  50. inline iterator end() {
  51. return iterator(getBucketsEnd(), getBucketsEnd(), true);
  52. }
  53. inline const_iterator begin() const {
  54. return empty() ? end() : const_iterator(getBuckets(), getBucketsEnd());
  55. }
  56. inline const_iterator end() const {
  57. return const_iterator(getBucketsEnd(), getBucketsEnd(), true);
  58. }
  59. bool empty() const { return getNumEntries() == 0; }
  60. unsigned size() const { return getNumEntries(); }
  61. /// Grow the densemap so that it has at least Size buckets. Does not shrink
  62. void resize(size_t Size) {
  63. if (Size > getNumBuckets())
  64. grow(Size);
  65. }
  66. void clear() {
  67. if (getNumEntries() == 0 && getNumTombstones() == 0) return;
  68. // If the capacity of the array is huge, and the # elements used is small,
  69. // shrink the array.
  70. if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
  71. shrink_and_clear();
  72. return;
  73. }
  74. const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
  75. for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
  76. if (!KeyInfoT::isEqual(P->first, EmptyKey)) {
  77. if (!KeyInfoT::isEqual(P->first, TombstoneKey)) {
  78. P->second.~ValueT();
  79. decrementNumEntries();
  80. }
  81. P->first = EmptyKey;
  82. }
  83. }
  84. assert(getNumEntries() == 0 && "Node count imbalance!");
  85. setNumTombstones(0);
  86. }
  87. /// count - Return true if the specified key is in the map.
  88. bool count(const KeyT &Val) const {
  89. const BucketT *TheBucket;
  90. return LookupBucketFor(Val, TheBucket);
  91. }
  92. iterator find(const KeyT &Val) {
  93. BucketT *TheBucket;
  94. if (LookupBucketFor(Val, TheBucket))
  95. return iterator(TheBucket, getBucketsEnd(), true);
  96. return end();
  97. }
  98. const_iterator find(const KeyT &Val) const {
  99. const BucketT *TheBucket;
  100. if (LookupBucketFor(Val, TheBucket))
  101. return const_iterator(TheBucket, getBucketsEnd(), true);
  102. return end();
  103. }
  104. /// Alternate version of find() which allows a different, and possibly
  105. /// less expensive, key type.
  106. /// The DenseMapInfo is responsible for supplying methods
  107. /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  108. /// type used.
  109. template<class LookupKeyT>
  110. iterator find_as(const LookupKeyT &Val) {
  111. BucketT *TheBucket;
  112. if (LookupBucketFor(Val, TheBucket))
  113. return iterator(TheBucket, getBucketsEnd(), true);
  114. return end();
  115. }
  116. template<class LookupKeyT>
  117. const_iterator find_as(const LookupKeyT &Val) const {
  118. const BucketT *TheBucket;
  119. if (LookupBucketFor(Val, TheBucket))
  120. return const_iterator(TheBucket, getBucketsEnd(), true);
  121. return end();
  122. }
  123. /// lookup - Return the entry for the specified key, or a default
  124. /// constructed value if no such entry exists.
  125. ValueT lookup(const KeyT &Val) const {
  126. const BucketT *TheBucket;
  127. if (LookupBucketFor(Val, TheBucket))
  128. return TheBucket->second;
  129. return ValueT();
  130. }
  131. // Inserts key,value pair into the map if the key isn't already in the map.
  132. // If the key is already in the map, it returns false and doesn't update the
  133. // value.
  134. std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
  135. BucketT *TheBucket;
  136. if (LookupBucketFor(KV.first, TheBucket))
  137. return std::make_pair(iterator(TheBucket, getBucketsEnd(), true),
  138. false); // Already in map.
  139. // Otherwise, insert the new element.
  140. TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket);
  141. return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true);
  142. }
  143. #if LLVM_HAS_RVALUE_REFERENCES
  144. // Inserts key,value pair into the map if the key isn't already in the map.
  145. // If the key is already in the map, it returns false and doesn't update the
  146. // value.
  147. std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
  148. BucketT *TheBucket;
  149. if (LookupBucketFor(KV.first, TheBucket))
  150. return std::make_pair(iterator(TheBucket, getBucketsEnd(), true),
  151. false); // Already in map.
  152. // Otherwise, insert the new element.
  153. TheBucket = InsertIntoBucket(std::move(KV.first),
  154. std::move(KV.second),
  155. TheBucket);
  156. return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true);
  157. }
  158. #endif
  159. /// insert - Range insertion of pairs.
  160. template<typename InputIt>
  161. void insert(InputIt I, InputIt E) {
  162. for (; I != E; ++I)
  163. insert(*I);
  164. }
  165. bool erase(const KeyT &Val) {
  166. BucketT *TheBucket;
  167. if (!LookupBucketFor(Val, TheBucket))
  168. return false; // not in map.
  169. TheBucket->second.~ValueT();
  170. TheBucket->first = getTombstoneKey();
  171. decrementNumEntries();
  172. incrementNumTombstones();
  173. return true;
  174. }
  175. void erase(iterator I) {
  176. BucketT *TheBucket = &*I;
  177. TheBucket->second.~ValueT();
  178. TheBucket->first = getTombstoneKey();
  179. decrementNumEntries();
  180. incrementNumTombstones();
  181. }
  182. value_type& FindAndConstruct(const KeyT &Key) {
  183. BucketT *TheBucket;
  184. if (LookupBucketFor(Key, TheBucket))
  185. return *TheBucket;
  186. return *InsertIntoBucket(Key, ValueT(), TheBucket);
  187. }
  188. ValueT &operator[](const KeyT &Key) {
  189. return FindAndConstruct(Key).second;
  190. }
  191. #if LLVM_HAS_RVALUE_REFERENCES
  192. value_type& FindAndConstruct(KeyT &&Key) {
  193. BucketT *TheBucket;
  194. if (LookupBucketFor(Key, TheBucket))
  195. return *TheBucket;
  196. return *InsertIntoBucket(Key, ValueT(), TheBucket);
  197. }
  198. ValueT &operator[](KeyT &&Key) {
  199. return FindAndConstruct(Key).second;
  200. }
  201. #endif
  202. /// isPointerIntoBucketsArray - Return true if the specified pointer points
  203. /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  204. /// value in the DenseMap).
  205. bool isPointerIntoBucketsArray(const void *Ptr) const {
  206. return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  207. }
  208. /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  209. /// array. In conjunction with the previous method, this can be used to
  210. /// determine whether an insertion caused the DenseMap to reallocate.
  211. const void *getPointerIntoBucketsArray() const { return getBuckets(); }
  212. protected:
  213. DenseMapBase() {}
  214. void destroyAll() {
  215. if (getNumBuckets() == 0) // Nothing to do.
  216. return;
  217. const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
  218. for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
  219. if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
  220. !KeyInfoT::isEqual(P->first, TombstoneKey))
  221. P->second.~ValueT();
  222. P->first.~KeyT();
  223. }
  224. #ifndef NDEBUG
  225. memset((void*)getBuckets(), 0x5a, sizeof(BucketT)*getNumBuckets());
  226. #endif
  227. }
  228. void initEmpty() {
  229. setNumEntries(0);
  230. setNumTombstones(0);
  231. assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
  232. "# initial buckets must be a power of two!");
  233. const KeyT EmptyKey = getEmptyKey();
  234. for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
  235. new (&B->first) KeyT(EmptyKey);
  236. }
  237. void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
  238. initEmpty();
  239. // Insert all the old elements.
  240. const KeyT EmptyKey = getEmptyKey();
  241. const KeyT TombstoneKey = getTombstoneKey();
  242. for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
  243. if (!KeyInfoT::isEqual(B->first, EmptyKey) &&
  244. !KeyInfoT::isEqual(B->first, TombstoneKey)) {
  245. // Insert the key/value into the new table.
  246. BucketT *DestBucket;
  247. bool FoundVal = LookupBucketFor(B->first, DestBucket);
  248. (void)FoundVal; // silence warning.
  249. assert(!FoundVal && "Key already in new map?");
  250. DestBucket->first = llvm_move(B->first);
  251. new (&DestBucket->second) ValueT(llvm_move(B->second));
  252. incrementNumEntries();
  253. // Free the value.
  254. B->second.~ValueT();
  255. }
  256. B->first.~KeyT();
  257. }
  258. #ifndef NDEBUG
  259. if (OldBucketsBegin != OldBucketsEnd)
  260. memset((void*)OldBucketsBegin, 0x5a,
  261. sizeof(BucketT) * (OldBucketsEnd - OldBucketsBegin));
  262. #endif
  263. }
  264. template <typename OtherBaseT>
  265. void copyFrom(const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT>& other) {
  266. assert(getNumBuckets() == other.getNumBuckets());
  267. setNumEntries(other.getNumEntries());
  268. setNumTombstones(other.getNumTombstones());
  269. if (isPodLike<KeyT>::value && isPodLike<ValueT>::value)
  270. memcpy(getBuckets(), other.getBuckets(),
  271. getNumBuckets() * sizeof(BucketT));
  272. else
  273. for (size_t i = 0; i < getNumBuckets(); ++i) {
  274. new (&getBuckets()[i].first) KeyT(other.getBuckets()[i].first);
  275. if (!KeyInfoT::isEqual(getBuckets()[i].first, getEmptyKey()) &&
  276. !KeyInfoT::isEqual(getBuckets()[i].first, getTombstoneKey()))
  277. new (&getBuckets()[i].second) ValueT(other.getBuckets()[i].second);
  278. }
  279. }
  280. void swap(DenseMapBase& RHS) {
  281. std::swap(getNumEntries(), RHS.getNumEntries());
  282. std::swap(getNumTombstones(), RHS.getNumTombstones());
  283. }
  284. static unsigned getHashValue(const KeyT &Val) {
  285. return KeyInfoT::getHashValue(Val);
  286. }
  287. template<typename LookupKeyT>
  288. static unsigned getHashValue(const LookupKeyT &Val) {
  289. return KeyInfoT::getHashValue(Val);
  290. }
  291. static const KeyT getEmptyKey() {
  292. return KeyInfoT::getEmptyKey();
  293. }
  294. static const KeyT getTombstoneKey() {
  295. return KeyInfoT::getTombstoneKey();
  296. }
  297. private:
  298. unsigned getNumEntries() const {
  299. return static_cast<const DerivedT *>(this)->getNumEntries();
  300. }
  301. void setNumEntries(unsigned Num) {
  302. static_cast<DerivedT *>(this)->setNumEntries(Num);
  303. }
  304. void incrementNumEntries() {
  305. setNumEntries(getNumEntries() + 1);
  306. }
  307. void decrementNumEntries() {
  308. setNumEntries(getNumEntries() - 1);
  309. }
  310. unsigned getNumTombstones() const {
  311. return static_cast<const DerivedT *>(this)->getNumTombstones();
  312. }
  313. void setNumTombstones(unsigned Num) {
  314. static_cast<DerivedT *>(this)->setNumTombstones(Num);
  315. }
  316. void incrementNumTombstones() {
  317. setNumTombstones(getNumTombstones() + 1);
  318. }
  319. void decrementNumTombstones() {
  320. setNumTombstones(getNumTombstones() - 1);
  321. }
  322. const BucketT *getBuckets() const {
  323. return static_cast<const DerivedT *>(this)->getBuckets();
  324. }
  325. BucketT *getBuckets() {
  326. return static_cast<DerivedT *>(this)->getBuckets();
  327. }
  328. unsigned getNumBuckets() const {
  329. return static_cast<const DerivedT *>(this)->getNumBuckets();
  330. }
  331. BucketT *getBucketsEnd() {
  332. return getBuckets() + getNumBuckets();
  333. }
  334. const BucketT *getBucketsEnd() const {
  335. return getBuckets() + getNumBuckets();
  336. }
  337. void grow(unsigned AtLeast) {
  338. static_cast<DerivedT *>(this)->grow(AtLeast);
  339. }
  340. void shrink_and_clear() {
  341. static_cast<DerivedT *>(this)->shrink_and_clear();
  342. }
  343. BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value,
  344. BucketT *TheBucket) {
  345. TheBucket = InsertIntoBucketImpl(Key, TheBucket);
  346. TheBucket->first = Key;
  347. new (&TheBucket->second) ValueT(Value);
  348. return TheBucket;
  349. }
  350. #if LLVM_HAS_RVALUE_REFERENCES
  351. BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value,
  352. BucketT *TheBucket) {
  353. TheBucket = InsertIntoBucketImpl(Key, TheBucket);
  354. TheBucket->first = Key;
  355. new (&TheBucket->second) ValueT(std::move(Value));
  356. return TheBucket;
  357. }
  358. BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) {
  359. TheBucket = InsertIntoBucketImpl(Key, TheBucket);
  360. TheBucket->first = std::move(Key);
  361. new (&TheBucket->second) ValueT(std::move(Value));
  362. return TheBucket;
  363. }
  364. #endif
  365. BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) {
  366. // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
  367. // the buckets are empty (meaning that many are filled with tombstones),
  368. // grow the table.
  369. //
  370. // The later case is tricky. For example, if we had one empty bucket with
  371. // tons of tombstones, failing lookups (e.g. for insertion) would have to
  372. // probe almost the entire table until it found the empty bucket. If the
  373. // table completely filled with tombstones, no lookup would ever succeed,
  374. // causing infinite loops in lookup.
  375. unsigned NewNumEntries = getNumEntries() + 1;
  376. unsigned NumBuckets = getNumBuckets();
  377. if (NewNumEntries*4 >= NumBuckets*3) {
  378. this->grow(NumBuckets * 2);
  379. LookupBucketFor(Key, TheBucket);
  380. NumBuckets = getNumBuckets();
  381. }
  382. if (NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8) {
  383. this->grow(NumBuckets * 2);
  384. LookupBucketFor(Key, TheBucket);
  385. }
  386. assert(TheBucket);
  387. // Only update the state after we've grown our bucket space appropriately
  388. // so that when growing buckets we have self-consistent entry count.
  389. incrementNumEntries();
  390. // If we are writing over a tombstone, remember this.
  391. const KeyT EmptyKey = getEmptyKey();
  392. if (!KeyInfoT::isEqual(TheBucket->first, EmptyKey))
  393. decrementNumTombstones();
  394. return TheBucket;
  395. }
  396. /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  397. /// FoundBucket. If the bucket contains the key and a value, this returns
  398. /// true, otherwise it returns a bucket with an empty marker or tombstone and
  399. /// returns false.
  400. template<typename LookupKeyT>
  401. bool LookupBucketFor(const LookupKeyT &Val,
  402. const BucketT *&FoundBucket) const {
  403. const BucketT *BucketsPtr = getBuckets();
  404. const unsigned NumBuckets = getNumBuckets();
  405. if (NumBuckets == 0) {
  406. FoundBucket = 0;
  407. return false;
  408. }
  409. // FoundTombstone - Keep track of whether we find a tombstone while probing.
  410. const BucketT *FoundTombstone = 0;
  411. const KeyT EmptyKey = getEmptyKey();
  412. const KeyT TombstoneKey = getTombstoneKey();
  413. assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
  414. !KeyInfoT::isEqual(Val, TombstoneKey) &&
  415. "Empty/Tombstone value shouldn't be inserted into map!");
  416. unsigned BucketNo = getHashValue(Val) & (NumBuckets-1);
  417. unsigned ProbeAmt = 1;
  418. while (1) {
  419. const BucketT *ThisBucket = BucketsPtr + BucketNo;
  420. // Found Val's bucket? If so, return it.
  421. if (KeyInfoT::isEqual(Val, ThisBucket->first)) {
  422. FoundBucket = ThisBucket;
  423. return true;
  424. }
  425. // If we found an empty bucket, the key doesn't exist in the set.
  426. // Insert it and return the default value.
  427. if (KeyInfoT::isEqual(ThisBucket->first, EmptyKey)) {
  428. // If we've already seen a tombstone while probing, fill it in instead
  429. // of the empty bucket we eventually probed to.
  430. FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
  431. return false;
  432. }
  433. // If this is a tombstone, remember it. If Val ends up not in the map, we
  434. // prefer to return it than something that would require more probing.
  435. if (KeyInfoT::isEqual(ThisBucket->first, TombstoneKey) && !FoundTombstone)
  436. FoundTombstone = ThisBucket; // Remember the first tombstone found.
  437. // Otherwise, it's a hash collision or a tombstone, continue quadratic
  438. // probing.
  439. BucketNo += ProbeAmt++;
  440. BucketNo &= (NumBuckets-1);
  441. }
  442. }
  443. template <typename LookupKeyT>
  444. bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
  445. const BucketT *ConstFoundBucket;
  446. bool Result = const_cast<const DenseMapBase *>(this)
  447. ->LookupBucketFor(Val, ConstFoundBucket);
  448. FoundBucket = const_cast<BucketT *>(ConstFoundBucket);
  449. return Result;
  450. }
  451. public:
  452. /// Return the approximate size (in bytes) of the actual map.
  453. /// This is just the raw memory used by DenseMap.
  454. /// If entries are pointers to objects, the size of the referenced objects
  455. /// are not included.
  456. size_t getMemorySize() const {
  457. return getNumBuckets() * sizeof(BucketT);
  458. }
  459. };
  460. template<typename KeyT, typename ValueT,
  461. typename KeyInfoT = DenseMapInfo<KeyT> >
  462. class DenseMap
  463. : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT>,
  464. KeyT, ValueT, KeyInfoT> {
  465. // Lift some types from the dependent base class into this class for
  466. // simplicity of referring to them.
  467. typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT> BaseT;
  468. typedef typename BaseT::BucketT BucketT;
  469. friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT>;
  470. BucketT *Buckets;
  471. unsigned NumEntries;
  472. unsigned NumTombstones;
  473. unsigned NumBuckets;
  474. public:
  475. explicit DenseMap(unsigned NumInitBuckets = 0) {
  476. init(NumInitBuckets);
  477. }
  478. DenseMap(const DenseMap &other) : BaseT() {
  479. init(0);
  480. copyFrom(other);
  481. }
  482. #if LLVM_HAS_RVALUE_REFERENCES
  483. DenseMap(DenseMap &&other) : BaseT() {
  484. init(0);
  485. swap(other);
  486. }
  487. #endif
  488. template<typename InputIt>
  489. DenseMap(const InputIt &I, const InputIt &E) {
  490. init(NextPowerOf2(std::distance(I, E)));
  491. this->insert(I, E);
  492. }
  493. ~DenseMap() {
  494. this->destroyAll();
  495. operator delete(Buckets);
  496. }
  497. void swap(DenseMap& RHS) {
  498. std::swap(Buckets, RHS.Buckets);
  499. std::swap(NumEntries, RHS.NumEntries);
  500. std::swap(NumTombstones, RHS.NumTombstones);
  501. std::swap(NumBuckets, RHS.NumBuckets);
  502. }
  503. DenseMap& operator=(const DenseMap& other) {
  504. copyFrom(other);
  505. return *this;
  506. }
  507. #if LLVM_HAS_RVALUE_REFERENCES
  508. DenseMap& operator=(DenseMap &&other) {
  509. this->destroyAll();
  510. operator delete(Buckets);
  511. init(0);
  512. swap(other);
  513. return *this;
  514. }
  515. #endif
  516. void copyFrom(const DenseMap& other) {
  517. this->destroyAll();
  518. operator delete(Buckets);
  519. if (allocateBuckets(other.NumBuckets)) {
  520. this->BaseT::copyFrom(other);
  521. } else {
  522. NumEntries = 0;
  523. NumTombstones = 0;
  524. }
  525. }
  526. void init(unsigned InitBuckets) {
  527. if (allocateBuckets(InitBuckets)) {
  528. this->BaseT::initEmpty();
  529. } else {
  530. NumEntries = 0;
  531. NumTombstones = 0;
  532. }
  533. }
  534. void grow(unsigned AtLeast) {
  535. unsigned OldNumBuckets = NumBuckets;
  536. BucketT *OldBuckets = Buckets;
  537. allocateBuckets(std::max<unsigned>(64, NextPowerOf2(AtLeast-1)));
  538. assert(Buckets);
  539. if (!OldBuckets) {
  540. this->BaseT::initEmpty();
  541. return;
  542. }
  543. this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets);
  544. // Free the old table.
  545. operator delete(OldBuckets);
  546. }
  547. void shrink_and_clear() {
  548. unsigned OldNumEntries = NumEntries;
  549. this->destroyAll();
  550. // Reduce the number of buckets.
  551. unsigned NewNumBuckets = 0;
  552. if (OldNumEntries)
  553. NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
  554. if (NewNumBuckets == NumBuckets) {
  555. this->BaseT::initEmpty();
  556. return;
  557. }
  558. operator delete(Buckets);
  559. init(NewNumBuckets);
  560. }
  561. private:
  562. unsigned getNumEntries() const {
  563. return NumEntries;
  564. }
  565. void setNumEntries(unsigned Num) {
  566. NumEntries = Num;
  567. }
  568. unsigned getNumTombstones() const {
  569. return NumTombstones;
  570. }
  571. void setNumTombstones(unsigned Num) {
  572. NumTombstones = Num;
  573. }
  574. BucketT *getBuckets() const {
  575. return Buckets;
  576. }
  577. unsigned getNumBuckets() const {
  578. return NumBuckets;
  579. }
  580. bool allocateBuckets(unsigned Num) {
  581. NumBuckets = Num;
  582. if (NumBuckets == 0) {
  583. Buckets = 0;
  584. return false;
  585. }
  586. Buckets = static_cast<BucketT*>(operator new(sizeof(BucketT) * NumBuckets));
  587. return true;
  588. }
  589. };
  590. template<typename KeyT, typename ValueT,
  591. unsigned InlineBuckets = 4,
  592. typename KeyInfoT = DenseMapInfo<KeyT> >
  593. class SmallDenseMap
  594. : public DenseMapBase<SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT>,
  595. KeyT, ValueT, KeyInfoT> {
  596. // Lift some types from the dependent base class into this class for
  597. // simplicity of referring to them.
  598. typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT> BaseT;
  599. typedef typename BaseT::BucketT BucketT;
  600. friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT>;
  601. unsigned Small : 1;
  602. unsigned NumEntries : 31;
  603. unsigned NumTombstones;
  604. struct LargeRep {
  605. BucketT *Buckets;
  606. unsigned NumBuckets;
  607. };
  608. /// A "union" of an inline bucket array and the struct representing
  609. /// a large bucket. This union will be discriminated by the 'Small' bit.
  610. AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;
  611. public:
  612. explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
  613. init(NumInitBuckets);
  614. }
  615. SmallDenseMap(const SmallDenseMap &other) {
  616. init(0);
  617. copyFrom(other);
  618. }
  619. #if LLVM_HAS_RVALUE_REFERENCES
  620. SmallDenseMap(SmallDenseMap &&other) {
  621. init(0);
  622. swap(other);
  623. }
  624. #endif
  625. template<typename InputIt>
  626. SmallDenseMap(const InputIt &I, const InputIt &E) {
  627. init(NextPowerOf2(std::distance(I, E)));
  628. this->insert(I, E);
  629. }
  630. ~SmallDenseMap() {
  631. this->destroyAll();
  632. deallocateBuckets();
  633. }
  634. void swap(SmallDenseMap& RHS) {
  635. unsigned TmpNumEntries = RHS.NumEntries;
  636. RHS.NumEntries = NumEntries;
  637. NumEntries = TmpNumEntries;
  638. std::swap(NumTombstones, RHS.NumTombstones);
  639. const KeyT EmptyKey = this->getEmptyKey();
  640. const KeyT TombstoneKey = this->getTombstoneKey();
  641. if (Small && RHS.Small) {
  642. // If we're swapping inline bucket arrays, we have to cope with some of
  643. // the tricky bits of DenseMap's storage system: the buckets are not
  644. // fully initialized. Thus we swap every key, but we may have
  645. // a one-directional move of the value.
  646. for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
  647. BucketT *LHSB = &getInlineBuckets()[i],
  648. *RHSB = &RHS.getInlineBuckets()[i];
  649. bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->first, EmptyKey) &&
  650. !KeyInfoT::isEqual(LHSB->first, TombstoneKey));
  651. bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->first, EmptyKey) &&
  652. !KeyInfoT::isEqual(RHSB->first, TombstoneKey));
  653. if (hasLHSValue && hasRHSValue) {
  654. // Swap together if we can...
  655. std::swap(*LHSB, *RHSB);
  656. continue;
  657. }
  658. // Swap separately and handle any assymetry.
  659. std::swap(LHSB->first, RHSB->first);
  660. if (hasLHSValue) {
  661. new (&RHSB->second) ValueT(llvm_move(LHSB->second));
  662. LHSB->second.~ValueT();
  663. } else if (hasRHSValue) {
  664. new (&LHSB->second) ValueT(llvm_move(RHSB->second));
  665. RHSB->second.~ValueT();
  666. }
  667. }
  668. return;
  669. }
  670. if (!Small && !RHS.Small) {
  671. std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
  672. std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
  673. return;
  674. }
  675. SmallDenseMap &SmallSide = Small ? *this : RHS;
  676. SmallDenseMap &LargeSide = Small ? RHS : *this;
  677. // First stash the large side's rep and move the small side across.
  678. LargeRep TmpRep = llvm_move(*LargeSide.getLargeRep());
  679. LargeSide.getLargeRep()->~LargeRep();
  680. LargeSide.Small = true;
  681. // This is similar to the standard move-from-old-buckets, but the bucket
  682. // count hasn't actually rotated in this case. So we have to carefully
  683. // move construct the keys and values into their new locations, but there
  684. // is no need to re-hash things.
  685. for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
  686. BucketT *NewB = &LargeSide.getInlineBuckets()[i],
  687. *OldB = &SmallSide.getInlineBuckets()[i];
  688. new (&NewB->first) KeyT(llvm_move(OldB->first));
  689. OldB->first.~KeyT();
  690. if (!KeyInfoT::isEqual(NewB->first, EmptyKey) &&
  691. !KeyInfoT::isEqual(NewB->first, TombstoneKey)) {
  692. new (&NewB->second) ValueT(llvm_move(OldB->second));
  693. OldB->second.~ValueT();
  694. }
  695. }
  696. // The hard part of moving the small buckets across is done, just move
  697. // the TmpRep into its new home.
  698. SmallSide.Small = false;
  699. new (SmallSide.getLargeRep()) LargeRep(llvm_move(TmpRep));
  700. }
  701. SmallDenseMap& operator=(const SmallDenseMap& other) {
  702. copyFrom(other);
  703. return *this;
  704. }
  705. #if LLVM_HAS_RVALUE_REFERENCES
  706. SmallDenseMap& operator=(SmallDenseMap &&other) {
  707. this->destroyAll();
  708. deallocateBuckets();
  709. init(0);
  710. swap(other);
  711. return *this;
  712. }
  713. #endif
  714. void copyFrom(const SmallDenseMap& other) {
  715. this->destroyAll();
  716. deallocateBuckets();
  717. Small = true;
  718. if (other.getNumBuckets() > InlineBuckets) {
  719. Small = false;
  720. allocateBuckets(other.getNumBuckets());
  721. }
  722. this->BaseT::copyFrom(other);
  723. }
  724. void init(unsigned InitBuckets) {
  725. Small = true;
  726. if (InitBuckets > InlineBuckets) {
  727. Small = false;
  728. new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
  729. }
  730. this->BaseT::initEmpty();
  731. }
  732. void grow(unsigned AtLeast) {
  733. if (AtLeast >= InlineBuckets)
  734. AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast-1));
  735. if (Small) {
  736. if (AtLeast < InlineBuckets)
  737. return; // Nothing to do.
  738. // First move the inline buckets into a temporary storage.
  739. AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
  740. BucketT *TmpBegin = reinterpret_cast<BucketT *>(TmpStorage.buffer);
  741. BucketT *TmpEnd = TmpBegin;
  742. // Loop over the buckets, moving non-empty, non-tombstones into the
  743. // temporary storage. Have the loop move the TmpEnd forward as it goes.
  744. const KeyT EmptyKey = this->getEmptyKey();
  745. const KeyT TombstoneKey = this->getTombstoneKey();
  746. for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
  747. if (!KeyInfoT::isEqual(P->first, EmptyKey) &&
  748. !KeyInfoT::isEqual(P->first, TombstoneKey)) {
  749. assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
  750. "Too many inline buckets!");
  751. new (&TmpEnd->first) KeyT(llvm_move(P->first));
  752. new (&TmpEnd->second) ValueT(llvm_move(P->second));
  753. ++TmpEnd;
  754. P->second.~ValueT();
  755. }
  756. P->first.~KeyT();
  757. }
  758. // Now make this map use the large rep, and move all the entries back
  759. // into it.
  760. Small = false;
  761. new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
  762. this->moveFromOldBuckets(TmpBegin, TmpEnd);
  763. return;
  764. }
  765. LargeRep OldRep = llvm_move(*getLargeRep());
  766. getLargeRep()->~LargeRep();
  767. if (AtLeast <= InlineBuckets) {
  768. Small = true;
  769. } else {
  770. new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
  771. }
  772. this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets);
  773. // Free the old table.
  774. operator delete(OldRep.Buckets);
  775. }
  776. void shrink_and_clear() {
  777. unsigned OldSize = this->size();
  778. this->destroyAll();
  779. // Reduce the number of buckets.
  780. unsigned NewNumBuckets = 0;
  781. if (OldSize) {
  782. NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
  783. if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
  784. NewNumBuckets = 64;
  785. }
  786. if ((Small && NewNumBuckets <= InlineBuckets) ||
  787. (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
  788. this->BaseT::initEmpty();
  789. return;
  790. }
  791. deallocateBuckets();
  792. init(NewNumBuckets);
  793. }
  794. private:
  795. unsigned getNumEntries() const {
  796. return NumEntries;
  797. }
  798. void setNumEntries(unsigned Num) {
  799. assert(Num < INT_MAX && "Cannot support more than INT_MAX entries");
  800. NumEntries = Num;
  801. }
  802. unsigned getNumTombstones() const {
  803. return NumTombstones;
  804. }
  805. void setNumTombstones(unsigned Num) {
  806. NumTombstones = Num;
  807. }
  808. const BucketT *getInlineBuckets() const {
  809. assert(Small);
  810. // Note that this cast does not violate aliasing rules as we assert that
  811. // the memory's dynamic type is the small, inline bucket buffer, and the
  812. // 'storage.buffer' static type is 'char *'.
  813. return reinterpret_cast<const BucketT *>(storage.buffer);
  814. }
  815. BucketT *getInlineBuckets() {
  816. return const_cast<BucketT *>(
  817. const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  818. }
  819. const LargeRep *getLargeRep() const {
  820. assert(!Small);
  821. // Note, same rule about aliasing as with getInlineBuckets.
  822. return reinterpret_cast<const LargeRep *>(storage.buffer);
  823. }
  824. LargeRep *getLargeRep() {
  825. return const_cast<LargeRep *>(
  826. const_cast<const SmallDenseMap *>(this)->getLargeRep());
  827. }
  828. const BucketT *getBuckets() const {
  829. return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  830. }
  831. BucketT *getBuckets() {
  832. return const_cast<BucketT *>(
  833. const_cast<const SmallDenseMap *>(this)->getBuckets());
  834. }
  835. unsigned getNumBuckets() const {
  836. return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  837. }
  838. void deallocateBuckets() {
  839. if (Small)
  840. return;
  841. operator delete(getLargeRep()->Buckets);
  842. getLargeRep()->~LargeRep();
  843. }
  844. LargeRep allocateBuckets(unsigned Num) {
  845. assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
  846. LargeRep Rep = {
  847. static_cast<BucketT*>(operator new(sizeof(BucketT) * Num)), Num
  848. };
  849. return Rep;
  850. }
  851. };
  852. template<typename KeyT, typename ValueT,
  853. typename KeyInfoT, bool IsConst>
  854. class DenseMapIterator {
  855. typedef std::pair<KeyT, ValueT> Bucket;
  856. typedef DenseMapIterator<KeyT, ValueT,
  857. KeyInfoT, true> ConstIterator;
  858. friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, true>;
  859. public:
  860. typedef ptrdiff_t difference_type;
  861. typedef typename conditional<IsConst, const Bucket, Bucket>::type value_type;
  862. typedef value_type *pointer;
  863. typedef value_type &reference;
  864. typedef std::forward_iterator_tag iterator_category;
  865. private:
  866. pointer Ptr, End;
  867. public:
  868. DenseMapIterator() : Ptr(0), End(0) {}
  869. DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false)
  870. : Ptr(Pos), End(E) {
  871. if (!NoAdvance) AdvancePastEmptyBuckets();
  872. }
  873. // If IsConst is true this is a converting constructor from iterator to
  874. // const_iterator and the default copy constructor is used.
  875. // Otherwise this is a copy constructor for iterator.
  876. DenseMapIterator(const DenseMapIterator<KeyT, ValueT,
  877. KeyInfoT, false>& I)
  878. : Ptr(I.Ptr), End(I.End) {}
  879. reference operator*() const {
  880. return *Ptr;
  881. }
  882. pointer operator->() const {
  883. return Ptr;
  884. }
  885. bool operator==(const ConstIterator &RHS) const {
  886. return Ptr == RHS.operator->();
  887. }
  888. bool operator!=(const ConstIterator &RHS) const {
  889. return Ptr != RHS.operator->();
  890. }
  891. inline DenseMapIterator& operator++() { // Preincrement
  892. ++Ptr;
  893. AdvancePastEmptyBuckets();
  894. return *this;
  895. }
  896. DenseMapIterator operator++(int) { // Postincrement
  897. DenseMapIterator tmp = *this; ++*this; return tmp;
  898. }
  899. private:
  900. void AdvancePastEmptyBuckets() {
  901. const KeyT Empty = KeyInfoT::getEmptyKey();
  902. const KeyT Tombstone = KeyInfoT::getTombstoneKey();
  903. while (Ptr != End &&
  904. (KeyInfoT::isEqual(Ptr->first, Empty) ||
  905. KeyInfoT::isEqual(Ptr->first, Tombstone)))
  906. ++Ptr;
  907. }
  908. };
  909. template<typename KeyT, typename ValueT, typename KeyInfoT>
  910. static inline size_t
  911. capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
  912. return X.getMemorySize();
  913. }
  914. } // end namespace llvm
  915. #endif