//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// This file defines the DenseMap class.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ADT_DENSEMAP_H
#define LLVM_ADT_DENSEMAP_H

#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/EpochTracker.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemAlloc.h"
#include "llvm/Support/ReverseIteration.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstring>
#include <initializer_list>
#include <iterator>
#include <new>
#include <type_traits>
#include <utility>

namespace llvm {

namespace detail {

// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  using std::pair<KeyT, ValueT>::pair;

  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
};
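// Illustrative sketch (not part of the upstream header): a client could in
// principle supply its own bucket type in place of DenseMapPair, as long as
// it stays pair-like -- the map implementation uses both the getFirst()/
// getSecond() accessors and the first/second members directly. The names
// below are hypothetical.
//
//   template <typename KeyT, typename ValueT>
//   struct MyBucket {
//     KeyT first;
//     ValueT second;
//     KeyT &getFirst() { return first; }
//     const KeyT &getFirst() const { return first; }
//     ValueT &getSecond() { return second; }
//     const ValueT &getSecond() const { return second; }
//   };
//   // DenseMap<K, V, DenseMapInfo<K>, MyBucket<K, V>> would then use
//   // MyBucket as its bucket/value type.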
} // end namespace detail

template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename Bucket = llvm::detail::DenseMapPair<KeyT, ValueT>,
          bool IsConst = false>
class DenseMapIterator;

template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
class DenseMapBase : public DebugEpochBase {
public:
  using size_type = unsigned;
  using key_type = KeyT;
  using mapped_type = ValueT;
  using value_type = BucketT;

  using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
  using const_iterator =
      DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
  inline iterator begin() {
    // When the map is empty, avoid the overhead of advancing/retreating past
    // empty buckets.
    if (empty())
      return end();
    if (shouldReverseIterate<KeyT>())
      return makeIterator(getBucketsEnd() - 1, getBuckets(), *this);
    return makeIterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline iterator end() {
    return makeIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }
  inline const_iterator begin() const {
    if (empty())
      return end();
    if (shouldReverseIterate<KeyT>())
      return makeConstIterator(getBucketsEnd() - 1, getBuckets(), *this);
    return makeConstIterator(getBuckets(), getBucketsEnd(), *this);
  }
  inline const_iterator end() const {
    return makeConstIterator(getBucketsEnd(), getBucketsEnd(), *this, true);
  }
  [[nodiscard]] bool empty() const { return getNumEntries() == 0; }
  unsigned size() const { return getNumEntries(); }
  /// Grow the densemap so that it can contain at least \p NumEntries items
  /// before resizing again.
  void reserve(size_type NumEntries) {
    auto NumBuckets = getMinBucketToReserveForEntries(NumEntries);
    incrementEpoch();
    if (NumBuckets > getNumBuckets())
      grow(NumBuckets);
  }
  void clear() {
    incrementEpoch();
    if (getNumEntries() == 0 && getNumTombstones() == 0)
      return;

    // If the capacity of the array is huge, and the # elements used is small,
    // shrink the array.
    if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
      shrink_and_clear();
      return;
    }

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    if constexpr (std::is_trivially_destructible_v<ValueT>) {
      // Use a simpler loop when values don't need destruction.
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P)
        P->getFirst() = EmptyKey;
    } else {
      unsigned NumEntries = getNumEntries();
      for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey)) {
          if (!KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
            P->getSecond().~ValueT();
            --NumEntries;
          }
          P->getFirst() = EmptyKey;
        }
      }
      assert(NumEntries == 0 && "Node count imbalance!");
    }
    setNumEntries(0);
    setNumTombstones(0);
  }
  /// Return true if the specified key is in the map, false otherwise.
  bool contains(const_arg_type_t<KeyT> Val) const {
    return doFind(Val) != nullptr;
  }
  /// Return 1 if the specified key is in the map, 0 otherwise.
  size_type count(const_arg_type_t<KeyT> Val) const {
    return contains(Val) ? 1 : 0;
  }

  iterator find(const_arg_type_t<KeyT> Val) {
    if (BucketT *Bucket = doFind(Val))
      return makeIterator(
          Bucket, shouldReverseIterate<KeyT>() ? getBuckets() : getBucketsEnd(),
          *this, true);
    return end();
  }
  const_iterator find(const_arg_type_t<KeyT> Val) const {
    if (const BucketT *Bucket = doFind(Val))
      return makeConstIterator(
          Bucket, shouldReverseIterate<KeyT>() ? getBuckets() : getBucketsEnd(),
          *this, true);
    return end();
  }
  /// Alternate version of find() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <class LookupKeyT> iterator find_as(const LookupKeyT &Val) {
    if (BucketT *Bucket = doFind(Val))
      return makeIterator(
          Bucket, shouldReverseIterate<KeyT>() ? getBuckets() : getBucketsEnd(),
          *this, true);
    return end();
  }
  template <class LookupKeyT>
  const_iterator find_as(const LookupKeyT &Val) const {
    if (const BucketT *Bucket = doFind(Val))
      return makeConstIterator(
          Bucket, shouldReverseIterate<KeyT>() ? getBuckets() : getBucketsEnd(),
          *this, true);
    return end();
  }

  /// lookup - Return the entry for the specified key, or a default
  /// constructed value if no such entry exists.
  ValueT lookup(const_arg_type_t<KeyT> Val) const {
    if (const BucketT *Bucket = doFind(Val))
      return Bucket->getSecond();
    return ValueT();
  }
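  // Illustrative usage sketch (not part of the upstream header): find_as()
  // avoids materializing an expensive KeyT when the DenseMapInfo
  // specialization understands a cheaper lookup type. The StringDenseMapInfo
  // traits class below is a hypothetical name, not something this header
  // provides.
  //
  //   DenseMap<std::string, int, StringDenseMapInfo> M;
  //   auto It = M.find_as(StringRef("key")); // no std::string temporary
  //   int V = M.lookup("key");               // 0 (default) if absent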
  /// at - Return the entry for the specified key, or abort if no such
  /// entry exists.
  const ValueT &at(const_arg_type_t<KeyT> Val) const {
    auto Iter = this->find(std::move(Val));
    assert(Iter != this->end() && "DenseMap::at failed due to a missing key");
    return Iter->second;
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
    return try_emplace(KV.first, KV.second);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // If the key is already in the map, it returns false and doesn't update the
  // value.
  std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
    return try_emplace(std::move(KV.first), std::move(KV.second));
  }
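  // Illustrative semantics sketch (not part of the upstream header):
  //
  //   DenseMap<int, int> M;
  //   auto R1 = M.insert({1, 10}); // R1.second == true, value inserted
  //   auto R2 = M.insert({1, 20}); // R2.second == false, value stays 10
  //   // R2.first points at the existing {1, 10} entry.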
  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(KeyT &&Key, Ts &&...Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket =
        InsertIntoBucket(TheBucket, std::move(Key), std::forward<Ts>(Args)...);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  // Inserts key,value pair into the map if the key isn't already in the map.
  // The value is constructed in-place if the key is not in the map, otherwise
  // it is not moved.
  template <typename... Ts>
  std::pair<iterator, bool> try_emplace(const KeyT &Key, Ts &&...Args) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucket(TheBucket, Key, std::forward<Ts>(Args)...);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }

  /// Alternate version of insert() which allows a different, and possibly
  /// less expensive, key type.
  /// The DenseMapInfo is responsible for supplying methods
  /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
  /// type used.
  template <typename LookupKeyT>
  std::pair<iterator, bool> insert_as(std::pair<KeyT, ValueT> &&KV,
                                      const LookupKeyT &Val) {
    BucketT *TheBucket;
    if (LookupBucketFor(Val, TheBucket))
      return std::make_pair(makeIterator(TheBucket,
                                         shouldReverseIterate<KeyT>()
                                             ? getBuckets()
                                             : getBucketsEnd(),
                                         *this, true),
                            false); // Already in map.

    // Otherwise, insert the new element.
    TheBucket = InsertIntoBucketWithLookup(TheBucket, std::move(KV.first),
                                           std::move(KV.second), Val);
    return std::make_pair(makeIterator(TheBucket,
                                       shouldReverseIterate<KeyT>()
                                           ? getBuckets()
                                           : getBucketsEnd(),
                                       *this, true),
                          true);
  }
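  // Illustrative sketch (not part of the upstream header): try_emplace
  // constructs the value in place only when the insertion actually happens,
  // so an expensive-to-construct value is never built for an existing key.
  //
  //   DenseMap<int, std::vector<int>> M;
  //   M.try_emplace(1, 100, 0); // constructs std::vector<int>(100, 0) in place
  //   M.try_emplace(1, 999, 1); // key 1 exists: no vector is constructed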
  /// insert - Range insertion of pairs.
  template <typename InputIt> void insert(InputIt I, InputIt E) {
    for (; I != E; ++I)
      insert(*I);
  }
  template <typename V>
  std::pair<iterator, bool> insert_or_assign(const KeyT &Key, V &&Val) {
    auto Ret = try_emplace(Key, std::forward<V>(Val));
    if (!Ret.second)
      Ret.first->second = std::forward<V>(Val);
    return Ret;
  }

  template <typename V>
  std::pair<iterator, bool> insert_or_assign(KeyT &&Key, V &&Val) {
    auto Ret = try_emplace(std::move(Key), std::forward<V>(Val));
    if (!Ret.second)
      Ret.first->second = std::forward<V>(Val);
    return Ret;
  }
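  // Illustrative sketch (not part of the upstream header): insert_or_assign
  // overwrites the mapped value when the key already exists, unlike insert().
  //
  //   DenseMap<int, int> M;
  //   M.insert_or_assign(1, 10); // inserts {1, 10}
  //   M.insert_or_assign(1, 20); // assigns 20 over 10; returns {it, false}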
  bool erase(const KeyT &Val) {
    BucketT *TheBucket = doFind(Val);
    if (!TheBucket)
      return false; // not in map.

    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
    return true;
  }
  void erase(iterator I) {
    BucketT *TheBucket = &*I;
    TheBucket->getSecond().~ValueT();
    TheBucket->getFirst() = getTombstoneKey();
    decrementNumEntries();
    incrementNumTombstones();
  }
  ValueT &operator[](const KeyT &Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return TheBucket->second;

    return InsertIntoBucket(TheBucket, Key)->second;
  }

  ValueT &operator[](KeyT &&Key) {
    BucketT *TheBucket;
    if (LookupBucketFor(Key, TheBucket))
      return TheBucket->second;

    return InsertIntoBucket(TheBucket, std::move(Key))->second;
  }
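  // Illustrative sketch (not part of the upstream header): operator[]
  // default-constructs the value for a missing key, so ValueT must be
  // default-constructible; lookup() never inserts and is often preferable
  // for read-only probes.
  //
  //   DenseMap<int, unsigned> Counts;
  //   Counts[42]++; // inserts {42, 0}, then increments to 1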
  /// isPointerIntoBucketsArray - Return true if the specified pointer points
  /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
  /// value in the DenseMap).
  bool isPointerIntoBucketsArray(const void *Ptr) const {
    return Ptr >= getBuckets() && Ptr < getBucketsEnd();
  }

  /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
  /// array.  In conjunction with the previous method, this can be used to
  /// determine whether an insertion caused the DenseMap to reallocate.
  const void *getPointerIntoBucketsArray() const { return getBuckets(); }

protected:
  DenseMapBase() = default;

  void destroyAll() {
    if (getNumBuckets() == 0) // Nothing to do.
      return;

    const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey();
    for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) {
      if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(P->getFirst(), TombstoneKey))
        P->getSecond().~ValueT();
      P->getFirst().~KeyT();
    }
  }
  void initEmpty() {
    setNumEntries(0);
    setNumTombstones(0);

    assert((getNumBuckets() & (getNumBuckets() - 1)) == 0 &&
           "# initial buckets must be a power of two!");
    const KeyT EmptyKey = getEmptyKey();
    for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B)
      ::new (&B->getFirst()) KeyT(EmptyKey);
  }
  /// Returns the number of buckets to allocate to ensure that the DenseMap can
  /// accommodate \p NumEntries without need to grow().
  unsigned getMinBucketToReserveForEntries(unsigned NumEntries) {
    // Ensure that "NumEntries * 4 < NumBuckets * 3"
    if (NumEntries == 0)
      return 0;
    // +1 is required because of the strict inequality.
    // For example if NumEntries is 48, we need to return 128:
    // 48 * 4 / 3 + 1 == 65, and the next power of two above that is 128.
    return NextPowerOf2(NumEntries * 4 / 3 + 1);
  }

  void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) {
    initEmpty();

    // Insert all the old elements.
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) {
      if (!KeyInfoT::isEqual(B->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(B->getFirst(), TombstoneKey)) {
        // Insert the key/value into the new table.
        BucketT *DestBucket;
        bool FoundVal = LookupBucketFor(B->getFirst(), DestBucket);
        (void)FoundVal; // silence warning.
        assert(!FoundVal && "Key already in new map?");
        DestBucket->getFirst() = std::move(B->getFirst());
        ::new (&DestBucket->getSecond()) ValueT(std::move(B->getSecond()));
        incrementNumEntries();

        // Free the value.
        B->getSecond().~ValueT();
      }
      B->getFirst().~KeyT();
    }
  }
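  // Worked example (illustrative, not part of the upstream header) of the
  // reservation arithmetic above: getMinBucketToReserveForEntries(48)
  // computes 48 * 4 / 3 + 1 == 65 and rounds up to NextPowerOf2(65) == 128.
  // Indeed 48 * 4 == 192 < 128 * 3 == 384 holds, while 64 buckets would fail
  // the strict inequality (192 < 192 is false) and force a grow().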
  template <typename OtherBaseT>
  void copyFrom(
      const DenseMapBase<OtherBaseT, KeyT, ValueT, KeyInfoT, BucketT> &other) {
    assert(&other != this);
    assert(getNumBuckets() == other.getNumBuckets());

    setNumEntries(other.getNumEntries());
    setNumTombstones(other.getNumTombstones());

    BucketT *Buckets = getBuckets();
    const BucketT *OtherBuckets = other.getBuckets();
    const size_t NumBuckets = getNumBuckets();
    if constexpr (std::is_trivially_copyable_v<KeyT> &&
                  std::is_trivially_copyable_v<ValueT>) {
      memcpy(reinterpret_cast<void *>(Buckets), OtherBuckets,
             NumBuckets * sizeof(BucketT));
    } else {
      const KeyT EmptyKey = getEmptyKey();
      const KeyT TombstoneKey = getTombstoneKey();
      for (size_t I = 0; I < NumBuckets; ++I) {
        ::new (&Buckets[I].getFirst()) KeyT(OtherBuckets[I].getFirst());
        if (!KeyInfoT::isEqual(Buckets[I].getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(Buckets[I].getFirst(), TombstoneKey))
          ::new (&Buckets[I].getSecond()) ValueT(OtherBuckets[I].getSecond());
      }
    }
  }
  static unsigned getHashValue(const KeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  template <typename LookupKeyT>
  static unsigned getHashValue(const LookupKeyT &Val) {
    return KeyInfoT::getHashValue(Val);
  }

  static const KeyT getEmptyKey() {
    static_assert(std::is_base_of_v<DenseMapBase, DerivedT>,
                  "Must pass the derived type to this template!");
    return KeyInfoT::getEmptyKey();
  }

  static const KeyT getTombstoneKey() { return KeyInfoT::getTombstoneKey(); }
private:
  iterator makeIterator(BucketT *P, BucketT *E, DebugEpochBase &Epoch,
                        bool NoAdvance = false) {
    if (shouldReverseIterate<KeyT>()) {
      BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
      return iterator(B, E, Epoch, NoAdvance);
    }
    return iterator(P, E, Epoch, NoAdvance);
  }
  const_iterator makeConstIterator(const BucketT *P, const BucketT *E,
                                   const DebugEpochBase &Epoch,
                                   const bool NoAdvance = false) const {
    if (shouldReverseIterate<KeyT>()) {
      const BucketT *B = P == getBucketsEnd() ? getBuckets() : P + 1;
      return const_iterator(B, E, Epoch, NoAdvance);
    }
    return const_iterator(P, E, Epoch, NoAdvance);
  }
  unsigned getNumEntries() const {
    return static_cast<const DerivedT *>(this)->getNumEntries();
  }

  void setNumEntries(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumEntries(Num);
  }

  void incrementNumEntries() { setNumEntries(getNumEntries() + 1); }

  void decrementNumEntries() { setNumEntries(getNumEntries() - 1); }

  unsigned getNumTombstones() const {
    return static_cast<const DerivedT *>(this)->getNumTombstones();
  }

  void setNumTombstones(unsigned Num) {
    static_cast<DerivedT *>(this)->setNumTombstones(Num);
  }

  void incrementNumTombstones() { setNumTombstones(getNumTombstones() + 1); }

  void decrementNumTombstones() { setNumTombstones(getNumTombstones() - 1); }

  const BucketT *getBuckets() const {
    return static_cast<const DerivedT *>(this)->getBuckets();
  }

  BucketT *getBuckets() { return static_cast<DerivedT *>(this)->getBuckets(); }

  unsigned getNumBuckets() const {
    return static_cast<const DerivedT *>(this)->getNumBuckets();
  }

  BucketT *getBucketsEnd() { return getBuckets() + getNumBuckets(); }

  const BucketT *getBucketsEnd() const {
    return getBuckets() + getNumBuckets();
  }

  void grow(unsigned AtLeast) { static_cast<DerivedT *>(this)->grow(AtLeast); }

  void shrink_and_clear() {
    static_cast<DerivedT *>(this)->shrink_and_clear();
  }
  template <typename KeyArg, typename... ValueArgs>
  BucketT *InsertIntoBucket(BucketT *TheBucket, KeyArg &&Key,
                            ValueArgs &&...Values) {
    TheBucket = InsertIntoBucketImpl(Key, TheBucket);

    TheBucket->getFirst() = std::forward<KeyArg>(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::forward<ValueArgs>(Values)...);
    return TheBucket;
  }
  template <typename LookupKeyT>
  BucketT *InsertIntoBucketWithLookup(BucketT *TheBucket, KeyT &&Key,
                                      ValueT &&Value, LookupKeyT &Lookup) {
    TheBucket = InsertIntoBucketImpl(Lookup, TheBucket);

    TheBucket->getFirst() = std::move(Key);
    ::new (&TheBucket->getSecond()) ValueT(std::move(Value));
    return TheBucket;
  }
  template <typename LookupKeyT>
  BucketT *InsertIntoBucketImpl(const LookupKeyT &Lookup, BucketT *TheBucket) {
    incrementEpoch();

    // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
    // the buckets are empty (meaning that many are filled with tombstones),
    // grow the table.
    //
    // The latter case is tricky.  For example, if we had one empty bucket with
    // tons of tombstones, failing lookups (e.g. for insertion) would have to
    // probe almost the entire table until it found the empty bucket.  If the
    // table completely filled with tombstones, no lookup would ever succeed,
    // causing infinite loops in lookup.
    unsigned NewNumEntries = getNumEntries() + 1;
    unsigned NumBuckets = getNumBuckets();
    if (LLVM_UNLIKELY(NewNumEntries * 4 >= NumBuckets * 3)) {
      this->grow(NumBuckets * 2);
      LookupBucketFor(Lookup, TheBucket);
      NumBuckets = getNumBuckets();
    } else if (LLVM_UNLIKELY(NumBuckets -
                                 (NewNumEntries + getNumTombstones()) <=
                             NumBuckets / 8)) {
      this->grow(NumBuckets);
      LookupBucketFor(Lookup, TheBucket);
    }
    assert(TheBucket);

    // Only update the state after we've grown our bucket space appropriately
    // so that when growing buckets we have self-consistent entry count.
    incrementNumEntries();

    // If we are writing over a tombstone, remember this.
    const KeyT EmptyKey = getEmptyKey();
    if (!KeyInfoT::isEqual(TheBucket->getFirst(), EmptyKey))
      decrementNumTombstones();

    return TheBucket;
  }
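  // Worked example (illustrative, not part of the upstream header) of the
  // growth policy above: with 64 buckets, the table doubles to 128 once the
  // 49th entry arrives (49 * 4 == 196 >= 64 * 3 == 192). Independently,
  // inserting the 41st entry while 17 tombstones exist leaves only
  // 64 - (41 + 17) == 6 empty buckets, which is <= 64 / 8 == 8, so the map
  // rehashes at the same size (grow(64)) purely to clear out tombstones.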
  template <typename LookupKeyT> BucketT *doFind(const LookupKeyT &Val) {
    BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();
    if (NumBuckets == 0)
      return nullptr;

    const KeyT EmptyKey = getEmptyKey();
    unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
    unsigned ProbeAmt = 1;
    while (true) {
      BucketT *Bucket = BucketsPtr + BucketNo;
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, Bucket->getFirst())))
        return Bucket;
      if (LLVM_LIKELY(KeyInfoT::isEqual(Bucket->getFirst(), EmptyKey)))
        return nullptr;

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= NumBuckets - 1;
    }
  }
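  // Illustrative probe trace (not part of the upstream header): with
  // NumBuckets == 8 and a key hashing to bucket 5, the probe sequence visits
  // 5, then 6 (+1), 0 (+2, mod 8), 3 (+3), 7 (+4), 4 (+5), 2 (+6), 1 (+7).
  // Because the bucket count is a power of two and the step grows by one on
  // each collision, the sequence eventually touches every bucket, so the
  // loop terminates as long as at least one empty bucket exists.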
  template <typename LookupKeyT>
  const BucketT *doFind(const LookupKeyT &Val) const {
    return const_cast<DenseMapBase *>(this)->doFind(Val); // NOLINT
  }

  /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
  /// FoundBucket.  If the bucket contains the key and a value, this returns
  /// true, otherwise it returns a bucket with an empty marker or tombstone and
  /// returns false.
  template <typename LookupKeyT>
  bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) {
    BucketT *BucketsPtr = getBuckets();
    const unsigned NumBuckets = getNumBuckets();

    if (NumBuckets == 0) {
      FoundBucket = nullptr;
      return false;
    }

    // FoundTombstone - Keep track of whether we find a tombstone while probing.
    BucketT *FoundTombstone = nullptr;
    const KeyT EmptyKey = getEmptyKey();
    const KeyT TombstoneKey = getTombstoneKey();
    assert(!KeyInfoT::isEqual(Val, EmptyKey) &&
           !KeyInfoT::isEqual(Val, TombstoneKey) &&
           "Empty/Tombstone value shouldn't be inserted into map!");

    unsigned BucketNo = getHashValue(Val) & (NumBuckets - 1);
    unsigned ProbeAmt = 1;
    while (true) {
      BucketT *ThisBucket = BucketsPtr + BucketNo;
      // Found Val's bucket?  If so, return it.
      if (LLVM_LIKELY(KeyInfoT::isEqual(Val, ThisBucket->getFirst()))) {
        FoundBucket = ThisBucket;
        return true;
      }

      // If we found an empty bucket, the key doesn't exist in the set.
      // Insert it and return the default value.
      if (LLVM_LIKELY(KeyInfoT::isEqual(ThisBucket->getFirst(), EmptyKey))) {
        // If we've already seen a tombstone while probing, fill it in instead
        // of the empty bucket we eventually probed to.
        FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket;
        return false;
      }

      // If this is a tombstone, remember it.  If Val ends up not in the map, we
      // prefer to return it than something that would require more probing.
      if (KeyInfoT::isEqual(ThisBucket->getFirst(), TombstoneKey) &&
          !FoundTombstone)
        FoundTombstone = ThisBucket; // Remember the first tombstone found.

      // Otherwise, it's a hash collision or a tombstone, continue quadratic
      // probing.
      BucketNo += ProbeAmt++;
      BucketNo &= (NumBuckets - 1);
    }
  }
public:
  /// Return the approximate size (in bytes) of the actual map.
  /// This is just the raw memory used by DenseMap.
  /// If entries are pointers to objects, the size of the referenced objects
  /// are not included.
  size_t getMemorySize() const { return getNumBuckets() * sizeof(BucketT); }
};

/// Equality comparison for DenseMap.
///
/// Iterates over elements of LHS confirming that each (key, value) pair in LHS
/// is also in RHS, and that no additional pairs are in RHS.
/// Equivalent to N calls to RHS.find and N value comparisons. Amortized
/// complexity is linear, worst case is O(N^2) (if every hash collides).
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator==(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  if (LHS.size() != RHS.size())
    return false;

  for (auto &KV : LHS) {
    auto I = RHS.find(KV.first);
    if (I == RHS.end() || I->second != KV.second)
      return false;
  }

  return true;
}
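// Illustrative sketch (not part of the upstream header): equality is
// order-independent, since each LHS pair is looked up in RHS by key.
//
//   DenseMap<int, int> A, B;
//   A.insert({1, 10}); A.insert({2, 20});
//   B.insert({2, 20}); B.insert({1, 10});
//   assert(A == B); // same pairs, different insertion order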
/// Inequality comparison for DenseMap.
///
/// Equivalent to !(LHS == RHS). See operator== for performance notes.
template <typename DerivedT, typename KeyT, typename ValueT, typename KeyInfoT,
          typename BucketT>
bool operator!=(
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &LHS,
    const DenseMapBase<DerivedT, KeyT, ValueT, KeyInfoT, BucketT> &RHS) {
  return !(LHS == RHS);
}

template <typename KeyT, typename ValueT,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
                                     KeyT, ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  BucketT *Buckets;
  unsigned NumEntries;
  unsigned NumTombstones;
  unsigned NumBuckets;
public:
  /// Create a DenseMap with an optional \p InitialReserve that guarantees that
  /// this number of elements can be inserted in the map without grow().
  explicit DenseMap(unsigned InitialReserve = 0) { init(InitialReserve); }

  DenseMap(const DenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  DenseMap(DenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template <typename InputIt> DenseMap(const InputIt &I, const InputIt &E) {
    init(std::distance(I, E));
    this->insert(I, E);
  }

  DenseMap(std::initializer_list<typename BaseT::value_type> Vals) {
    init(Vals.size());
    this->insert(Vals.begin(), Vals.end());
  }

  ~DenseMap() {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
  }
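  // Illustrative construction sketch (not part of the upstream header):
  //
  //   DenseMap<int, StringRef> M = {{1, "one"}, {2, "two"}}; // init list
  //   DenseMap<int, StringRef> N(/*InitialReserve=*/32);     // pre-sized
  //   std::vector<std::pair<int, StringRef>> V = {{3, "three"}};
  //   DenseMap<int, StringRef> P(V.begin(), V.end());        // range ctor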
  void swap(DenseMap &RHS) {
    this->incrementEpoch();
    RHS.incrementEpoch();
    std::swap(Buckets, RHS.Buckets);
    std::swap(NumEntries, RHS.NumEntries);
    std::swap(NumTombstones, RHS.NumTombstones);
    std::swap(NumBuckets, RHS.NumBuckets);
  }

  DenseMap &operator=(const DenseMap &other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  DenseMap &operator=(DenseMap &&other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const DenseMap &other) {
    this->destroyAll();
    deallocate_buffer(Buckets, sizeof(BucketT) * NumBuckets, alignof(BucketT));
    if (allocateBuckets(other.NumBuckets)) {
      this->BaseT::copyFrom(other);
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }
  void init(unsigned InitNumEntries) {
    auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
    if (allocateBuckets(InitBuckets)) {
      this->BaseT::initEmpty();
    } else {
      NumEntries = 0;
      NumTombstones = 0;
    }
  }
  void grow(unsigned AtLeast) {
    unsigned OldNumBuckets = NumBuckets;
    BucketT *OldBuckets = Buckets;

    allocateBuckets(std::max<unsigned>(
        64, static_cast<unsigned>(NextPowerOf2(AtLeast - 1))));
    assert(Buckets);
    if (!OldBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    this->moveFromOldBuckets(OldBuckets, OldBuckets + OldNumBuckets);

    // Free the old table.
    deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
                      alignof(BucketT));
  }

  void shrink_and_clear() {
    unsigned OldNumBuckets = NumBuckets;
    unsigned OldNumEntries = NumEntries;
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldNumEntries)
      NewNumBuckets = std::max(64, 1 << (Log2_32_Ceil(OldNumEntries) + 1));
    if (NewNumBuckets == NumBuckets) {
      this->BaseT::initEmpty();
      return;
    }

    deallocate_buffer(Buckets, sizeof(BucketT) * OldNumBuckets,
                      alignof(BucketT));
    init(NewNumBuckets);
  }
private:
  unsigned getNumEntries() const { return NumEntries; }

  void setNumEntries(unsigned Num) { NumEntries = Num; }

  unsigned getNumTombstones() const { return NumTombstones; }

  void setNumTombstones(unsigned Num) { NumTombstones = Num; }

  BucketT *getBuckets() const { return Buckets; }

  unsigned getNumBuckets() const { return NumBuckets; }
  bool allocateBuckets(unsigned Num) {
    NumBuckets = Num;
    if (NumBuckets == 0) {
      Buckets = nullptr;
      return false;
    }

    Buckets = static_cast<BucketT *>(
        allocate_buffer(sizeof(BucketT) * NumBuckets, alignof(BucketT)));
    return true;
  }
};
template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
          typename KeyInfoT = DenseMapInfo<KeyT>,
          typename BucketT = llvm::detail::DenseMapPair<KeyT, ValueT>>
class SmallDenseMap
    : public DenseMapBase<
          SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
          ValueT, KeyInfoT, BucketT> {
  friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  // Lift some types from the dependent base class into this class for
  // simplicity of referring to them.
  using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;

  static_assert(isPowerOf2_64(InlineBuckets),
                "InlineBuckets must be a power of 2.");

  unsigned Small : 1;
  unsigned NumEntries : 31;
  unsigned NumTombstones;

  struct LargeRep {
    BucketT *Buckets;
    unsigned NumBuckets;
  };

  /// A "union" of an inline bucket array and the struct representing
  /// a large bucket. This union will be discriminated by the 'Small' bit.
  AlignedCharArrayUnion<BucketT[InlineBuckets], LargeRep> storage;

public:
  explicit SmallDenseMap(unsigned NumInitBuckets = 0) {
    if (NumInitBuckets > InlineBuckets)
      NumInitBuckets = llvm::bit_ceil(NumInitBuckets);
    init(NumInitBuckets);
  }
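  // Illustrative sketch (not part of the upstream header): SmallDenseMap
  // stores up to InlineBuckets buckets (4 by default, always a power of two)
  // directly inside the object and only heap-allocates once it grows past
  // them, which suits small, short-lived maps.
  //
  //   SmallDenseMap<int, int, 8> M; // up to 8 buckets, no heap traffic
  //   for (int i = 0; i < 4; ++i)
  //     M[i] = i * i;               // stays in the inline representation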
  SmallDenseMap(const SmallDenseMap &other) : BaseT() {
    init(0);
    copyFrom(other);
  }

  SmallDenseMap(SmallDenseMap &&other) : BaseT() {
    init(0);
    swap(other);
  }

  template <typename InputIt>
  SmallDenseMap(const InputIt &I, const InputIt &E) {
    init(NextPowerOf2(std::distance(I, E)));
    this->insert(I, E);
  }

  SmallDenseMap(std::initializer_list<typename BaseT::value_type> Vals)
      : SmallDenseMap(Vals.size()) {
    this->insert(Vals.begin(), Vals.end());
  }

  ~SmallDenseMap() {
    this->destroyAll();
    deallocateBuckets();
  }
  void swap(SmallDenseMap &RHS) {
    unsigned TmpNumEntries = RHS.NumEntries;
    RHS.NumEntries = NumEntries;
    NumEntries = TmpNumEntries;
    std::swap(NumTombstones, RHS.NumTombstones);

    const KeyT EmptyKey = this->getEmptyKey();
    const KeyT TombstoneKey = this->getTombstoneKey();
    if (Small && RHS.Small) {
      // If we're swapping inline bucket arrays, we have to cope with some of
      // the tricky bits of DenseMap's storage system: the buckets are not
      // fully initialized. Thus we swap every key, but we may have
      // a one-directional move of the value.
      for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
        BucketT *LHSB = &getInlineBuckets()[i],
                *RHSB = &RHS.getInlineBuckets()[i];
        bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(LHSB->getFirst(), TombstoneKey));
        bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->getFirst(), EmptyKey) &&
                            !KeyInfoT::isEqual(RHSB->getFirst(), TombstoneKey));
        if (hasLHSValue && hasRHSValue) {
          // Swap together if we can...
          std::swap(*LHSB, *RHSB);
          continue;
        }
        // Swap separately and handle any asymmetry.
        std::swap(LHSB->getFirst(), RHSB->getFirst());
        if (hasLHSValue) {
          ::new (&RHSB->getSecond()) ValueT(std::move(LHSB->getSecond()));
          LHSB->getSecond().~ValueT();
        } else if (hasRHSValue) {
          ::new (&LHSB->getSecond()) ValueT(std::move(RHSB->getSecond()));
          RHSB->getSecond().~ValueT();
        }
      }
      return;
    }
    if (!Small && !RHS.Small) {
      std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets);
      std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets);
      return;
    }

    SmallDenseMap &SmallSide = Small ? *this : RHS;
    SmallDenseMap &LargeSide = Small ? RHS : *this;

    // First stash the large side's rep and move the small side across.
    LargeRep TmpRep = std::move(*LargeSide.getLargeRep());
    LargeSide.getLargeRep()->~LargeRep();
    LargeSide.Small = true;
    // This is similar to the standard move-from-old-buckets, but the bucket
    // count hasn't actually rotated in this case. So we have to carefully
    // move construct the keys and values into their new locations, but there
    // is no need to re-hash things.
    for (unsigned i = 0, e = InlineBuckets; i != e; ++i) {
      BucketT *NewB = &LargeSide.getInlineBuckets()[i],
              *OldB = &SmallSide.getInlineBuckets()[i];
      ::new (&NewB->getFirst()) KeyT(std::move(OldB->getFirst()));
      OldB->getFirst().~KeyT();
      if (!KeyInfoT::isEqual(NewB->getFirst(), EmptyKey) &&
          !KeyInfoT::isEqual(NewB->getFirst(), TombstoneKey)) {
        ::new (&NewB->getSecond()) ValueT(std::move(OldB->getSecond()));
        OldB->getSecond().~ValueT();
      }
    }

    // The hard part of moving the small buckets across is done, just move
    // the TmpRep into its new home.
    SmallSide.Small = false;
    new (SmallSide.getLargeRep()) LargeRep(std::move(TmpRep));
  }
  SmallDenseMap &operator=(const SmallDenseMap &other) {
    if (&other != this)
      copyFrom(other);
    return *this;
  }

  SmallDenseMap &operator=(SmallDenseMap &&other) {
    this->destroyAll();
    deallocateBuckets();
    init(0);
    swap(other);
    return *this;
  }

  void copyFrom(const SmallDenseMap &other) {
    this->destroyAll();
    deallocateBuckets();
    Small = true;
    if (other.getNumBuckets() > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(other.getNumBuckets()));
    }
    this->BaseT::copyFrom(other);
  }
  void init(unsigned InitBuckets) {
    Small = true;
    if (InitBuckets > InlineBuckets) {
      Small = false;
      new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets));
    }
    this->BaseT::initEmpty();
  }
  void grow(unsigned AtLeast) {
    if (AtLeast > InlineBuckets)
      AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast - 1));

    if (Small) {
      // First move the inline buckets into a temporary storage.
      AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
      BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
      BucketT *TmpEnd = TmpBegin;

      // Loop over the buckets, moving non-empty, non-tombstones into the
      // temporary storage. Have the loop move the TmpEnd forward as it goes.
      const KeyT EmptyKey = this->getEmptyKey();
      const KeyT TombstoneKey = this->getTombstoneKey();
      for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) {
        if (!KeyInfoT::isEqual(P->getFirst(), EmptyKey) &&
            !KeyInfoT::isEqual(P->getFirst(), TombstoneKey)) {
          assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
                 "Too many inline buckets!");
          ::new (&TmpEnd->getFirst()) KeyT(std::move(P->getFirst()));
          ::new (&TmpEnd->getSecond()) ValueT(std::move(P->getSecond()));
          ++TmpEnd;
          P->getSecond().~ValueT();
        }
        P->getFirst().~KeyT();
      }

      // AtLeast == InlineBuckets can happen if there are many tombstones,
      // and grow() is used to remove them. Usually we always switch to the
      // large rep here.
      if (AtLeast > InlineBuckets) {
        Small = false;
        new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
      }
      this->moveFromOldBuckets(TmpBegin, TmpEnd);
      return;
    }

    LargeRep OldRep = std::move(*getLargeRep());
    getLargeRep()->~LargeRep();
    if (AtLeast <= InlineBuckets) {
      Small = true;
    } else {
      new (getLargeRep()) LargeRep(allocateBuckets(AtLeast));
    }

    this->moveFromOldBuckets(OldRep.Buckets,
                             OldRep.Buckets + OldRep.NumBuckets);

    // Free the old table.
    deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
                      alignof(BucketT));
  }

  void shrink_and_clear() {
    unsigned OldSize = this->size();
    this->destroyAll();

    // Reduce the number of buckets.
    unsigned NewNumBuckets = 0;
    if (OldSize) {
      NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1);
      if (NewNumBuckets > InlineBuckets && NewNumBuckets < 64u)
        NewNumBuckets = 64;
    }
    if ((Small && NewNumBuckets <= InlineBuckets) ||
        (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) {
      this->BaseT::initEmpty();
      return;
    }

    deallocateBuckets();
    init(NewNumBuckets);
  }
private:
  unsigned getNumEntries() const { return NumEntries; }

  void setNumEntries(unsigned Num) {
    // NumEntries is hardcoded to be 31 bits wide.
    assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
    NumEntries = Num;
  }

  unsigned getNumTombstones() const { return NumTombstones; }

  void setNumTombstones(unsigned Num) { NumTombstones = Num; }
  const BucketT *getInlineBuckets() const {
    assert(Small);
    // Note that this cast does not violate aliasing rules as we assert that
    // the memory's dynamic type is the small, inline bucket buffer, and the
    // 'storage' is a POD containing a char buffer.
    return reinterpret_cast<const BucketT *>(&storage);
  }

  BucketT *getInlineBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
  }

  const LargeRep *getLargeRep() const {
    assert(!Small);
    // Note, same rule about aliasing as with getInlineBuckets.
    return reinterpret_cast<const LargeRep *>(&storage);
  }

  LargeRep *getLargeRep() {
    return const_cast<LargeRep *>(
        const_cast<const SmallDenseMap *>(this)->getLargeRep());
  }

  const BucketT *getBuckets() const {
    return Small ? getInlineBuckets() : getLargeRep()->Buckets;
  }

  BucketT *getBuckets() {
    return const_cast<BucketT *>(
        const_cast<const SmallDenseMap *>(this)->getBuckets());
  }

  unsigned getNumBuckets() const {
    return Small ? InlineBuckets : getLargeRep()->NumBuckets;
  }

  void deallocateBuckets() {
    if (Small)
      return;

    deallocate_buffer(getLargeRep()->Buckets,
                      sizeof(BucketT) * getLargeRep()->NumBuckets,
                      alignof(BucketT));
    getLargeRep()->~LargeRep();
  }

  LargeRep allocateBuckets(unsigned Num) {
    assert(Num > InlineBuckets && "Must allocate more buckets than are inline");
    LargeRep Rep = {static_cast<BucketT *>(allocate_buffer(
                        sizeof(BucketT) * Num, alignof(BucketT))),
                    Num};
    return Rep;
  }
};
template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
          bool IsConst>
class DenseMapIterator : DebugEpochBase::HandleBase {
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;

public:
  using difference_type = ptrdiff_t;
  using value_type = std::conditional_t<IsConst, const Bucket, Bucket>;
  using pointer = value_type *;
  using reference = value_type &;
  using iterator_category = std::forward_iterator_tag;

private:
  pointer Ptr = nullptr;
  pointer End = nullptr;

public:
  DenseMapIterator() = default;

  DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
                   bool NoAdvance = false)
      : DebugEpochBase::HandleBase(&Epoch), Ptr(Pos), End(E) {
    assert(isHandleInSync() && "invalid construction!");

    if (NoAdvance)
      return;
    if (shouldReverseIterate<KeyT>()) {
      RetreatPastEmptyBuckets();
      return;
    }
    AdvancePastEmptyBuckets();
  }
  // Converting ctor from non-const iterators to const iterators. SFINAE'd out
  // for const iterator destinations so it doesn't end up as a user defined copy
  // constructor.
  template <bool IsConstSrc,
            typename = std::enable_if_t<!IsConstSrc && IsConst>>
  DenseMapIterator(
      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, IsConstSrc> &I)
      : DebugEpochBase::HandleBase(I), Ptr(I.Ptr), End(I.End) {}
  reference operator*() const {
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    if (shouldReverseIterate<KeyT>())
      return Ptr[-1];
    return *Ptr;
  }
  pointer operator->() const {
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "dereferencing end() iterator");
    if (shouldReverseIterate<KeyT>())
      return &(Ptr[-1]);
    return Ptr;
  }
  friend bool operator==(const DenseMapIterator &LHS,
                         const DenseMapIterator &RHS) {
    assert((!LHS.Ptr || LHS.isHandleInSync()) && "handle not in sync!");
    assert((!RHS.Ptr || RHS.isHandleInSync()) && "handle not in sync!");
    assert(LHS.getEpochAddress() == RHS.getEpochAddress() &&
           "comparing incomparable iterators!");
    return LHS.Ptr == RHS.Ptr;
  }

  friend bool operator!=(const DenseMapIterator &LHS,
                         const DenseMapIterator &RHS) {
    return !(LHS == RHS);
  }
  inline DenseMapIterator &operator++() { // Preincrement
    assert(isHandleInSync() && "invalid iterator access!");
    assert(Ptr != End && "incrementing end() iterator");
    if (shouldReverseIterate<KeyT>()) {
      --Ptr;
      RetreatPastEmptyBuckets();
      return *this;
    }
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  DenseMapIterator operator++(int) { // Postincrement
    assert(isHandleInSync() && "invalid iterator access!");
    DenseMapIterator tmp = *this;
    ++*this;
    return tmp;
  }
private:
  void AdvancePastEmptyBuckets() {
    assert(Ptr <= End);
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
      ++Ptr;
  }

  void RetreatPastEmptyBuckets() {
    assert(Ptr >= End);
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr[-1].getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr[-1].getFirst(), Tombstone)))
      --Ptr;
  }
};
template <typename KeyT, typename ValueT, typename KeyInfoT>
inline size_t capacity_in_bytes(const DenseMap<KeyT, ValueT, KeyInfoT> &X) {
  return X.getMemorySize();
}
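// Worked example (illustrative, not part of the upstream header): a
// DenseMap<int, int> holding 48 entries will have grown to at least 128
// buckets under the 3/4 load-factor policy, so getMemorySize() reports
// 128 * sizeof(BucketT) -- 1024 bytes with 4-byte keys and values. Memory
// referenced by the keys or values themselves is not counted.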
} // end namespace llvm

#endif // LLVM_ADT_DENSEMAP_H