[llvm] [ADT] Make internal methods of DenseMap/SmallDenseMap private (NFC) (PR #165079)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Oct 24 23:30:03 PDT 2025
llvmbot wrote:
<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-llvm-adt
Author: Kazu Hirata (kazutakahirata)
<details>
<summary>Changes</summary>
This patch moves the init, copyFrom, and grow methods in DenseMap and
SmallDenseMap from public to private to hide implementation details.
The one complication is that PhysicalRegisterUsageInfo calls
DenseMap::grow instead of DenseMap::reserve, which does not appear to
be intentional. This patch updates that call site to use reserve.
---
Full diff: https://github.com/llvm/llvm-project/pull/165079.diff
2 Files Affected:
- (modified) llvm/include/llvm/ADT/DenseMap.h (+90-90)
- (modified) llvm/lib/CodeGen/RegisterUsageInfo.cpp (+1-1)
``````````diff
diff --git a/llvm/include/llvm/ADT/DenseMap.h b/llvm/include/llvm/ADT/DenseMap.h
index 25b5262800a10..0b9ae77606266 100644
--- a/llvm/include/llvm/ADT/DenseMap.h
+++ b/llvm/include/llvm/ADT/DenseMap.h
@@ -767,37 +767,6 @@ class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
return *this;
}
- void copyFrom(const DenseMap &other) {
- this->destroyAll();
- deallocateBuckets();
- if (allocateBuckets(other.NumBuckets)) {
- this->BaseT::copyFrom(other);
- } else {
- NumEntries = 0;
- NumTombstones = 0;
- }
- }
-
- void grow(unsigned AtLeast) {
- unsigned OldNumBuckets = NumBuckets;
- BucketT *OldBuckets = Buckets;
-
- allocateBuckets(std::max<unsigned>(
- 64, static_cast<unsigned>(NextPowerOf2(AtLeast - 1))));
- assert(Buckets);
- if (!OldBuckets) {
- this->BaseT::initEmpty();
- return;
- }
-
- this->moveFromOldBuckets(
- llvm::make_range(OldBuckets, OldBuckets + OldNumBuckets));
-
- // Free the old table.
- deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
- alignof(BucketT));
- }
-
void shrink_and_clear() {
unsigned OldNumBuckets = NumBuckets;
unsigned OldNumEntries = NumEntries;
@@ -855,6 +824,37 @@ class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
NumTombstones = 0;
}
}
+
+ void copyFrom(const DenseMap &other) {
+ this->destroyAll();
+ deallocateBuckets();
+ if (allocateBuckets(other.NumBuckets)) {
+ this->BaseT::copyFrom(other);
+ } else {
+ NumEntries = 0;
+ NumTombstones = 0;
+ }
+ }
+
+ void grow(unsigned AtLeast) {
+ unsigned OldNumBuckets = NumBuckets;
+ BucketT *OldBuckets = Buckets;
+
+ allocateBuckets(std::max<unsigned>(
+ 64, static_cast<unsigned>(NextPowerOf2(AtLeast - 1))));
+ assert(Buckets);
+ if (!OldBuckets) {
+ this->BaseT::initEmpty();
+ return;
+ }
+
+ this->moveFromOldBuckets(
+ llvm::make_range(OldBuckets, OldBuckets + OldNumBuckets));
+
+ // Free the old table.
+ deallocate_buffer(OldBuckets, sizeof(BucketT) * OldNumBuckets,
+ alignof(BucketT));
+ }
};
template <typename KeyT, typename ValueT, unsigned InlineBuckets = 4,
@@ -1007,65 +1007,6 @@ class SmallDenseMap
return *this;
}
- void copyFrom(const SmallDenseMap &other) {
- this->destroyAll();
- deallocateBuckets();
- allocateBuckets(other.getNumBuckets());
- this->BaseT::copyFrom(other);
- }
-
- void init(unsigned InitNumEntries) {
- auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
- allocateBuckets(InitBuckets);
- this->BaseT::initEmpty();
- }
-
- void grow(unsigned AtLeast) {
- if (AtLeast > InlineBuckets)
- AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast - 1));
-
- if (Small) {
- // First move the inline buckets into a temporary storage.
- AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
- BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
- BucketT *TmpEnd = TmpBegin;
-
- // Loop over the buckets, moving non-empty, non-tombstones into the
- // temporary storage. Have the loop move the TmpEnd forward as it goes.
- const KeyT EmptyKey = this->getEmptyKey();
- const KeyT TombstoneKey = this->getTombstoneKey();
- for (BucketT &B : inlineBuckets()) {
- if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
- !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
- assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
- "Too many inline buckets!");
- ::new (&TmpEnd->getFirst()) KeyT(std::move(B.getFirst()));
- ::new (&TmpEnd->getSecond()) ValueT(std::move(B.getSecond()));
- ++TmpEnd;
- B.getSecond().~ValueT();
- }
- B.getFirst().~KeyT();
- }
-
- // AtLeast == InlineBuckets can happen if there are many tombstones,
- // and grow() is used to remove them. Usually we always switch to the
- // large rep here.
- allocateBuckets(AtLeast);
- this->moveFromOldBuckets(llvm::make_range(TmpBegin, TmpEnd));
- return;
- }
-
- LargeRep OldRep = std::move(*getLargeRep());
- getLargeRep()->~LargeRep();
- allocateBuckets(AtLeast);
-
- this->moveFromOldBuckets(OldRep.buckets());
-
- // Free the old table.
- deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
- alignof(BucketT));
- }
-
void shrink_and_clear() {
unsigned OldSize = this->size();
this->destroyAll();
@@ -1162,6 +1103,65 @@ class SmallDenseMap
new (getLargeRep()) LargeRep{NewBuckets, Num};
}
}
+
+ void init(unsigned InitNumEntries) {
+ auto InitBuckets = BaseT::getMinBucketToReserveForEntries(InitNumEntries);
+ allocateBuckets(InitBuckets);
+ this->BaseT::initEmpty();
+ }
+
+ void copyFrom(const SmallDenseMap &other) {
+ this->destroyAll();
+ deallocateBuckets();
+ allocateBuckets(other.getNumBuckets());
+ this->BaseT::copyFrom(other);
+ }
+
+ void grow(unsigned AtLeast) {
+ if (AtLeast > InlineBuckets)
+ AtLeast = std::max<unsigned>(64, NextPowerOf2(AtLeast - 1));
+
+ if (Small) {
+ // First move the inline buckets into a temporary storage.
+ AlignedCharArrayUnion<BucketT[InlineBuckets]> TmpStorage;
+ BucketT *TmpBegin = reinterpret_cast<BucketT *>(&TmpStorage);
+ BucketT *TmpEnd = TmpBegin;
+
+ // Loop over the buckets, moving non-empty, non-tombstones into the
+ // temporary storage. Have the loop move the TmpEnd forward as it goes.
+ const KeyT EmptyKey = this->getEmptyKey();
+ const KeyT TombstoneKey = this->getTombstoneKey();
+ for (BucketT &B : inlineBuckets()) {
+ if (!KeyInfoT::isEqual(B.getFirst(), EmptyKey) &&
+ !KeyInfoT::isEqual(B.getFirst(), TombstoneKey)) {
+ assert(size_t(TmpEnd - TmpBegin) < InlineBuckets &&
+ "Too many inline buckets!");
+ ::new (&TmpEnd->getFirst()) KeyT(std::move(B.getFirst()));
+ ::new (&TmpEnd->getSecond()) ValueT(std::move(B.getSecond()));
+ ++TmpEnd;
+ B.getSecond().~ValueT();
+ }
+ B.getFirst().~KeyT();
+ }
+
+ // AtLeast == InlineBuckets can happen if there are many tombstones,
+ // and grow() is used to remove them. Usually we always switch to the
+ // large rep here.
+ allocateBuckets(AtLeast);
+ this->moveFromOldBuckets(llvm::make_range(TmpBegin, TmpEnd));
+ return;
+ }
+
+ LargeRep OldRep = std::move(*getLargeRep());
+ getLargeRep()->~LargeRep();
+ allocateBuckets(AtLeast);
+
+ this->moveFromOldBuckets(OldRep.buckets());
+
+ // Free the old table.
+ deallocate_buffer(OldRep.Buckets, sizeof(BucketT) * OldRep.NumBuckets,
+ alignof(BucketT));
+ }
};
template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
diff --git a/llvm/lib/CodeGen/RegisterUsageInfo.cpp b/llvm/lib/CodeGen/RegisterUsageInfo.cpp
index 7a4628a6e91d4..2ef380fc7cad4 100644
--- a/llvm/lib/CodeGen/RegisterUsageInfo.cpp
+++ b/llvm/lib/CodeGen/RegisterUsageInfo.cpp
@@ -44,7 +44,7 @@ void PhysicalRegisterUsageInfo::setTargetMachine(const TargetMachine &TM) {
}
bool PhysicalRegisterUsageInfo::doInitialization(Module &M) {
- RegMasks.grow(M.size());
+ RegMasks.reserve(M.size());
return false;
}
``````````
</details>
https://github.com/llvm/llvm-project/pull/165079
More information about the llvm-commits
mailing list