[llvm] [ADT][ConcurrentHashTable] Refactor ConcurrentHashTable. (PR #71932)

via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 10 04:22:56 PST 2023


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-llvm-adt

Author: None (avl-llvm)

<details>
<summary>Changes</summary>

The purpose of this refactoring is to optimize the concurrent hashtable for the case when read operations outnumber write operations. It yields a 10x speedup when the same element is read many times (100000000 insertions of the same item) and a 5%-10% speedup in the test reading strings from DWARFFile. Additionally, the patch adds an implementation for non-aggregate types and the possibility of replacing the synchronization object.
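
Concretely, the read-mostly scheme described in the patch comments works like this: each bucket derives from a shared_mutex, insert probes the bucket's atomic slots under a shared lock (so concurrent lookups and insertions into different slots do not serialize), and only a bucket resize takes the exclusive lock. Below is a minimal standalone sketch of that idea with hypothetical names (`Bucket`, `insertShared`) that are not taken from the patch, storing only keys for brevity:

```cpp
#include <atomic>
#include <cstdint>
#include <memory>
#include <shared_mutex>

struct Bucket {
  std::shared_mutex Guard;
  uint64_t Size = 8; // always a power of two
  std::unique_ptr<std::atomic<uint64_t>[]> Keys;
  Bucket() : Keys(new std::atomic<uint64_t>[Size]) {
    // 0 marks an empty slot (mirrors ZeroIsUndefValue), so key 0 cannot be stored.
    for (uint64_t I = 0; I < Size; ++I)
      Keys[I].store(0);
  }
};

// Try to insert Key under the shared lock; return false if the bucket looks
// full, in which case the caller takes B.Guard.lock(), grows the bucket and
// retries (the rehash path in the patch).
bool insertShared(Bucket &B, uint64_t Key, uint64_t Hash) {
  std::shared_lock<std::shared_mutex> Lock(B.Guard);
  uint64_t Idx = Hash & (B.Size - 1);
  for (uint64_t Probes = 0; Probes < B.Size; ++Probes) {
    uint64_t Expected = 0;
    // Claim an empty slot, or detect that the key is already present.
    if (B.Keys[Idx].compare_exchange_strong(Expected, Key) || Expected == Key)
      return true;
    Idx = (Idx + 1) & (B.Size - 1);
  }
  return false;
}
```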

---

Patch is 46.45 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/71932.diff


3 Files Affected:

- (modified) llvm/include/llvm/ADT/ConcurrentHashtable.h (+590-245) 
- (modified) llvm/include/llvm/DWARFLinkerParallel/StringPool.h (+13) 
- (modified) llvm/unittests/ADT/ConcurrentHashtableTest.cpp (+291) 


``````````diff
diff --git a/llvm/include/llvm/ADT/ConcurrentHashtable.h b/llvm/include/llvm/ADT/ConcurrentHashtable.h
index a07688228da187a..7660346752bf59b 100644
--- a/llvm/include/llvm/ADT/ConcurrentHashtable.h
+++ b/llvm/include/llvm/ADT/ConcurrentHashtable.h
@@ -22,19 +22,16 @@
 #include <atomic>
 #include <cstddef>
 #include <iomanip>
-#include <mutex>
+#include <shared_mutex>
 #include <sstream>
 #include <type_traits>
 
 namespace llvm {
 
-/// ConcurrentHashTable - is a resizeable concurrent hashtable.
-/// The number of resizings limited up to x2^31. This hashtable is
-/// useful to have efficient access to aggregate data(like strings,
-/// type descriptors...) and to keep only single copy of such
-/// an aggregate. The hashtable allows only concurrent insertions:
+/// This file contains an implementation of a resizeable concurrent hashtable.
+/// The hashtable allows only concurrent insertions:
 ///
-/// KeyDataTy* = insert ( const KeyTy& );
+/// std::pair<DataTy, bool> = insert ( const KeyTy& );
 ///
 /// Data structure:
 ///
@@ -54,343 +51,691 @@ namespace llvm {
 /// Different buckets may have different sizes. If the single bucket is full
 /// then the bucket is resized.
 ///
-/// BucketsArray keeps all buckets. Each bucket keeps an array of Entries
-/// (pointers to KeyDataTy) and another array of entries hashes:
+/// ConcurrentHashTableBase is a base implementation which encapsulates
+/// common operations. The implementation assumes that the stored data
+/// are POD. It uses special markers for uninitialised values. An
+/// uninitialised value is either zero or 0xff (depending on the
+/// ZeroIsUndefValue parameter).
 ///
-/// BucketsArray[BucketIdx].Hashes[EntryIdx]:
-/// BucketsArray[BucketIdx].Entries[EntryIdx]:
-///
-/// [Bucket 0].Hashes -> [uint32_t][uint32_t]
-/// [Bucket 0].Entries -> [KeyDataTy*][KeyDataTy*]
-///
-/// [Bucket 1].Hashes -> [uint32_t][uint32_t][uint32_t][uint32_t]
-/// [Bucket 1].Entries -> [KeyDataTy*][KeyDataTy*][KeyDataTy*][KeyDataTy*]
-///                      .........................
-/// [Bucket N].Hashes -> [uint32_t][uint32_t][uint32_t]
-/// [Bucket N].Entries -> [KeyDataTy*][KeyDataTy*][KeyDataTy*]
-///
-/// ConcurrentHashTableByPtr uses an external thread-safe allocator to allocate
-/// KeyDataTy items.
+/// ConcurrentHashTableBase uses the shared_mutex interface to lock buckets:
+///   - try_lock_shared/unlock_shared if rehashing is not necessary.
+///   - lock/unlock when the bucket should be exclusively locked and resized.
 
-template <typename KeyTy, typename KeyDataTy, typename AllocatorTy>
-class ConcurrentHashTableInfoByPtr {
-public:
+template <typename KeyTy> struct ConcurrentHashTableBaseInfo {
   /// \returns Hash value for the specified \p Key.
   static inline uint64_t getHashValue(const KeyTy &Key) {
-    return xxh3_64bits(Key);
+    return std::hash<KeyTy>{}(Key);
   }
 
-  /// \returns true if both \p LHS and \p RHS are equal.
-  static inline bool isEqual(const KeyTy &LHS, const KeyTy &RHS) {
-    return LHS == RHS;
-  }
-
-  /// \returns key for the specified \p KeyData.
-  static inline const KeyTy &getKey(const KeyDataTy &KeyData) {
-    return KeyData.getKey();
-  }
+  /// Type of the mutex object (use void for the single-thread version).
+#if LLVM_ENABLE_THREADS
+  using MutexTy = std::shared_mutex;
+#else
+  using MutexTy = void;
+#endif
 
-  /// \returns newly created object of KeyDataTy type.
-  static inline KeyDataTy *create(const KeyTy &Key, AllocatorTy &Allocator) {
-    return KeyDataTy::create(Key, Allocator);
-  }
+  /// Use 'true' for 0x00 undef value, 'false' for 0xff.
+  static constexpr bool ZeroIsUndefValue = true;
 };
 
-template <typename KeyTy, typename KeyDataTy, typename AllocatorTy,
-          typename Info =
-              ConcurrentHashTableInfoByPtr<KeyTy, KeyDataTy, AllocatorTy>>
-class ConcurrentHashTableByPtr {
+template <typename KeyTy, typename DataTy, typename DerivedImplTy,
+          typename Info = ConcurrentHashTableBaseInfo<KeyTy>>
+class ConcurrentHashTableBase {
 public:
-  ConcurrentHashTableByPtr(
-      AllocatorTy &Allocator, uint64_t EstimatedSize = 100000,
+  /// ReservedSize - Specify the number of items for which space
+  ///                will be allocated in advance.
+  /// ThreadsNum - Specify the number of threads that will work
+  ///                with the table.
+  /// InitialNumberOfBuckets - Specify the number of buckets. A small
+  ///                number of buckets may lead to high thread
+  ///                contention and slow execution due to cache
+  ///                synchronization.
+  ConcurrentHashTableBase(
+      uint64_t ReservedSize = 0,
       size_t ThreadsNum = parallel::strategy.compute_thread_count(),
-      size_t InitialNumberOfBuckets = 128)
-      : MultiThreadAllocator(Allocator) {
+      uint64_t InitialNumberOfBuckets = 0) {
     assert((ThreadsNum > 0) && "ThreadsNum must be greater than 0");
-    assert((InitialNumberOfBuckets > 0) &&
-           "InitialNumberOfBuckets must be greater than 0");
+
+    this->ThreadsNum = ThreadsNum;
 
     // Calculate number of buckets.
-    uint64_t EstimatedNumberOfBuckets = ThreadsNum;
-    if (ThreadsNum > 1) {
-      EstimatedNumberOfBuckets *= InitialNumberOfBuckets;
-      EstimatedNumberOfBuckets *= std::max(
-          1,
-          countr_zero(PowerOf2Ceil(EstimatedSize / InitialNumberOfBuckets)) >>
-              2);
+    if constexpr (std::is_void<typename Info::MutexTy>::value)
+      NumberOfBuckets = 1;
+    else {
+      if (InitialNumberOfBuckets)
+        NumberOfBuckets = InitialNumberOfBuckets;
+      else
+        NumberOfBuckets = (ThreadsNum == 1) ? 1 : ThreadsNum * 256;
+      NumberOfBuckets = PowerOf2Ceil(NumberOfBuckets);
     }
-    EstimatedNumberOfBuckets = PowerOf2Ceil(EstimatedNumberOfBuckets);
-    NumberOfBuckets =
-        std::min(EstimatedNumberOfBuckets, (uint64_t)(1Ull << 31));
 
     // Allocate buckets.
     BucketsArray = std::make_unique<Bucket[]>(NumberOfBuckets);
 
-    InitialBucketSize = EstimatedSize / NumberOfBuckets;
-    InitialBucketSize = std::max((uint32_t)1, InitialBucketSize);
-    InitialBucketSize = PowerOf2Ceil(InitialBucketSize);
+    uint64_t InitialBucketSize =
+        calculateBucketSizeFromOverallSize(ReservedSize);
 
     // Initialize each bucket.
-    for (uint32_t Idx = 0; Idx < NumberOfBuckets; Idx++) {
-      HashesPtr Hashes = new ExtHashBitsTy[InitialBucketSize];
-      memset(Hashes, 0, sizeof(ExtHashBitsTy) * InitialBucketSize);
-
-      DataPtr Entries = new EntryDataTy[InitialBucketSize];
-      memset(Entries, 0, sizeof(EntryDataTy) * InitialBucketSize);
-
-      BucketsArray[Idx].Size = InitialBucketSize;
-      BucketsArray[Idx].Hashes = Hashes;
-      BucketsArray[Idx].Entries = Entries;
+    for (uint64_t CurIdx = 0; CurIdx < NumberOfBuckets; ++CurIdx) {
+      BucketsArray[CurIdx].Size = InitialBucketSize;
+      BucketsArray[CurIdx].Data =
+          static_cast<DerivedImplTy *>(this)->allocateData(InitialBucketSize);
     }
 
-    // Calculate masks.
-    HashMask = NumberOfBuckets - 1;
-
-    size_t LeadingZerosNumber = countl_zero(HashMask);
-    HashBitsNum = 64 - LeadingZerosNumber;
-
-    // We keep only high 32-bits of hash value. So bucket size cannot
-    // exceed 2^31. Bucket size is always power of two.
-    MaxBucketSize = 1Ull << (std::min((size_t)31, LeadingZerosNumber));
+    // Calculate mask.
+    BucketsHashMask = NumberOfBuckets - 1;
 
-    // Calculate mask for extended hash bits.
-    ExtHashMask = (uint64_t)NumberOfBuckets * MaxBucketSize - 1;
-  }
-
-  virtual ~ConcurrentHashTableByPtr() {
-    // Deallocate buckets.
-    for (uint32_t Idx = 0; Idx < NumberOfBuckets; Idx++) {
-      delete[] BucketsArray[Idx].Hashes;
-      delete[] BucketsArray[Idx].Entries;
-    }
+    size_t LeadingZerosNumber = countl_zero(BucketsHashMask);
+    BucketsHashBitsNum = 64 - LeadingZerosNumber;
   }
 
-  /// Insert new value \p NewValue or return already existing entry.
-  ///
-  /// \returns entry and "true" if an entry is just inserted or
-  /// "false" if an entry already exists.
-  std::pair<KeyDataTy *, bool> insert(const KeyTy &NewValue) {
-    // Calculate bucket index.
-    uint64_t Hash = Info::getHashValue(NewValue);
-    Bucket &CurBucket = BucketsArray[getBucketIdx(Hash)];
-    uint32_t ExtHashBits = getExtHashBits(Hash);
-
-#if LLVM_ENABLE_THREADS
-    // Lock bucket.
-    CurBucket.Guard.lock();
-#endif
-
-    HashesPtr BucketHashes = CurBucket.Hashes;
-    DataPtr BucketEntries = CurBucket.Entries;
-    uint32_t CurEntryIdx = getStartIdx(ExtHashBits, CurBucket.Size);
-
-    while (true) {
-      uint32_t CurEntryHashBits = BucketHashes[CurEntryIdx];
-
-      if (CurEntryHashBits == 0 && BucketEntries[CurEntryIdx] == nullptr) {
-        // Found empty slot. Insert data.
-        KeyDataTy *NewData = Info::create(NewValue, MultiThreadAllocator);
-        BucketEntries[CurEntryIdx] = NewData;
-        BucketHashes[CurEntryIdx] = ExtHashBits;
-
-        CurBucket.NumberOfEntries++;
-        RehashBucket(CurBucket);
-
-#if LLVM_ENABLE_THREADS
-        CurBucket.Guard.unlock();
-#endif
-
-        return {NewData, true};
+  /// Erase the contents of the table.
+  void clear(uint64_t ReservedSize = 0) {
+    if (ReservedSize) {
+      for (uint64_t CurIdx = 0; CurIdx < NumberOfBuckets; ++CurIdx) {
+        Bucket &CurBucket = BucketsArray[CurIdx];
+        delete[] CurBucket.Data;
+        uint64_t BucketSize = calculateBucketSizeFromOverallSize(ReservedSize);
+        CurBucket.Size = BucketSize;
+        CurBucket.Data =
+            static_cast<DerivedImplTy *>(this)->allocateData(BucketSize);
       }
-
-      if (CurEntryHashBits == ExtHashBits) {
-        // Hash matched. Check value for equality.
-        KeyDataTy *EntryData = BucketEntries[CurEntryIdx];
-        if (Info::isEqual(Info::getKey(*EntryData), NewValue)) {
-          // Already existed entry matched with inserted data is found.
-#if LLVM_ENABLE_THREADS
-          CurBucket.Guard.unlock();
-#endif
-
-          return {EntryData, false};
-        }
+    } else {
+      for (uint64_t CurIdx = 0; CurIdx < NumberOfBuckets; ++CurIdx) {
+        Bucket &CurBucket = BucketsArray[CurIdx];
+        uint64_t BufferSize =
+            static_cast<DerivedImplTy *>(this)->getBufferSize(CurBucket.Size);
+        fillBufferWithUndefValue(CurBucket.Data, BufferSize);
       }
-
-      CurEntryIdx++;
-      CurEntryIdx &= (CurBucket.Size - 1);
     }
+  }
 
-    llvm_unreachable("Insertion error.");
-    return {};
+  ~ConcurrentHashTableBase() {
+    // Deallocate buckets.
+    for (uint64_t Idx = 0; Idx < NumberOfBuckets; Idx++)
+      delete[] BucketsArray[Idx].Data;
   }
 
   /// Print information about current state of hash table structures.
   void printStatistic(raw_ostream &OS) {
     OS << "\n--- HashTable statistic:\n";
     OS << "\nNumber of buckets = " << NumberOfBuckets;
-    OS << "\nInitial bucket size = " << InitialBucketSize;
 
-    uint64_t NumberOfNonEmptyBuckets = 0;
-    uint64_t NumberOfEntriesPlusEmpty = 0;
+    uint64_t OverallNumberOfAllocatedEntries = 0;
     uint64_t OverallNumberOfEntries = 0;
     uint64_t OverallSize = sizeof(*this) + NumberOfBuckets * sizeof(Bucket);
 
-    DenseMap<uint32_t, uint32_t> BucketSizesMap;
+    DenseMap<uint64_t, uint64_t> BucketSizesMap;
 
     // For each bucket...
-    for (uint32_t Idx = 0; Idx < NumberOfBuckets; Idx++) {
+    for (uint64_t Idx = 0; Idx < NumberOfBuckets; Idx++) {
       Bucket &CurBucket = BucketsArray[Idx];
 
       BucketSizesMap[CurBucket.Size]++;
 
-      if (CurBucket.NumberOfEntries != 0)
-        NumberOfNonEmptyBuckets++;
-      NumberOfEntriesPlusEmpty += CurBucket.Size;
-      OverallNumberOfEntries += CurBucket.NumberOfEntries;
+      OverallNumberOfAllocatedEntries += CurBucket.Size;
+      OverallNumberOfEntries +=
+          calculateNumberOfEntries(CurBucket.Data, CurBucket.Size);
       OverallSize +=
-          (sizeof(ExtHashBitsTy) + sizeof(EntryDataTy)) * CurBucket.Size;
+          static_cast<DerivedImplTy *>(this)->getBufferSize(CurBucket.Size);
     }
 
     OS << "\nOverall number of entries = " << OverallNumberOfEntries;
-    OS << "\nOverall number of non empty buckets = " << NumberOfNonEmptyBuckets;
-    for (auto &BucketSize : BucketSizesMap)
-      OS << "\n Number of buckets with size " << BucketSize.first << ": "
-         << BucketSize.second;
+    OS << "\nOverall allocated size = " << OverallSize;
 
     std::stringstream stream;
     stream << std::fixed << std::setprecision(2)
-           << ((float)OverallNumberOfEntries / (float)NumberOfEntriesPlusEmpty);
+           << ((float)OverallNumberOfEntries /
+               (float)OverallNumberOfAllocatedEntries);
     std::string str = stream.str();
 
     OS << "\nLoad factor = " << str;
-    OS << "\nOverall allocated size = " << OverallSize;
+    for (auto &BucketSize : BucketSizesMap)
+      OS << "\n Number of buckets with size " << BucketSize.first << ": "
+         << BucketSize.second;
   }
 
 protected:
-  using ExtHashBitsTy = uint32_t;
-  using EntryDataTy = KeyDataTy *;
-
-  using HashesPtr = ExtHashBitsTy *;
-  using DataPtr = EntryDataTy *;
+  struct VoidMutex {
+    inline bool try_lock_shared() { return true; }
+    inline void lock_shared() {}
+    inline void unlock_shared() {}
+    inline void lock() {}
+    inline void unlock() {}
+  };
 
-  // Bucket structure. Keeps bucket data.
-  struct Bucket {
+  /// Bucket structure. Keeps bucket data.
+  struct Bucket
+      : public std::conditional_t<std::is_void<typename Info::MutexTy>::value,
+                                  VoidMutex, typename Info::MutexTy> {
     Bucket() = default;
 
-    // Size of bucket.
-    uint32_t Size = 0;
+    /// Size of bucket.
+    uint64_t Size;
+
+    /// Buffer keeping bucket data.
+    uint8_t *Data;
+  };
+
+  void fillBufferWithUndefValue(uint8_t *Data, uint64_t BufferSize) const {
+    if constexpr (Info::ZeroIsUndefValue)
+      memset(Data, 0, BufferSize);
+    else
+      memset(Data, 0xff, BufferSize);
+  }
+
+  uint64_t calculateNumberOfEntries(uint8_t *Data, uint64_t Size) {
+    uint64_t Result = 0;
+    for (uint64_t CurIdx = 0; CurIdx < Size; CurIdx++) {
+      auto &AtomicData =
+          static_cast<DerivedImplTy *>(this)->getDataEntry(Data, CurIdx, Size);
+      if (!isNull(AtomicData.load()))
+        Result++;
+    }
+
+    return Result;
+  }
+
+  uint64_t calculateBucketSizeFromOverallSize(uint64_t OverallSize) const {
+    uint64_t BucketSize = OverallSize / NumberOfBuckets;
+    BucketSize = std::max((uint64_t)1, BucketSize);
+    BucketSize = PowerOf2Ceil(BucketSize);
+    return BucketSize;
+  }
+
+  template <typename T> static inline bool isNull(T Data) {
+    if constexpr (Info::ZeroIsUndefValue)
+      return Data == 0;
+    else if constexpr (sizeof(Data) == 1)
+      return reinterpret_cast<uint8_t>(Data) == 0xff;
+    else if constexpr (sizeof(Data) == 2)
+      return reinterpret_cast<uint16_t>(Data) == 0xffff;
+    else if constexpr (sizeof(Data) == 4)
+      return reinterpret_cast<uint32_t>(Data) == 0xffffffff;
+    else if constexpr (sizeof(Data) == 8)
+      return reinterpret_cast<uint64_t>(Data) == 0xffffffffffffffff;
+
+    llvm_unreachable("Unsupported data size");
+  }
+
+  /// Common implementation of the insert method. This implementation selects
+  /// a bucket, locks it, and calls the child implementation, which does the
+  /// final insertion.
+  template <typename... Args>
+  std::pair<DataTy, bool> insert(const KeyTy &NewKey, Args... args) {
+    // Calculate hash.
+    uint64_t Hash = Info::getHashValue(NewKey);
+    // Get bucket.
+    Bucket &CurBucket = BucketsArray[getBucketIdx(Hash)];
+
+    // Calculate extended hash bits.
+    uint64_t ExtHashBits = Hash >> BucketsHashBitsNum;
+    std::pair<DataTy, bool> Result;
+
+    while (true) {
+      uint64_t RehashingSize = 0;
+      if (CurBucket.try_lock_shared()) {
+        // Call child implementation.
+        if (static_cast<DerivedImplTy *>(this)->insertImpl(
+                CurBucket, ExtHashBits, NewKey, Result, args...)) {
+          CurBucket.unlock_shared();
+          return Result;
+        }
+
+        RehashingSize = CurBucket.Size;
+        CurBucket.unlock_shared();
+      }
+
+      // Rehash bucket.
+      rehashBucketOrWait(CurBucket, RehashingSize);
+    }
+
+    llvm_unreachable("Unhandled path of insert() method");
+    return {};
+  }
+
+  /// Rehash bucket data.
+  void rehashBucketOrWait(Bucket &CurBucket, uint64_t RehashingSize) {
+    CurBucket.lock();
+    uint64_t OldSize = CurBucket.Size;
+    if (RehashingSize != OldSize) {
+      CurBucket.unlock();
+      return;
+    }
+
+    uint8_t *OldData = CurBucket.Data;
+    uint64_t NewSize = OldSize << 1;
+    uint8_t *NewData =
+        static_cast<DerivedImplTy *>(this)->allocateData(NewSize);
+
+    // Iterate through old data.
+    for (uint64_t CurIdx = 0; CurIdx < OldSize; ++CurIdx) {
+      auto &AtomicData = static_cast<DerivedImplTy *>(this)->getDataEntry(
+          OldData, CurIdx, OldSize);
+      auto CurData = AtomicData.load(std::memory_order_acquire);
+
+      // Check data entry for null value.
+      if (!isNull(CurData)) {
+        auto &AtomicKey = static_cast<DerivedImplTy *>(this)->getKeyEntry(
+            OldData, CurIdx, OldSize);
+        auto CurKey = AtomicKey.load(std::memory_order_acquire);
+
+        // Get index for position in the new bucket.
+        uint64_t ExtHashBits =
+            static_cast<DerivedImplTy *>(this)->getExtHashBits(CurKey);
+        uint64_t NewIdx = getStartIdx(ExtHashBits, NewSize);
+        while (true) {
+          auto &NewAtomicData =
+              static_cast<DerivedImplTy *>(this)->getDataEntry(NewData, NewIdx,
+                                                               NewSize);
+          auto NewCurData = NewAtomicData.load(std::memory_order_acquire);
+
+          if (isNull(NewCurData)) {
+            // Store data entry and key into the new bucket data.
+            NewAtomicData.store(CurData, std::memory_order_release);
+            auto &NewAtomicKey =
+                static_cast<DerivedImplTy *>(this)->getKeyEntry(NewData, NewIdx,
+                                                                NewSize);
+            NewAtomicKey.store(CurKey, std::memory_order_release);
+            break;
+          }
+
+          ++NewIdx;
+          NewIdx &= (NewSize - 1);
+        }
+      }
+    }
+
+    CurBucket.Size = NewSize;
+    CurBucket.Data = NewData;
+    CurBucket.unlock();
+
+    delete[] OldData;
+  }
+
+  uint64_t getBucketIdx(hash_code Hash) { return Hash & BucketsHashMask; }
+
+  uint64_t getStartIdx(uint64_t ExtHashBits, uint64_t BucketSize) {
+    assert((BucketSize > 0) && "Empty bucket");
+
+    return ExtHashBits & (BucketSize - 1);
+  }
+
+  /// Number of bits in hash mask.
+  uint8_t BucketsHashBitsNum = 0;
 
-    // Number of non-null entries.
-    uint32_t NumberOfEntries = 0;
+  /// Hash mask.
+  uint64_t BucketsHashMask = 0;
 
-    // Hashes for [Size] entries.
-    HashesPtr Hashes = nullptr;
+  /// Array of buckets.
+  std::unique_ptr<Bucket[]> BucketsArray;
+
+  /// The number of buckets.
+  uint64_t NumberOfBuckets = 0;
 
-    // [Size] entries.
-    DataPtr Entries = nullptr;
+  /// Number of available threads.
+  size_t ThreadsNum = 0;
+};
 
+/// ConcurrentHashTable: This class is optimized for small data like
+/// uint32_t or uint64_t. It keeps keys and data in the internal table.
+/// Keys and data should have equal alignment and size. They should also
+/// satisfy the requirements for atomic operations.
+///
+/// Bucket.Data contains an array of pairs [ DataTy, KeyTy ]:
+///
+/// [Bucket].Data -> [DataTy0][KeyTy0]...[DataTyN][KeyTyN]
+
+template <typename KeyTy> class ConcurrentHashTableInfo {
+public:
+  /// \returns Hash value for the specified \p Key.
+  static inline uint64_t getHashValue(KeyTy Key) {
+    return std::hash<KeyTy>{}(Key);
+  }
+
+  /// \returns true if both \p LHS and \p RHS are equal.
+  static inline bool isEqual(KeyTy LHS, KeyTy RHS) { return LHS == RHS; }
+
+  /// Type of the mutex object (use void for the single-thread version).
 #if LLVM_ENABLE_THREADS
-    // Mutex for this bucket.
-    std::mutex Guard;
+  using MutexTy = std::shared_mutex;
+#else
+  using MutexTy = void;
 #endif
-  };
 
-  // Reallocate and rehash bucket if this is full enough.
-  void RehashBucket(Bucket &CurBucket) {
-    assert((CurBucket.Size > 0) && "...
[truncated]

``````````
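
One detail worth spelling out from the truncated diff is the "undef marker" convention in ConcurrentHashTableBase: an empty slot is recognised purely by its byte pattern, either all zeros or all 0xff bytes depending on Info::ZeroIsUndefValue. A small self-contained sketch of that check, using a hypothetical helper name that is not part of the patch:

```cpp
#include <cstdint>
#include <cstring>

// Returns true if Value consists entirely of the "undef" byte pattern:
// 0x00 bytes when ZeroIsUndefValue is true, 0xff bytes otherwise.
// Hypothetical helper illustrating the idea, not the patch's isNull().
template <bool ZeroIsUndefValue, typename T>
bool isUndefSlot(const T &Value) {
  unsigned char Bytes[sizeof(T)];
  std::memcpy(Bytes, &Value, sizeof(T));
  const unsigned char Marker = ZeroIsUndefValue ? 0x00 : 0xff;
  for (unsigned char B : Bytes)
    if (B != Marker)
      return false;
  return true;
}
```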

</details>


https://github.com/llvm/llvm-project/pull/71932


More information about the llvm-commits mailing list