[llvm] r327647 - Refactor the PDB HashTable class.
Zachary Turner via llvm-commits
llvm-commits at lists.llvm.org
Fri Mar 16 09:21:51 PDT 2018
Interesting, I didn't see this in any of my local builds. I should be able
to commit a fix shortly.
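
For context, here is a minimal standalone sketch of what the compilers quoted
below are flagging. IdentityTraits and matches are hypothetical stand-ins used
only for illustration, not the LLVM code, and the actual in-tree fix may look
different:

#include <cassert>
#include <cstdint>

// Stand-in for PdbHashTraits<uint32_t>: storage key and lookup key are both
// uint32_t, so the conversion is just the identity.
struct IdentityTraits {
  uint32_t storageKeyToLookupKey(uint32_t N) const { return N; }
};

// Mirrors the shape of HashTable::find_as: Key is deduced from the caller.
template <typename Key>
bool matches(uint32_t StorageKey, const Key &K, const IdentityTraits &Traits) {
  // Table.find_as(3) deduces Key = int, so this line compares uint32_t with
  // int and trips -Wsign-compare (an error under -Werror), as in the log below.
  return Traits.storageKeyToLookupKey(StorageKey) == K;
}

int main() {
  IdentityTraits Traits;
  assert(matches(3u, 3u, Traits)); // unsigned key: clean
  assert(matches(3u, 3, Traits));  // int key: warns like the quoted build
  return 0;
}

One obvious way to silence it in the unit test would be unsigned key literals
(3u) or an explicit cast in the assert, but whichever fix actually landed is
not shown in this thread.
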
On Fri, Mar 16, 2018 at 1:04 AM Mikael Holmén <mikael.holmen at ericsson.com>
wrote:
> Hi Zachary,
>
> Not sure if it's this commit or one of the other commits touching
> HashTable.h that causes it, but at least clang 3.6 and gcc 5.4.0
> complain when building HashTableTest.cpp on trunk now.
>
> With clang:
>
> /repo/app/clang/3.6/bin/clang++ -march=corei7 -DGTEST_HAS_RTTI=0
> -DGTEST_HAS_TR1_TUPLE=0 -DGTEST_LANG_CXX11=1 -D_DEBUG -D_GNU_SOURCE
> -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS
> -Iunittests/DebugInfo/PDB -I../unittests/DebugInfo/PDB
> -I/usr/include/libxml2 -Iinclude -I../include
> -I../utils/unittest/googletest/include
> -I../utils/unittest/googlemock/include
> -I/repo/app/valgrind/3.11.0/include -fPIC -fvisibility-inlines-hidden
> -Werror -Werror=date-time -std=c++11 -Wall -W -Wno-unused-parameter
> -Wwrite-strings -Wcast-qual -Wmissing-field-initializers -pedantic
> -Wno-long-long -Wcovered-switch-default -Wnon-virtual-dtor
> -Wdelete-non-virtual-dtor -Wstring-conversion -fcolor-diagnostics
> -ffunction-sections -fdata-sections -O3 -UNDEBUG
> -Wno-variadic-macros -Wno-gnu-zero-variadic-macro-arguments
> -fno-exceptions -fno-rtti -MMD -MT
>
> unittests/DebugInfo/PDB/CMakeFiles/DebugInfoPDBTests.dir/HashTableTest.cpp.o
> -MF
>
> unittests/DebugInfo/PDB/CMakeFiles/DebugInfoPDBTests.dir/HashTableTest.cpp.o.d
> -o
>
> unittests/DebugInfo/PDB/CMakeFiles/DebugInfoPDBTests.dir/HashTableTest.cpp.o
> -c ../unittests/DebugInfo/PDB/HashTableTest.cpp
> In file included from ../unittests/DebugInfo/PDB/HashTableTest.cpp:10:
> ../include/llvm/DebugInfo/PDB/Native/HashTable.h:282:73: error:
> comparison of integers of different signs: 'uint32_t' (aka 'unsigned
> int') and 'const int' [-Werror,-Wsign-compare]
> assert(Traits.storageKeyToLookupKey(Buckets[Entry.index()].first)
> == K);
> ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> ^ ~
> /usr/include/assert.h:89:5: note: expanded from macro 'assert'
> ((expr) \
> ^
> ../include/llvm/DebugInfo/PDB/Native/HashTable.h:256:12: note: in
> instantiation of function template specialization
> 'llvm::pdb::HashTable<unsigned int, llvm::pdb::PdbHashTraits<uint32_t>
> >::set_as_internal<int>' requested here
> return set_as_internal(K, std::move(V), None);
> ^
> ../unittests/DebugInfo/PDB/HashTableTest.cpp:44:9: note: in
> instantiation of function template specialization
> 'llvm::pdb::HashTable<unsigned int, llvm::pdb::PdbHashTraits<uint32_t>
> >::set_as<int>' requested here
> Table.set_as(3, 7);
> ^
> In file included from ../unittests/DebugInfo/PDB/HashTableTest.cpp:10:
> ../include/llvm/DebugInfo/PDB/Native/HashTable.h:230:60: error:
> comparison of integers of different signs: 'uint32_t' (aka 'unsigned
> int') and 'const int' [-Werror,-Wsign-compare]
> if (Traits.storageKeyToLookupKey(Buckets[I].first) == K)
> ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ^ ~
> ../unittests/DebugInfo/PDB/HashTableTest.cpp:46:32: note: in
> instantiation of function template specialization
> 'llvm::pdb::HashTable<unsigned int, llvm::pdb::PdbHashTraits<uint32_t>
> >::find_as<int>' requested here
> ASSERT_NE(Table.end(), Table.find_as(3));
> ^
> ../utils/unittest/googletest/include/gtest/gtest.h:1960:54: note:
> expanded from macro 'ASSERT_NE'
> # define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2)
> ^
> ../utils/unittest/googletest/include/gtest/gtest.h:1942:63: note:
> expanded from macro 'GTEST_ASSERT_NE'
> ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2)
> ^
> ../utils/unittest/googletest/include/gtest/gtest_pred_impl.h:166:40:
> note: expanded from macro 'ASSERT_PRED_FORMAT2'
> GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
> ^
> ../utils/unittest/googletest/include/gtest/gtest_pred_impl.h:147:43:
> note: expanded from macro 'GTEST_PRED_FORMAT2_'
> GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), \
> ^
> ../utils/unittest/googletest/include/gtest/gtest_pred_impl.h:77:52:
> note: expanded from macro 'GTEST_ASSERT_'
> if (const ::testing::AssertionResult gtest_ar = (expression)) \
> ^
> 2 errors generated.
>
>
> and with gcc:
>
> [230/411] Building CXX object
>
> unittests/DebugInfo/PDB/CMakeFiles/DebugInfoPDBTests.dir/HashTableTest.cpp.o
> In file included from ../unittests/DebugInfo/PDB/HashTableTest.cpp:10:0:
> ../include/llvm/DebugInfo/PDB/Native/HashTable.h: In instantiation of
> 'llvm::pdb::HashTable<ValueT, TraitsT>::iterator
> llvm::pdb::HashTable<ValueT, TraitsT>::find_as(const Key&) const [with
> Key = int; ValueT = unsigned int; TraitsT =
> llvm::pdb::PdbHashTraits<unsigned int>; llvm::pdb::HashTable<ValueT,
> TraitsT>::iterator = llvm::pdb::HashTableIterator<unsigned int,
> llvm::pdb::PdbHashTraits<unsigned int> >]':
> ../unittests/DebugInfo/PDB/HashTableTest.cpp:46:3: required from here
> ../include/llvm/DebugInfo/PDB/Native/HashTable.h:230:60: warning:
> comparison between signed and unsigned integer expressions [-Wsign-compare]
> if (Traits.storageKeyToLookupKey(Buckets[I].first) == K)
> ^
> In file included from
> /proj/bbi_twh/wh_bbi/x86_64-Linux2/bbigcc/1.5.4.0/crosscompiler/include/c++/5.4.0/cassert:43:0,
> from ../include/llvm/Support/BinaryStreamArray.h:17,
> from ../include/llvm/Support/BinaryStreamReader.h:15,
> from ../include/llvm/DebugInfo/PDB/Native/HashTable.h:16,
> from ../unittests/DebugInfo/PDB/HashTableTest.cpp:10:
> ../include/llvm/DebugInfo/PDB/Native/HashTable.h: In instantiation of
> 'bool llvm::pdb::HashTable<ValueT, TraitsT>::set_as_internal(const Key&,
> ValueT, llvm::Optional<unsigned int>) [with Key = int; ValueT = unsigned
> int; TraitsT = llvm::pdb::PdbHashTraits<unsigned int>]':
> ../include/llvm/DebugInfo/PDB/Native/HashTable.h:256:27: required from
> 'bool llvm::pdb::HashTable<ValueT, TraitsT>::set_as(const Key&, ValueT)
> [with Key = int; ValueT = unsigned int; TraitsT =
> llvm::pdb::PdbHashTraits<unsigned int>]'
> ../unittests/DebugInfo/PDB/HashTableTest.cpp:44:20: required from here
> ../include/llvm/DebugInfo/PDB/Native/HashTable.h:282:73: warning:
> comparison between signed and unsigned integer expressions [-Wsign-compare]
>
> assert(Traits.storageKeyToLookupKey(Buckets[Entry.index()].first) == K);
>
>
> Regards,
> Mikael
>
> On 03/15/2018 06:38 PM, Zachary Turner via llvm-commits wrote:
> > Author: zturner
> > Date: Thu Mar 15 10:38:26 2018
> > New Revision: 327647
> >
> > URL: http://llvm.org/viewvc/llvm-project?rev=327647&view=rev
> > Log:
> > Refactor the PDB HashTable class.
> >
> > It previously only worked when the key and value types were
> > both 4-byte integers. We now have a use case for a non-trivial
> > value type, so we need to extend it to support arbitrary value
> > types, which means templatizing it.
> >
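
A short usage sketch of the new interface, modeled on the PdbHashTraits<uint32_t>
specialization and the updated HashTableTest below; it relies only on the API
visible in this diff and on the default traits, under which keys hash to
themselves:

#include "llvm/DebugInfo/PDB/Native/HashTable.h"
#include <cassert>
#include <cstdint>

using namespace llvm::pdb;

int main() {
  // HashTable is now templated on the value type; the second template
  // parameter defaults to PdbHashTraits<ValueT>, which for uint32_t keeps the
  // old identity-hash behaviour.
  HashTable<uint32_t> Table;

  Table.set_as(3u, 7u);                     // insert or update key 3 -> 7
  assert(Table.find_as(3u) != Table.end()); // traits-based lookup
  assert(Table.get(3u) == 7u);              // get() asserts the key is present
  return 0;
}

For a non-trivial value type, the caller specializes PdbHashTraits<T> with
hashLookupKey, storageKeyToLookupKey and lookupKeyToStorageKey, as the new
NonTrivialValueType test at the bottom of this patch does.
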
> > Modified:
> > llvm/trunk/include/llvm/DebugInfo/PDB/Native/HashTable.h
> > llvm/trunk/include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h
> > llvm/trunk/include/llvm/DebugInfo/PDB/Native/TpiStream.h
> > llvm/trunk/lib/DebugInfo/PDB/Native/HashTable.cpp
> > llvm/trunk/lib/DebugInfo/PDB/Native/NamedStreamMap.cpp
> > llvm/trunk/lib/DebugInfo/PDB/Native/TpiStream.cpp
> > llvm/trunk/tools/llvm-pdbutil/Analyze.cpp
> > llvm/trunk/unittests/DebugInfo/PDB/HashTableTest.cpp
> >
> > Modified: llvm/trunk/include/llvm/DebugInfo/PDB/Native/HashTable.h
> > URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/DebugInfo/PDB/Native/HashTable.h?rev=327647&r1=327646&r2=327647&view=diff
> >
> ==============================================================================
> > --- llvm/trunk/include/llvm/DebugInfo/PDB/Native/HashTable.h (original)
> > +++ llvm/trunk/include/llvm/DebugInfo/PDB/Native/HashTable.h Thu Mar 15
> 10:38:26 2018
> > @@ -12,6 +12,7 @@
> >
> > #include "llvm/ADT/SparseBitVector.h"
> > #include "llvm/ADT/iterator.h"
> > +#include "llvm/DebugInfo/PDB/Native/RawError.h"
> > #include "llvm/Support/Endian.h"
> > #include "llvm/Support/Error.h"
> > #include <cstdint>
> > @@ -26,73 +27,196 @@ class BinaryStreamWriter;
> >
> > namespace pdb {
> >
> > -class HashTable;
> > +template <typename ValueT, typename TraitsT> class HashTable;
> >
> > +template <typename ValueT, typename TraitsT>
> > class HashTableIterator
> > - : public iterator_facade_base<HashTableIterator,
> std::forward_iterator_tag,
> > - std::pair<uint32_t, uint32_t>> {
> > - friend HashTable;
> > -
> > - HashTableIterator(const HashTable &Map, uint32_t Index, bool IsEnd);
> > + : public iterator_facade_base<HashTableIterator<ValueT, TraitsT>,
> > + std::forward_iterator_tag,
> > + std::pair<uint32_t, ValueT>> {
> > + friend HashTable<ValueT, TraitsT>;
> > +
> > + HashTableIterator(const HashTable<ValueT, TraitsT> &Map, uint32_t
> Index,
> > + bool IsEnd)
> > + : Map(&Map), Index(Index), IsEnd(IsEnd) {}
> >
> > public:
> > - HashTableIterator(const HashTable &Map);
> > + HashTableIterator(const HashTable<ValueT, TraitsT> &Map) : Map(&Map) {
> > + int I = Map.Present.find_first();
> > + if (I == -1) {
> > + Index = 0;
> > + IsEnd = true;
> > + } else {
> > + Index = static_cast<uint32_t>(I);
> > + IsEnd = false;
> > + }
> > + }
> >
> > - HashTableIterator &operator=(const HashTableIterator &R);
> > - bool operator==(const HashTableIterator &R) const;
> > - const std::pair<uint32_t, uint32_t> &operator*() const;
> > - HashTableIterator &operator++();
> > + HashTableIterator &operator=(const HashTableIterator &R) {
> > + Map = R.Map;
> > + return *this;
> > + }
> > + bool operator==(const HashTableIterator &R) const {
> > + if (IsEnd && R.IsEnd)
> > + return true;
> > + if (IsEnd != R.IsEnd)
> > + return false;
> > +
> > + return (Map == R.Map) && (Index == R.Index);
> > + }
> > + const std::pair<uint32_t, ValueT> &operator*() const {
> > + assert(Map->Present.test(Index));
> > + return Map->Buckets[Index];
> > + }
> > + HashTableIterator &operator++() {
> > + while (Index < Map->Buckets.size()) {
> > + ++Index;
> > + if (Map->Present.test(Index))
> > + return *this;
> > + }
> > +
> > + IsEnd = true;
> > + return *this;
> > + }
> >
> > private:
> > bool isEnd() const { return IsEnd; }
> > uint32_t index() const { return Index; }
> >
> > - const HashTable *Map;
> > + const HashTable<ValueT, TraitsT> *Map;
> > uint32_t Index;
> > bool IsEnd;
> > };
> >
> > +template <typename T> struct PdbHashTraits {};
> > +
> > +template <> struct PdbHashTraits<uint32_t> {
> > + uint32_t hashLookupKey(uint32_t N) const { return N; }
> > + uint32_t storageKeyToLookupKey(uint32_t N) const { return N; }
> > + uint32_t lookupKeyToStorageKey(uint32_t N) { return N; }
> > +};
> > +
> > +template <typename ValueT, typename TraitsT = PdbHashTraits<ValueT>>
> > class HashTable {
> > - friend class HashTableIterator;
> > + using iterator = HashTableIterator<ValueT, TraitsT>;
> > + friend iterator;
> >
> > struct Header {
> > support::ulittle32_t Size;
> > support::ulittle32_t Capacity;
> > };
> >
> > - using BucketList = std::vector<std::pair<uint32_t, uint32_t>>;
> > + using BucketList = std::vector<std::pair<uint32_t, ValueT>>;
> >
> > public:
> > - HashTable();
> > - explicit HashTable(uint32_t Capacity);
> > + HashTable() { Buckets.resize(8); }
> > +
> > + explicit HashTable(TraitsT Traits) : HashTable(8, std::move(Traits))
> {}
> > + HashTable(uint32_t Capacity, TraitsT Traits) : Traits(Traits) {
> > + Buckets.resize(Capacity);
> > + }
> > +
> > + Error load(BinaryStreamReader &Stream) {
> > + const Header *H;
> > + if (auto EC = Stream.readObject(H))
> > + return EC;
> > + if (H->Capacity == 0)
> > + return make_error<RawError>(raw_error_code::corrupt_file,
> > + "Invalid Hash Table Capacity");
> > + if (H->Size > maxLoad(H->Capacity))
> > + return make_error<RawError>(raw_error_code::corrupt_file,
> > + "Invalid Hash Table Size");
> > +
> > + Buckets.resize(H->Capacity);
> > +
> > + if (auto EC = readSparseBitVector(Stream, Present))
> > + return EC;
> > + if (Present.count() != H->Size)
> > + return make_error<RawError>(raw_error_code::corrupt_file,
> > + "Present bit vector does not match
> size!");
> > +
> > + if (auto EC = readSparseBitVector(Stream, Deleted))
> > + return EC;
> > + if (Present.intersects(Deleted))
> > + return make_error<RawError>(raw_error_code::corrupt_file,
> > + "Present bit vector interesects
> deleted!");
> > +
> > + for (uint32_t P : Present) {
> > + if (auto EC = Stream.readInteger(Buckets[P].first))
> > + return EC;
> > + const ValueT *Value;
> > + if (auto EC = Stream.readObject(Value))
> > + return EC;
> > + Buckets[P].second = *Value;
> > + }
> > +
> > + return Error::success();
> > + }
> >
> > - Error load(BinaryStreamReader &Stream);
> > + uint32_t calculateSerializedLength() const {
> > + uint32_t Size = sizeof(Header);
> >
> > - uint32_t calculateSerializedLength() const;
> > - Error commit(BinaryStreamWriter &Writer) const;
> > + int NumBitsP = Present.find_last() + 1;
> > + int NumBitsD = Deleted.find_last() + 1;
> >
> > - void clear();
> > + // Present bit set number of words, followed by that many actual
> words.
> > + Size += sizeof(uint32_t);
> > + Size += alignTo(NumBitsP, sizeof(uint32_t));
> >
> > - uint32_t capacity() const;
> > - uint32_t size() const;
> > + // Deleted bit set number of words, followed by that many actual
> words.
> > + Size += sizeof(uint32_t);
> > + Size += alignTo(NumBitsD, sizeof(uint32_t));
> >
> > - HashTableIterator begin() const;
> > - HashTableIterator end() const;
> > + // One (Key, ValueT) pair for each entry Present.
> > + Size += (sizeof(uint32_t) + sizeof(ValueT)) * size();
> > +
> > + return Size;
> > + }
> >
> > - /// Find the entry with the specified key value.
> > - HashTableIterator find(uint32_t K) const;
> > + Error commit(BinaryStreamWriter &Writer) const {
> > + Header H;
> > + H.Size = size();
> > + H.Capacity = capacity();
> > + if (auto EC = Writer.writeObject(H))
> > + return EC;
> > +
> > + if (auto EC = writeSparseBitVector(Writer, Present))
> > + return EC;
> > +
> > + if (auto EC = writeSparseBitVector(Writer, Deleted))
> > + return EC;
> > +
> > + for (const auto &Entry : *this) {
> > + if (auto EC = Writer.writeInteger(Entry.first))
> > + return EC;
> > + if (auto EC = Writer.writeObject(Entry.second))
> > + return EC;
> > + }
> > + return Error::success();
> > + }
> > +
> > + void clear() {
> > + Buckets.resize(8);
> > + Present.clear();
> > + Deleted.clear();
> > + }
> > +
> > + uint32_t capacity() const { return Buckets.size(); }
> > + uint32_t size() const { return Present.count(); }
> > +
> > + iterator begin() const { return iterator(*this); }
> > + iterator end() const { return iterator(*this, 0, true); }
> >
> > /// Find the entry whose key has the specified hash value, using the
> specified
> > /// traits defining hash function and equality.
> > - template <typename Traits, typename Key, typename Context>
> > - HashTableIterator find_as(const Key &K, const Context &Ctx) const {
> > - uint32_t H = Traits::hash(K, Ctx) % capacity();
> > + template <typename Key> iterator find_as(const Key &K) const {
> > + uint32_t H = Traits.hashLookupKey(K) % capacity();
> > uint32_t I = H;
> > Optional<uint32_t> FirstUnused;
> > do {
> > if (isPresent(I)) {
> > - if (Traits::realKey(Buckets[I].first, Ctx) == K)
> > - return HashTableIterator(*this, I, false);
> > + if (Traits.storageKeyToLookupKey(Buckets[I].first) == K)
> > + return iterator(*this, I, false);
> > } else {
> > if (!FirstUnused)
> > FirstUnused = I;
> > @@ -111,40 +235,26 @@ public:
> > // table were Present. But this would violate the load factor
> constraints
> > // that we impose, so it should never happen.
> > assert(FirstUnused);
> > - return HashTableIterator(*this, *FirstUnused, true);
> > + return iterator(*this, *FirstUnused, true);
> > }
> >
> > - /// Set the entry with the specified key to the specified value.
> > - void set(uint32_t K, uint32_t V);
> > -
> > /// Set the entry using a key type that the specified Traits can
> convert
> > /// from a real key to an internal key.
> > - template <typename Traits, typename Key, typename Context>
> > - bool set_as(const Key &K, uint32_t V, Context &Ctx) {
> > - return set_as_internal<Traits, Key, Context>(K, V, None, Ctx);
> > + template <typename Key> bool set_as(const Key &K, ValueT V) {
> > + return set_as_internal(K, std::move(V), None);
> > }
> >
> > - void remove(uint32_t K);
> > -
> > - template <typename Traits, typename Key, typename Context>
> > - void remove_as(const Key &K, Context &Ctx) {
> > - auto Iter = find_as<Traits, Key, Context>(K, Ctx);
> > - // It wasn't here to begin with, just exit.
> > - if (Iter == end())
> > - return;
> > -
> > - assert(Present.test(Iter.index()));
> > - assert(!Deleted.test(Iter.index()));
> > - Deleted.set(Iter.index());
> > - Present.reset(Iter.index());
> > + template <typename Key> ValueT get(const Key &K) const {
> > + auto Iter = find_as(K);
> > + assert(Iter != end());
> > + return (*Iter).second;
> > }
> >
> > - uint32_t get(uint32_t K);
> > -
> > protected:
> > bool isPresent(uint32_t K) const { return Present.test(K); }
> > bool isDeleted(uint32_t K) const { return Deleted.test(K); }
> >
> > + TraitsT Traits;
> > BucketList Buckets;
> > mutable SparseBitVector<> Present;
> > mutable SparseBitVector<> Deleted;
> > @@ -152,13 +262,12 @@ protected:
> > private:
> > /// Set the entry using a key type that the specified Traits can
> convert
> > /// from a real key to an internal key.
> > - template <typename Traits, typename Key, typename Context>
> > - bool set_as_internal(const Key &K, uint32_t V, Optional<uint32_t>
> InternalKey,
> > - Context &Ctx) {
> > - auto Entry = find_as<Traits, Key, Context>(K, Ctx);
> > + template <typename Key>
> > + bool set_as_internal(const Key &K, ValueT V, Optional<uint32_t>
> InternalKey) {
> > + auto Entry = find_as(K);
> > if (Entry != end()) {
> > assert(isPresent(Entry.index()));
> > - assert(Traits::realKey(Buckets[Entry.index()].first, Ctx) == K);
> > + assert(Traits.storageKeyToLookupKey(Buckets[Entry.index()].first)
> == K);
> > // We're updating, no need to do anything special.
> > Buckets[Entry.index()].second = V;
> > return false;
> > @@ -167,21 +276,20 @@ private:
> > auto &B = Buckets[Entry.index()];
> > assert(!isPresent(Entry.index()));
> > assert(Entry.isEnd());
> > - B.first = InternalKey ? *InternalKey : Traits::lowerKey(K, Ctx);
> > + B.first = InternalKey ? *InternalKey :
> Traits.lookupKeyToStorageKey(K);
> > B.second = V;
> > Present.set(Entry.index());
> > Deleted.reset(Entry.index());
> >
> > - grow<Traits, Key, Context>(Ctx);
> > + grow();
> >
> > - assert((find_as<Traits, Key, Context>(K, Ctx)) != end());
> > + assert((find_as(K)) != end());
> > return true;
> > }
> >
> > - static uint32_t maxLoad(uint32_t capacity);
> > + static uint32_t maxLoad(uint32_t capacity) { return capacity * 2 / 3
> + 1; }
> >
> > - template <typename Traits, typename Key, typename Context>
> > - void grow(Context &Ctx) {
> > + void grow() {
> > uint32_t S = size();
> > if (S < maxLoad(capacity()))
> > return;
> > @@ -193,11 +301,10 @@ private:
> > // Growing requires rebuilding the table and re-hashing every
> item. Make a
> > // copy with a larger capacity, insert everything into the copy,
> then swap
> > // it in.
> > - HashTable NewMap(NewCapacity);
> > + HashTable NewMap(NewCapacity, Traits);
> > for (auto I : Present) {
> > - auto RealKey = Traits::realKey(Buckets[I].first, Ctx);
> > - NewMap.set_as_internal<Traits, Key, Context>(RealKey,
> Buckets[I].second,
> > - Buckets[I].first,
> Ctx);
> > + auto LookupKey = Traits.storageKeyToLookupKey(Buckets[I].first);
> > + NewMap.set_as_internal(LookupKey, Buckets[I].second,
> Buckets[I].first);
> > }
> >
> > Buckets.swap(NewMap.Buckets);
> > @@ -206,13 +313,11 @@ private:
> > assert(capacity() == NewCapacity);
> > assert(size() == S);
> > }
> > -
> > - static Error readSparseBitVector(BinaryStreamReader &Stream,
> > - SparseBitVector<> &V);
> > - static Error writeSparseBitVector(BinaryStreamWriter &Writer,
> > - SparseBitVector<> &Vec);
> > };
> >
> > +Error readSparseBitVector(BinaryStreamReader &Stream, SparseBitVector<>
> &V);
> > +Error writeSparseBitVector(BinaryStreamWriter &Writer,
> SparseBitVector<> &Vec);
> > +
> > } // end namespace pdb
> >
> > } // end namespace llvm
> >
> > Modified: llvm/trunk/include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h
> > URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h?rev=327647&r1=327646&r2=327647&view=diff
> >
> ==============================================================================
> > --- llvm/trunk/include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h
> (original)
> > +++ llvm/trunk/include/llvm/DebugInfo/PDB/Native/NamedStreamMap.h Thu
> Mar 15 10:38:26 2018
> > @@ -25,6 +25,17 @@ class BinaryStreamWriter;
> >
> > namespace pdb {
> >
> > +class NamedStreamMap;
> > +
> > +struct NamedStreamMapTraits {
> > + NamedStreamMap *NS;
> > +
> > + explicit NamedStreamMapTraits(NamedStreamMap &NS);
> > + uint16_t hashLookupKey(StringRef S) const;
> > + StringRef storageKeyToLookupKey(uint32_t Offset) const;
> > + uint32_t lookupKeyToStorageKey(StringRef S);
> > +};
> > +
> > class NamedStreamMap {
> > friend class NamedStreamMapBuilder;
> >
> > @@ -46,9 +57,10 @@ public:
> > StringMap<uint32_t> entries() const;
> >
> > private:
> > + NamedStreamMapTraits HashTraits;
> > /// Closed hash table from Offset -> StreamNumber, where Offset is
> the offset
> > /// of the stream name in NamesBuffer.
> > - HashTable OffsetIndexMap;
> > + HashTable<support::ulittle32_t, NamedStreamMapTraits> OffsetIndexMap;
> >
> > /// Buffer of string data.
> > std::vector<char> NamesBuffer;
> >
> > Modified: llvm/trunk/include/llvm/DebugInfo/PDB/Native/TpiStream.h
> > URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/DebugInfo/PDB/Native/TpiStream.h?rev=327647&r1=327646&r2=327647&view=diff
> >
> ==============================================================================
> > --- llvm/trunk/include/llvm/DebugInfo/PDB/Native/TpiStream.h (original)
> > +++ llvm/trunk/include/llvm/DebugInfo/PDB/Native/TpiStream.h Thu Mar 15
> 10:38:26 2018
> > @@ -51,7 +51,7 @@ public:
> > uint32_t getNumHashBuckets() const;
> > FixedStreamArray<support::ulittle32_t> getHashValues() const;
> > FixedStreamArray<codeview::TypeIndexOffset> getTypeIndexOffsets()
> const;
> > - HashTable &getHashAdjusters();
> > + HashTable<support::ulittle32_t> &getHashAdjusters();
> >
> > codeview::CVTypeRange types(bool *HadError) const;
> > const codeview::CVTypeArray &typeArray() const { return TypeRecords;
> }
> > @@ -75,7 +75,7 @@ private:
> > std::unique_ptr<BinaryStream> HashStream;
> > FixedStreamArray<support::ulittle32_t> HashValues;
> > FixedStreamArray<codeview::TypeIndexOffset> TypeIndexOffsets;
> > - HashTable HashAdjusters;
> > + HashTable<support::ulittle32_t> HashAdjusters;
> >
> > const TpiStreamHeader *Header;
> > };
> >
> > Modified: llvm/trunk/lib/DebugInfo/PDB/Native/HashTable.cpp
> > URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/DebugInfo/PDB/Native/HashTable.cpp?rev=327647&r1=327646&r2=327647&view=diff
> >
> ==============================================================================
> > --- llvm/trunk/lib/DebugInfo/PDB/Native/HashTable.cpp (original)
> > +++ llvm/trunk/lib/DebugInfo/PDB/Native/HashTable.cpp Thu Mar 15
> 10:38:26 2018
> > @@ -22,130 +22,7 @@
> > using namespace llvm;
> > using namespace llvm::pdb;
> >
> > -namespace {
> > -struct IdentityTraits {
> > - static uint32_t hash(uint32_t K, const HashTable &Ctx) { return K; }
> > - static uint32_t realKey(uint32_t K, const HashTable &Ctx) { return K;
> }
> > - static uint32_t lowerKey(uint32_t K, const HashTable &Ctx) { return
> K; }
> > -};
> > -} // namespace
> > -
> > -HashTable::HashTable() : HashTable(8) {}
> > -
> > -HashTable::HashTable(uint32_t Capacity) { Buckets.resize(Capacity); }
> > -
> > -Error HashTable::load(BinaryStreamReader &Stream) {
> > - const Header *H;
> > - if (auto EC = Stream.readObject(H))
> > - return EC;
> > - if (H->Capacity == 0)
> > - return make_error<RawError>(raw_error_code::corrupt_file,
> > - "Invalid Hash Table Capacity");
> > - if (H->Size > maxLoad(H->Capacity))
> > - return make_error<RawError>(raw_error_code::corrupt_file,
> > - "Invalid Hash Table Size");
> > -
> > - Buckets.resize(H->Capacity);
> > -
> > - if (auto EC = readSparseBitVector(Stream, Present))
> > - return EC;
> > - if (Present.count() != H->Size)
> > - return make_error<RawError>(raw_error_code::corrupt_file,
> > - "Present bit vector does not match
> size!");
> > -
> > - if (auto EC = readSparseBitVector(Stream, Deleted))
> > - return EC;
> > - if (Present.intersects(Deleted))
> > - return make_error<RawError>(raw_error_code::corrupt_file,
> > - "Present bit vector interesects
> deleted!");
> > -
> > - for (uint32_t P : Present) {
> > - if (auto EC = Stream.readInteger(Buckets[P].first))
> > - return EC;
> > - if (auto EC = Stream.readInteger(Buckets[P].second))
> > - return EC;
> > - }
> > -
> > - return Error::success();
> > -}
> > -
> > -uint32_t HashTable::calculateSerializedLength() const {
> > - uint32_t Size = sizeof(Header);
> > -
> > - int NumBitsP = Present.find_last() + 1;
> > - int NumBitsD = Deleted.find_last() + 1;
> > -
> > - // Present bit set number of words, followed by that many actual
> words.
> > - Size += sizeof(uint32_t);
> > - Size += alignTo(NumBitsP, sizeof(uint32_t));
> > -
> > - // Deleted bit set number of words, followed by that many actual
> words.
> > - Size += sizeof(uint32_t);
> > - Size += alignTo(NumBitsD, sizeof(uint32_t));
> > -
> > - // One (Key, Value) pair for each entry Present.
> > - Size += 2 * sizeof(uint32_t) * size();
> > -
> > - return Size;
> > -}
> > -
> > -Error HashTable::commit(BinaryStreamWriter &Writer) const {
> > - Header H;
> > - H.Size = size();
> > - H.Capacity = capacity();
> > - if (auto EC = Writer.writeObject(H))
> > - return EC;
> > -
> > - if (auto EC = writeSparseBitVector(Writer, Present))
> > - return EC;
> > -
> > - if (auto EC = writeSparseBitVector(Writer, Deleted))
> > - return EC;
> > -
> > - for (const auto &Entry : *this) {
> > - if (auto EC = Writer.writeInteger(Entry.first))
> > - return EC;
> > - if (auto EC = Writer.writeInteger(Entry.second))
> > - return EC;
> > - }
> > - return Error::success();
> > -}
> > -
> > -void HashTable::clear() {
> > - Buckets.resize(8);
> > - Present.clear();
> > - Deleted.clear();
> > -}
> > -
> > -uint32_t HashTable::capacity() const { return Buckets.size(); }
> > -
> > -uint32_t HashTable::size() const { return Present.count(); }
> > -
> > -HashTableIterator HashTable::begin() const { return
> HashTableIterator(*this); }
> > -
> > -HashTableIterator HashTable::end() const {
> > - return HashTableIterator(*this, 0, true);
> > -}
> > -
> > -HashTableIterator HashTable::find(uint32_t K) const {
> > - return find_as<IdentityTraits>(K, *this);
> > -}
> > -
> > -void HashTable::set(uint32_t K, uint32_t V) {
> > - set_as<IdentityTraits, uint32_t>(K, V, *this);
> > -}
> > -
> > -void HashTable::remove(uint32_t K) { remove_as<IdentityTraits>(K,
> *this); }
> > -
> > -uint32_t HashTable::get(uint32_t K) {
> > - auto I = find(K);
> > - assert(I != end());
> > - return (*I).second;
> > -}
> > -
> > -uint32_t HashTable::maxLoad(uint32_t capacity) { return capacity * 2 /
> 3 + 1; }
> > -
> > -Error HashTable::readSparseBitVector(BinaryStreamReader &Stream,
> > +Error llvm::pdb::readSparseBitVector(BinaryStreamReader &Stream,
> > SparseBitVector<> &V) {
> > uint32_t NumWords;
> > if (auto EC = Stream.readInteger(NumWords))
> > @@ -167,7 +44,7 @@ Error HashTable::readSparseBitVector(Bin
> > return Error::success();
> > }
> >
> > -Error HashTable::writeSparseBitVector(BinaryStreamWriter &Writer,
> > +Error llvm::pdb::writeSparseBitVector(BinaryStreamWriter &Writer,
> > SparseBitVector<> &Vec) {
> > int ReqBits = Vec.find_last() + 1;
> > uint32_t NumWords = alignTo(ReqBits, sizeof(uint32_t)) /
> sizeof(uint32_t);
> > @@ -191,48 +68,3 @@ Error HashTable::writeSparseBitVector(Bi
> > }
> > return Error::success();
> > }
> > -
> > -HashTableIterator::HashTableIterator(const HashTable &Map, uint32_t
> Index,
> > - bool IsEnd)
> > - : Map(&Map), Index(Index), IsEnd(IsEnd) {}
> > -
> > -HashTableIterator::HashTableIterator(const HashTable &Map) : Map(&Map) {
> > - int I = Map.Present.find_first();
> > - if (I == -1) {
> > - Index = 0;
> > - IsEnd = true;
> > - } else {
> > - Index = static_cast<uint32_t>(I);
> > - IsEnd = false;
> > - }
> > -}
> > -
> > -HashTableIterator &HashTableIterator::operator=(const HashTableIterator
> &R) {
> > - Map = R.Map;
> > - return *this;
> > -}
> > -
> > -bool HashTableIterator::operator==(const HashTableIterator &R) const {
> > - if (IsEnd && R.IsEnd)
> > - return true;
> > - if (IsEnd != R.IsEnd)
> > - return false;
> > -
> > - return (Map == R.Map) && (Index == R.Index);
> > -}
> > -
> > -const std::pair<uint32_t, uint32_t> &HashTableIterator::operator*()
> const {
> > - assert(Map->Present.test(Index));
> > - return Map->Buckets[Index];
> > -}
> > -
> > -HashTableIterator &HashTableIterator::operator++() {
> > - while (Index < Map->Buckets.size()) {
> > - ++Index;
> > - if (Map->Present.test(Index))
> > - return *this;
> > - }
> > -
> > - IsEnd = true;
> > - return *this;
> > -}
> >
> > Modified: llvm/trunk/lib/DebugInfo/PDB/Native/NamedStreamMap.cpp
> > URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/DebugInfo/PDB/Native/NamedStreamMap.cpp?rev=327647&r1=327646&r2=327647&view=diff
> >
> ==============================================================================
> > --- llvm/trunk/lib/DebugInfo/PDB/Native/NamedStreamMap.cpp (original)
> > +++ llvm/trunk/lib/DebugInfo/PDB/Native/NamedStreamMap.cpp Thu Mar 15
> 10:38:26 2018
> > @@ -27,26 +27,27 @@
> > using namespace llvm;
> > using namespace llvm::pdb;
> >
> > -namespace {
> > -struct NamedStreamMapTraits {
> > - static uint16_t hash(StringRef S, const NamedStreamMap &NS) {
> > - // In the reference implementation, this uses
> > - // HASH Hasher<ULONG*, USHORT*>::hashPbCb(PB pb, size_t cb, ULONG
> ulMod).
> > - // Here, the type HASH is a typedef of unsigned short.
> > - // ** It is not a bug that we truncate the result of hashStringV1,
> in fact
> > - // it is a bug if we do not! **
> > - return static_cast<uint16_t>(hashStringV1(S));
> > - }
> > - static StringRef realKey(uint32_t Offset, const NamedStreamMap &NS) {
> > - return NS.getString(Offset);
> > - }
> > - static uint32_t lowerKey(StringRef S, NamedStreamMap &NS) {
> > - return NS.appendStringData(S);
> > - }
> > -};
> > -} // namespace
> > +NamedStreamMapTraits::NamedStreamMapTraits(NamedStreamMap &NS) :
> NS(&NS) {}
> > +
> > +uint16_t NamedStreamMapTraits::hashLookupKey(StringRef S) const {
> > + // In the reference implementation, this uses
> > + // HASH Hasher<ULONG*, USHORT*>::hashPbCb(PB pb, size_t cb, ULONG
> ulMod).
> > + // Here, the type HASH is a typedef of unsigned short.
> > + // ** It is not a bug that we truncate the result of hashStringV1, in
> fact
> > + // it is a bug if we do not! **
> > + return static_cast<uint16_t>(hashStringV1(S));
> > +}
> > +
> > +StringRef NamedStreamMapTraits::storageKeyToLookupKey(uint32_t Offset)
> const {
> > + return NS->getString(Offset);
> > +}
> > +
> > +uint32_t NamedStreamMapTraits::lookupKeyToStorageKey(StringRef S) {
> > + return NS->appendStringData(S);
> > +}
> >
> > -NamedStreamMap::NamedStreamMap() {}
> > +NamedStreamMap::NamedStreamMap()
> > + : HashTraits(*this), OffsetIndexMap(HashTraits) {}
> >
> > Error NamedStreamMap::load(BinaryStreamReader &Stream) {
> > uint32_t StringBufferSize;
> > @@ -98,7 +99,7 @@ uint32_t NamedStreamMap::hashString(uint
> > }
> >
> > bool NamedStreamMap::get(StringRef Stream, uint32_t &StreamNo) const {
> > - auto Iter = OffsetIndexMap.find_as<NamedStreamMapTraits>(Stream,
> *this);
> > + auto Iter = OffsetIndexMap.find_as(Stream);
> > if (Iter == OffsetIndexMap.end())
> > return false;
> > StreamNo = (*Iter).second;
> > @@ -122,5 +123,5 @@ uint32_t NamedStreamMap::appendStringDat
> > }
> >
> > void NamedStreamMap::set(StringRef Stream, uint32_t StreamNo) {
> > - OffsetIndexMap.set_as<NamedStreamMapTraits>(Stream, StreamNo, *this);
> > + OffsetIndexMap.set_as(Stream, support::ulittle32_t(StreamNo));
> > }
> >
> > Modified: llvm/trunk/lib/DebugInfo/PDB/Native/TpiStream.cpp
> > URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/DebugInfo/PDB/Native/TpiStream.cpp?rev=327647&r1=327646&r2=327647&view=diff
> >
> ==============================================================================
> > --- llvm/trunk/lib/DebugInfo/PDB/Native/TpiStream.cpp (original)
> > +++ llvm/trunk/lib/DebugInfo/PDB/Native/TpiStream.cpp Thu Mar 15
> 10:38:26 2018
> > @@ -152,7 +152,9 @@ FixedStreamArray<TypeIndexOffset> TpiStr
> > return TypeIndexOffsets;
> > }
> >
> > -HashTable &TpiStream::getHashAdjusters() { return HashAdjusters; }
> > +HashTable<support::ulittle32_t> &TpiStream::getHashAdjusters() {
> > + return HashAdjusters;
> > +}
> >
> > CVTypeRange TpiStream::types(bool *HadError) const {
> > return make_range(TypeRecords.begin(HadError), TypeRecords.end());
> >
> > Modified: llvm/trunk/tools/llvm-pdbutil/Analyze.cpp
> > URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/tools/llvm-pdbutil/Analyze.cpp?rev=327647&r1=327646&r2=327647&view=diff
> >
> ==============================================================================
> > --- llvm/trunk/tools/llvm-pdbutil/Analyze.cpp (original)
> > +++ llvm/trunk/tools/llvm-pdbutil/Analyze.cpp Thu Mar 15 10:38:26 2018
> > @@ -125,7 +125,7 @@ Error AnalysisStyle::dump() {
> >
> > const auto &Collisions = CollisionsIter->second;
> > outs() << TypeName << "\n";
> > - outs() << formatv(" [HEAD] {0:x} {1} {2}\n", A.second,
> > + outs() << formatv(" [HEAD] {0:x} {1} {2}\n", uint32_t(A.second),
> > getLeafTypeName(HeadRecord.Type), TypeName);
> > for (const auto &Chain : Collisions) {
> > if (Chain.TI == TI)
> >
> > Modified: llvm/trunk/unittests/DebugInfo/PDB/HashTableTest.cpp
> > URL:
> http://llvm.org/viewvc/llvm-project/llvm/trunk/unittests/DebugInfo/PDB/HashTableTest.cpp?rev=327647&r1=327646&r2=327647&view=diff
> >
> ==============================================================================
> > --- llvm/trunk/unittests/DebugInfo/PDB/HashTableTest.cpp (original)
> > +++ llvm/trunk/unittests/DebugInfo/PDB/HashTableTest.cpp Thu Mar 15
> 10:38:26 2018
> > @@ -8,10 +8,14 @@
> >
> //===----------------------------------------------------------------------===//
> >
> > #include "llvm/DebugInfo/PDB/Native/HashTable.h"
> > +
> > +#include "llvm/DebugInfo/PDB/Native/Hash.h"
> > #include "llvm/DebugInfo/PDB/Native/NamedStreamMap.h"
> > +#include "llvm/Support/Allocator.h"
> > #include "llvm/Support/BinaryByteStream.h"
> > #include "llvm/Support/BinaryStreamReader.h"
> > #include "llvm/Support/BinaryStreamWriter.h"
> > +#include "llvm/Support/StringSaver.h"
> > #include "llvm/Testing/Support/Error.h"
> >
> > #include "gtest/gtest.h"
> > @@ -23,7 +27,8 @@ using namespace llvm::pdb;
> > using namespace llvm::support;
> >
> > namespace {
> > -class HashTableInternals : public HashTable {
> > +
> > +class HashTableInternals : public HashTable<uint32_t> {
> > public:
> > using HashTable::Buckets;
> > using HashTable::Present;
> > @@ -32,18 +37,18 @@ public:
> > }
> >
> > TEST(HashTableTest, TestSimple) {
> > - HashTable Table;
> > + HashTableInternals Table;
> > EXPECT_EQ(0u, Table.size());
> > EXPECT_GT(Table.capacity(), 0u);
> >
> > - Table.set(3, 7);
> > + Table.set_as(3, 7);
> > EXPECT_EQ(1u, Table.size());
> > - ASSERT_NE(Table.end(), Table.find(3));
> > + ASSERT_NE(Table.end(), Table.find_as(3));
> > EXPECT_EQ(7u, Table.get(3));
> > }
> >
> > TEST(HashTableTest, TestCollision) {
> > - HashTable Table;
> > + HashTableInternals Table;
> > EXPECT_EQ(0u, Table.size());
> > EXPECT_GT(Table.capacity(), 0u);
> >
> > @@ -53,39 +58,33 @@ TEST(HashTableTest, TestCollision) {
> > uint32_t N1 = Table.capacity() + 1;
> > uint32_t N2 = 2 * N1;
> >
> > - Table.set(N1, 7);
> > - Table.set(N2, 12);
> > + Table.set_as(N1, 7);
> > + Table.set_as(N2, 12);
> > EXPECT_EQ(2u, Table.size());
> > - ASSERT_NE(Table.end(), Table.find(N1));
> > - ASSERT_NE(Table.end(), Table.find(N2));
> > + ASSERT_NE(Table.end(), Table.find_as(N1));
> > + ASSERT_NE(Table.end(), Table.find_as(N2));
> >
> > EXPECT_EQ(7u, Table.get(N1));
> > EXPECT_EQ(12u, Table.get(N2));
> > }
> >
> > TEST(HashTableTest, TestRemove) {
> > - HashTable Table;
> > + HashTableInternals Table;
> > EXPECT_EQ(0u, Table.size());
> > EXPECT_GT(Table.capacity(), 0u);
> >
> > - Table.set(1, 2);
> > - Table.set(3, 4);
> > + Table.set_as(1, 2);
> > + Table.set_as(3, 4);
> > EXPECT_EQ(2u, Table.size());
> > - ASSERT_NE(Table.end(), Table.find(1));
> > - ASSERT_NE(Table.end(), Table.find(3));
> > + ASSERT_NE(Table.end(), Table.find_as(1));
> > + ASSERT_NE(Table.end(), Table.find_as(3));
> >
> > EXPECT_EQ(2u, Table.get(1));
> > EXPECT_EQ(4u, Table.get(3));
> > -
> > - Table.remove(1u);
> > - EXPECT_EQ(1u, Table.size());
> > - EXPECT_EQ(Table.end(), Table.find(1));
> > - ASSERT_NE(Table.end(), Table.find(3));
> > - EXPECT_EQ(4u, Table.get(3));
> > }
> >
> > TEST(HashTableTest, TestCollisionAfterMultipleProbes) {
> > - HashTable Table;
> > + HashTableInternals Table;
> > EXPECT_EQ(0u, Table.size());
> > EXPECT_GT(Table.capacity(), 0u);
> >
> > @@ -96,31 +95,17 @@ TEST(HashTableTest, TestCollisionAfterMu
> > uint32_t N2 = N1 + 1;
> > uint32_t N3 = 2 * N1;
> >
> > - Table.set(N1, 7);
> > - Table.set(N2, 11);
> > - Table.set(N3, 13);
> > + Table.set_as(N1, 7);
> > + Table.set_as(N2, 11);
> > + Table.set_as(N3, 13);
> > EXPECT_EQ(3u, Table.size());
> > - ASSERT_NE(Table.end(), Table.find(N1));
> > - ASSERT_NE(Table.end(), Table.find(N2));
> > - ASSERT_NE(Table.end(), Table.find(N3));
> > + ASSERT_NE(Table.end(), Table.find_as(N1));
> > + ASSERT_NE(Table.end(), Table.find_as(N2));
> > + ASSERT_NE(Table.end(), Table.find_as(N3));
> >
> > EXPECT_EQ(7u, Table.get(N1));
> > EXPECT_EQ(11u, Table.get(N2));
> > EXPECT_EQ(13u, Table.get(N3));
> > -
> > - // Remove the one that had been filled in the middle, then insert
> another one
> > - // with a collision. It should fill the newly emptied slot.
> > - Table.remove(N2);
> > - uint32_t N4 = N1 * 3;
> > - Table.set(N4, 17);
> > - EXPECT_EQ(3u, Table.size());
> > - ASSERT_NE(Table.end(), Table.find(N1));
> > - ASSERT_NE(Table.end(), Table.find(N3));
> > - ASSERT_NE(Table.end(), Table.find(N4));
> > -
> > - EXPECT_EQ(7u, Table.get(N1));
> > - EXPECT_EQ(13u, Table.get(N3));
> > - EXPECT_EQ(17u, Table.get(N4));
> > }
> >
> > TEST(HashTableTest, Grow) {
> > @@ -128,15 +113,15 @@ TEST(HashTableTest, Grow) {
> > // guaranteed to trigger a grow. Then verify that the size is the
> same, the
> > // capacity is larger, and all the original items are still in the
> table.
> >
> > - HashTable Table;
> > + HashTableInternals Table;
> > uint32_t OldCapacity = Table.capacity();
> > for (uint32_t I = 0; I < OldCapacity; ++I) {
> > - Table.set(OldCapacity + I * 2 + 1, I * 2 + 3);
> > + Table.set_as(OldCapacity + I * 2 + 1, I * 2 + 3);
> > }
> > EXPECT_EQ(OldCapacity, Table.size());
> > EXPECT_GT(Table.capacity(), OldCapacity);
> > for (uint32_t I = 0; I < OldCapacity; ++I) {
> > - ASSERT_NE(Table.end(), Table.find(OldCapacity + I * 2 + 1));
> > + ASSERT_NE(Table.end(), Table.find_as(OldCapacity + I * 2 + 1));
> > EXPECT_EQ(I * 2 + 3, Table.get(OldCapacity + I * 2 + 1));
> > }
> > }
> > @@ -145,7 +130,7 @@ TEST(HashTableTest, Serialization) {
> > HashTableInternals Table;
> > uint32_t Cap = Table.capacity();
> > for (uint32_t I = 0; I < Cap; ++I) {
> > - Table.set(Cap + I * 2 + 1, I * 2 + 3);
> > + Table.set_as(Cap + I * 2 + 1, I * 2 + 3);
> > }
> >
> > std::vector<uint8_t> Buffer(Table.calculateSerializedLength());
> > @@ -207,3 +192,73 @@ TEST(HashTableTest, NamedStreamMap) {
> > EXPECT_EQ(7U, N);
> > } while (std::next_permutation(Streams.begin(), Streams.end()));
> > }
> > +
> > +namespace {
> > +struct FooBar {
> > + std::string S;
> > + uint32_t X;
> > + uint32_t Y;
> > + double Z;
> > +};
> > +
> > +} // namespace
> > +
> > +namespace llvm {
> > +namespace pdb {
> > +template <> struct PdbHashTraits<FooBar> {
> > + std::vector<char> Buffer;
> > +
> > + PdbHashTraits() { Buffer.push_back(0); }
> > +
> > + uint32_t hashLookupKey(StringRef S) const {
> > + return llvm::pdb::hashStringV1(S);
> > + }
> > +
> > + StringRef storageKeyToLookupKey(uint32_t N) const {
> > + if (N >= Buffer.size())
> > + return StringRef();
> > +
> > + return StringRef(Buffer.data() + N);
> > + }
> > +
> > + uint32_t lookupKeyToStorageKey(StringRef S) {
> > + uint32_t N = Buffer.size();
> > + Buffer.insert(Buffer.end(), S.begin(), S.end());
> > + Buffer.push_back('\0');
> > + return N;
> > + }
> > +};
> > +} // namespace pdb
> > +} // namespace llvm
> > +
> > +TEST(HashTableTest, NonTrivialValueType) {
> > + HashTable<FooBar> Table;
> > + uint32_t Cap = Table.capacity();
> > + for (uint32_t I = 0; I < Cap; ++I) {
> > + FooBar F;
> > + F.S = utostr(I);
> > + F.X = I;
> > + F.Y = I + 1;
> > + F.Z = static_cast<double>(I + 2);
> > + Table.set_as(utostr(I), F);
> > + }
> > +
> > + std::vector<uint8_t> Buffer(Table.calculateSerializedLength());
> > + MutableBinaryByteStream Stream(Buffer, little);
> > + BinaryStreamWriter Writer(Stream);
> > + EXPECT_THAT_ERROR(Table.commit(Writer), Succeeded());
> > + // We should have written precisely the number of bytes we calculated
> earlier.
> > + EXPECT_EQ(Buffer.size(), Writer.getOffset());
> > +
> > + HashTable<FooBar> Table2;
> > + BinaryStreamReader Reader(Stream);
> > + EXPECT_THAT_ERROR(Table2.load(Reader), Succeeded());
> > + // We should have read precisely the number of bytes we calculated
> earlier.
> > + EXPECT_EQ(Buffer.size(), Reader.getOffset());
> > +
> > + EXPECT_EQ(Table.size(), Table2.size());
> > + EXPECT_EQ(Table.capacity(), Table2.capacity());
> > + // EXPECT_EQ(Table.Buckets, Table2.Buckets);
> > + // EXPECT_EQ(Table.Present, Table2.Present);
> > + // EXPECT_EQ(Table.Deleted, Table2.Deleted);
> > +}
> >