[llvm] 48e93f5 - [Support] Add llvm::xxh3_64bits
Fangrui Song via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 18 13:36:16 PDT 2023
Author: Fangrui Song
Date: 2023-07-18T13:36:11-07:00
New Revision: 48e93f57f1ee914ca29aa31bf2ccd916565a3610
URL: https://github.com/llvm/llvm-project/commit/48e93f57f1ee914ca29aa31bf2ccd916565a3610
DIFF: https://github.com/llvm/llvm-project/commit/48e93f57f1ee914ca29aa31bf2ccd916565a3610.diff
LOG: [Support] Add llvm::xxh3_64bits
ld.lld's SHF_MERGE|SHF_STRINGS duplicate elimination is computation-heavy
and utilizes llvm::xxHash64, a simplified version of XXH64.
Many external sources confirm that the newer variant XXH3 is much faster.
I picked a few hash implementations and measured the proportion of overall
link time spent on hashing (linking a debug build of clang 16 on an
AMD Zen 2 machine):
* llvm::xxHash64: 3.63%
* official XXH64 (`#define XXH_VECTOR XXH_SCALAR`): 3.53%
* official XXH3_64bits (`#define XXH_VECTOR XXH_SCALAR`): 1.21%
* official XXH3_64bits (default, essentially `XXH_SSE2`): 1.22%
* this patch llvm::xxh3_64bits: 1.19%
The rest of the link is unchanged across these configurations, so a lower
proportion indicates faster hashing. XXH3 is clearly significantly faster
than both the official XXH64 and our llvm::xxHash64.
(Length distribution of the hashed strings, as "length range: count":
1-3: 393434
4-8: 2084056
9-16: 2846249
17-128: 5598928
129-240: 1317989
241-: 328058
)
This patch adds a heavily simplified version of https://github.com/Cyan4973/xxHash,
taking many simplification ideas from Devin Hussey's xxhash-clean.
Important x86-64 optimization ideas:
* Make XXH3_len_129to240_64b and XXH3_hashLong_64b noinline
* Unroll XXH3_len_17to128_64b (see the loop-form sketch below)
* __restrict does not affect Clang code generation
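On the unrolling point: the 17-to-128-byte path mixes 16-byte pairs taken from
both ends of the input and can be written as a short loop, roughly in the style
of xxhash-clean (this is a sketch, not code from this patch); unrolling it into
the nested-if form seen in the diff below avoids loop overhead for these short
inputs. The sketch reuses the patch's XXH3_mix16B, XXH3_avalanche, and
PRIME64_1; the function name is illustrative only.

  // Sketch only (not in this patch): loop form equivalent to the unrolled
  // XXH3_len_17to128_64b below.
  static uint64_t len_17to128_loop(const uint8_t *input, size_t len,
                                   const uint8_t *secret, uint64_t seed) {
    uint64_t acc = (uint64_t)len * PRIME64_1;
    size_t rounds = (len - 1) / 32 + 1; // 1 to 4 rounds for len in [17,128]
    for (size_t i = 0; i < rounds; ++i) {
      // One 16-byte mix from the front and one from the back per round.
      acc += XXH3_mix16B(input + 16 * i, secret + 32 * i, seed);
      acc += XXH3_mix16B(input + len - 16 * (i + 1), secret + 32 * i + 16, seed);
    }
    return XXH3_avalanche(acc);
  }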
Besides SHF_MERGE|SHF_STRINGS duplicate elimination,
StringMapImpl::LookupBucketFor in llvm/ADT/StringMap.h and a few places in lld
can potentially be accelerated by switching to llvm::xxh3_64bits.
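As a rough usage sketch (not part of this commit), such a caller only needs to
change its hashing call; xxh3_64bits has both StringRef and ArrayRef<uint8_t>
overloads. The hashPiece helper below is hypothetical, for illustration only.

  #include "llvm/ADT/StringRef.h"
  #include "llvm/Support/xxhash.h"

  // Hypothetical helper: hash the bytes of a mergeable-string piece for
  // duplicate elimination.
  static uint64_t hashPiece(llvm::StringRef Data) {
    // xxh3_64bits uses the default secret and a seed of 0.
    return llvm::xxh3_64bits(Data);
  }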
Link: https://github.com/llvm/llvm-project/issues/63750
Reviewed By: serge-sans-paille
Differential Revision: https://reviews.llvm.org/D154812
Added:
Modified:
llvm/include/llvm/Support/xxhash.h
llvm/lib/Support/xxhash.cpp
llvm/unittests/Support/xxhashTest.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/Support/xxhash.h b/llvm/include/llvm/Support/xxhash.h
index 6fd67ff9ce1c55..0cef3a54e50d70 100644
--- a/llvm/include/llvm/Support/xxhash.h
+++ b/llvm/include/llvm/Support/xxhash.h
@@ -44,6 +44,11 @@
namespace llvm {
uint64_t xxHash64(llvm::StringRef Data);
uint64_t xxHash64(llvm::ArrayRef<uint8_t> Data);
+
+uint64_t xxh3_64bits(ArrayRef<uint8_t> data);
+inline uint64_t xxh3_64bits(StringRef data) {
+ return xxh3_64bits(ArrayRef(data.bytes_begin(), data.size()));
+}
}
#endif
diff --git a/llvm/lib/Support/xxhash.cpp b/llvm/lib/Support/xxhash.cpp
index 99b94a966bc969..577f14189caff7 100644
--- a/llvm/lib/Support/xxhash.cpp
+++ b/llvm/lib/Support/xxhash.cpp
@@ -1,6 +1,6 @@
/*
* xxHash - Fast Hash algorithm
-* Copyright (C) 2012-2016, Yann Collet
+* Copyright (C) 2012-2021, Yann Collet
*
* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
*
@@ -32,10 +32,14 @@
* - xxHash source repository : https://github.com/Cyan4973/xxHash
*/
-/* based on revision d2df04efcbef7d7f6886d345861e5dfda4edacc1 Removed
- * everything but a simple interface for computing XXh64. */
+// xxhash64 is based on commit d2df04efcbef7d7f6886d345861e5dfda4edacc1. Removed
+// everything but a simple interface for computing xxh64.
+
+// xxh3_64bits is based on commit d5891596637d21366b9b1dcf2c0007a3edb26a9e (July
+// 2023).
#include "llvm/Support/xxhash.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Endian.h"
#include <stdlib.h>
@@ -47,6 +51,10 @@ static uint64_t rotl64(uint64_t X, size_t R) {
return (X << R) | (X >> (64 - R));
}
+constexpr uint32_t PRIME32_1 = 0x9E3779B1;
+constexpr uint32_t PRIME32_2 = 0x85EBCA77;
+constexpr uint32_t PRIME32_3 = 0xC2B2AE3D;
+
static const uint64_t PRIME64_1 = 11400714785074694791ULL;
static const uint64_t PRIME64_2 = 14029467366897019727ULL;
static const uint64_t PRIME64_3 = 1609587929392839161ULL;
@@ -67,6 +75,15 @@ static uint64_t mergeRound(uint64_t Acc, uint64_t Val) {
return Acc;
}
+static uint64_t XXH64_avalanche(uint64_t hash) {
+ hash ^= hash >> 33;
+ hash *= PRIME64_2;
+ hash ^= hash >> 29;
+ hash *= PRIME64_3;
+ hash ^= hash >> 32;
+ return hash;
+}
+
uint64_t llvm::xxHash64(StringRef Data) {
size_t Len = Data.size();
uint64_t Seed = 0;
@@ -124,15 +141,267 @@ uint64_t llvm::xxHash64(StringRef Data) {
P++;
}
- H64 ^= H64 >> 33;
- H64 *= PRIME64_2;
- H64 ^= H64 >> 29;
- H64 *= PRIME64_3;
- H64 ^= H64 >> 32;
-
- return H64;
+ return XXH64_avalanche(H64);
}
uint64_t llvm::xxHash64(ArrayRef<uint8_t> Data) {
return xxHash64({(const char *)Data.data(), Data.size()});
}
+
+constexpr size_t XXH3_SECRETSIZE_MIN = 136;
+constexpr size_t XXH_SECRET_DEFAULT_SIZE = 192;
+
+/* Pseudorandom data taken directly from FARSH */
+// clang-format off
+constexpr uint8_t kSecret[XXH_SECRET_DEFAULT_SIZE] = {
+ 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
+ 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
+ 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
+ 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
+ 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
+ 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
+ 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
+ 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
+ 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
+ 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
+ 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
+ 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
+};
+// clang-format on
+
+constexpr uint64_t PRIME_MX1 = 0x165667919E3779F9;
+constexpr uint64_t PRIME_MX2 = 0x9FB21C651E98DF25;
+
+// Calculates a 64-bit to 128-bit multiply, then XOR folds it.
+static uint64_t XXH3_mul128_fold64(uint64_t lhs, uint64_t rhs) {
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ __uint128_t product = (__uint128_t)lhs * (__uint128_t)rhs;
+ return uint64_t(product) ^ uint64_t(product >> 64);
+
+#else
+ /* First calculate all of the cross products. */
+ const uint64_t lo_lo = (lhs & 0xFFFFFFFF) * (rhs & 0xFFFFFFFF);
+ const uint64_t hi_lo = (lhs >> 32) * (rhs & 0xFFFFFFFF);
+ const uint64_t lo_hi = (lhs & 0xFFFFFFFF) * (rhs >> 32);
+ const uint64_t hi_hi = (lhs >> 32) * (rhs >> 32);
+
+ /* Now add the products together. These will never overflow. */
+ const uint64_t cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
+ const uint64_t upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
+ const uint64_t lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
+
+ return upper ^ lower;
+#endif
+}
+
+constexpr size_t XXH_STRIPE_LEN = 64;
+constexpr size_t XXH_SECRET_CONSUME_RATE = 8;
+constexpr size_t XXH_ACC_NB = XXH_STRIPE_LEN / sizeof(uint64_t);
+
+static uint64_t XXH3_avalanche(uint64_t hash) {
+ hash ^= hash >> 37;
+ hash *= PRIME_MX1;
+ hash ^= hash >> 32;
+ return hash;
+}
+
+static uint64_t XXH3_len_1to3_64b(const uint8_t *input, size_t len,
+ const uint8_t *secret, uint64_t seed) {
+ const uint8_t c1 = input[0];
+ const uint8_t c2 = input[len >> 1];
+ const uint8_t c3 = input[len - 1];
+ uint32_t combined = ((uint32_t)c1 << 16) | ((uint32_t)c2 << 24) |
+ ((uint32_t)c3 << 0) | ((uint32_t)len << 8);
+ uint64_t bitflip =
+ (uint64_t)(endian::read32le(secret) ^ endian::read32le(secret + 4)) +
+ seed;
+ return XXH64_avalanche(uint64_t(combined) ^ bitflip);
+}
+
+static uint64_t XXH3_len_4to8_64b(const uint8_t *input, size_t len,
+ const uint8_t *secret, uint64_t seed) {
+ seed ^= (uint64_t)byteswap(uint32_t(seed)) << 32;
+ const uint32_t input1 = endian::read32le(input);
+ const uint32_t input2 = endian::read32le(input + len - 4);
+ uint64_t acc =
+ (endian::read64le(secret + 8) ^ endian::read64le(secret + 16)) - seed;
+ const uint64_t input64 = (uint64_t)input2 | ((uint64_t)input1 << 32);
+ acc ^= input64;
+ // XXH3_rrmxmx(acc, len)
+ acc ^= rotl64(acc, 49) ^ rotl64(acc, 24);
+ acc *= PRIME_MX2;
+ acc ^= (acc >> 35) + (uint64_t)len;
+ acc *= PRIME_MX2;
+ return acc ^ (acc >> 28);
+}
+
+static uint64_t XXH3_len_9to16_64b(const uint8_t *input, size_t len,
+ const uint8_t *secret, uint64_t const seed) {
+ uint64_t input_lo =
+ (endian::read64le(secret + 24) ^ endian::read64le(secret + 32)) + seed;
+ uint64_t input_hi =
+ (endian::read64le(secret + 40) ^ endian::read64le(secret + 48)) - seed;
+ input_lo ^= endian::read64le(input);
+ input_hi ^= endian::read64le(input + len - 8);
+ uint64_t acc = uint64_t(len) + byteswap(input_lo) + input_hi +
+ XXH3_mul128_fold64(input_lo, input_hi);
+ return XXH3_avalanche(acc);
+}
+
+LLVM_ATTRIBUTE_ALWAYS_INLINE
+static uint64_t XXH3_len_0to16_64b(const uint8_t *input, size_t len,
+ const uint8_t *secret, uint64_t const seed) {
+ if (LLVM_LIKELY(len > 8))
+ return XXH3_len_9to16_64b(input, len, secret, seed);
+ if (LLVM_LIKELY(len >= 4))
+ return XXH3_len_4to8_64b(input, len, secret, seed);
+ if (len != 0)
+ return XXH3_len_1to3_64b(input, len, secret, seed);
+ return XXH64_avalanche(seed ^ endian::read64le(secret + 56) ^
+ endian::read64le(secret + 64));
+}
+
+static uint64_t XXH3_mix16B(const uint8_t *input, uint8_t const *secret,
+ uint64_t seed) {
+ uint64_t lhs = seed;
+ uint64_t rhs = 0U - seed;
+ lhs += endian::read64le(secret);
+ rhs += endian::read64le(secret + 8);
+ lhs ^= endian::read64le(input);
+ rhs ^= endian::read64le(input + 8);
+ return XXH3_mul128_fold64(lhs, rhs);
+}
+
+/* For mid range keys, XXH3 uses a Mum-hash variant. */
+LLVM_ATTRIBUTE_ALWAYS_INLINE
+static uint64_t XXH3_len_17to128_64b(const uint8_t *input, size_t len,
+ const uint8_t *secret,
+ uint64_t const seed) {
+ uint64_t acc = len * PRIME64_1, acc_end;
+ acc += XXH3_mix16B(input + 0, secret + 0, seed);
+ acc_end = XXH3_mix16B(input + len - 16, secret + 16, seed);
+ if (len > 32) {
+ acc += XXH3_mix16B(input + 16, secret + 32, seed);
+ acc_end += XXH3_mix16B(input + len - 32, secret + 48, seed);
+ if (len > 64) {
+ acc += XXH3_mix16B(input + 32, secret + 64, seed);
+ acc_end += XXH3_mix16B(input + len - 48, secret + 80, seed);
+ if (len > 96) {
+ acc += XXH3_mix16B(input + 48, secret + 96, seed);
+ acc_end += XXH3_mix16B(input + len - 64, secret + 112, seed);
+ }
+ }
+ }
+ return XXH3_avalanche(acc + acc_end);
+}
+
+constexpr size_t XXH3_MIDSIZE_MAX = 240;
+
+LLVM_ATTRIBUTE_NOINLINE
+static uint64_t XXH3_len_129to240_64b(const uint8_t *input, size_t len,
+ const uint8_t *secret, uint64_t seed) {
+ constexpr size_t XXH3_MIDSIZE_STARTOFFSET = 3;
+ constexpr size_t XXH3_MIDSIZE_LASTOFFSET = 17;
+ uint64_t acc = (uint64_t)len * PRIME64_1;
+ const unsigned nbRounds = len / 16;
+ for (unsigned i = 0; i < 8; ++i)
+ acc += XXH3_mix16B(input + 16 * i, secret + 16 * i, seed);
+ acc = XXH3_avalanche(acc);
+
+ for (unsigned i = 8; i < nbRounds; ++i) {
+ acc += XXH3_mix16B(input + 16 * i,
+ secret + 16 * (i - 8) + XXH3_MIDSIZE_STARTOFFSET, seed);
+ }
+ /* last bytes */
+ acc +=
+ XXH3_mix16B(input + len - 16,
+ secret + XXH3_SECRETSIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
+ return XXH3_avalanche(acc);
+}
+
+LLVM_ATTRIBUTE_ALWAYS_INLINE
+static void XXH3_accumulate_512_scalar(uint64_t *acc, const uint8_t *input,
+ const uint8_t *secret) {
+ for (size_t i = 0; i < XXH_ACC_NB; ++i) {
+ uint64_t data_val = endian::read64le(input + 8 * i);
+ uint64_t data_key = data_val ^ endian::read64le(secret + 8 * i);
+ acc[i ^ 1] += data_val;
+ acc[i] += uint32_t(data_key) * (data_key >> 32);
+ }
+}
+
+LLVM_ATTRIBUTE_ALWAYS_INLINE
+static void XXH3_accumulate_scalar(uint64_t *acc, const uint8_t *input,
+ const uint8_t *secret, size_t nbStripes) {
+ for (size_t n = 0; n < nbStripes; ++n)
+ XXH3_accumulate_512_scalar(acc, input + n * XXH_STRIPE_LEN,
+ secret + n * XXH_SECRET_CONSUME_RATE);
+}
+
+static void XXH3_scrambleAcc(uint64_t *acc, const uint8_t *secret) {
+ for (size_t i = 0; i < XXH_ACC_NB; ++i) {
+ acc[i] ^= acc[i] >> 47;
+ acc[i] ^= endian::read64le(secret + 8 * i);
+ acc[i] *= PRIME32_1;
+ }
+}
+
+static uint64_t XXH3_mix2Accs(const uint64_t *acc, const uint8_t *secret) {
+ return XXH3_mul128_fold64(acc[0] ^ endian::read64le(secret),
+ acc[1] ^ endian::read64le(secret + 8));
+}
+
+static uint64_t XXH3_mergeAccs(const uint64_t *acc, const uint8_t *key,
+ uint64_t start) {
+ uint64_t result64 = start;
+ for (size_t i = 0; i < 4; ++i)
+ result64 += XXH3_mix2Accs(acc + 2 * i, key + 16 * i);
+ return XXH3_avalanche(result64);
+}
+
+LLVM_ATTRIBUTE_NOINLINE
+static uint64_t XXH3_hashLong_64b(const uint8_t *input, size_t len,
+ const uint8_t *secret, size_t secretSize) {
+ const size_t nbStripesPerBlock =
+ (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
+ const size_t block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
+ const size_t nb_blocks = (len - 1) / block_len;
+ alignas(16) uint64_t acc[XXH_ACC_NB] = {
+ PRIME32_3, PRIME64_1, PRIME64_2, PRIME64_3,
+ PRIME64_4, PRIME32_2, PRIME64_5, PRIME32_1,
+ };
+ for (size_t n = 0; n < nb_blocks; ++n) {
+ XXH3_accumulate_scalar(acc, input + n * block_len, secret,
+ nbStripesPerBlock);
+ XXH3_scrambleAcc(acc, secret + secretSize - XXH_STRIPE_LEN);
+ }
+
+ /* last partial block */
+ const size_t nbStripes = (len - 1 - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
+ assert(nbStripes <= secretSize / XXH_SECRET_CONSUME_RATE);
+ XXH3_accumulate_scalar(acc, input + nb_blocks * block_len, secret, nbStripes);
+
+ /* last stripe */
+ constexpr size_t XXH_SECRET_LASTACC_START = 7;
+ XXH3_accumulate_512_scalar(acc, input + len - XXH_STRIPE_LEN,
+ secret + secretSize - XXH_STRIPE_LEN -
+ XXH_SECRET_LASTACC_START);
+
+ /* converge into final hash */
+ constexpr size_t XXH_SECRET_MERGEACCS_START = 11;
+ return XXH3_mergeAccs(acc, secret + XXH_SECRET_MERGEACCS_START,
+ (uint64_t)len * PRIME64_1);
+}
+
+uint64_t llvm::xxh3_64bits(ArrayRef<uint8_t> data) {
+ auto *in = data.data();
+ size_t len = data.size();
+ if (len <= 16)
+ return XXH3_len_0to16_64b(in, len, kSecret, 0);
+ if (len <= 128)
+ return XXH3_len_17to128_64b(in, len, kSecret, 0);
+ if (len <= XXH3_MIDSIZE_MAX)
+ return XXH3_len_129to240_64b(in, len, kSecret, 0);
+ return XXH3_hashLong_64b(in, len, kSecret, sizeof(kSecret));
+}
diff --git a/llvm/unittests/Support/xxhashTest.cpp b/llvm/unittests/Support/xxhashTest.cpp
index f5c49e43df5425..7d78de6772b515 100644
--- a/llvm/unittests/Support/xxhashTest.cpp
+++ b/llvm/unittests/Support/xxhashTest.cpp
@@ -18,3 +18,46 @@ TEST(xxhashTest, Basic) {
EXPECT_EQ(0x69196c1b3af0bff9U,
xxHash64("0123456789abcdefghijklmnopqrstuvwxyz"));
}
+
+TEST(xxhashTest, xxh3) {
+ constexpr size_t size = 2243;
+ uint8_t a[size];
+ uint64_t x = 1;
+ for (size_t i = 0; i < size; ++i) {
+ x ^= x << 13;
+ x ^= x >> 7;
+ x ^= x << 17;
+ a[i] = uint8_t(x);
+ }
+
+#define F(len, expected) \
+ EXPECT_EQ(uint64_t(expected), xxh3_64bits(ArrayRef(a, size_t(len))))
+ F(0, 0x2d06800538d394c2);
+ F(1, 0xd0d496e05c553485);
+ F(2, 0x84d625edb7055eac);
+ F(3, 0x6ea2d59aca5c3778);
+ F(4, 0xbf65290914e80242);
+ F(5, 0xc01fd099ad4fc8e4);
+ F(6, 0x9e3ea8187399caa5);
+ F(7, 0x9da8b60540644f5a);
+ F(8, 0xabc1413da6cd0209);
+ F(9, 0x8bc89400bfed51f6);
+ F(16, 0x7e46916754d7c9b8);
+ F(17, 0xed4be912ba5f836d);
+ F(32, 0xf59b59b58c304fd1);
+ F(33, 0x9013fb74ca603e0c);
+ F(64, 0xfa5271fcce0db1c3);
+ F(65, 0x79c42431727f1012);
+ F(96, 0x591ee0ddf9c9ccd1);
+ F(97, 0x8ffc6a3111fe19da);
+ F(128, 0x06a146ee9a2da378);
+ F(129, 0xbc7138129bf065da);
+ F(403, 0xcefeb3ffa532ad8c);
+ F(512, 0xcdfa6b6268e3650f);
+ F(513, 0x4bb5d42742f9765f);
+ F(2048, 0x330ce110cbb79eae);
+ F(2049, 0x3ba6afa0249fef9a);
+ F(2240, 0xd61d4d2a94e926a8);
+ F(2243, 0x0979f786a24edde7);
+#undef F
+}