[llvm] 68e81d7 - [ADT] Use llvm::rotr (NFC)

Kazu Hirata via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 15 22:13:36 PST 2023


Author: Kazu Hirata
Date: 2023-02-15T22:13:30-08:00
New Revision: 68e81d7e45c7ea1e2427bf6ac12ac5702693d5e9

URL: https://github.com/llvm/llvm-project/commit/68e81d7e45c7ea1e2427bf6ac12ac5702693d5e9
DIFF: https://github.com/llvm/llvm-project/commit/68e81d7e45c7ea1e2427bf6ac12ac5702693d5e9.diff

LOG: [ADT] Use llvm::rotr (NFC)

This patch replaces rotate with llvm::rotr<uint64_t> where the
rotate count is an immediate.
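
For reference, the local rotate helper in Hashing.h is a hand-rolled right
rotation, while llvm::rotr (from llvm/ADT/bit.h) provides the same operation
generically. A minimal sketch of the equivalence, assuming the helper's usual
definition; rotate_manual and demo are illustrative names, not part of the
patch:

  #include <cstdint>
  #include "llvm/ADT/bit.h"

  // Hand-rolled right rotation in the style of the old Hashing.h helper;
  // the shift == 0 guard avoids an undefined shift by 64.
  inline uint64_t rotate_manual(uint64_t val, size_t shift) {
    return shift == 0 ? val : ((val >> shift) | (val << (64 - shift)));
  }

  // For any immediate count 0 < n < 64 (e.g. 43, 30, 20 as used below),
  // llvm::rotr<uint64_t>(v, n) produces the same value as the helper.
  uint64_t demo(uint64_t v) {
    return llvm::rotr<uint64_t>(v, 43) ^ rotate_manual(v, 43); // expected: 0
  }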

Added: 
    

Modified: 
    llvm/include/llvm/ADT/Hashing.h

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/ADT/Hashing.h b/llvm/include/llvm/ADT/Hashing.h
index 463a8d572aa17..ef983105c7bae 100644
--- a/llvm/include/llvm/ADT/Hashing.h
+++ b/llvm/include/llvm/ADT/Hashing.h
@@ -219,29 +219,30 @@ inline uint64_t hash_17to32_bytes(const char *s, size_t len, uint64_t seed) {
   uint64_t b = fetch64(s + 8);
   uint64_t c = fetch64(s + len - 8) * k2;
   uint64_t d = fetch64(s + len - 16) * k0;
-  return hash_16_bytes(rotate(a - b, 43) + rotate(c ^ seed, 30) + d,
-                       a + rotate(b ^ k3, 20) - c + len + seed);
+  return hash_16_bytes(llvm::rotr<uint64_t>(a - b, 43) +
+                           llvm::rotr<uint64_t>(c ^ seed, 30) + d,
+                       a + llvm::rotr<uint64_t>(b ^ k3, 20) - c + len + seed);
 }
 
 inline uint64_t hash_33to64_bytes(const char *s, size_t len, uint64_t seed) {
   uint64_t z = fetch64(s + 24);
   uint64_t a = fetch64(s) + (len + fetch64(s + len - 16)) * k0;
-  uint64_t b = rotate(a + z, 52);
-  uint64_t c = rotate(a, 37);
+  uint64_t b = llvm::rotr<uint64_t>(a + z, 52);
+  uint64_t c = llvm::rotr<uint64_t>(a, 37);
   a += fetch64(s + 8);
-  c += rotate(a, 7);
+  c += llvm::rotr<uint64_t>(a, 7);
   a += fetch64(s + 16);
   uint64_t vf = a + z;
-  uint64_t vs = b + rotate(a, 31) + c;
+  uint64_t vs = b + llvm::rotr<uint64_t>(a, 31) + c;
   a = fetch64(s + 16) + fetch64(s + len - 32);
   z = fetch64(s + len - 8);
-  b = rotate(a + z, 52);
-  c = rotate(a, 37);
+  b = llvm::rotr<uint64_t>(a + z, 52);
+  c = llvm::rotr<uint64_t>(a, 37);
   a += fetch64(s + len - 24);
-  c += rotate(a, 7);
+  c += llvm::rotr<uint64_t>(a, 7);
   a += fetch64(s + len - 16);
   uint64_t wf = a + z;
-  uint64_t ws = b + rotate(a, 31) + c;
+  uint64_t ws = b + llvm::rotr<uint64_t>(a, 31) + c;
   uint64_t r = shift_mix((vf + ws) * k2 + (wf + vs) * k0);
   return shift_mix((seed ^ (r * k0)) + vs) * k2;
 }
@@ -271,9 +272,13 @@ struct hash_state {
   /// seed and the first 64-byte chunk.
   /// This effectively performs the initial mix.
   static hash_state create(const char *s, uint64_t seed) {
-    hash_state state = {
-      0, seed, hash_16_bytes(seed, k1), rotate(seed ^ k1, 49),
-      seed * k1, shift_mix(seed), 0 };
+    hash_state state = {0,
+                        seed,
+                        hash_16_bytes(seed, k1),
+                        llvm::rotr<uint64_t>(seed ^ k1, 49),
+                        seed * k1,
+                        shift_mix(seed),
+                        0};
     state.h6 = hash_16_bytes(state.h4, state.h5);
     state.mix(s);
     return state;
@@ -284,10 +289,10 @@ struct hash_state {
   static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
     a += fetch64(s);
     uint64_t c = fetch64(s + 24);
-    b = rotate(b + a + c, 21);
+    b = llvm::rotr<uint64_t>(b + a + c, 21);
     uint64_t d = a;
     a += fetch64(s + 8) + fetch64(s + 16);
-    b += rotate(a, 44) + d;
+    b += llvm::rotr<uint64_t>(a, 44) + d;
     a += c;
   }
 
@@ -295,11 +300,11 @@ struct hash_state {
   /// We mix all 64 bytes even when the chunk length is smaller, but we
   /// record the actual length.
   void mix(const char *s) {
-    h0 = rotate(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
-    h1 = rotate(h1 + h4 + fetch64(s + 48), 42) * k1;
+    h0 = llvm::rotr<uint64_t>(h0 + h1 + h3 + fetch64(s + 8), 37) * k1;
+    h1 = llvm::rotr<uint64_t>(h1 + h4 + fetch64(s + 48), 42) * k1;
     h0 ^= h6;
     h1 += h3 + fetch64(s + 40);
-    h2 = rotate(h2 + h5, 33) * k1;
+    h2 = llvm::rotr<uint64_t>(h2 + h5, 33) * k1;
     h3 = h4 * k1;
     h4 = h0 + h5;
     mix_32_bytes(s, h3, h4);
