[llvm] Calculate KnownBits from Metadata correctly for vector loads (PR #128908)

via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 26 08:51:00 PST 2025


llvmbot wrote:



@llvm/pr-subscribers-backend-amdgpu

Author: None (LU-JOHN)

Changes:

Calculate KnownBits correctly from !range metadata for vector loads, where the metadata describes a vector element rather than the full load width.
Use the resulting KnownBits to reduce a 64-bit shl to a 32-bit shl in the AMDGPU shl64_reduce test.
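As a rough illustration of the second point: if KnownBits proves that a 64-bit shift amount lies in [32, 64), the shl only ever moves the low word into the high word and the low word of the result is zero, so a single 32-bit shl suffices. The sketch below is standalone C++ and not part of the patch; the [32, 64) bound and the helper name `shl64_via_32` are assumptions for illustration (the actual !range values used by the test are not shown in this diff).

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical helper, not from the patch: for a shift amount known to be in
// [32, 64), the low word is shifted into the high word by (amt - 32) and the
// low word of the result is zero, so one 32-bit shl is enough.
static uint64_t shl64_via_32(uint64_t x, unsigned amt) {
  assert(amt >= 32 && amt < 64);
  uint32_t hi = static_cast<uint32_t>(x) << (amt - 32); // 32-bit shl
  return static_cast<uint64_t>(hi) << 32;               // low word is zero
}

int main() {
  for (unsigned amt = 32; amt < 64; ++amt)
    assert(shl64_via_32(0x0123456789abcdefULL, amt) ==
           0x0123456789abcdefULL << amt);
  return 0;
}
```

This is the scalar identity behind the CHECK-line change in the test below, where a `v_lshlrev_b64` becomes a `v_lshlrev_b32_e32` plus `v_mov_b32_e32 v0, 0`.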

---
Full diff: https://github.com/llvm/llvm-project/pull/128908.diff


2 Files Affected:

- (modified) llvm/lib/Analysis/ValueTracking.cpp (+6-1) 
- (modified) llvm/test/CodeGen/AMDGPU/shl64_reduce.ll (+2-1) 


``````````diff
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index e3e026f7979da..1d6368b408811 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -429,11 +429,16 @@ void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
     ConstantInt *Upper =
         mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
     ConstantRange Range(Lower->getValue(), Upper->getValue());
+    unsigned RangeBitWidth = Lower->getBitWidth();
+    // BitWidth > RangeBitWidth can happen if Known is set to the width of a
+    // vector load but Ranges describes a vector element.
+    assert(BitWidth >= RangeBitWidth);
 
     // The first CommonPrefixBits of all values in Range are equal.
     unsigned CommonPrefixBits =
         (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countl_zero();
-    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
+    APInt Mask = APInt::getBitsSet(BitWidth, RangeBitWidth - CommonPrefixBits,
+                                   RangeBitWidth);
     APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
     Known.One &= UnsignedMax & Mask;
     Known.Zero &= ~UnsignedMax & Mask;
diff --git a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
index 69242f4e44840..3f8553180df30 100644
--- a/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
+++ b/llvm/test/CodeGen/AMDGPU/shl64_reduce.ll
@@ -79,8 +79,9 @@ define <2 x i64> @shl_v2_metadata(<2 x i64> %arg0, ptr %arg1.ptr) {
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_lshlrev_b64 v[0:1], v4, v[0:1]
 ; CHECK-NEXT:    v_lshlrev_b64 v[2:3], v6, v[2:3]
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, v4, v0
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %shift.amt = load <2 x i64>, ptr %arg1.ptr, !range !0, !noundef !{}
   %shl = shl <2 x i64> %arg0, %shift.amt

``````````
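To make the KnownBits change concrete, here is a standalone sketch (not the patch itself) of the mask computation above. The numbers are assumptions chosen to mimic the test: Known is 128 bits wide (the full <2 x i64> load) while the !range metadata describes a 64-bit element, with a hypothetical element range of [32, 64).

```cpp
#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include <cassert>

using namespace llvm;

int main() {
  // Assumed numbers: Known tracks the full 128 bits of a <2 x i64> load,
  // while the !range metadata describes one 64-bit element; pretend the
  // element range is [32, 64).
  const unsigned BitWidth = 128;
  const unsigned RangeBitWidth = 64;
  ConstantRange Range(APInt(RangeBitWidth, 32), APInt(RangeBitWidth, 64));

  // All values in [32, 64) agree in their top 59 bits.
  unsigned CommonPrefixBits =
      (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countl_zero();
  assert(CommonPrefixBits == 59);

  // Old mask: the common prefix was placed at the top of the full 128-bit
  // value, i.e. bits [69, 128).
  APInt OldMask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
  assert(OldMask == APInt::getBitsSet(BitWidth, 69, 128));

  // New mask: the prefix stays inside the 64-bit element, i.e. bits [5, 64).
  APInt NewMask = APInt::getBitsSet(BitWidth, RangeBitWidth - CommonPrefixBits,
                                    RangeBitWidth);
  assert(NewMask == APInt::getBitsSet(BitWidth, 5, 64));

  // With the corrected mask, the low element is known to have bit 5 set and
  // bits [6, 64) clear, i.e. it lies in [32, 64).
  APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
  assert((UnsignedMax & NewMask) == APInt::getOneBitSet(BitWidth, 5));
  return 0;
}
```

With the old `getHighBitsSet` mask the common-prefix bits were applied at the top of the full loaded value, i.e. to bits of the high element where they are not valid; the new `getBitsSet` mask keeps them within the element width the metadata actually describes.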



https://github.com/llvm/llvm-project/pull/128908


More information about the llvm-commits mailing list