[llvm] 70aeb89 - Calculate KnownBits from Metadata correctly for vector loads (#128908)

via llvm-commits llvm-commits@lists.llvm.org
Tue Mar 25 08:46:33 PDT 2025


Author: LU-JOHN
Date: 2025-03-25T22:46:30+07:00
New Revision: 70aeb89094e8109cd072b7cbfbf05060c05e139a

URL: https://github.com/llvm/llvm-project/commit/70aeb89094e8109cd072b7cbfbf05060c05e139a
DIFF: https://github.com/llvm/llvm-project/commit/70aeb89094e8109cd072b7cbfbf05060c05e139a.diff

LOG: Calculate KnownBits from Metadata correctly for vector loads (#128908)

Calculate KnownBits correctly from metadata for vector loads.

---------

Signed-off-by: John Lu <John.Lu@amd.com>
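
For context: !range metadata on a vector load constrains each element of the
vector, so the known bits have to be computed at the scalar element width and
only afterwards extended to the result width. Before this patch, the range was
applied at the full memory width (32 bits for a <2 x i16> load) and truncated
only in a narrow special case, as the removed lines below show. A minimal
standalone sketch of the per-element view, using an assumed KnownSketch struct
as a stand-in for LLVM's real KnownBits class:

    // Model known bits as Zero/One masks at a given bit width (assumed
    // helper names, not LLVM's KnownBits API). A range !{i16 16, i16 17}
    // contains the single value 16, so every bit of each i16 element is
    // known.
    #include <cassert>
    #include <cstdint>

    struct KnownSketch {
      uint64_t Zero, One; // bit set => bit known to be 0 / known to be 1
      unsigned BitWidth;
    };

    KnownSketch fromSingleValueRange(uint64_t Lo, unsigned BitWidth) {
      uint64_t Mask = BitWidth >= 64 ? ~0ull : (1ull << BitWidth) - 1;
      return {~Lo & Mask, Lo & Mask, BitWidth};
    }

    int main() {
      // load <2 x i16>, !range !{i16 16, i16 17}: compute at width 16,
      // the element width, not at the load's full 32-bit memory width.
      KnownSketch K = fromSingleValueRange(16, 16);
      assert(K.One == 0x10 && K.Zero == 0xffef);
      return 0;
    }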

Added: 
    llvm/test/CodeGen/AMDGPU/vector_range_metadata.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
    llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 1cacab9528caa..e2ae31c86bc48 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -690,6 +690,14 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
       assert(NVT.getSizeInBits() == VT.getSizeInBits() &&
              "Can only promote loads to same size type");
 
+      // If the range metadata type does not match the legalized memory
+      // operation type, remove the range metadata.
+      if (const MDNode *MD = LD->getRanges()) {
+        ConstantInt *Lower = mdconst::extract<ConstantInt>(MD->getOperand(0));
+        if (Lower->getBitWidth() != NVT.getScalarSizeInBits() ||
+            !NVT.isInteger())
+          LD->getMemOperand()->clearRanges();
+      }
       SDValue Res = DAG.getLoad(NVT, dl, Chain, Ptr, LD->getMemOperand());
       RVal = DAG.getNode(ISD::BITCAST, dl, VT, Res);
       RChain = Res.getValue(1);

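The guard in this hunk exists because load promotion can change the in-memory
type out from under the metadata: a <2 x i16> load promoted to an i32 load, for
example, still carries i16-typed range operands, which no longer describe the
new scalar type. A hedged restatement of the check as a free function (the
committed code does this inline on the memory operand; names here are
illustrative):

    #include <cassert>

    // RangeBitWidth is the bit width of the range metadata's first
    // operand; NVTScalarBits is the scalar size of the promoted type.
    bool keepRangeMetadata(bool NVTIsInteger, unsigned NVTScalarBits,
                           unsigned RangeBitWidth) {
      return NVTIsInteger && RangeBitWidth == NVTScalarBits;
    }

    int main() {
      // Promoting a <2 x i16> load to i32: the i16-typed range no longer
      // matches the 32-bit scalar type, so it must be cleared.
      assert(!keepRangeMetadata(/*NVTIsInteger=*/true, /*NVTScalarBits=*/32,
                                /*RangeBitWidth=*/16));
      // A same-width integer promotion keeps the metadata.
      assert(keepRangeMetadata(true, 16, 16));
      return 0;
    }
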
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 610e159be96bd..7ce4eebf685e1 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4004,39 +4004,20 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
         }
       }
     } else if (Op.getResNo() == 0) {
-      KnownBits Known0(!LD->getMemoryVT().isScalableVT()
-                           ? LD->getMemoryVT().getFixedSizeInBits()
-                           : BitWidth);
-      EVT VT = Op.getValueType();
-      // Fill in any known bits from range information. There are 3 types being
-      // used. The results VT (same vector elt size as BitWidth), the loaded
-      // MemoryVT (which may or may not be vector) and the range VTs original
-      // type. The range matadata needs the full range (i.e
-      // MemoryVT().getSizeInBits()), which is truncated to the correct elt size
-      // if it is know. These are then extended to the original VT sizes below.
-      if (const MDNode *MD = LD->getRanges()) {
-        computeKnownBitsFromRangeMetadata(*MD, Known0);
-        if (VT.isVector()) {
-          // Handle truncation to the first demanded element.
-          // TODO: Figure out which demanded elements are covered
-          if (DemandedElts != 1 || !getDataLayout().isLittleEndian())
-            break;
-          Known0 = Known0.trunc(BitWidth);
-        }
-      }
-
-      if (LD->getMemoryVT().isVector())
-        Known0 = Known0.trunc(LD->getMemoryVT().getScalarSizeInBits());
+      unsigned ScalarMemorySize = LD->getMemoryVT().getScalarSizeInBits();
+      KnownBits KnownScalarMemory(ScalarMemorySize);
+      if (const MDNode *MD = LD->getRanges())
+        computeKnownBitsFromRangeMetadata(*MD, KnownScalarMemory);
 
-      // Extend the Known bits from memory to the size of the result.
+      // Extend the Known bits from memory to the size of the scalar result.
       if (ISD::isZEXTLoad(Op.getNode()))
-        Known = Known0.zext(BitWidth);
+        Known = KnownScalarMemory.zext(BitWidth);
       else if (ISD::isSEXTLoad(Op.getNode()))
-        Known = Known0.sext(BitWidth);
+        Known = KnownScalarMemory.sext(BitWidth);
       else if (ISD::isEXTLoad(Op.getNode()))
-        Known = Known0.anyext(BitWidth);
+        Known = KnownScalarMemory.anyext(BitWidth);
       else
-        Known = Known0;
+        Known = KnownScalarMemory;
       assert(Known.getBitWidth() == BitWidth);
       return Known;
     }

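With the rewrite above, the known bits are first established at the scalar
memory width and only then widened according to the load's extension kind. A
self-contained sketch of the three widening rules on the same Zero/One model
used earlier in this mail (assumed names; LLVM's KnownBits provides
zext/sext/anyext with these semantics):

    #include <cassert>
    #include <cstdint>

    struct KnownSketch {
      uint64_t Zero, One;
      unsigned BitWidth;
    };

    uint64_t maskBits(unsigned BW) {
      return BW >= 64 ? ~0ull : (1ull << BW) - 1;
    }

    // Zero-extension: the new high bits become known zero.
    KnownSketch zextK(KnownSketch K, unsigned BW) {
      return {K.Zero | (maskBits(BW) & ~maskBits(K.BitWidth)), K.One, BW};
    }

    // Any-extension: nothing is known about the new high bits.
    KnownSketch anyextK(KnownSketch K, unsigned BW) {
      return {K.Zero, K.One, BW};
    }

    // Sign-extension: a known sign bit is replicated into the high bits;
    // an unknown sign bit leaves them unknown.
    KnownSketch sextK(KnownSketch K, unsigned BW) {
      uint64_t Sign = 1ull << (K.BitWidth - 1);
      uint64_t Hi = maskBits(BW) & ~maskBits(K.BitWidth);
      KnownSketch R{K.Zero, K.One, BW};
      if (K.Zero & Sign)
        R.Zero |= Hi;
      else if (K.One & Sign)
        R.One |= Hi;
      return R;
    }

    int main() {
      // An i16 element known to be exactly 16, zero-extended to 32 bits.
      KnownSketch K{0xffef, 0x0010, 16};
      KnownSketch Z = zextK(K, 32);
      assert(Z.Zero == 0xffffffef && Z.One == 0x0010);
      return 0;
    }
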
diff --git a/llvm/test/CodeGen/AMDGPU/vector_range_metadata.ll b/llvm/test/CodeGen/AMDGPU/vector_range_metadata.ll
new file mode 100644
index 0000000000000..d496634ae474f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/vector_range_metadata.ll
@@ -0,0 +1,115 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; Ensure that range metadata is handled correctly for vector loads.
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s
+
+define <2 x i16> @test_add2x16(ptr %a_ptr, ptr %b_ptr) {
+; CHECK-LABEL: test_add2x16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_mov_b32_e32 v0, 0x300030
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %a = load <2 x i16>, ptr %a_ptr, !range !0, !noundef !{}
+  %b = load <2 x i16>, ptr %b_ptr, !range !1, !noundef !{}
+  %result = add <2 x i16> %a, %b
+  ret <2 x i16> %result
+}
+
+define <2 x i32> @test_add2x32(ptr %a_ptr, ptr %b_ptr) {
+; CHECK-LABEL: test_add2x32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v4, v[2:3]
+; CHECK-NEXT:    flat_load_dword v5, v[0:1]
+; CHECK-NEXT:    v_mov_b32_e32 v1, 48
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v0, v5, v4
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %a = load <2 x i32>, ptr %a_ptr, !range !2, !noundef !{}
+  %b = load <2 x i32>, ptr %b_ptr, !range !3, !noundef !{}
+  %result = add <2 x i32> %a, %b
+  ret <2 x i32> %result
+}
+
+define <2 x i64> @test_add2x64(ptr %a_ptr, ptr %b_ptr) {
+; CHECK-LABEL: test_add2x64:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[4:7], v[0:1]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[6:9], v[2:3]
+; CHECK-NEXT:    ; kill: killed $vgpr2 killed $vgpr3
+; CHECK-NEXT:    ; kill: killed $vgpr0 killed $vgpr1
+; CHECK-NEXT:    v_mov_b32_e32 v2, 48
+; CHECK-NEXT:    v_mov_b32_e32 v3, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v1, v5, v7
+; CHECK-NEXT:    v_or_b32_e32 v0, v4, v6
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %a = load <2 x i64>, ptr %a_ptr, !range !4, !noundef !{}
+  %b = load <2 x i64>, ptr %b_ptr, !range !5, !noundef !{}
+  %result = add <2 x i64> %a, %b
+  ret <2 x i64> %result
+}
+
+define <3 x i16> @test_add3x16(ptr %a_ptr, ptr %b_ptr) {
+; CHECK-LABEL: test_add3x16:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx2 v[4:5], v[0:1]
+; CHECK-NEXT:    flat_load_dwordx2 v[6:7], v[2:3]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v1, v5, v7
+; CHECK-NEXT:    v_or_b32_e32 v0, v4, v6
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %a = load <3 x i16>, ptr %a_ptr, !range !0, !noundef !{}
+  %b = load <3 x i16>, ptr %b_ptr, !range !1, !noundef !{}
+  %result = add <3 x i16> %a, %b
+  ret <3 x i16> %result
+}
+
+define <3 x i32> @test_add3x32(ptr %a_ptr, ptr %b_ptr) {
+; CHECK-LABEL: test_add3x32:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dword v4, v[2:3]
+; CHECK-NEXT:    flat_load_dword v5, v[0:1]
+; CHECK-NEXT:    v_mov_b32_e32 v1, 48
+; CHECK-NEXT:    v_mov_b32_e32 v2, 48
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v0, v5, v4
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %a = load <3 x i32>, ptr %a_ptr, !range !2, !noundef !{}
+  %b = load <3 x i32>, ptr %b_ptr, !range !3, !noundef !{}
+  %result = add <3 x i32> %a, %b
+  ret <3 x i32> %result
+}
+
+define <3 x i64> @test_add3x64(ptr %a_ptr, ptr %b_ptr) {
+; CHECK-LABEL: test_add3x64:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[4:7], v[0:1]
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    flat_load_dwordx4 v[6:9], v[2:3]
+; CHECK-NEXT:    ; kill: killed $vgpr2 killed $vgpr3
+; CHECK-NEXT:    ; kill: killed $vgpr0 killed $vgpr1
+; CHECK-NEXT:    v_mov_b32_e32 v2, 48
+; CHECK-NEXT:    v_mov_b32_e32 v3, 0
+; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_or_b32_e32 v1, v5, v7
+; CHECK-NEXT:    v_or_b32_e32 v0, v4, v6
+; CHECK-NEXT:    v_mov_b32_e32 v4, 48
+; CHECK-NEXT:    v_mov_b32_e32 v5, 0
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+  %a = load <3 x i64>, ptr %a_ptr, !range !4, !noundef !{}
+  %b = load <3 x i64>, ptr %b_ptr, !range !5, !noundef !{}
+  %result = add <3 x i64> %a, %b
+  ret <3 x i64> %result
+}
+
+!0 = !{i16 16, i16 17 }
+!1 = !{i16 32, i16 33 }
+!2 = !{i32 16, i32 17 }
+!3 = !{i32 32, i32 33 }
+!4 = !{i64 16, i64 17 }
+!5 = !{i64 32, i64 33 }
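
A quick arithmetic check of why the tests fold to constants: each lane of %a
is known to be exactly 16 and each lane of %b exactly 32, so every add is the
constant 48 (0x30); in test_add2x16 the two i16 lanes pack into a single
32-bit register as the 0x300030 immediate. A sketch verifying those numbers
(hypothetical helper code, only for checking the arithmetic):

    #include <cassert>

    int main() {
      unsigned Lane = 16 + 32;                // every lane folds to 48 (0x30)
      unsigned Packed2x16 = (Lane << 16) | Lane;
      assert(Packed2x16 == 0x300030);         // v_mov_b32 immediate in test_add2x16
      return 0;
    }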

More information about the llvm-commits mailing list