[llvm] [SelectionDAG] Add support for extending masked loads in computeKnownBits (PR #115450)

via llvm-commits llvm-commits at lists.llvm.org
Fri Nov 8 01:45:41 PST 2024


llvmbot wrote:



@llvm/pr-subscribers-backend-aarch64

Author: David Sherwood (david-arm)

<details>
<summary>Changes</summary>

We already support computing known bits for extending loads, but not for masked loads. For now I've only added support for zero-extends, since that's the only case currently tested. Even when the passthru value is poison, we still know the top X bits are zero.
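
For reference, here is a minimal IR sketch (not taken from the patch) of the kind of pattern that benefits, assuming the DAG combiner folds the `zext` into a zero-extending `ISD::MLOAD` as it does for the sve-hadd.ll tests: the i8 elements are widened to i16, so `computeKnownBits` can now prove the top 8 bits of every lane are zero even though the passthru is poison.

```llvm
; Hypothetical example: a masked load of i8 elements zero-extended to i16.
; After this change, computeKnownBits on the resulting zero-extending MLOAD
; node reports the top 8 bits of each i16 lane as known zero.
declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i8>)

define <vscale x 8 x i16> @zext_mload(ptr %p, <vscale x 8 x i1> %mask) {
  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr %p, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> poison)
  %ext = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %ext
}
```

Knowing the high bits are zero lets later combines assume the widened adds cannot overflow; for example, the avgflooru expansion `(a & b) + ((a ^ b) >> 1)` can be replaced by the simpler `(a + b) >> 1`, which is what the sve-hadd.ll changes below show.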

---
Full diff: https://github.com/llvm/llvm-project/pull/115450.diff


2 Files Affected:

- (modified) llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp (+13) 
- (modified) llvm/test/CodeGen/AArch64/sve-hadd.ll (+6-8) 


``````````diff
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 203e14f6cde3e3..901e63c47fac17 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -3920,6 +3920,19 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
     Known.Zero.setBitsFrom(1);
     break;
   }
+  case ISD::MGATHER:
+  case ISD::MLOAD: {
+    ISD::LoadExtType ETy =
+        (Opcode == ISD::MGATHER)
+            ? cast<MaskedGatherSDNode>(Op)->getExtensionType()
+            : cast<MaskedLoadSDNode>(Op)->getExtensionType();
+    if (ETy == ISD::ZEXTLOAD) {
+      EVT MemVT = cast<MemSDNode>(Op)->getMemoryVT();
+      KnownBits Known0(MemVT.getScalarSizeInBits());
+      return Known0.zext(BitWidth);
+    }
+    break;
+  }
   case ISD::LOAD: {
     LoadSDNode *LD = cast<LoadSDNode>(Op);
     const Constant *Cst = TLI->getTargetConstantFromLoad(LD);
diff --git a/llvm/test/CodeGen/AArch64/sve-hadd.ll b/llvm/test/CodeGen/AArch64/sve-hadd.ll
index 857a883d80ea3d..978ee4534e5e1a 100644
--- a/llvm/test/CodeGen/AArch64/sve-hadd.ll
+++ b/llvm/test/CodeGen/AArch64/sve-hadd.ll
@@ -1347,10 +1347,8 @@ define void @zext_mload_avgflooru(ptr %p1, ptr %p2, <vscale x 8 x i1> %mask) {
 ; SVE:       // %bb.0:
 ; SVE-NEXT:    ld1b { z0.h }, p0/z, [x0]
 ; SVE-NEXT:    ld1b { z1.h }, p0/z, [x1]
-; SVE-NEXT:    eor z2.d, z0.d, z1.d
-; SVE-NEXT:    and z0.d, z0.d, z1.d
-; SVE-NEXT:    lsr z1.h, z2.h, #1
 ; SVE-NEXT:    add z0.h, z0.h, z1.h
+; SVE-NEXT:    lsr z0.h, z0.h, #1
 ; SVE-NEXT:    st1h { z0.h }, p0, [x0]
 ; SVE-NEXT:    ret
 ;
@@ -1377,11 +1375,11 @@ define void @zext_mload_avgceilu(ptr %p1, ptr %p2, <vscale x 8 x i1> %mask) {
 ; SVE-LABEL: zext_mload_avgceilu:
 ; SVE:       // %bb.0:
 ; SVE-NEXT:    ld1b { z0.h }, p0/z, [x0]
-; SVE-NEXT:    ld1b { z1.h }, p0/z, [x1]
-; SVE-NEXT:    eor z2.d, z0.d, z1.d
-; SVE-NEXT:    orr z0.d, z0.d, z1.d
-; SVE-NEXT:    lsr z1.h, z2.h, #1
-; SVE-NEXT:    sub z0.h, z0.h, z1.h
+; SVE-NEXT:    mov z1.h, #-1 // =0xffffffffffffffff
+; SVE-NEXT:    ld1b { z2.h }, p0/z, [x1]
+; SVE-NEXT:    eor z0.d, z0.d, z1.d
+; SVE-NEXT:    sub z0.h, z2.h, z0.h
+; SVE-NEXT:    lsr z0.h, z0.h, #1
 ; SVE-NEXT:    st1b { z0.h }, p0, [x0]
 ; SVE-NEXT:    ret
 ;

``````````

</details>


https://github.com/llvm/llvm-project/pull/115450


More information about the llvm-commits mailing list