[llvm-branch-commits] [RISCV] Combine vwaddu_wv+vabd(u) to vwabda(u) (PR #184603)

via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Wed Mar 4 04:43:05 PST 2026


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-backend-risc-v

Author: Pengcheng Wang (wangpc-pp)

<details>
<summary>Changes</summary>

Note that `vwabda(u)` only supports SEW=8/16, so the combine is restricted to widened results with i16/i32 element types.


---
Full diff: https://github.com/llvm/llvm-project/pull/184603.diff


2 Files Affected:

- (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+64-1) 
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll (+10-14) 


``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 7e62957529fea..e1e46aadfa3c1 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -19128,6 +19128,66 @@ static SDValue performVWABDACombine(SDNode *N, SelectionDAG &DAG,
   return Result;
 }
 
+// Fold a widening add whose narrow operand is an absolute difference into a
+// widening abd-accumulate:
+//   vwaddu_wv C (vabd A B)  -> vwabda  A B C
+//   vwaddu_wv C (vabdu A B) -> vwabdau A B C
+static SDValue performVWABDACombine_WV(SDNode *N, SelectionDAG &DAG,
+                                       const RISCVSubtarget &Subtarget) {
+  if (!Subtarget.hasStdExtZvabd())
+    return SDValue();
+
+  MVT VT = N->getSimpleValueType(0);
+  // vwabda(u) only supports SEW=8/16; N's result is already widened, so the
+  // acceptable result element types here are i16/i32.
+  if (VT.getVectorElementType() != MVT::i16 &&
+      VT.getVectorElementType() != MVT::i32)
+    return SDValue();
+
+  SDValue Op0 = N->getOperand(0);
+  SDValue Op1 = N->getOperand(1);
+  SDValue Passthru = N->getOperand(2);
+  if (!Passthru->isUndef())
+    return SDValue();
+
+  SDValue Mask = N->getOperand(3);
+  SDValue VL = N->getOperand(4);
+  bool IsSigned = false;
+  bool HasExt = false;
+  MVT ExtVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
+  // Match Op as an absolute difference, possibly hidden behind a zero-extend
+  // (vzext (vabd(u) A B)). Returns the abd node, or SDValue() on failure.
+  auto MatchABD = [&](SDValue Op) -> SDValue {
+    unsigned Opc = Op.getOpcode();
+    if (Opc == ISD::ABDS || Opc == ISD::ABDU) {
+      IsSigned = Opc == ISD::ABDS;
+      return Op;
+    }
+    if (Opc == RISCVISD::VZEXT_VL) {
+      SDValue Src = Op->getOperand(0);
+      unsigned SrcOpc = Src->getOpcode();
+      if (SrcOpc == RISCVISD::ABDS_VL || SrcOpc == RISCVISD::ABDU_VL) {
+        IsSigned = SrcOpc == RISCVISD::ABDS_VL;
+        HasExt = true;
+        ExtVT = Op->getSimpleValueType(0);
+        return Src;
+      }
+    }
+    return SDValue();
+  };
+
+  // Track which operand of N matched so the *other* operand becomes the
+  // accumulator. Comparing Diff against Op0 is not sufficient: on the
+  // extended path Diff is the node underneath the vzext, not Op0/Op1 itself,
+  // which would otherwise mis-select the matched operand as the accumulator.
+  SDValue MatchedOp = Op0;
+  SDValue Diff = MatchABD(Op0);
+  if (!Diff) {
+    MatchedOp = Op1;
+    Diff = MatchABD(Op1);
+  }
+  if (!Diff)
+    return SDValue();
+  SDValue Acc = MatchedOp == Op0 ? Op1 : Op0;
+
+  SDLoc DL(N);
+  SDValue DiffA = Diff.getOperand(0);
+  SDValue DiffB = Diff.getOperand(1);
+  if (HasExt) {
+    // The abd was computed at a narrower SEW and then zero-extended. Widening
+    // the *operands* preserves the value only if the extension kind matches
+    // the abd's signedness:
+    //   zext(abds(a, b)) == abds(sext(a), sext(b))
+    //   zext(abdu(a, b)) == abdu(zext(a), zext(b))
+    // Using zext for the signed case would be incorrect, e.g. for i8 inputs
+    // -128 and 127: zext(abds) = 255, but abdu(zext) = |128 - 127| = 1.
+    unsigned ExtOpc = IsSigned ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
+    DiffA = DAG.getNode(ExtOpc, DL, ExtVT, DiffA, Mask, VL);
+    DiffB = DAG.getNode(ExtOpc, DL, ExtVT, DiffB, Mask, VL);
+  }
+  return DAG.getNode(IsSigned ? RISCVISD::VWABDA_VL : RISCVISD::VWABDAU_VL, DL,
+                     VT, DiffA, DiffB, Acc, Mask, VL);
+}
+
 static SDValue performVWADDSUBW_VLCombine(SDNode *N,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           const RISCVSubtarget &Subtarget) {
@@ -22177,8 +22237,11 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     return combineToVWMACC(N, DAG, Subtarget);
   case RISCVISD::VWADDU_VL:
     return performVWABDACombine(N, DAG, Subtarget);
-  case RISCVISD::VWADD_W_VL:
   case RISCVISD::VWADDU_W_VL:
+    if (SDValue V = performVWABDACombine_WV(N, DAG, Subtarget))
+      return V;
+    [[fallthrough]];
+  case RISCVISD::VWADD_W_VL:
   case RISCVISD::VWSUB_W_VL:
   case RISCVISD::VWSUBU_W_VL:
     return performVWADDSUBW_VLCombine(N, DCI, Subtarget);
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
index dcb8b31c682b3..4450fd9a649b5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
@@ -199,8 +199,8 @@ define signext i32 @sad_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stridea
 ; ZVABD-NEXT:    vle8.v v15, (a1)
 ; ZVABD-NEXT:    add a0, a0, a2
 ; ZVABD-NEXT:    add a1, a1, a3
-; ZVABD-NEXT:    vle8.v v16, (a0)
-; ZVABD-NEXT:    vle8.v v17, (a1)
+; ZVABD-NEXT:    vle8.v v20, (a0)
+; ZVABD-NEXT:    vle8.v v18, (a1)
 ; ZVABD-NEXT:    vabdu.vv v8, v8, v9
 ; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVABD-NEXT:    vzext.vf2 v12, v8
@@ -209,12 +209,10 @@ define signext i32 @sad_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stridea
 ; ZVABD-NEXT:    vabdu.vv v8, v14, v15
 ; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVABD-NEXT:    vzext.vf2 v14, v8
-; ZVABD-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; ZVABD-NEXT:    vabdu.vv v16, v16, v17
-; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v16, v18
+; ZVABD-NEXT:    vzext.vf2 v18, v20
 ; ZVABD-NEXT:    vwaddu.vv v8, v14, v12
-; ZVABD-NEXT:    vzext.vf2 v12, v16
-; ZVABD-NEXT:    vwaddu.wv v8, v8, v12
+; ZVABD-NEXT:    vwabdau.vv v8, v18, v16
 ; ZVABD-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVABD-NEXT:    vmv.s.x v12, zero
 ; ZVABD-NEXT:    vredsum.vs v8, v8, v12
@@ -322,8 +320,8 @@ define signext i32 @sadu_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stride
 ; ZVABD-NEXT:    vle8.v v15, (a1)
 ; ZVABD-NEXT:    add a0, a0, a2
 ; ZVABD-NEXT:    add a1, a1, a3
-; ZVABD-NEXT:    vle8.v v16, (a0)
-; ZVABD-NEXT:    vle8.v v17, (a1)
+; ZVABD-NEXT:    vle8.v v20, (a0)
+; ZVABD-NEXT:    vle8.v v18, (a1)
 ; ZVABD-NEXT:    vabd.vv v8, v8, v9
 ; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVABD-NEXT:    vzext.vf2 v12, v8
@@ -332,12 +330,10 @@ define signext i32 @sadu_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stride
 ; ZVABD-NEXT:    vabd.vv v8, v14, v15
 ; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVABD-NEXT:    vzext.vf2 v14, v8
-; ZVABD-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
-; ZVABD-NEXT:    vabd.vv v16, v16, v17
-; ZVABD-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVABD-NEXT:    vzext.vf2 v16, v18
+; ZVABD-NEXT:    vzext.vf2 v18, v20
 ; ZVABD-NEXT:    vwaddu.vv v8, v14, v12
-; ZVABD-NEXT:    vzext.vf2 v12, v16
-; ZVABD-NEXT:    vwaddu.wv v8, v8, v12
+; ZVABD-NEXT:    vwabdau.vv v8, v18, v16
 ; ZVABD-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
 ; ZVABD-NEXT:    vmv.s.x v12, zero
 ; ZVABD-NEXT:    vredsum.vs v8, v8, v12

``````````

</details>


https://github.com/llvm/llvm-project/pull/184603


More information about the llvm-branch-commits mailing list