[llvm] 9166cd2 - [RISCV] DAG combine (mul (add x, 1), y) -> vmadd (#71495)

via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 20 21:43:39 PST 2023


Author: Liao Chunyu
Date: 2023-11-21T13:43:34+08:00
New Revision: 9166cd2a71ba81d2c9c68f80371b1397943a813b

URL: https://github.com/llvm/llvm-project/commit/9166cd2a71ba81d2c9c68f80371b1397943a813b
DIFF: https://github.com/llvm/llvm-project/commit/9166cd2a71ba81d2c9c68f80371b1397943a813b.diff

LOG: [RISCV] DAG combine (mul (add x, 1), y) -> vmadd (#71495)

vmadd: (mul (add x, 1), y) -> (add (mul x, y), y)
           (mul x, (add y, 1)) -> (add x, (mul x, y))
    vnmsub: (mul (sub 1, x), y) -> (sub y, (mul x, y))
            (mul x, (sub 1, y)) -> (sub x, (mul x, y))
    
    Comparison with gcc:
    vmadd: https://gcc.godbolt.org/z/xjePx87Y7
    vnmsub: https://gcc.godbolt.org/z/b17zG7nT1

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
    llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index c054f53e62849e0..26190337eb3bd1b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1390,7 +1390,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                          ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA, ISD::SRL,
                          ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR,
                          ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
-                         ISD::EXPERIMENTAL_VP_REVERSE});
+                         ISD::EXPERIMENTAL_VP_REVERSE, ISD::MUL});
   if (Subtarget.hasVendorXTHeadMemPair())
     setTargetDAGCombine({ISD::LOAD, ISD::STORE});
   if (Subtarget.useRVVForFixedLengthVectors())
@@ -12423,6 +12423,47 @@ static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
   return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
 }
 
+static SDValue performMULCombine(SDNode *N, SelectionDAG &DAG) {
+  EVT VT = N->getValueType(0);
+  if (!VT.isVector())
+    return SDValue();
+
+  SDLoc DL(N);
+  SDValue N0 = N->getOperand(0);
+  SDValue N1 = N->getOperand(1);
+  SDValue MulOper;
+  unsigned AddSubOpc;
+
+  // vmadd: (mul (add x, 1), y) -> (add (mul x, y), y)
+  //        (mul x, add (y, 1)) -> (add x, (mul x, y))
+  // vnmsub: (mul (sub 1, x), y) -> (sub y, (mul x, y))
+  //         (mul x, (sub 1, y)) -> (sub x, (mul x, y))
+  auto IsAddSubWith1 = [&](SDValue V) -> bool {
+    AddSubOpc = V->getOpcode();
+    if ((AddSubOpc == ISD::ADD || AddSubOpc == ISD::SUB) && V->hasOneUse()) {
+      SDValue Opnd = V->getOperand(1);
+      MulOper = V->getOperand(0);
+      if (AddSubOpc == ISD::SUB)
+        std::swap(Opnd, MulOper);
+      if (isOneOrOneSplat(Opnd))
+        return true;
+    }
+    return false;
+  };
+
+  if (IsAddSubWith1(N0)) {
+    SDValue MulVal = DAG.getNode(ISD::MUL, DL, VT, N1, MulOper);
+    return DAG.getNode(AddSubOpc, DL, VT, N1, MulVal);
+  }
+
+  if (IsAddSubWith1(N1)) {
+    SDValue MulVal = DAG.getNode(ISD::MUL, DL, VT, N0, MulOper);
+    return DAG.getNode(AddSubOpc, DL, VT, N0, MulVal);
+  }
+
+  return SDValue();
+}
+
 /// According to the property that indexed load/store instructions zero-extend
 /// their indices, try to narrow the type of index operand.
 static bool narrowIndex(SDValue &N, ISD::MemIndexType IndexType, SelectionDAG &DAG) {
@@ -14613,6 +14654,8 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
     return performORCombine(N, DCI, Subtarget);
   case ISD::XOR:
     return performXORCombine(N, DAG, Subtarget);
+  case ISD::MUL:
+    return performMULCombine(N, DAG);
   case ISD::FADD:
   case ISD::UMAX:
   case ISD::UMIN:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index c95d144a970895c..7a4620a53825843 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -8237,3 +8237,63 @@ define void @mulhs_vx_v2i64(ptr %x) {
   store <2 x i64> %b, ptr %x
   ret void
 }
+
+define void @madd_vv_v2i64(ptr %x, <2 x i64> %y) {
+; CHECK-LABEL: madd_vv_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vle64.v v9, (a0)
+; CHECK-NEXT:    vmadd.vv v9, v8, v8
+; CHECK-NEXT:    vse64.v v9, (a0)
+; CHECK-NEXT:    ret
+  %a = load <2 x i64>, ptr %x
+  %b = add <2 x i64> %a, <i64 1, i64 1>
+  %c = mul <2 x i64> %b, %y
+  store <2 x i64> %c, ptr %x
+  ret void
+}
+
+define void @madd_vv_v2i64_2(ptr %x, <2 x i64> %y) {
+; CHECK-LABEL: madd_vv_v2i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vle64.v v9, (a0)
+; CHECK-NEXT:    vmadd.vv v9, v8, v8
+; CHECK-NEXT:    vse64.v v9, (a0)
+; CHECK-NEXT:    ret
+  %a = load <2 x i64>, ptr %x
+  %b = add <2 x i64> %a, <i64 1, i64 1>
+  %c = mul <2 x i64> %y, %b
+  store <2 x i64> %c, ptr %x
+  ret void
+}
+
+define void @msub_vv_v2i64(ptr %x, <2 x i64> %y) {
+; CHECK-LABEL: msub_vv_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vle64.v v9, (a0)
+; CHECK-NEXT:    vnmsub.vv v9, v8, v8
+; CHECK-NEXT:    vse64.v v9, (a0)
+; CHECK-NEXT:    ret
+  %a = load <2 x i64>, ptr %x
+  %b = sub <2 x i64> <i64 1, i64 1>, %a
+  %c = mul <2 x i64> %b, %y
+  store <2 x i64> %c, ptr %x
+  ret void
+}
+
+define void @msub_vv_v2i64_2(ptr %x, <2 x i64> %y) {
+; CHECK-LABEL: msub_vv_v2i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vle64.v v9, (a0)
+; CHECK-NEXT:    vnmsub.vv v9, v8, v8
+; CHECK-NEXT:    vse64.v v9, (a0)
+; CHECK-NEXT:    ret
+  %a = load <2 x i64>, ptr %x
+  %b = sub <2 x i64> <i64 1, i64 1>, %a
+  %c = mul <2 x i64> %y, %b
+  store <2 x i64> %c, ptr %x
+  ret void
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll
index 04f83237745ed81..07536407ace8d98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-sdnode.ll
@@ -590,3 +590,25 @@ define <vscale x 8 x i64> @vmadd_vx_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
   %y = add <vscale x 8 x i64> %x, %va
   ret <vscale x 8 x i64> %y
 }
+
+define <vscale x 4 x i32> @combine_mul_add_imm1(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: combine_mul_add_imm1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmadd.vv v8, v10, v10
+; CHECK-NEXT:    ret
+  %x = add <vscale x 4 x i32> %a, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  %y = mul <vscale x 4 x i32> %x, %b
+  ret <vscale x 4 x i32> %y
+}
+
+define <vscale x 4 x i32> @combine_mul_add_imm1_2(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: combine_mul_add_imm1_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmadd.vv v8, v10, v10
+; CHECK-NEXT:    ret
+  %x = add <vscale x 4 x i32> %a, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  %y = mul <vscale x 4 x i32> %b, %x
+  ret <vscale x 4 x i32> %y
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll
index fbca9115b89a72d..a3c896ecca22a64 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-sdnode.ll
@@ -590,3 +590,25 @@ define <vscale x 8 x i64> @vnmsub_vx_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
   %y = sub <vscale x 8 x i64> %va, %x
   ret <vscale x 8 x i64> %y
 }
+
+define <vscale x 4 x i32> @combine_mul_sub_imm1(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: combine_mul_sub_imm1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnmsub.vv v8, v10, v10
+; CHECK-NEXT:    ret
+  %x = sub <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), %a
+  %y = mul <vscale x 4 x i32> %x, %b
+  ret <vscale x 4 x i32> %y
+}
+
+define <vscale x 4 x i32> @combine_mul_sub_imm1_2(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: combine_mul_sub_imm1_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnmsub.vv v8, v10, v10
+; CHECK-NEXT:    ret
+  %x = sub <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), %a
+  %y = mul <vscale x 4 x i32> %b, %x
+  ret <vscale x 4 x i32> %y
+}


        


More information about the llvm-commits mailing list