[llvm] [RISCV][VLOPT] Add support for widening integer mul-add instructions (PR #112219)

Michael Maitland via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 6 11:57:24 PST 2024


https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/112219

From 4cb75a2eb618986a88923d8aee55379ac5ec432f Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Mon, 14 Oct 2024 07:46:51 -0700
Subject: [PATCH 1/2] [RISCV][VLOPT] Add support for 11.14 widening integer
 mul-add instructions

---
 llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp   |  35 +++--
 llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll | 147 ++++++++++++++++---
 llvm/test/CodeGen/RISCV/rvv/vl-opt.ll        |  14 ++
 3 files changed, 164 insertions(+), 32 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index ee494c46815112..2869891014978d 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -404,7 +404,19 @@ static OperandInfo getOperandInfo(const MachineInstr &MI,
   case RISCV::VWMULSU_VV:
   case RISCV::VWMULSU_VX:
   case RISCV::VWMULU_VV:
-  case RISCV::VWMULU_VX: {
+  case RISCV::VWMULU_VX:
+  // Vector Widening Integer Multiply-Add Instructions
+  // Destination EEW=2*SEW and EMUL=2*LMUL. Source EEW=SEW and EMUL=LMUL.
+  // A SEW-bit*SEW-bit multiply of the sources forms a 2*SEW-bit value, which
+  // is then added to the 2*SEW-bit Dest. These instructions never have a
+  // passthru operand.
+  case RISCV::VWMACCU_VV:
+  case RISCV::VWMACCU_VX:
+  case RISCV::VWMACC_VV:
+  case RISCV::VWMACC_VX:
+  case RISCV::VWMACCSU_VV:
+  case RISCV::VWMACCSU_VX:
+  case RISCV::VWMACCUS_VX: {
     unsigned Log2EEW = IsMODef ? MILog2SEW + 1 : MILog2SEW;
     RISCVII::VLMUL EMUL =
         IsMODef ? RISCVVType::twoTimesVLMUL(MIVLMul) : MIVLMul;
@@ -419,18 +431,7 @@ static OperandInfo getOperandInfo(const MachineInstr &MI,
   case RISCV::VWADD_WV:
   case RISCV::VWADD_WX:
   case RISCV::VWSUB_WV:
-  case RISCV::VWSUB_WX:
-  // Vector Widening Integer Multiply-Add Instructions
-  // Destination EEW=2*SEW and EMUL=2*LMUL. Source EEW=SEW and EMUL=LMUL.
-  // Even though the add is a 2*SEW addition, the operands of the add are the
-  // Dest which is 2*SEW and the result of the multiply which is 2*SEW.
-  case RISCV::VWMACCU_VV:
-  case RISCV::VWMACCU_VX:
-  case RISCV::VWMACC_VV:
-  case RISCV::VWMACC_VX:
-  case RISCV::VWMACCSU_VV:
-  case RISCV::VWMACCSU_VX:
-  case RISCV::VWMACCUS_VX: {
+  case RISCV::VWSUB_WX: {
     bool IsOp1 = HasPassthru ? MO.getOperandNo() == 2 : MO.getOperandNo() == 1;
     bool TwoTimes = IsMODef || IsOp1;
     unsigned Log2EEW = TwoTimes ? MILog2SEW + 1 : MILog2SEW;
@@ -572,9 +573,13 @@ static bool isSupportedInstr(const MachineInstr &MI) {
   // Vector Single-Width Integer Multiply-Add Instructions
   // FIXME: Add support
   // Vector Widening Integer Multiply-Add Instructions
-  // FIXME: Add support
-  case RISCV::VWMACC_VX:
+  case RISCV::VWMACCU_VV:
   case RISCV::VWMACCU_VX:
+  case RISCV::VWMACC_VV:
+  case RISCV::VWMACC_VX:
+  case RISCV::VWMACCSU_VV:
+  case RISCV::VWMACCSU_VX:
+  case RISCV::VWMACCUS_VX:
   // Vector Integer Merge Instructions
   // FIXME: Add support
   // Vector Integer Move Instructions
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index 11f603b56b6e56..6d9987934fdbdc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -1248,44 +1248,157 @@ define <vscale x 4 x i64> @vwmulu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
   ret <vscale x 4 x i64> %2
 }
 
-define <vscale x 4 x i32> @vwmacc_vx(<vscale x 4 x i16> %a, i16 %b, iXLen %vl) {
+define <vscale x 4 x i64> @vwmacc_vv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i64> %d, iXLen %vl) {
+; NOVLOPT-LABEL: vwmacc_vv:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
+; NOVLOPT-NEXT:    vwmacc.vv v8, v10, v11
+; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; NOVLOPT-NEXT:    vwmacc.vv v12, v8, v8
+; NOVLOPT-NEXT:    vmv4r.v v8, v12
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vwmacc_vv:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; VLOPT-NEXT:    vwmacc.vv v8, v10, v11
+; VLOPT-NEXT:    vsetvli zero, zero, e32, m2, tu, ma
+; VLOPT-NEXT:    vwmacc.vv v12, v8, v8
+; VLOPT-NEXT:    vmv4r.v v8, v12
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0)
+  %2 = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(<vscale x 4 x i64> %d, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl, iXLen 0)
+  ret <vscale x 4 x i64> %2
+}
+
+define <vscale x 4 x i32> @vwmacc_vx(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, iXLen %vl) {
 ; NOVLOPT-LABEL: vwmacc_vx:
 ; NOVLOPT:       # %bb.0:
-; NOVLOPT-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
-; NOVLOPT-NEXT:    vwmacc.vx v10, a0, v8
+; NOVLOPT-NEXT:    vsetvli a2, zero, e16, m1, tu, ma
+; NOVLOPT-NEXT:    vwmacc.vx v8, a0, v10
 ; NOVLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT:    vadd.vv v8, v10, v10
+; NOVLOPT-NEXT:    vadd.vv v8, v8, v8
 ; NOVLOPT-NEXT:    ret
 ;
 ; VLOPT-LABEL: vwmacc_vx:
 ; VLOPT:       # %bb.0:
-; VLOPT-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; VLOPT-NEXT:    vwmacc.vx v10, a0, v8
+; VLOPT-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; VLOPT-NEXT:    vwmacc.vx v8, a0, v10
 ; VLOPT-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; VLOPT-NEXT:    vadd.vv v8, v10, v10
+; VLOPT-NEXT:    vadd.vv v8, v8, v8
 ; VLOPT-NEXT:    ret
-  %1 = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(<vscale x 4 x i32> poison, i16 %b, <vscale x 4 x i16> %a, iXLen -1, iXLen 0)
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0)
   %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl)
   ret <vscale x 4 x i32> %2
 }
 
-define <vscale x 4 x i32> @vwmaccu_vx(<vscale x 4 x i16> %a, i16 %b, iXLen %vl) {
+define <vscale x 4 x i64> @vwmaccu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i64> %d, iXLen %vl) {
+; NOVLOPT-LABEL: vwmaccu_vv:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vmv2r.v v16, v8
+; NOVLOPT-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
+; NOVLOPT-NEXT:    vwmaccu.vv v16, v10, v11
+; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; NOVLOPT-NEXT:    vwmaccu.vv v12, v8, v16
+; NOVLOPT-NEXT:    vmv4r.v v8, v12
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vwmaccu_vv:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vmv2r.v v16, v8
+; VLOPT-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; VLOPT-NEXT:    vwmaccu.vv v16, v10, v11
+; VLOPT-NEXT:    vsetvli zero, zero, e32, m2, tu, ma
+; VLOPT-NEXT:    vwmaccu.vv v12, v8, v16
+; VLOPT-NEXT:    vmv4r.v v8, v12
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0)
+  %2 = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(<vscale x 4 x i64> %d, <vscale x 4 x i32> %a, <vscale x 4 x i32> %1, iXLen %vl, iXLen 0)
+  ret <vscale x 4 x i64> %2
+}
+
+define <vscale x 4 x i64> @vwmaccu_vx(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, <vscale x 4 x i64> %d, i32 %e, iXLen %vl) {
 ; NOVLOPT-LABEL: vwmaccu_vx:
 ; NOVLOPT:       # %bb.0:
-; NOVLOPT-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
-; NOVLOPT-NEXT:    vwmaccu.vx v10, a0, v8
-; NOVLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; NOVLOPT-NEXT:    vadd.vv v8, v10, v10
+; NOVLOPT-NEXT:    vsetvli a3, zero, e16, m1, tu, ma
+; NOVLOPT-NEXT:    vwmaccu.vx v8, a0, v10
+; NOVLOPT-NEXT:    vsetvli zero, a2, e32, m2, tu, ma
+; NOVLOPT-NEXT:    vwmaccu.vx v12, a1, v8
+; NOVLOPT-NEXT:    vmv4r.v v8, v12
 ; NOVLOPT-NEXT:    ret
 ;
 ; VLOPT-LABEL: vwmaccu_vx:
 ; VLOPT:       # %bb.0:
-; VLOPT-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; VLOPT-NEXT:    vwmaccu.vx v10, a0, v8
+; VLOPT-NEXT:    vsetvli zero, a2, e16, m1, tu, ma
+; VLOPT-NEXT:    vwmaccu.vx v8, a0, v10
+; VLOPT-NEXT:    vsetvli zero, zero, e32, m2, tu, ma
+; VLOPT-NEXT:    vwmaccu.vx v12, a1, v8
+; VLOPT-NEXT:    vmv4r.v v8, v12
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0)
+  %2 = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(<vscale x 4 x i64> %d, i32 %e, <vscale x 4 x i32> %1, iXLen %vl, iXLen 0)
+  ret <vscale x 4 x i64> %2
+}
+
+define <vscale x 4 x i32> @vwmaccsu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vwmaccsu_vv:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
+; NOVLOPT-NEXT:    vwmaccsu.vv v8, v10, v11
+; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vadd.vv v8, v8, v8
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vwmaccsu_vv:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; VLOPT-NEXT:    vwmaccsu.vv v8, v10, v11
+; VLOPT-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; VLOPT-NEXT:    vadd.vv v8, v8, v8
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x i32> @vwmaccsu_vx(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vwmaccsu_vx:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a2, zero, e16, m1, tu, ma
+; NOVLOPT-NEXT:    vwmaccsu.vx v8, a0, v10
+; NOVLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vadd.vv v8, v8, v8
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vwmaccsu_vx:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; VLOPT-NEXT:    vwmaccsu.vx v8, a0, v10
 ; VLOPT-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; VLOPT-NEXT:    vadd.vv v8, v10, v10
+; VLOPT-NEXT:    vadd.vv v8, v8, v8
+; VLOPT-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl)
+  ret <vscale x 4 x i32> %2
+}
+
+define <vscale x 4 x i32> @vwmaccus_vx(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, iXLen %vl) {
+; NOVLOPT-LABEL: vwmaccus_vx:
+; NOVLOPT:       # %bb.0:
+; NOVLOPT-NEXT:    vsetvli a2, zero, e16, m1, tu, ma
+; NOVLOPT-NEXT:    vwmaccus.vx v8, a0, v10
+; NOVLOPT-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vadd.vv v8, v8, v8
+; NOVLOPT-NEXT:    ret
+;
+; VLOPT-LABEL: vwmaccus_vx:
+; VLOPT:       # %bb.0:
+; VLOPT-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
+; VLOPT-NEXT:    vwmaccus.vx v8, a0, v10
+; VLOPT-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; VLOPT-NEXT:    vadd.vv v8, v8, v8
 ; VLOPT-NEXT:    ret
-  %1 = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(<vscale x 4 x i32> poison, i16 %b, <vscale x 4 x i16> %a, iXLen -1, iXLen 0)
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0)
   %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl)
   ret <vscale x 4 x i32> %2
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
index 1a1472fcfc66f5..0410ca34f7ba11 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.ll
@@ -136,3 +136,17 @@ define <vscale x 4 x i32> @different_imm_vl_with_tu(<vscale x 4 x i32> %passthru
   %w = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %v, <vscale x 4 x i32> %a,iXLen 4)
   ret <vscale x 4 x i32> %w
 }
+
+define <vscale x 4 x i32> @dont_optimize_tied_def(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen %vl) {
+; CHECK-LABEL: dont_optimize_tied_def:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
+; CHECK-NEXT:    vwmacc.vv v8, v10, v11
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vwmacc.vv v8, v10, v11
+; CHECK-NEXT:    ret
+  %1 = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0)
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(<vscale x 4 x i32> %1, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen %vl, iXLen 0)
+  ret <vscale x 4 x i32> %2
+}
+
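
Illustration only, not part of the patch: a minimal standalone C++ sketch of the operand-info rule the first patch encodes for the widening integer multiply-add instructions (vwmacc, vwmaccu, vwmaccsu, vwmaccus). The destination operand gets EEW = 2*SEW (log2 form: MILog2SEW + 1) and EMUL = 2*LMUL, while every source operand keeps the instruction's SEW and LMUL. The names below (OperandInfo fields, wideningMacOperandInfo) are invented for this sketch and are not LLVM APIs; the real pass computes the doubled EMUL with RISCVVType::twoTimesVLMUL rather than the plain log2 arithmetic assumed here.

#include <cassert>
#include <cstdio>

struct OperandInfo {
  unsigned Log2EEW; // log2 of the effective element width in bits
  int Log2EMUL;     // log2 of the effective LMUL (negative => fractional LMUL)
};

// Sketch of the rule: only the definition (IsMODef) is widened; sources keep
// the instruction's SEW/LMUL.
static OperandInfo wideningMacOperandInfo(unsigned MILog2SEW, int MILog2LMUL,
                                          bool IsMODef) {
  unsigned Log2EEW = IsMODef ? MILog2SEW + 1 : MILog2SEW;
  int Log2EMUL = IsMODef ? MILog2LMUL + 1 : MILog2LMUL;
  return {Log2EEW, Log2EMUL};
}

int main() {
  // vwmacc.vv at SEW=16 (log2 = 4), LMUL=1 (log2 = 0), as in the tests:
  OperandInfo Dest = wideningMacOperandInfo(4, 0, /*IsMODef=*/true);
  OperandInfo Src = wideningMacOperandInfo(4, 0, /*IsMODef=*/false);
  assert(Dest.Log2EEW == 5 && Dest.Log2EMUL == 1); // EEW=32, EMUL=2 (e32/m2)
  assert(Src.Log2EEW == 4 && Src.Log2EMUL == 0);   // EEW=16, EMUL=1 (e16/m1)
  std::printf("dest: e%u m%d  src: e%u m%d\n", 1u << Dest.Log2EEW,
              1 << Dest.Log2EMUL, 1u << Src.Log2EEW, 1 << Src.Log2EMUL);
}

This matches the vsetvli pairs in the tests above: the vwmacc itself runs at e16/m1 while the consumer of its 2*SEW result runs at e32/m2.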

From cbe89ca2f79cfd1ebf4d8d572d754520353d13a1 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Fri, 6 Dec 2024 11:57:06 -0800
Subject: [PATCH 2/2] fixup! use vadd in tests

---
 llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll | 56 +++++++++-----------
 1 file changed, 24 insertions(+), 32 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index 6d9987934fdbdc..39cc90b812f99e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -1248,27 +1248,25 @@ define <vscale x 4 x i64> @vwmulu_vx(<vscale x 4 x i32> %a, i32 %b, iXLen %vl) {
   ret <vscale x 4 x i64> %2
 }
 
-define <vscale x 4 x i64> @vwmacc_vv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i64> %d, iXLen %vl) {
+define <vscale x 4 x i32> @vwmacc_vv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i32> %d, iXLen %vl) {
 ; NOVLOPT-LABEL: vwmacc_vv:
 ; NOVLOPT:       # %bb.0:
 ; NOVLOPT-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
 ; NOVLOPT-NEXT:    vwmacc.vv v8, v10, v11
-; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
-; NOVLOPT-NEXT:    vwmacc.vv v12, v8, v8
-; NOVLOPT-NEXT:    vmv4r.v v8, v12
+; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vadd.vv v8, v8, v12
 ; NOVLOPT-NEXT:    ret
 ;
 ; VLOPT-LABEL: vwmacc_vv:
 ; VLOPT:       # %bb.0:
 ; VLOPT-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; VLOPT-NEXT:    vwmacc.vv v8, v10, v11
-; VLOPT-NEXT:    vsetvli zero, zero, e32, m2, tu, ma
-; VLOPT-NEXT:    vwmacc.vv v12, v8, v8
-; VLOPT-NEXT:    vmv4r.v v8, v12
+; VLOPT-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; VLOPT-NEXT:    vadd.vv v8, v8, v12
 ; VLOPT-NEXT:    ret
   %1 = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0)
-  %2 = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(<vscale x 4 x i64> %d, <vscale x 4 x i32> %1, <vscale x 4 x i32> %1, iXLen %vl, iXLen 0)
-  ret <vscale x 4 x i64> %2
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %d, iXLen %vl)
+  ret <vscale x 4 x i32> %2
 }
 
 define <vscale x 4 x i32> @vwmacc_vx(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, iXLen %vl) {
@@ -1292,52 +1290,46 @@ define <vscale x 4 x i32> @vwmacc_vx(<vscale x 4 x i32> %a, i16 %b, <vscale x 4
   ret <vscale x 4 x i32> %2
 }
 
-define <vscale x 4 x i64> @vwmaccu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i64> %d, iXLen %vl) {
+define <vscale x 4 x i32> @vwmaccu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, <vscale x 4 x i32> %d, iXLen %vl) {
 ; NOVLOPT-LABEL: vwmaccu_vv:
 ; NOVLOPT:       # %bb.0:
-; NOVLOPT-NEXT:    vmv2r.v v16, v8
 ; NOVLOPT-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
-; NOVLOPT-NEXT:    vwmaccu.vv v16, v10, v11
-; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
-; NOVLOPT-NEXT:    vwmaccu.vv v12, v8, v16
-; NOVLOPT-NEXT:    vmv4r.v v8, v12
+; NOVLOPT-NEXT:    vwmaccu.vv v8, v10, v11
+; NOVLOPT-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vadd.vv v8, v8, v12
 ; NOVLOPT-NEXT:    ret
 ;
 ; VLOPT-LABEL: vwmaccu_vv:
 ; VLOPT:       # %bb.0:
-; VLOPT-NEXT:    vmv2r.v v16, v8
 ; VLOPT-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; VLOPT-NEXT:    vwmaccu.vv v16, v10, v11
-; VLOPT-NEXT:    vsetvli zero, zero, e32, m2, tu, ma
-; VLOPT-NEXT:    vwmaccu.vv v12, v8, v16
-; VLOPT-NEXT:    vmv4r.v v8, v12
+; VLOPT-NEXT:    vwmaccu.vv v8, v10, v11
+; VLOPT-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; VLOPT-NEXT:    vadd.vv v8, v8, v12
 ; VLOPT-NEXT:    ret
   %1 = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0)
-  %2 = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(<vscale x 4 x i64> %d, <vscale x 4 x i32> %a, <vscale x 4 x i32> %1, iXLen %vl, iXLen 0)
-  ret <vscale x 4 x i64> %2
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %d, iXLen %vl)
+  ret <vscale x 4 x i32> %2
 }
 
-define <vscale x 4 x i64> @vwmaccu_vx(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, <vscale x 4 x i64> %d, i32 %e, iXLen %vl) {
+define <vscale x 4 x i32> @vwmaccu_vx(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, <vscale x 4 x i32> %d, i32 %e, iXLen %vl) {
 ; NOVLOPT-LABEL: vwmaccu_vx:
 ; NOVLOPT:       # %bb.0:
-; NOVLOPT-NEXT:    vsetvli a3, zero, e16, m1, tu, ma
+; NOVLOPT-NEXT:    vsetvli a1, zero, e16, m1, tu, ma
 ; NOVLOPT-NEXT:    vwmaccu.vx v8, a0, v10
-; NOVLOPT-NEXT:    vsetvli zero, a2, e32, m2, tu, ma
-; NOVLOPT-NEXT:    vwmaccu.vx v12, a1, v8
-; NOVLOPT-NEXT:    vmv4r.v v8, v12
+; NOVLOPT-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
+; NOVLOPT-NEXT:    vadd.vv v8, v8, v12
 ; NOVLOPT-NEXT:    ret
 ;
 ; VLOPT-LABEL: vwmaccu_vx:
 ; VLOPT:       # %bb.0:
 ; VLOPT-NEXT:    vsetvli zero, a2, e16, m1, tu, ma
 ; VLOPT-NEXT:    vwmaccu.vx v8, a0, v10
-; VLOPT-NEXT:    vsetvli zero, zero, e32, m2, tu, ma
-; VLOPT-NEXT:    vwmaccu.vx v12, a1, v8
-; VLOPT-NEXT:    vmv4r.v v8, v12
+; VLOPT-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; VLOPT-NEXT:    vadd.vv v8, v8, v12
 ; VLOPT-NEXT:    ret
   %1 = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(<vscale x 4 x i32> %a, i16 %b, <vscale x 4 x i16> %c, iXLen -1, iXLen 0)
-  %2 = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(<vscale x 4 x i64> %d, i32 %e, <vscale x 4 x i32> %1, iXLen %vl, iXLen 0)
-  ret <vscale x 4 x i64> %2
+  %2 = call <vscale x 4 x i32> @llvm.riscv.vadd.nxv4i32.nxv4i32(<vscale x 4 x i32> poison, <vscale x 4 x i32> %1, <vscale x 4 x i32> %d, iXLen %vl)
+  ret <vscale x 4 x i32> %2
 }
 
 define <vscale x 4 x i32> @vwmaccsu_vv(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c, iXLen %vl) {


