[llvm-branch-commits] [llvm] 4268783 - [RISCV] Add intrinsics for vwmacc[u|su|us] instructions
ShihPo Hung via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Dec 22 18:28:20 PST 2020
Author: ShihPo Hung
Date: 2020-12-22T18:17:39-08:00
New Revision: 42687839980308bbed8fe909b9810a0fb48f9813
URL: https://github.com/llvm/llvm-project/commit/42687839980308bbed8fe909b9810a0fb48f9813
DIFF: https://github.com/llvm/llvm-project/commit/42687839980308bbed8fe909b9810a0fb48f9813.diff
LOG: [RISCV] Add intrinsics for vwmacc[u|su|us] instructions
This patch defines the vwmacc[u|su|us] intrinsics and lowers them to V instructions.
We worked with @rogfer01 from BSC to produce this patch.
Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: ShihPo Hung <shihpo.hung at sifive.com>
Differential Revision: https://reviews.llvm.org/D93675
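For reference, a minimal usage sketch following the shape of the tests added below: the unmasked form takes the wide accumulator, the two narrow sources, and the VL operand; the _mask variant additionally takes a mask vector before VL. The function name @example here is purely illustrative.

declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
  <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, i32)

define <vscale x 1 x i16> @example(<vscale x 1 x i16> %acc, <vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i32 %vl) {
  ; vd += vs1 * vs2, widening the i8 sources into the i16 accumulator
  %r = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
              <vscale x 1 x i16> %acc, <vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i32 %vl)
  ret <vscale x 1 x i16> %r
}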
Added:
llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
Modified:
llvm/include/llvm/IR/IntrinsicsRISCV.td
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
Removed:
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index f2aed28440cc..ba0929b16ea5 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -338,6 +338,20 @@ let TargetPrefix = "riscv" in {
[IntrNoMem]>, RISCVVIntrinsic {
let ExtendOperand = 2;
}
+ class RISCVTernaryWideNoMask
+ : Intrinsic< [llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
+ llvm_anyint_ty],
+ [IntrNoMem] >, RISCVVIntrinsic {
+ let ExtendOperand = 2;
+ }
+ class RISCVTernaryWideMask
+ : Intrinsic< [llvm_anyvector_ty],
+ [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
+ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+ [IntrNoMem]>, RISCVVIntrinsic {
+ let ExtendOperand = 2;
+ }
multiclass RISCVUSLoad {
def "int_riscv_" # NAME : RISCVUSLoad;
@@ -406,6 +420,10 @@ let TargetPrefix = "riscv" in {
def "int_riscv_" # NAME : RISCVCompareNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVCompareMask;
}
+ multiclass RISCVTernaryWide {
+ def "int_riscv_" # NAME : RISCVTernaryWideNoMask;
+ def "int_riscv_" # NAME # "_mask" : RISCVTernaryWideMask;
+ }
defm vle : RISCVUSLoad;
defm vleff : RISCVUSLoad;
@@ -481,6 +499,11 @@ let TargetPrefix = "riscv" in {
defm vmadd : RISCVTernaryAAXA;
defm vnmsub : RISCVTernaryAAXA;
+ defm vwmaccu : RISCVTernaryWide;
+ defm vwmacc : RISCVTernaryWide;
+ defm vwmaccus : RISCVTernaryWide;
+ defm vwmaccsu : RISCVTernaryWide;
+
defm vfadd : RISCVBinaryAAX;
defm vfsub : RISCVBinaryAAX;
defm vfrsub : RISCVBinaryAAX;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 7a3e33d6d7db..f3b6d2f5867f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -890,6 +890,18 @@ multiclass VPseudoTernaryV_VX_AAXA<string Constraint = ""> {
defm _VX : VPseudoTernary<m.vrclass, GPR, m.vrclass, m, Constraint>;
}
+multiclass VPseudoTernaryW_VV {
+ defvar constraint = "@earlyclobber $rd";
+ foreach m = MxList.m in
+ defm _VV : VPseudoTernary<m.wvrclass, m.vrclass, m.vrclass, m, constraint>;
+}
+
+multiclass VPseudoTernaryW_VX {
+ defvar constraint = "@earlyclobber $rd";
+ foreach m = MxList.m in
+ defm _VX : VPseudoTernary<m.wvrclass, GPR, m.vrclass, m, constraint>;
+}
+
multiclass VPseudoTernaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
foreach m = MxList.m in
defm _VI : VPseudoTernary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
@@ -905,6 +917,11 @@ multiclass VPseudoTernaryV_VX_VI<Operand ImmType = simm5, string Constraint = ""
defm "" : VPseudoTernaryV_VI<ImmType, Constraint>;
}
+multiclass VPseudoTernaryW_VV_VX {
+ defm "" : VPseudoTernaryW_VV;
+ defm "" : VPseudoTernaryW_VX;
+}
+
multiclass VPseudoBinaryM_VV_VX_VI {
defm "" : VPseudoBinaryM_VV;
defm "" : VPseudoBinaryM_VX</*IsFloat=*/0>;
@@ -1591,6 +1608,30 @@ multiclass VPatTernaryV_VI<string intrinsic, string instruction,
vti.RegClass, Imm_type>;
}
+multiclass VPatTernaryW_VV<string intrinsic, string instruction,
+ list<VTypeInfoToWide> vtilist> {
+ foreach vtiToWti = vtilist in {
+ defvar vti = vtiToWti.Vti;
+ defvar wti = vtiToWti.Wti;
+ defm : VPatTernary<intrinsic, instruction, "VV",
+ wti.Vector, vti.Vector, vti.Vector,
+ vti.Mask, vti.SEW, vti.LMul,
+ wti.RegClass, vti.RegClass, vti.RegClass>;
+ }
+}
+
+multiclass VPatTernaryW_VX<string intrinsic, string instruction,
+ list<VTypeInfoToWide> vtilist> {
+ foreach vtiToWti = vtilist in {
+ defvar vti = vtiToWti.Vti;
+ defvar wti = vtiToWti.Wti;
+ defm : VPatTernary<intrinsic, instruction, "VX",
+ wti.Vector, XLenVT, vti.Vector,
+ vti.Mask, vti.SEW, vti.LMul,
+ wti.RegClass, vti.ScalarRegClass, vti.RegClass>;
+ }
+}
+
multiclass VPatTernaryV_VV_VX_AAXA<string intrinsic, string instruction,
list<VTypeInfo> vtilist> {
defm "" : VPatTernaryV_VV<intrinsic, instruction, vtilist>;
@@ -1611,6 +1652,12 @@ multiclass VPatBinaryM_VV_VX_VI<string intrinsic, string instruction,
defm "" : VPatBinaryM_VI<intrinsic, instruction, vtilist>;
}
+multiclass VPatTernaryW_VV_VX<string intrinsic, string instruction,
+ list<VTypeInfoToWide> vtilist> {
+ defm "" : VPatTernaryW_VV<intrinsic, instruction, vtilist>;
+ defm "" : VPatTernaryW_VX<intrinsic, instruction, vtilist>;
+}
+
multiclass VPatBinaryM_VV_VX<string intrinsic, string instruction,
list<VTypeInfo> vtilist>
{
@@ -1805,6 +1852,14 @@ defm PseudoVNMSAC : VPseudoTernaryV_VV_VX_AAXA;
defm PseudoVMADD : VPseudoTernaryV_VV_VX_AAXA;
defm PseudoVNMSUB : VPseudoTernaryV_VV_VX_AAXA;
+//===----------------------------------------------------------------------===//
+// 12.14. Vector Widening Integer Multiply-Add Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVWMACCU : VPseudoTernaryW_VV_VX;
+defm PseudoVWMACC : VPseudoTernaryW_VV_VX;
+defm PseudoVWMACCSU : VPseudoTernaryW_VV_VX;
+defm PseudoVWMACCUS : VPseudoTernaryW_VX;
+
//===----------------------------------------------------------------------===//
// 12.17. Vector Integer Move Instructions
//===----------------------------------------------------------------------===//
@@ -2173,6 +2228,14 @@ defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsub", "PseudoVNMSUB", AllInteger
defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vmacc", "PseudoVMACC", AllIntegerVectors>;
defm "" : VPatTernaryV_VV_VX_AAXA<"int_riscv_vnmsac", "PseudoVNMSAC", AllIntegerVectors>;
+//===----------------------------------------------------------------------===//
+// 12.14. Vector Widening Integer Multiply-Add Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatTernaryW_VV_VX<"int_riscv_vwmaccu", "PseudoVWMACCU", AllWidenableIntVectors>;
+defm "" : VPatTernaryW_VV_VX<"int_riscv_vwmacc", "PseudoVWMACC", AllWidenableIntVectors>;
+defm "" : VPatTernaryW_VV_VX<"int_riscv_vwmaccsu", "PseudoVWMACCSU", AllWidenableIntVectors>;
+defm "" : VPatTernaryW_VX<"int_riscv_vwmaccus", "PseudoVWMACCUS", AllWidenableIntVectors>;
+
//===----------------------------------------------------------------------===//
// 12.17. Vector Integer Move Instructions
//===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
new file mode 100644
index 000000000000..539177f8d78e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
@@ -0,0 +1,1034 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v18, v19
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v18, v19, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v20, v22
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v20, v22, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v8, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v8, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v18, v19
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v18, v19, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v20, v22
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v20, v22, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v8, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v8, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8(
+ <vscale x 1 x i16>,
+ i8,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8(
+ <vscale x 1 x i16> %0,
+ i8 %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8(
+ <vscale x 1 x i16>,
+ i8,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8(
+ <vscale x 1 x i16> %0,
+ i8 %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8(
+ <vscale x 2 x i16>,
+ i8,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8(
+ <vscale x 2 x i16> %0,
+ i8 %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8(
+ <vscale x 2 x i16>,
+ i8,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8(
+ <vscale x 2 x i16> %0,
+ i8 %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8(
+ <vscale x 4 x i16>,
+ i8,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8(
+ <vscale x 4 x i16> %0,
+ i8 %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8(
+ <vscale x 4 x i16>,
+ i8,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8(
+ <vscale x 4 x i16> %0,
+ i8 %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8(
+ <vscale x 8 x i16>,
+ i8,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8(
+ <vscale x 8 x i16> %0,
+ i8 %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8(
+ <vscale x 8 x i16>,
+ i8,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8(
+ <vscale x 8 x i16> %0,
+ i8 %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8(
+ <vscale x 16 x i16>,
+ i8,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8(
+ <vscale x 16 x i16> %0,
+ i8 %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8(
+ <vscale x 16 x i16>,
+ i8,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8(
+ <vscale x 16 x i16> %0,
+ i8 %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8(
+ <vscale x 32 x i16>,
+ i8,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8(
+ <vscale x 32 x i16> %0,
+ i8 %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8(
+ <vscale x 32 x i16>,
+ i8,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8(
+ <vscale x 32 x i16> %0,
+ i8 %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16(
+ <vscale x 1 x i32>,
+ i16,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16(
+ <vscale x 1 x i32> %0,
+ i16 %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16(
+ <vscale x 1 x i32>,
+ i16,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16(
+ <vscale x 1 x i32> %0,
+ i16 %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16(
+ <vscale x 2 x i32>,
+ i16,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16(
+ <vscale x 2 x i32> %0,
+ i16 %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16(
+ <vscale x 2 x i32>,
+ i16,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16(
+ <vscale x 2 x i32> %0,
+ i16 %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(
+ <vscale x 4 x i32>,
+ i16,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(
+ <vscale x 4 x i32> %0,
+ i16 %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16(
+ <vscale x 4 x i32>,
+ i16,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16(
+ <vscale x 4 x i32> %0,
+ i16 %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16(
+ <vscale x 8 x i32>,
+ i16,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16(
+ <vscale x 8 x i32> %0,
+ i16 %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16(
+ <vscale x 8 x i32>,
+ i16,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16(
+ <vscale x 8 x i32> %0,
+ i16 %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16(
+ <vscale x 16 x i32>,
+ i16,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16(
+ <vscale x 16 x i32> %0,
+ i16 %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16(
+ <vscale x 16 x i32>,
+ i16,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16(
+ <vscale x 16 x i32> %0,
+ i16 %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
new file mode 100644
index 000000000000..a6ed911b7736
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
@@ -0,0 +1,1412 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v18, v19
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v18, v19, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v20, v22
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v20, v22, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v8, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v8, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v18, v19
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v18, v19, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v20, v22
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v20, v22, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v8, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v8, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v18, v19
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v18, v19, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v20, v22
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v20, v22, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v8, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT: vwmacc.vv v16, v8, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8(
+ <vscale x 1 x i16>,
+ i8,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8(
+ <vscale x 1 x i16> %0,
+ i8 %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8(
+ <vscale x 1 x i16>,
+ i8,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8(
+ <vscale x 1 x i16> %0,
+ i8 %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8(
+ <vscale x 2 x i16>,
+ i8,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8(
+ <vscale x 2 x i16> %0,
+ i8 %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8(
+ <vscale x 2 x i16>,
+ i8,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8(
+ <vscale x 2 x i16> %0,
+ i8 %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8(
+ <vscale x 4 x i16>,
+ i8,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8(
+ <vscale x 4 x i16> %0,
+ i8 %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8(
+ <vscale x 4 x i16>,
+ i8,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8(
+ <vscale x 4 x i16> %0,
+ i8 %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8(
+ <vscale x 8 x i16>,
+ i8,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8(
+ <vscale x 8 x i16> %0,
+ i8 %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8(
+ <vscale x 8 x i16>,
+ i8,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8(
+ <vscale x 8 x i16> %0,
+ i8 %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8(
+ <vscale x 16 x i16>,
+ i8,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8(
+ <vscale x 16 x i16> %0,
+ i8 %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8(
+ <vscale x 16 x i16>,
+ i8,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8(
+ <vscale x 16 x i16> %0,
+ i8 %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8(
+ <vscale x 32 x i16>,
+ i8,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8(
+ <vscale x 32 x i16> %0,
+ i8 %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8(
+ <vscale x 32 x i16>,
+ i8,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8(
+ <vscale x 32 x i16> %0,
+ i8 %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16(
+ <vscale x 1 x i32>,
+ i16,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16(
+ <vscale x 1 x i32> %0,
+ i16 %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16(
+ <vscale x 1 x i32>,
+ i16,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16(
+ <vscale x 1 x i32> %0,
+ i16 %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16(
+ <vscale x 2 x i32>,
+ i16,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16(
+ <vscale x 2 x i32> %0,
+ i16 %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16(
+ <vscale x 2 x i32>,
+ i16,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16(
+ <vscale x 2 x i32> %0,
+ i16 %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(
+ <vscale x 4 x i32>,
+ i16,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(
+ <vscale x 4 x i32> %0,
+ i16 %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16(
+ <vscale x 4 x i32>,
+ i16,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16(
+ <vscale x 4 x i32> %0,
+ i16 %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16(
+ <vscale x 8 x i32>,
+ i16,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16(
+ <vscale x 8 x i32> %0,
+ i16 %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16(
+ <vscale x 8 x i32>,
+ i16,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16(
+ <vscale x 8 x i32> %0,
+ i16 %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16(
+ <vscale x 16 x i32>,
+ i16,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16(
+ <vscale x 16 x i32> %0,
+ i16 %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16(
+ <vscale x 16 x i32>,
+ i16,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16(
+ <vscale x 16 x i32> %0,
+ i16 %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32(
+ <vscale x 1 x i64>,
+ i32,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32(
+ <vscale x 1 x i64> %0,
+ i32 %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32(
+ <vscale x 1 x i64>,
+ i32,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32(
+ <vscale x 1 x i64> %0,
+ i32 %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32(
+ <vscale x 2 x i64>,
+ i32,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32(
+ <vscale x 2 x i64> %0,
+ i32 %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32(
+ <vscale x 2 x i64>,
+ i32,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32(
+ <vscale x 2 x i64> %0,
+ i32 %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32(
+ <vscale x 4 x i64>,
+ i32,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32(
+ <vscale x 4 x i64> %0,
+ i32 %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32(
+ <vscale x 4 x i64>,
+ i32,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32(
+ <vscale x 4 x i64> %0,
+ i32 %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32(
+ <vscale x 8 x i64>,
+ i32,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32(
+ <vscale x 8 x i64> %0,
+ i32 %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32(
+ <vscale x 8 x i64>,
+ i32,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT: vwmacc.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32(
+ <vscale x 8 x i64> %0,
+ i32 %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
new file mode 100644
index 000000000000..2d39ba95db2e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
@@ -0,0 +1,1034 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v18, v19
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v18, v19, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v20, v22
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v20, v22, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v8, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v8, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v18, v19
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v18, v19, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v20, v22
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v20, v22, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v8, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v8, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(
+ <vscale x 1 x i16>,
+ i8,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(
+ <vscale x 1 x i16> %0,
+ i8 %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(
+ <vscale x 1 x i16>,
+ i8,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(
+ <vscale x 1 x i16> %0,
+ i8 %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(
+ <vscale x 2 x i16>,
+ i8,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(
+ <vscale x 2 x i16> %0,
+ i8 %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(
+ <vscale x 2 x i16>,
+ i8,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(
+ <vscale x 2 x i16> %0,
+ i8 %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(
+ <vscale x 4 x i16>,
+ i8,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(
+ <vscale x 4 x i16> %0,
+ i8 %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(
+ <vscale x 4 x i16>,
+ i8,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(
+ <vscale x 4 x i16> %0,
+ i8 %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(
+ <vscale x 8 x i16>,
+ i8,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(
+ <vscale x 8 x i16> %0,
+ i8 %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(
+ <vscale x 8 x i16>,
+ i8,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(
+ <vscale x 8 x i16> %0,
+ i8 %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(
+ <vscale x 16 x i16>,
+ i8,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(
+ <vscale x 16 x i16> %0,
+ i8 %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(
+ <vscale x 16 x i16>,
+ i8,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(
+ <vscale x 16 x i16> %0,
+ i8 %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(
+ <vscale x 32 x i16>,
+ i8,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(
+ <vscale x 32 x i16> %0,
+ i8 %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(
+ <vscale x 32 x i16>,
+ i8,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(
+ <vscale x 32 x i16> %0,
+ i8 %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(
+ <vscale x 1 x i32>,
+ i16,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(
+ <vscale x 1 x i32> %0,
+ i16 %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(
+ <vscale x 1 x i32>,
+ i16,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(
+ <vscale x 1 x i32> %0,
+ i16 %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(
+ <vscale x 2 x i32>,
+ i16,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(
+ <vscale x 2 x i32> %0,
+ i16 %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(
+ <vscale x 2 x i32>,
+ i16,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(
+ <vscale x 2 x i32> %0,
+ i16 %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(
+ <vscale x 4 x i32>,
+ i16,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(
+ <vscale x 4 x i32> %0,
+ i16 %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(
+ <vscale x 4 x i32>,
+ i16,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(
+ <vscale x 4 x i32> %0,
+ i16 %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(
+ <vscale x 8 x i32>,
+ i16,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(
+ <vscale x 8 x i32> %0,
+ i16 %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(
+ <vscale x 8 x i32>,
+ i16,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(
+ <vscale x 8 x i32> %0,
+ i16 %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(
+ <vscale x 16 x i32>,
+ i16,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(
+ <vscale x 16 x i32> %0,
+ i16 %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(
+ <vscale x 16 x i32>,
+ i16,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(
+ <vscale x 16 x i32> %0,
+ i16 %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
new file mode 100644
index 000000000000..c274246b7b53
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
@@ -0,0 +1,1412 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
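+;
+; These tests cover the unmasked and masked forms of the vwmaccsu intrinsic
+; for each legal element width on RV64. As a rough sketch, the expected
+; semantics per the RVV draft spec are a signed-times-unsigned widening
+; multiply-accumulate into a 2*SEW-wide destination:
+;   vwmaccsu.vv vd, vs1, vs2: vd[i] = signed(vs1[i]) * unsigned(vs2[i]) + vd[i]
+;   vwmaccsu.vx vd, rs1, vs2: vd[i] = signed(rs1) * unsigned(vs2[i]) + vd[i]
+;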
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v18, v19
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v18, v19, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v20, v22
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v20, v22, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v8, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v8, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v18, v19
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v18, v19, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v20, v22
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v20, v22, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v8, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v8, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v18, v19
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v18, v19, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v20, v22
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v20, v22, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v8, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vv v16, v8, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
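+; The vector-vector (.vv) tests end here; the tests below exercise the
+; vector-scalar (.vx) form, which takes the second source operand in a GPR.
+;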
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(
+ <vscale x 1 x i16>,
+ i8,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(
+ <vscale x 1 x i16> %0,
+ i8 %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(
+ <vscale x 1 x i16>,
+ i8,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(
+ <vscale x 1 x i16> %0,
+ i8 %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(
+ <vscale x 2 x i16>,
+ i8,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(
+ <vscale x 2 x i16> %0,
+ i8 %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(
+ <vscale x 2 x i16>,
+ i8,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(
+ <vscale x 2 x i16> %0,
+ i8 %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(
+ <vscale x 4 x i16>,
+ i8,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(
+ <vscale x 4 x i16> %0,
+ i8 %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(
+ <vscale x 4 x i16>,
+ i8,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(
+ <vscale x 4 x i16> %0,
+ i8 %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(
+ <vscale x 8 x i16>,
+ i8,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(
+ <vscale x 8 x i16> %0,
+ i8 %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(
+ <vscale x 8 x i16>,
+ i8,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(
+ <vscale x 8 x i16> %0,
+ i8 %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(
+ <vscale x 16 x i16>,
+ i8,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(
+ <vscale x 16 x i16> %0,
+ i8 %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(
+ <vscale x 16 x i16>,
+ i8,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(
+ <vscale x 16 x i16> %0,
+ i8 %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(
+ <vscale x 32 x i16>,
+ i8,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(
+ <vscale x 32 x i16> %0,
+ i8 %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(
+ <vscale x 32 x i16>,
+ i8,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(
+ <vscale x 32 x i16> %0,
+ i8 %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(
+ <vscale x 1 x i32>,
+ i16,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(
+ <vscale x 1 x i32> %0,
+ i16 %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(
+ <vscale x 1 x i32>,
+ i16,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(
+ <vscale x 1 x i32> %0,
+ i16 %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(
+ <vscale x 2 x i32>,
+ i16,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(
+ <vscale x 2 x i32> %0,
+ i16 %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(
+ <vscale x 2 x i32>,
+ i16,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(
+ <vscale x 2 x i32> %0,
+ i16 %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(
+ <vscale x 4 x i32>,
+ i16,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(
+ <vscale x 4 x i32> %0,
+ i16 %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(
+ <vscale x 4 x i32>,
+ i16,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(
+ <vscale x 4 x i32> %0,
+ i16 %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(
+ <vscale x 8 x i32>,
+ i16,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(
+ <vscale x 8 x i32> %0,
+ i16 %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(
+ <vscale x 8 x i32>,
+ i16,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(
+ <vscale x 8 x i32> %0,
+ i16 %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(
+ <vscale x 16 x i32>,
+ i16,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(
+ <vscale x 16 x i32> %0,
+ i16 %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(
+ <vscale x 16 x i32>,
+ i16,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(
+ <vscale x 16 x i32> %0,
+ i16 %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32(
+ <vscale x 1 x i64>,
+ i32,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32(
+ <vscale x 1 x i64> %0,
+ i32 %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32(
+ <vscale x 1 x i64>,
+ i32,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32(
+ <vscale x 1 x i64> %0,
+ i32 %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32(
+ <vscale x 2 x i64>,
+ i32,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32(
+ <vscale x 2 x i64> %0,
+ i32 %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32(
+ <vscale x 2 x i64>,
+ i32,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32(
+ <vscale x 2 x i64> %0,
+ i32 %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32(
+ <vscale x 4 x i64>,
+ i32,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32(
+ <vscale x 4 x i64> %0,
+ i32 %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32(
+ <vscale x 4 x i64>,
+ i32,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32(
+ <vscale x 4 x i64> %0,
+ i32 %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32(
+ <vscale x 8 x i64>,
+ i32,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32(
+ <vscale x 8 x i64> %0,
+ i32 %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32(
+ <vscale x 8 x i64>,
+ i32,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT: vwmaccsu.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32(
+ <vscale x 8 x i64> %0,
+ i32 %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
new file mode 100644
index 000000000000..2bc594e82d6b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
@@ -0,0 +1,1034 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
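+;
+; A rough sketch of the expected vwmaccu semantics per the RVV draft spec
+; (unsigned-times-unsigned widening multiply-accumulate):
+;   vwmaccu.vv vd, vs1, vs2: vd[i] = unsigned(vs1[i]) * unsigned(vs2[i]) + vd[i]
+;   vwmaccu.vx vd, rs1, vs2: vd[i] = unsigned(rs1) * unsigned(vs2[i]) + vd[i]
+;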
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v18, v19
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v18, v19, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v20, v22
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v20, v22, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v8, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v8, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v18, v19
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v18, v19, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v20, v22
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v20, v22, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v8, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v8, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
+ <vscale x 1 x i16>,
+ i8,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
+ <vscale x 1 x i16> %0,
+ i8 %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
+ <vscale x 1 x i16>,
+ i8,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
+ <vscale x 1 x i16> %0,
+ i8 %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
+ <vscale x 2 x i16>,
+ i8,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
+ <vscale x 2 x i16> %0,
+ i8 %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
+ <vscale x 2 x i16>,
+ i8,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
+ <vscale x 2 x i16> %0,
+ i8 %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
+ <vscale x 4 x i16>,
+ i8,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
+ <vscale x 4 x i16> %0,
+ i8 %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
+ <vscale x 4 x i16>,
+ i8,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
+ <vscale x 4 x i16> %0,
+ i8 %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
+ <vscale x 8 x i16>,
+ i8,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
+ <vscale x 8 x i16> %0,
+ i8 %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
+ <vscale x 8 x i16>,
+ i8,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
+ <vscale x 8 x i16> %0,
+ i8 %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
+ <vscale x 16 x i16>,
+ i8,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
+ <vscale x 16 x i16> %0,
+ i8 %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
+ <vscale x 16 x i16>,
+ i8,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
+ <vscale x 16 x i16> %0,
+ i8 %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
+ <vscale x 32 x i16>,
+ i8,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
+ <vscale x 32 x i16> %0,
+ i8 %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
+ <vscale x 32 x i16>,
+ i8,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
+ <vscale x 32 x i16> %0,
+ i8 %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
+ <vscale x 1 x i32>,
+ i16,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
+ <vscale x 1 x i32> %0,
+ i16 %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
+ <vscale x 1 x i32>,
+ i16,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
+ <vscale x 1 x i32> %0,
+ i16 %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
+ <vscale x 2 x i32>,
+ i16,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
+ <vscale x 2 x i32> %0,
+ i16 %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
+ <vscale x 2 x i32>,
+ i16,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
+ <vscale x 2 x i32> %0,
+ i16 %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
+ <vscale x 4 x i32>,
+ i16,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
+ <vscale x 4 x i32> %0,
+ i16 %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
+ <vscale x 4 x i32>,
+ i16,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
+ <vscale x 4 x i32> %0,
+ i16 %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
+ <vscale x 8 x i32>,
+ i16,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
+ <vscale x 8 x i32> %0,
+ i16 %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
+ <vscale x 8 x i32>,
+ i16,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
+ <vscale x 8 x i32> %0,
+ i16 %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
+ <vscale x 16 x i32>,
+ i16,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
+ <vscale x 16 x i32> %0,
+ i16 %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
+ <vscale x 16 x i32>,
+ i16,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
+ <vscale x 16 x i32> %0,
+ i16 %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
new file mode 100644
index 000000000000..be5d1779a22c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
@@ -0,0 +1,1412 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
+ <vscale x 1 x i16> %0,
+ <vscale x 1 x i8> %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
+ <vscale x 2 x i16> %0,
+ <vscale x 2 x i8> %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
+ <vscale x 4 x i16> %0,
+ <vscale x 4 x i8> %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v18, v19
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v18, v19, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
+ <vscale x 8 x i16> %0,
+ <vscale x 8 x i8> %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v20, v22
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v20, v22, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
+ <vscale x 16 x i16> %0,
+ <vscale x 16 x i8> %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v8, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v8, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
+ <vscale x 32 x i16> %0,
+ <vscale x 32 x i8> %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
+ <vscale x 1 x i32> %0,
+ <vscale x 1 x i16> %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
+ <vscale x 2 x i32> %0,
+ <vscale x 2 x i16> %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v18, v19
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v18, v19, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
+ <vscale x 4 x i32> %0,
+ <vscale x 4 x i16> %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v20, v22
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v20, v22, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
+ <vscale x 8 x i32> %0,
+ <vscale x 8 x i16> %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v8, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v8, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
+ <vscale x 16 x i32> %0,
+ <vscale x 16 x i16> %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v17, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
+ <vscale x 1 x i64> %0,
+ <vscale x 1 x i32> %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v18, v19
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v18, v19, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
+ <vscale x 2 x i64> %0,
+ <vscale x 2 x i32> %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v20, v22
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v20, v22, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
+ <vscale x 4 x i64> %0,
+ <vscale x 4 x i32> %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v8, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, a2, e32,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vv v16, v8, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
+ <vscale x 8 x i64> %0,
+ <vscale x 8 x i32> %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
+ <vscale x 1 x i16>,
+ i8,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
+ <vscale x 1 x i16> %0,
+ i8 %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
+ <vscale x 1 x i16>,
+ i8,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
+ <vscale x 1 x i16> %0,
+ i8 %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
+ <vscale x 2 x i16>,
+ i8,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
+ <vscale x 2 x i16> %0,
+ i8 %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
+ <vscale x 2 x i16>,
+ i8,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
+ <vscale x 2 x i16> %0,
+ i8 %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
+ <vscale x 4 x i16>,
+ i8,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
+ <vscale x 4 x i16> %0,
+ i8 %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
+ <vscale x 4 x i16>,
+ i8,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
+ <vscale x 4 x i16> %0,
+ i8 %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
+ <vscale x 8 x i16>,
+ i8,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
+ <vscale x 8 x i16> %0,
+ i8 %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
+ <vscale x 8 x i16>,
+ i8,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
+ <vscale x 8 x i16> %0,
+ i8 %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
+ <vscale x 16 x i16>,
+ i8,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
+ <vscale x 16 x i16> %0,
+ i8 %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
+ <vscale x 16 x i16>,
+ i8,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
+ <vscale x 16 x i16> %0,
+ i8 %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
+ <vscale x 32 x i16>,
+ i8,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
+ <vscale x 32 x i16> %0,
+ i8 %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
+ <vscale x 32 x i16>,
+ i8,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
+ <vscale x 32 x i16> %0,
+ i8 %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
+ <vscale x 1 x i32>,
+ i16,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
+ <vscale x 1 x i32> %0,
+ i16 %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
+ <vscale x 1 x i32>,
+ i16,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
+ <vscale x 1 x i32> %0,
+ i16 %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
+ <vscale x 2 x i32>,
+ i16,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
+ <vscale x 2 x i32> %0,
+ i16 %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
+ <vscale x 2 x i32>,
+ i16,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
+ <vscale x 2 x i32> %0,
+ i16 %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
+ <vscale x 4 x i32>,
+ i16,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
+ <vscale x 4 x i32> %0,
+ i16 %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
+ <vscale x 4 x i32>,
+ i16,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
+ <vscale x 4 x i32> %0,
+ i16 %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
+ <vscale x 8 x i32>,
+ i16,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
+ <vscale x 8 x i32> %0,
+ i16 %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
+ <vscale x 8 x i32>,
+ i16,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
+ <vscale x 8 x i32> %0,
+ i16 %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
+ <vscale x 16 x i32>,
+ i16,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
+ <vscale x 16 x i32> %0,
+ i16 %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
+ <vscale x 16 x i32>,
+ i16,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
+ <vscale x 16 x i32> %0,
+ i16 %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
+ <vscale x 1 x i64>,
+ i32,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
+ <vscale x 1 x i64> %0,
+ i32 %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
+ <vscale x 1 x i64>,
+ i32,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
+ <vscale x 1 x i64> %0,
+ i32 %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
+ <vscale x 2 x i64>,
+ i32,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
+ <vscale x 2 x i64> %0,
+ i32 %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
+ <vscale x 2 x i64>,
+ i32,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
+ <vscale x 2 x i64> %0,
+ i32 %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
+ <vscale x 4 x i64>,
+ i32,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
+ <vscale x 4 x i64> %0,
+ i32 %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
+ <vscale x 4 x i64>,
+ i32,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
+ <vscale x 4 x i64> %0,
+ i32 %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
+ <vscale x 8 x i64>,
+ i32,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
+ <vscale x 8 x i64> %0,
+ i32 %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
+ <vscale x 8 x i64>,
+ i32,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT: vwmaccu.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
+ <vscale x 8 x i64> %0,
+ i32 %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
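
Before the next file, a note on the shape shared by every test above: the vsetvli selects the source element width and LMUL (e8,m1 and so on), while the vwmaccu.vx destination group implicitly uses twice that SEW, and the scalar multiplicand arrives in a0. Below is a minimal sketch of IR a frontend might emit for the unmasked intrinsic; the function name @widening_fma is hypothetical, but the intrinsic signature is exactly the one declared in the tests above:

declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
  <vscale x 1 x i16>, i8, <vscale x 1 x i8>, i64)

define <vscale x 1 x i16> @widening_fma(<vscale x 1 x i16> %acc, i8 %s,
                                        <vscale x 1 x i8> %v, i64 %vl) {
  ; acc[i] += zext(%s) * zext(%v[i]), accumulated at twice the source SEW
  %r = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
      <vscale x 1 x i16> %acc, i8 %s, <vscale x 1 x i8> %v, i64 %vl)
  ret <vscale x 1 x i16> %r
}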
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
new file mode 100644
index 000000000000..b9e0207f381a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
@@ -0,0 +1,516 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
+ <vscale x 1 x i16>,
+ i8,
+ <vscale x 1 x i8>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
+ <vscale x 1 x i16> %0,
+ i8 %1,
+ <vscale x 1 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
+ <vscale x 1 x i16>,
+ i8,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
+ <vscale x 1 x i16> %0,
+ i8 %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
+ <vscale x 2 x i16>,
+ i8,
+ <vscale x 2 x i8>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
+ <vscale x 2 x i16> %0,
+ i8 %1,
+ <vscale x 2 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
+ <vscale x 2 x i16>,
+ i8,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
+ <vscale x 2 x i16> %0,
+ i8 %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
+ <vscale x 4 x i16>,
+ i8,
+ <vscale x 4 x i8>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
+ <vscale x 4 x i16> %0,
+ i8 %1,
+ <vscale x 4 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
+ <vscale x 4 x i16>,
+ i8,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
+ <vscale x 4 x i16> %0,
+ i8 %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
+ <vscale x 8 x i16>,
+ i8,
+ <vscale x 8 x i8>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
+ <vscale x 8 x i16> %0,
+ i8 %1,
+ <vscale x 8 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
+ <vscale x 8 x i16>,
+ i8,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
+ <vscale x 8 x i16> %0,
+ i8 %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
+ <vscale x 16 x i16>,
+ i8,
+ <vscale x 16 x i8>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
+ <vscale x 16 x i16> %0,
+ i8 %1,
+ <vscale x 16 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
+ <vscale x 16 x i16>,
+ i8,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
+ <vscale x 16 x i16> %0,
+ i8 %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
+ <vscale x 32 x i16>,
+ i8,
+ <vscale x 32 x i8>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
+ <vscale x 32 x i16> %0,
+ i8 %1,
+ <vscale x 32 x i8> %2,
+ i32 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
+ <vscale x 32 x i16>,
+ i8,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
+ <vscale x 32 x i16> %0,
+ i8 %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
+ <vscale x 1 x i32>,
+ i16,
+ <vscale x 1 x i16>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
+ <vscale x 1 x i32> %0,
+ i16 %1,
+ <vscale x 1 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
+ <vscale x 1 x i32>,
+ i16,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
+ <vscale x 1 x i32> %0,
+ i16 %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
+ <vscale x 2 x i32>,
+ i16,
+ <vscale x 2 x i16>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
+ <vscale x 2 x i32> %0,
+ i16 %1,
+ <vscale x 2 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
+ <vscale x 2 x i32>,
+ i16,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
+ <vscale x 2 x i32> %0,
+ i16 %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
+ <vscale x 4 x i32>,
+ i16,
+ <vscale x 4 x i16>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
+ <vscale x 4 x i32> %0,
+ i16 %1,
+ <vscale x 4 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
+ <vscale x 4 x i32>,
+ i16,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
+ <vscale x 4 x i32> %0,
+ i16 %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
+ <vscale x 8 x i32>,
+ i16,
+ <vscale x 8 x i16>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
+ <vscale x 8 x i32> %0,
+ i16 %1,
+ <vscale x 8 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
+ <vscale x 8 x i32>,
+ i16,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
+ <vscale x 8 x i32> %0,
+ i16 %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
+ <vscale x 16 x i32>,
+ i16,
+ <vscale x 16 x i16>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
+ <vscale x 16 x i32> %0,
+ i16 %1,
+ <vscale x 16 x i16> %2,
+ i32 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
+ <vscale x 16 x i32>,
+ i16,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
+ <vscale x 16 x i32> %0,
+ i16 %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i32 %4)
+
+ ret <vscale x 16 x i32> %a
+}
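
One asymmetry worth noting before the rv64 version of this file: in the V extension, vwmaccus.vx computes vd[i] += unsigned(x[rs1]) * signed(vs2[i]), and it is the one widening multiply-add with no .vv form, which is why only vx tests are generated for it. A sketch of a masked call follows, assuming a hypothetical caller @masked_widening_fma; the intrinsic declaration is taken verbatim from the tests, and the mask operand leaves masked-off accumulator elements unchanged:

declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
  <vscale x 1 x i16>, i8, <vscale x 1 x i8>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @masked_widening_fma(<vscale x 1 x i16> %acc, i8 %s,
    <vscale x 1 x i8> %v, <vscale x 1 x i1> %m, i32 %vl) {
  ; elements with %m[i] == 0 pass %acc[i] through unmodified
  %r = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
      <vscale x 1 x i16> %acc, i8 %s, <vscale x 1 x i8> %v,
      <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x i16> %r
}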
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
new file mode 100644
index 000000000000..56964b8819d7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
@@ -0,0 +1,704 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
+ <vscale x 1 x i16>,
+ i8,
+ <vscale x 1 x i8>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
+ <vscale x 1 x i16> %0,
+ i8 %1,
+ <vscale x 1 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
+ <vscale x 1 x i16>,
+ i8,
+ <vscale x 1 x i8>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i16> @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
+ <vscale x 1 x i16> %0,
+ i8 %1,
+ <vscale x 1 x i8> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
+ <vscale x 2 x i16>,
+ i8,
+ <vscale x 2 x i8>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
+ <vscale x 2 x i16> %0,
+ i8 %1,
+ <vscale x 2 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
+ <vscale x 2 x i16>,
+ i8,
+ <vscale x 2 x i8>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i16> @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
+ <vscale x 2 x i16> %0,
+ i8 %1,
+ <vscale x 2 x i8> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
+ <vscale x 4 x i16>,
+ i8,
+ <vscale x 4 x i8>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
+ <vscale x 4 x i16> %0,
+ i8 %1,
+ <vscale x 4 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
+ <vscale x 4 x i16>,
+ i8,
+ <vscale x 4 x i8>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i16> @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
+ <vscale x 4 x i16> %0,
+ i8 %1,
+ <vscale x 4 x i8> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
+ <vscale x 8 x i16>,
+ i8,
+ <vscale x 8 x i8>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
+ <vscale x 8 x i16> %0,
+ i8 %1,
+ <vscale x 8 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
+ <vscale x 8 x i16>,
+ i8,
+ <vscale x 8 x i8>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i16> @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
+ <vscale x 8 x i16> %0,
+ i8 %1,
+ <vscale x 8 x i8> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
+ <vscale x 16 x i16>,
+ i8,
+ <vscale x 16 x i8>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
+ <vscale x 16 x i16> %0,
+ i8 %1,
+ <vscale x 16 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
+ <vscale x 16 x i16>,
+ i8,
+ <vscale x 16 x i8>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i16> @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
+ <vscale x 16 x i16> %0,
+ i8 %1,
+ <vscale x 16 x i8> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
+ <vscale x 32 x i16>,
+ i8,
+ <vscale x 32 x i8>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
+ <vscale x 32 x i16> %0,
+ i8 %1,
+ <vscale x 32 x i8> %2,
+ i64 %3)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
+ <vscale x 32 x i16>,
+ i8,
+ <vscale x 32 x i8>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i16> @intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e8,m4,ta,mu
+; CHECK-NEXT: vle8.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e8,m4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
+ <vscale x 32 x i16> %0,
+ i8 %1,
+ <vscale x 32 x i8> %2,
+ <vscale x 32 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 32 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
+ <vscale x 1 x i32>,
+ i16,
+ <vscale x 1 x i16>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
+ <vscale x 1 x i32> %0,
+ i16 %1,
+ <vscale x 1 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
+ <vscale x 1 x i32>,
+ i16,
+ <vscale x 1 x i16>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i32> @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
+ <vscale x 1 x i32> %0,
+ i16 %1,
+ <vscale x 1 x i16> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
+ <vscale x 2 x i32>,
+ i16,
+ <vscale x 2 x i16>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
+ <vscale x 2 x i32> %0,
+ i16 %1,
+ <vscale x 2 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
+ <vscale x 2 x i32>,
+ i16,
+ <vscale x 2 x i16>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i32> @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
+ <vscale x 2 x i32> %0,
+ i16 %1,
+ <vscale x 2 x i16> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
+ <vscale x 4 x i32>,
+ i16,
+ <vscale x 4 x i16>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
+ <vscale x 4 x i32> %0,
+ i16 %1,
+ <vscale x 4 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
+ <vscale x 4 x i32>,
+ i16,
+ <vscale x 4 x i16>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i32> @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
+ <vscale x 4 x i32> %0,
+ i16 %1,
+ <vscale x 4 x i16> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
+ <vscale x 8 x i32>,
+ i16,
+ <vscale x 8 x i16>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
+ <vscale x 8 x i32> %0,
+ i16 %1,
+ <vscale x 8 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
+ <vscale x 8 x i32>,
+ i16,
+ <vscale x 8 x i16>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i32> @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
+ <vscale x 8 x i32> %0,
+ i16 %1,
+ <vscale x 8 x i16> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
+ <vscale x 16 x i32>,
+ i16,
+ <vscale x 16 x i16>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
+ <vscale x 16 x i32> %0,
+ i16 %1,
+ <vscale x 16 x i16> %2,
+ i64 %3)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
+ <vscale x 16 x i32>,
+ i16,
+ <vscale x 16 x i16>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i32> @intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e16,m4,ta,mu
+; CHECK-NEXT: vle16.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e16,m4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
+ <vscale x 16 x i32> %0,
+ i16 %1,
+ <vscale x 16 x i16> %2,
+ <vscale x 16 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 16 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
+ <vscale x 1 x i64>,
+ i32,
+ <vscale x 1 x i32>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
+ <vscale x 1 x i64> %0,
+ i32 %1,
+ <vscale x 1 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
+ <vscale x 1 x i64>,
+ i32,
+ <vscale x 1 x i32>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i64> @intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v17, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
+ <vscale x 1 x i64> %0,
+ i32 %1,
+ <vscale x 1 x i32> %2,
+ <vscale x 1 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
+ <vscale x 2 x i64>,
+ i32,
+ <vscale x 2 x i32>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v18
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
+ <vscale x 2 x i64> %0,
+ i32 %1,
+ <vscale x 2 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
+ <vscale x 2 x i64>,
+ i32,
+ <vscale x 2 x i32>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i64> @intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v18, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
+ <vscale x 2 x i64> %0,
+ i32 %1,
+ <vscale x 2 x i32> %2,
+ <vscale x 2 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
+ <vscale x 4 x i64>,
+ i32,
+ <vscale x 4 x i32>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v20
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
+ <vscale x 4 x i64> %0,
+ i32 %1,
+ <vscale x 4 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
+ <vscale x 4 x i64>,
+ i32,
+ <vscale x 4 x i32>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i64> @intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v20, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
+ <vscale x 4 x i64> %0,
+ i32 %1,
+ <vscale x 4 x i32> %2,
+ <vscale x 4 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
+ <vscale x 8 x i64>,
+ i32,
+ <vscale x 8 x i32>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v28
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
+ <vscale x 8 x i64> %0,
+ i32 %1,
+ <vscale x 8 x i32> %2,
+ i64 %3)
+
+ ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
+ <vscale x 8 x i64>,
+ i32,
+ <vscale x 8 x i32>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i64> @intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a3, zero, e32,m4,ta,mu
+; CHECK-NEXT: vle32.v v28, (a1)
+; CHECK-NEXT: vsetvli a1, a2, e32,m4,ta,mu
+; CHECK-NEXT: vwmaccus.vx v16, a0, v28, v0.t
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
+ <vscale x 8 x i64> %0,
+ i32 %1,
+ <vscale x 8 x i32> %2,
+ <vscale x 8 x i1> %3,
+ i64 %4)
+
+ ret <vscale x 8 x i64> %a
+}
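
Two details of the CHECK lines deserve a gloss. With --riscv-no-aliases the printer emits canonical instructions, so ret shows up as jalr zero, 0(ra). And in the widest cases (nxv32i16, nxv16i32, nxv8i64) the doubled-width result group is large enough that the narrow vector operand cannot also be passed in vector argument registers, so it arrives indirectly through a pointer in a1. The generated prologue from those tests reads as follows (same code as above, with comments added as a reading aid, not new output):

vsetvli a3, zero, e8,m4,ta,mu   # vl = VLMAX, just for the operand load
vle8.v  v28, (a1)               # fetch the by-reference narrow operand
vsetvli a1, a2, e8,m4,ta,mu     # restore the caller-requested vl from a2
vwmaccus.vx v16, a0, v28        # v16 group (2*SEW) += a0 * v28
jalr    zero, 0(ra)             # canonical spelling of ret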