[llvm] c5d3b23 - [RISCV] Add support for matching vwmaccsu/vwmaccus from fixed vectors

Ben Shi via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 9 17:59:42 PST 2022


Author: Chenbing.Zheng
Date: 2022-02-10T01:59:31Z
New Revision: c5d3b231e0b5728f2f4ffbc3234321eb3c5aad0b

URL: https://github.com/llvm/llvm-project/commit/c5d3b231e0b5728f2f4ffbc3234321eb3c5aad0b
DIFF: https://github.com/llvm/llvm-project/commit/c5d3b231e0b5728f2f4ffbc3234321eb3c5aad0b.diff

LOG: [RISCV] Add support for matching vwmaccsu/vwmaccus from fixed vectors

Add patterns to match an add of a widening multiply to vwmaccsu/vwmaccus,
where the two multiplicands are sign-extended and zero-extended respectively.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D119314

Added: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccsu.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccus.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 0c0fd6958def9..c6059b505e915 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -274,6 +274,12 @@ def riscv_vwmulu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
   return N->hasOneUse();
 }]>;
 
+def riscv_vwmulsu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
+                                      (riscv_vwmulsu_vl node:$A, node:$B, node:$C,
+                                                        node:$D), [{
+  return N->hasOneUse();
+}]>;
+
 foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR",
                 "FADD", "SEQ_FADD", "FMIN", "FMAX"] in
   def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>;
@@ -940,6 +946,15 @@ foreach vtiTowti = AllWidenableIntVectors in {
             (!cast<Instruction>("PseudoVWMACCU_VV_" # vti.LMul.MX)
                  wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                  GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+  def : Pat<(wti.Vector
+             (riscv_add_vl wti.RegClass:$rd,
+                           (riscv_vwmulsu_vl_oneuse vti.RegClass:$rs1,
+                                                    (vti.Vector vti.RegClass:$rs2),
+                                                    (vti.Mask true_mask), VLOpFrag),
+                           (vti.Mask true_mask), VLOpFrag)),
+            (!cast<Instruction>("PseudoVWMACCSU_VV_" # vti.LMul.MX)
+                 wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
+                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
 
   def : Pat<(wti.Vector
              (riscv_add_vl wti.RegClass:$rd,
@@ -959,6 +974,24 @@ foreach vtiTowti = AllWidenableIntVectors in {
             (!cast<Instruction>("PseudoVWMACCU_VX_" # vti.LMul.MX)
                  wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                  GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+  def : Pat<(wti.Vector
+             (riscv_add_vl wti.RegClass:$rd,
+                           (riscv_vwmulsu_vl_oneuse (SplatPat XLenVT:$rs1),
+                                                    (vti.Vector vti.RegClass:$rs2),
+                                                    (vti.Mask true_mask), VLOpFrag),
+                           (vti.Mask true_mask), VLOpFrag)),
+            (!cast<Instruction>("PseudoVWMACCSU_VX_" # vti.LMul.MX)
+                 wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
+                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
+  def : Pat<(wti.Vector
+             (riscv_add_vl wti.RegClass:$rd,
+                           (riscv_vwmulsu_vl_oneuse (vti.Vector vti.RegClass:$rs1),
+                                                    (SplatPat XLenVT:$rs2),
+                                                    (vti.Mask true_mask), VLOpFrag),
+                           (vti.Mask true_mask), VLOpFrag)),
+            (!cast<Instruction>("PseudoVWMACCUS_VX_" # vti.LMul.MX)
+                 wti.RegClass:$rd, vti.ScalarRegClass:$rs2, vti.RegClass:$rs1,
+                 GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
 }
 
 // 12.15. Vector Integer Merge Instructions

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccsu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccsu.ll
new file mode 100644
index 0000000000000..35b0a4b9961ba
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccsu.ll
@@ -0,0 +1,519 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+
+define <2 x i16> @vwmaccsu_v2i16(<2 x i8>* %x, <2 x i8>* %y, <2 x i16> %z) {
+; CHECK-LABEL: vwmaccsu_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v10, (a1)
+; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = load <2 x i8>, <2 x i8>* %x
+  %b = load <2 x i8>, <2 x i8>* %y
+  %c = sext <2 x i8> %a to <2 x i16>
+  %d = zext <2 x i8> %b to <2 x i16>
+  %e = mul <2 x i16> %c, %d
+  %f = add <2 x i16> %e, %z
+  ret <2 x i16> %f
+}
+
+define <4 x i16> @vwmaccsu_v4i16(<4 x i8>* %x, <4 x i8>* %y, <4 x i16> %z) {
+; CHECK-LABEL: vwmaccsu_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v10, (a1)
+; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = load <4 x i8>, <4 x i8>* %x
+  %b = load <4 x i8>, <4 x i8>* %y
+  %c = sext <4 x i8> %a to <4 x i16>
+  %d = zext <4 x i8> %b to <4 x i16>
+  %e = mul <4 x i16> %c, %d
+  %f = add <4 x i16> %e, %z
+  ret <4 x i16> %f
+}
+
+define <2 x i32> @vwmaccsu_v2i32(<2 x i16>* %x, <2 x i16>* %y, <2 x i32> %z) {
+; CHECK-LABEL: vwmaccsu_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT:    vle16.v v9, (a0)
+; CHECK-NEXT:    vle16.v v10, (a1)
+; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = load <2 x i16>, <2 x i16>* %x
+  %b = load <2 x i16>, <2 x i16>* %y
+  %c = sext <2 x i16> %a to <2 x i32>
+  %d = zext <2 x i16> %b to <2 x i32>
+  %e = mul <2 x i32> %c, %d
+  %f = add <2 x i32> %e, %z
+  ret <2 x i32> %f
+}
+
+define <8 x i16> @vwmaccsu_v8i16(<8 x i8>* %x, <8 x i8>* %y, <8 x i16> %z) {
+; CHECK-LABEL: vwmaccsu_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v10, (a1)
+; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = load <8 x i8>, <8 x i8>* %x
+  %b = load <8 x i8>, <8 x i8>* %y
+  %c = sext <8 x i8> %a to <8 x i16>
+  %d = zext <8 x i8> %b to <8 x i16>
+  %e = mul <8 x i16> %c, %d
+  %f = add <8 x i16> %e, %z
+  ret <8 x i16> %f
+}
+
+define <4 x i32> @vwmaccsu_v4i32(<4 x i16>* %x, <4 x i16>* %y, <4 x i32> %z) {
+; CHECK-LABEL: vwmaccsu_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vle16.v v9, (a0)
+; CHECK-NEXT:    vle16.v v10, (a1)
+; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = load <4 x i16>, <4 x i16>* %x
+  %b = load <4 x i16>, <4 x i16>* %y
+  %c = sext <4 x i16> %a to <4 x i32>
+  %d = zext <4 x i16> %b to <4 x i32>
+  %e = mul <4 x i32> %c, %d
+  %f = add <4 x i32> %e, %z
+  ret <4 x i32> %f
+}
+
+define <2 x i64> @vwmaccsu_v2i64(<2 x i32>* %x, <2 x i32>* %y, <2 x i64> %z) {
+; CHECK-LABEL: vwmaccsu_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    vle32.v v9, (a0)
+; CHECK-NEXT:    vle32.v v10, (a1)
+; CHECK-NEXT:    vwmaccsu.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = load <2 x i32>, <2 x i32>* %x
+  %b = load <2 x i32>, <2 x i32>* %y
+  %c = sext <2 x i32> %a to <2 x i64>
+  %d = zext <2 x i32> %b to <2 x i64>
+  %e = mul <2 x i64> %c, %d
+  %f = add <2 x i64> %e, %z
+  ret <2 x i64> %f
+}
+
+define <16 x i16> @vwmaccsu_v16i16(<16 x i8>* %x, <16 x i8>* %y, <16 x i16> %z) {
+; CHECK-LABEL: vwmaccsu_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT:    vle8.v v10, (a0)
+; CHECK-NEXT:    vle8.v v11, (a1)
+; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
+; CHECK-NEXT:    ret
+  %a = load <16 x i8>, <16 x i8>* %x
+  %b = load <16 x i8>, <16 x i8>* %y
+  %c = sext <16 x i8> %a to <16 x i16>
+  %d = zext <16 x i8> %b to <16 x i16>
+  %e = mul <16 x i16> %c, %d
+  %f = add <16 x i16> %e, %z
+  ret <16 x i16> %f
+}
+
+define <8 x i32> @vwmaccsu_v8i32(<8 x i16>* %x, <8 x i16>* %y, <8 x i32> %z) {
+; CHECK-LABEL: vwmaccsu_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vle16.v v11, (a1)
+; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
+; CHECK-NEXT:    ret
+  %a = load <8 x i16>, <8 x i16>* %x
+  %b = load <8 x i16>, <8 x i16>* %y
+  %c = sext <8 x i16> %a to <8 x i32>
+  %d = zext <8 x i16> %b to <8 x i32>
+  %e = mul <8 x i32> %c, %d
+  %f = add <8 x i32> %e, %z
+  ret <8 x i32> %f
+}
+
+define <4 x i64> @vwmaccsu_v4i64(<4 x i32>* %x, <4 x i32>* %y, <4 x i64> %z) {
+; CHECK-LABEL: vwmaccsu_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v10, (a0)
+; CHECK-NEXT:    vle32.v v11, (a1)
+; CHECK-NEXT:    vwmaccsu.vv v8, v10, v11
+; CHECK-NEXT:    ret
+  %a = load <4 x i32>, <4 x i32>* %x
+  %b = load <4 x i32>, <4 x i32>* %y
+  %c = sext <4 x i32> %a to <4 x i64>
+  %d = zext <4 x i32> %b to <4 x i64>
+  %e = mul <4 x i64> %c, %d
+  %f = add <4 x i64> %e, %z
+  ret <4 x i64> %f
+}
+
+define <32 x i16> @vwmaccsu_v32i16(<32 x i8>* %x, <32 x i8>* %y, <32 x i16> %z) {
+; CHECK-LABEL: vwmaccsu_v32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 32
+; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT:    vle8.v v12, (a0)
+; CHECK-NEXT:    vle8.v v14, (a1)
+; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
+; CHECK-NEXT:    ret
+  %a = load <32 x i8>, <32 x i8>* %x
+  %b = load <32 x i8>, <32 x i8>* %y
+  %c = sext <32 x i8> %a to <32 x i16>
+  %d = zext <32 x i8> %b to <32 x i16>
+  %e = mul <32 x i16> %c, %d
+  %f = add <32 x i16> %e, %z
+  ret <32 x i16> %f
+}
+
+define <16 x i32> @vwmaccsu_v16i32(<16 x i16>* %x, <16 x i16>* %y, <16 x i32> %z) {
+; CHECK-LABEL: vwmaccsu_v16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT:    vle16.v v12, (a0)
+; CHECK-NEXT:    vle16.v v14, (a1)
+; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
+; CHECK-NEXT:    ret
+  %a = load <16 x i16>, <16 x i16>* %x
+  %b = load <16 x i16>, <16 x i16>* %y
+  %c = sext <16 x i16> %a to <16 x i32>
+  %d = zext <16 x i16> %b to <16 x i32>
+  %e = mul <16 x i32> %c, %d
+  %f = add <16 x i32> %e, %z
+  ret <16 x i32> %f
+}
+
+define <8 x i64> @vwmaccsu_v8i64(<8 x i32>* %x, <8 x i32>* %y, <8 x i64> %z) {
+; CHECK-LABEL: vwmaccsu_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT:    vle32.v v12, (a0)
+; CHECK-NEXT:    vle32.v v14, (a1)
+; CHECK-NEXT:    vwmaccsu.vv v8, v12, v14
+; CHECK-NEXT:    ret
+  %a = load <8 x i32>, <8 x i32>* %x
+  %b = load <8 x i32>, <8 x i32>* %y
+  %c = sext <8 x i32> %a to <8 x i64>
+  %d = zext <8 x i32> %b to <8 x i64>
+  %e = mul <8 x i64> %c, %d
+  %f = add <8 x i64> %e, %z
+  ret <8 x i64> %f
+}
+
+define <64 x i16> @vwmaccsu_v64i16(<64 x i8>* %x, <64 x i8>* %y, <64 x i16> %z) {
+; CHECK-LABEL: vwmaccsu_v64i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 64
+; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
+; CHECK-NEXT:    vle8.v v16, (a0)
+; CHECK-NEXT:    vle8.v v20, (a1)
+; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
+; CHECK-NEXT:    ret
+  %a = load <64 x i8>, <64 x i8>* %x
+  %b = load <64 x i8>, <64 x i8>* %y
+  %c = sext <64 x i8> %a to <64 x i16>
+  %d = zext <64 x i8> %b to <64 x i16>
+  %e = mul <64 x i16> %c, %d
+  %f = add <64 x i16> %e, %z
+  ret <64 x i16> %f
+}
+
+define <32 x i32> @vwmaccsu_v32i32(<32 x i16>* %x, <32 x i16>* %y, <32 x i32> %z) {
+; CHECK-LABEL: vwmaccsu_v32i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 32
+; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT:    vle16.v v16, (a0)
+; CHECK-NEXT:    vle16.v v20, (a1)
+; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
+; CHECK-NEXT:    ret
+  %a = load <32 x i16>, <32 x i16>* %x
+  %b = load <32 x i16>, <32 x i16>* %y
+  %c = sext <32 x i16> %a to <32 x i32>
+  %d = zext <32 x i16> %b to <32 x i32>
+  %e = mul <32 x i32> %c, %d
+  %f = add <32 x i32> %e, %z
+  ret <32 x i32> %f
+}
+
+define <16 x i64> @vwmaccsu_v16i64(<16 x i32>* %x, <16 x i32>* %y, <16 x i64> %z) {
+; CHECK-LABEL: vwmaccsu_v16i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT:    vle32.v v16, (a0)
+; CHECK-NEXT:    vle32.v v20, (a1)
+; CHECK-NEXT:    vwmaccsu.vv v8, v16, v20
+; CHECK-NEXT:    ret
+  %a = load <16 x i32>, <16 x i32>* %x
+  %b = load <16 x i32>, <16 x i32>* %y
+  %c = sext <16 x i32> %a to <16 x i64>
+  %d = zext <16 x i32> %b to <16 x i64>
+  %e = mul <16 x i64> %c, %d
+  %f = add <16 x i64> %e, %z
+  ret <16 x i64> %f
+}
+
+define <2 x i16> @vwmaccsu_vx_v2i16(<2 x i8>* %x, i8 %y, <2 x i16> %z) {
+; CHECK-LABEL: vwmaccsu_vx_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vwmaccsu.vx v8, a1, v9
+; CHECK-NEXT:    ret
+  %a = load <2 x i8>, <2 x i8>* %x
+  %b = insertelement <2 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <2 x i8> %b, <2 x i8> poison, <2 x i32> zeroinitializer
+  %d = zext <2 x i8> %a to <2 x i16>
+  %e = sext <2 x i8> %c to <2 x i16>
+  %f = mul <2 x i16> %d, %e
+  %g = add <2 x i16> %f, %z
+  ret <2 x i16> %g
+}
+
+define <4 x i16> @vwmaccsu_vx_v4i16(<4 x i8>* %x, i8 %y, <4 x i16> %z) {
+; CHECK-LABEL: vwmaccsu_vx_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vwmaccsu.vx v8, a1, v9
+; CHECK-NEXT:    ret
+  %a = load <4 x i8>, <4 x i8>* %x
+  %b = insertelement <4 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <4 x i8> %b, <4 x i8> poison, <4 x i32> zeroinitializer
+  %d = zext <4 x i8> %a to <4 x i16>
+  %e = sext <4 x i8> %c to <4 x i16>
+  %f = mul <4 x i16> %d, %e
+  %g = add <4 x i16> %f, %z
+  ret <4 x i16> %g
+}
+
+define <2 x i32> @vwmaccsu_vx_v2i32(<2 x i16>* %x, i16 %y, <2 x i32> %z) {
+; CHECK-LABEL: vwmaccsu_vx_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT:    vle16.v v9, (a0)
+; CHECK-NEXT:    vwmaccsu.vx v8, a1, v9
+; CHECK-NEXT:    ret
+  %a = load <2 x i16>, <2 x i16>* %x
+  %b = insertelement <2 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <2 x i16> %b, <2 x i16> poison, <2 x i32> zeroinitializer
+  %d = zext <2 x i16> %a to <2 x i32>
+  %e = sext <2 x i16> %c to <2 x i32>
+  %f = mul <2 x i32> %d, %e
+  %g = add <2 x i32> %f, %z
+  ret <2 x i32> %g
+}
+
+define <8 x i16> @vwmaccsu_vx_v8i16(<8 x i8>* %x, i8 %y, <8 x i16> %z) {
+; CHECK-LABEL: vwmaccsu_vx_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vwmaccsu.vx v8, a1, v9
+; CHECK-NEXT:    ret
+  %a = load <8 x i8>, <8 x i8>* %x
+  %b = insertelement <8 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
+  %d = zext <8 x i8> %a to <8 x i16>
+  %e = sext <8 x i8> %c to <8 x i16>
+  %f = mul <8 x i16> %d, %e
+  %g = add <8 x i16> %f, %z
+  ret <8 x i16> %g
+}
+
+define <4 x i32> @vwmaccsu_vx_v4i32(<4 x i16>* %x, i16 %y, <4 x i32> %z) {
+; CHECK-LABEL: vwmaccsu_vx_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vle16.v v9, (a0)
+; CHECK-NEXT:    vwmaccsu.vx v8, a1, v9
+; CHECK-NEXT:    ret
+  %a = load <4 x i16>, <4 x i16>* %x
+  %b = insertelement <4 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <4 x i16> %b, <4 x i16> poison, <4 x i32> zeroinitializer
+  %d = zext <4 x i16> %a to <4 x i32>
+  %e = sext <4 x i16> %c to <4 x i32>
+  %f = mul <4 x i32> %d, %e
+  %g = add <4 x i32> %f, %z
+  ret <4 x i32> %g
+}
+
+define <2 x i64> @vwmaccsu_vx_v2i64(<2 x i32>* %x, i32 %y, <2 x i64> %z) {
+; CHECK-LABEL: vwmaccsu_vx_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    vle32.v v9, (a0)
+; CHECK-NEXT:    vwmaccsu.vx v8, a1, v9
+; CHECK-NEXT:    ret
+  %a = load <2 x i32>, <2 x i32>* %x
+  %b = insertelement <2 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <2 x i32> %b, <2 x i32> poison, <2 x i32> zeroinitializer
+  %d = zext <2 x i32> %a to <2 x i64>
+  %e = sext <2 x i32> %c to <2 x i64>
+  %f = mul <2 x i64> %d, %e
+  %g = add <2 x i64> %f, %z
+  ret <2 x i64> %g
+}
+
+define <16 x i16> @vwmaccsu_vx_v16i16(<16 x i8>* %x, i8 %y, <16 x i16> %z) {
+; CHECK-LABEL: vwmaccsu_vx_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT:    vle8.v v10, (a0)
+; CHECK-NEXT:    vwmaccsu.vx v8, a1, v10
+; CHECK-NEXT:    ret
+  %a = load <16 x i8>, <16 x i8>* %x
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
+  %d = zext <16 x i8> %a to <16 x i16>
+  %e = sext <16 x i8> %c to <16 x i16>
+  %f = mul <16 x i16> %d, %e
+  %g = add <16 x i16> %f, %z
+  ret <16 x i16> %g
+}
+
+define <8 x i32> @vwmaccsu_vx_v8i32(<8 x i16>* %x, i16 %y, <8 x i32> %z) {
+; CHECK-LABEL: vwmaccsu_vx_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vwmaccsu.vx v8, a1, v10
+; CHECK-NEXT:    ret
+  %a = load <8 x i16>, <8 x i16>* %x
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
+  %d = zext <8 x i16> %a to <8 x i32>
+  %e = sext <8 x i16> %c to <8 x i32>
+  %f = mul <8 x i32> %d, %e
+  %g = add <8 x i32> %f, %z
+  ret <8 x i32> %g
+}
+
+define <4 x i64> @vwmaccsu_vx_v4i64(<4 x i32>* %x, i32 %y, <4 x i64> %z) {
+; CHECK-LABEL: vwmaccsu_vx_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v10, (a0)
+; CHECK-NEXT:    vwmaccsu.vx v8, a1, v10
+; CHECK-NEXT:    ret
+  %a = load <4 x i32>, <4 x i32>* %x
+  %b = insertelement <4 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
+  %d = zext <4 x i32> %a to <4 x i64>
+  %e = sext <4 x i32> %c to <4 x i64>
+  %f = mul <4 x i64> %d, %e
+  %g = add <4 x i64> %f, %z
+  ret <4 x i64> %g
+}
+
+define <32 x i16> @vwmaccsu_vx_v32i16(<32 x i8>* %x, i8 %y, <32 x i16> %z) {
+; CHECK-LABEL: vwmaccsu_vx_v32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 32
+; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT:    vle8.v v12, (a0)
+; CHECK-NEXT:    vwmaccsu.vx v8, a1, v12
+; CHECK-NEXT:    ret
+  %a = load <32 x i8>, <32 x i8>* %x
+  %b = insertelement <32 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
+  %d = zext <32 x i8> %a to <32 x i16>
+  %e = sext <32 x i8> %c to <32 x i16>
+  %f = mul <32 x i16> %d, %e
+  %g = add <32 x i16> %f, %z
+  ret <32 x i16> %g
+}
+
+define <16 x i32> @vwmaccsu_vx_v16i32(<16 x i16>* %x, i16 %y, <16 x i32> %z) {
+; CHECK-LABEL: vwmaccsu_vx_v16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT:    vle16.v v12, (a0)
+; CHECK-NEXT:    vwmaccsu.vx v8, a1, v12
+; CHECK-NEXT:    ret
+  %a = load <16 x i16>, <16 x i16>* %x
+  %b = insertelement <16 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <16 x i16> %b, <16 x i16> poison, <16 x i32> zeroinitializer
+  %d = zext <16 x i16> %a to <16 x i32>
+  %e = sext <16 x i16> %c to <16 x i32>
+  %f = mul <16 x i32> %d, %e
+  %g = add <16 x i32> %f, %z
+  ret <16 x i32> %g
+}
+
+define <8 x i64> @vwmaccsu_vx_v8i64(<8 x i32>* %x, i32 %y, <8 x i64> %z) {
+; CHECK-LABEL: vwmaccsu_vx_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT:    vle32.v v12, (a0)
+; CHECK-NEXT:    vwmaccsu.vx v8, a1, v12
+; CHECK-NEXT:    ret
+  %a = load <8 x i32>, <8 x i32>* %x
+  %b = insertelement <8 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <8 x i32> %b, <8 x i32> poison, <8 x i32> zeroinitializer
+  %d = zext <8 x i32> %a to <8 x i64>
+  %e = sext <8 x i32> %c to <8 x i64>
+  %f = mul <8 x i64> %d, %e
+  %g = add <8 x i64> %f, %z
+  ret <8 x i64> %g
+}
+
+define <64 x i16> @vwmaccsu_vx_v64i16(<64 x i8>* %x, i8 %y, <64 x i16> %z) {
+; CHECK-LABEL: vwmaccsu_vx_v64i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 64
+; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
+; CHECK-NEXT:    vle8.v v16, (a0)
+; CHECK-NEXT:    vwmaccsu.vx v8, a1, v16
+; CHECK-NEXT:    ret
+  %a = load <64 x i8>, <64 x i8>* %x
+  %b = insertelement <64 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
+  %d = zext <64 x i8> %a to <64 x i16>
+  %e = sext <64 x i8> %c to <64 x i16>
+  %f = mul <64 x i16> %d, %e
+  %g = add <64 x i16> %f, %z
+  ret <64 x i16> %g
+}
+
+define <32 x i32> @vwmaccsu_vx_v32i32(<32 x i16>* %x, i16 %y, <32 x i32> %z) {
+; CHECK-LABEL: vwmaccsu_vx_v32i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 32
+; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT:    vle16.v v16, (a0)
+; CHECK-NEXT:    vwmaccsu.vx v8, a1, v16
+; CHECK-NEXT:    ret
+  %a = load <32 x i16>, <32 x i16>* %x
+  %b = insertelement <32 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <32 x i16> %b, <32 x i16> poison, <32 x i32> zeroinitializer
+  %d = zext <32 x i16> %a to <32 x i32>
+  %e = sext <32 x i16> %c to <32 x i32>
+  %f = mul <32 x i32> %d, %e
+  %g = add <32 x i32> %f, %z
+  ret <32 x i32> %g
+}
+
+define <16 x i64> @vwmaccsu_vx_v16i64(<16 x i32>* %x, i32 %y, <16 x i64> %z) {
+; CHECK-LABEL: vwmaccsu_vx_v16i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT:    vle32.v v16, (a0)
+; CHECK-NEXT:    vwmaccsu.vx v8, a1, v16
+; CHECK-NEXT:    ret
+  %a = load <16 x i32>, <16 x i32>* %x
+  %b = insertelement <16 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <16 x i32> %b, <16 x i32> poison, <16 x i32> zeroinitializer
+  %d = zext <16 x i32> %a to <16 x i64>
+  %e = sext <16 x i32> %c to <16 x i64>
+  %f = mul <16 x i64> %d, %e
+  %g = add <16 x i64> %f, %z
+  ret <16 x i64> %g
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccus.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccus.ll
new file mode 100644
index 0000000000000..9edc2cb83fb0e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmaccus.ll
@@ -0,0 +1,261 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
+
+define <2 x i16> @vwmaccus_vx_v2i16(<2 x i8>* %x, i8 %y, <2 x i16> %z) {
+; CHECK-LABEL: vwmaccus_vx_v2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vwmaccus.vx v8, a1, v9
+; CHECK-NEXT:    ret
+  %a = load <2 x i8>, <2 x i8>* %x
+  %b = insertelement <2 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <2 x i8> %b, <2 x i8> poison, <2 x i32> zeroinitializer
+  %d = sext <2 x i8> %a to <2 x i16>
+  %e = zext <2 x i8> %c to <2 x i16>
+  %f = mul <2 x i16> %d, %e
+  %g = add <2 x i16> %f, %z
+  ret <2 x i16> %g
+}
+
+define <4 x i16> @vwmaccus_vx_v4i16(<4 x i8>* %x, i8 %y, <4 x i16> %z) {
+; CHECK-LABEL: vwmaccus_vx_v4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vwmaccus.vx v8, a1, v9
+; CHECK-NEXT:    ret
+  %a = load <4 x i8>, <4 x i8>* %x
+  %b = insertelement <4 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <4 x i8> %b, <4 x i8> poison, <4 x i32> zeroinitializer
+  %d = sext <4 x i8> %a to <4 x i16>
+  %e = zext <4 x i8> %c to <4 x i16>
+  %f = mul <4 x i16> %d, %e
+  %g = add <4 x i16> %f, %z
+  ret <4 x i16> %g
+}
+
+define <2 x i32> @vwmaccus_vx_v2i32(<2 x i16>* %x, i16 %y, <2 x i32> %z) {
+; CHECK-LABEL: vwmaccus_vx_v2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT:    vle16.v v9, (a0)
+; CHECK-NEXT:    vwmaccus.vx v8, a1, v9
+; CHECK-NEXT:    ret
+  %a = load <2 x i16>, <2 x i16>* %x
+  %b = insertelement <2 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <2 x i16> %b, <2 x i16> poison, <2 x i32> zeroinitializer
+  %d = sext <2 x i16> %a to <2 x i32>
+  %e = zext <2 x i16> %c to <2 x i32>
+  %f = mul <2 x i32> %d, %e
+  %g = add <2 x i32> %f, %z
+  ret <2 x i32> %g
+}
+
+define <8 x i16> @vwmaccus_vx_v8i16(<8 x i8>* %x, i8 %y, <8 x i16> %z) {
+; CHECK-LABEL: vwmaccus_vx_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vwmaccus.vx v8, a1, v9
+; CHECK-NEXT:    ret
+  %a = load <8 x i8>, <8 x i8>* %x
+  %b = insertelement <8 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <8 x i8> %b, <8 x i8> poison, <8 x i32> zeroinitializer
+  %d = sext <8 x i8> %a to <8 x i16>
+  %e = zext <8 x i8> %c to <8 x i16>
+  %f = mul <8 x i16> %d, %e
+  %g = add <8 x i16> %f, %z
+  ret <8 x i16> %g
+}
+
+define <4 x i32> @vwmaccus_vx_v4i32(<4 x i16>* %x, i16 %y, <4 x i32> %z) {
+; CHECK-LABEL: vwmaccus_vx_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT:    vle16.v v9, (a0)
+; CHECK-NEXT:    vwmaccus.vx v8, a1, v9
+; CHECK-NEXT:    ret
+  %a = load <4 x i16>, <4 x i16>* %x
+  %b = insertelement <4 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <4 x i16> %b, <4 x i16> poison, <4 x i32> zeroinitializer
+  %d = sext <4 x i16> %a to <4 x i32>
+  %e = zext <4 x i16> %c to <4 x i32>
+  %f = mul <4 x i32> %d, %e
+  %g = add <4 x i32> %f, %z
+  ret <4 x i32> %g
+}
+
+define <2 x i64> @vwmaccus_vx_v2i64(<2 x i32>* %x, i32 %y, <2 x i64> %z) {
+; CHECK-LABEL: vwmaccus_vx_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT:    vle32.v v9, (a0)
+; CHECK-NEXT:    vwmaccus.vx v8, a1, v9
+; CHECK-NEXT:    ret
+  %a = load <2 x i32>, <2 x i32>* %x
+  %b = insertelement <2 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <2 x i32> %b, <2 x i32> poison, <2 x i32> zeroinitializer
+  %d = sext <2 x i32> %a to <2 x i64>
+  %e = zext <2 x i32> %c to <2 x i64>
+  %f = mul <2 x i64> %d, %e
+  %g = add <2 x i64> %f, %z
+  ret <2 x i64> %g
+}
+
+define <16 x i16> @vwmaccus_vx_v16i16(<16 x i8>* %x, i8 %y, <16 x i16> %z) {
+; CHECK-LABEL: vwmaccus_vx_v16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT:    vle8.v v10, (a0)
+; CHECK-NEXT:    vwmaccus.vx v8, a1, v10
+; CHECK-NEXT:    ret
+  %a = load <16 x i8>, <16 x i8>* %x
+  %b = insertelement <16 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <16 x i8> %b, <16 x i8> poison, <16 x i32> zeroinitializer
+  %d = sext <16 x i8> %a to <16 x i16>
+  %e = zext <16 x i8> %c to <16 x i16>
+  %f = mul <16 x i16> %d, %e
+  %g = add <16 x i16> %f, %z
+  ret <16 x i16> %g
+}
+
+define <8 x i32> @vwmaccus_vx_v8i32(<8 x i16>* %x, i16 %y, <8 x i32> %z) {
+; CHECK-LABEL: vwmaccus_vx_v8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT:    vle16.v v10, (a0)
+; CHECK-NEXT:    vwmaccus.vx v8, a1, v10
+; CHECK-NEXT:    ret
+  %a = load <8 x i16>, <8 x i16>* %x
+  %b = insertelement <8 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <8 x i16> %b, <8 x i16> poison, <8 x i32> zeroinitializer
+  %d = sext <8 x i16> %a to <8 x i32>
+  %e = zext <8 x i16> %c to <8 x i32>
+  %f = mul <8 x i32> %d, %e
+  %g = add <8 x i32> %f, %z
+  ret <8 x i32> %g
+}
+
+define <4 x i64> @vwmaccus_vx_v4i64(<4 x i32>* %x, i32 %y, <4 x i64> %z) {
+; CHECK-LABEL: vwmaccus_vx_v4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v10, (a0)
+; CHECK-NEXT:    vwmaccus.vx v8, a1, v10
+; CHECK-NEXT:    ret
+  %a = load <4 x i32>, <4 x i32>* %x
+  %b = insertelement <4 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <4 x i32> %b, <4 x i32> poison, <4 x i32> zeroinitializer
+  %d = sext <4 x i32> %a to <4 x i64>
+  %e = zext <4 x i32> %c to <4 x i64>
+  %f = mul <4 x i64> %d, %e
+  %g = add <4 x i64> %f, %z
+  ret <4 x i64> %g
+}
+
+define <32 x i16> @vwmaccus_vx_v32i16(<32 x i8>* %x, i8 %y, <32 x i16> %z) {
+; CHECK-LABEL: vwmaccus_vx_v32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 32
+; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT:    vle8.v v12, (a0)
+; CHECK-NEXT:    vwmaccus.vx v8, a1, v12
+; CHECK-NEXT:    ret
+  %a = load <32 x i8>, <32 x i8>* %x
+  %b = insertelement <32 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <32 x i8> %b, <32 x i8> poison, <32 x i32> zeroinitializer
+  %d = sext <32 x i8> %a to <32 x i16>
+  %e = zext <32 x i8> %c to <32 x i16>
+  %f = mul <32 x i16> %d, %e
+  %g = add <32 x i16> %f, %z
+  ret <32 x i16> %g
+}
+
+define <16 x i32> @vwmaccus_vx_v16i32(<16 x i16>* %x, i16 %y, <16 x i32> %z) {
+; CHECK-LABEL: vwmaccus_vx_v16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT:    vle16.v v12, (a0)
+; CHECK-NEXT:    vwmaccus.vx v8, a1, v12
+; CHECK-NEXT:    ret
+  %a = load <16 x i16>, <16 x i16>* %x
+  %b = insertelement <16 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <16 x i16> %b, <16 x i16> poison, <16 x i32> zeroinitializer
+  %d = sext <16 x i16> %a to <16 x i32>
+  %e = zext <16 x i16> %c to <16 x i32>
+  %f = mul <16 x i32> %d, %e
+  %g = add <16 x i32> %f, %z
+  ret <16 x i32> %g
+}
+
+define <8 x i64> @vwmaccus_vx_v8i64(<8 x i32>* %x, i32 %y, <8 x i64> %z) {
+; CHECK-LABEL: vwmaccus_vx_v8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT:    vle32.v v12, (a0)
+; CHECK-NEXT:    vwmaccus.vx v8, a1, v12
+; CHECK-NEXT:    ret
+  %a = load <8 x i32>, <8 x i32>* %x
+  %b = insertelement <8 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <8 x i32> %b, <8 x i32> poison, <8 x i32> zeroinitializer
+  %d = sext <8 x i32> %a to <8 x i64>
+  %e = zext <8 x i32> %c to <8 x i64>
+  %f = mul <8 x i64> %d, %e
+  %g = add <8 x i64> %f, %z
+  ret <8 x i64> %g
+}
+
+define <64 x i16> @vwmaccus_vx_v64i16(<64 x i8>* %x, i8 %y, <64 x i16> %z) {
+; CHECK-LABEL: vwmaccus_vx_v64i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 64
+; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
+; CHECK-NEXT:    vle8.v v16, (a0)
+; CHECK-NEXT:    vwmaccus.vx v8, a1, v16
+; CHECK-NEXT:    ret
+  %a = load <64 x i8>, <64 x i8>* %x
+  %b = insertelement <64 x i8> poison, i8 %y, i32 0
+  %c = shufflevector <64 x i8> %b, <64 x i8> poison, <64 x i32> zeroinitializer
+  %d = sext <64 x i8> %a to <64 x i16>
+  %e = zext <64 x i8> %c to <64 x i16>
+  %f = mul <64 x i16> %d, %e
+  %g = add <64 x i16> %f, %z
+  ret <64 x i16> %g
+}
+
+define <32 x i32> @vwmaccus_vx_v32i32(<32 x i16>* %x, i16 %y, <32 x i32> %z) {
+; CHECK-LABEL: vwmaccus_vx_v32i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 32
+; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
+; CHECK-NEXT:    vle16.v v16, (a0)
+; CHECK-NEXT:    vwmaccus.vx v8, a1, v16
+; CHECK-NEXT:    ret
+  %a = load <32 x i16>, <32 x i16>* %x
+  %b = insertelement <32 x i16> poison, i16 %y, i32 0
+  %c = shufflevector <32 x i16> %b, <32 x i16> poison, <32 x i32> zeroinitializer
+  %d = sext <32 x i16> %a to <32 x i32>
+  %e = zext <32 x i16> %c to <32 x i32>
+  %f = mul <32 x i32> %d, %e
+  %g = add <32 x i32> %f, %z
+  ret <32 x i32> %g
+}
+
+define <16 x i64> @vwmaccus_vx_v16i64(<16 x i32>* %x, i32 %y, <16 x i64> %z) {
+; CHECK-LABEL: vwmaccus_vx_v16i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT:    vle32.v v16, (a0)
+; CHECK-NEXT:    vwmaccus.vx v8, a1, v16
+; CHECK-NEXT:    ret
+  %a = load <16 x i32>, <16 x i32>* %x
+  %b = insertelement <16 x i32> poison, i32 %y, i64 0
+  %c = shufflevector <16 x i32> %b, <16 x i32> poison, <16 x i32> zeroinitializer
+  %d = sext <16 x i32> %a to <16 x i64>
+  %e = zext <16 x i32> %c to <16 x i64>
+  %f = mul <16 x i64> %d, %e
+  %g = add <16 x i64> %f, %z
+  ret <16 x i64> %g
+}


        


More information about the llvm-commits mailing list