[llvm] 8910ac4 - [RISCV] Add patterns for vector widening integer multiply

via llvm-commits <llvm-commits at lists.llvm.org>
Thu Mar 24 00:26:22 PDT 2022


Author: jacquesguan
Date: 2022-03-24T15:26:08+08:00
New Revision: 8910ac400c4de81bde8ca388142500e2d440276c

URL: https://github.com/llvm/llvm-project/commit/8910ac400c4de81bde8ca388142500e2d440276c
DIFF: https://github.com/llvm/llvm-project/commit/8910ac400c4de81bde8ca388142500e2d440276c.diff

LOG: [RISCV] Add patterns for vector widening integer multiply

Add SDNode patterns for the vector widening integer multiply instructions (vwmul, vwmulu, and vwmulsu).

Differential Revision: https://reviews.llvm.org/D117385
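
With these patterns, a multiply of two single-use extended operands is selected as a single widening multiply instead of two extensions plus a full-width multiply. For example, the signed case (excerpted from the new test added below):

  %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>  ; widen lhs i32 -> i64
  %vd = sext <vscale x 1 x i32> %vb to <vscale x 1 x i64>  ; widen rhs i32 -> i64
  %ve = mul <vscale x 1 x i64> %vc, %vd

now compiles to a single vwmul.vv:

  vsetvli a0, zero, e32, mf2, ta, mu
  vwmul.vv v10, v8, v9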

Added: 
    llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 46e6b37f4033a..c1c8fc56ca9a2 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -340,18 +340,25 @@ multiclass VPatNConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
   }
 }
 
-multiclass VPatWidenBinarySDNode_VV_VX_WV_WX<SDNode op, PatFrags extop, string instruction_name> {
+multiclass VPatWidenBinarySDNode_VV_VX<SDNode op, PatFrags extop1, PatFrags extop2,
+                                       string instruction_name> {
   foreach vti = AllWidenableIntVectors in {
-    def : Pat<(op (vti.Wti.Vector (extop (vti.Vti.Vector vti.Vti.RegClass:$rs2))),
-                  (vti.Wti.Vector (extop (vti.Vti.Vector vti.Vti.RegClass:$rs1)))),
+    def : Pat<(op (vti.Wti.Vector (extop1 (vti.Vti.Vector vti.Vti.RegClass:$rs2))),
+                  (vti.Wti.Vector (extop2 (vti.Vti.Vector vti.Vti.RegClass:$rs1)))),
               (!cast<Instruction>(instruction_name#"_VV_"#vti.Vti.LMul.MX)
                  vti.Vti.RegClass:$rs2, vti.Vti.RegClass:$rs1,
                  vti.Vti.AVL, vti.Vti.Log2SEW)>;
-    def : Pat<(op (vti.Wti.Vector (extop (vti.Vti.Vector vti.Vti.RegClass:$rs2))),
-                  (vti.Wti.Vector (extop (vti.Vti.Vector (SplatPat GPR:$rs1))))),
+    def : Pat<(op (vti.Wti.Vector (extop1 (vti.Vti.Vector vti.Vti.RegClass:$rs2))),
+                  (vti.Wti.Vector (extop2 (vti.Vti.Vector (SplatPat GPR:$rs1))))),
               (!cast<Instruction>(instruction_name#"_VX_"#vti.Vti.LMul.MX)
                  vti.Vti.RegClass:$rs2, GPR:$rs1,
                  vti.Vti.AVL, vti.Vti.Log2SEW)>;
+  }
+}
+
+multiclass VPatWidenBinarySDNode_WV_WX<SDNode op, PatFrags extop,
+                                       string instruction_name> {
+  foreach vti = AllWidenableIntVectors in {
     def : Pat<(op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
                   (vti.Wti.Vector (extop (vti.Vti.Vector vti.Vti.RegClass:$rs1)))),
               (!cast<Instruction>(instruction_name#"_WV_"#vti.Vti.LMul.MX)
@@ -365,6 +372,12 @@ multiclass VPatWidenBinarySDNode_VV_VX_WV_WX<SDNode op, PatFrags extop, string i
   }
 }
 
+multiclass VPatWidenBinarySDNode_VV_VX_WV_WX<SDNode op, PatFrags extop,
+                                             string instruction_name> {
+  defm : VPatWidenBinarySDNode_VV_VX<op, extop, extop, instruction_name>;
+  defm : VPatWidenBinarySDNode_WV_WX<op, extop, instruction_name>;
+}
+
 multiclass VPatWidenMulAddSDNode_VV<PatFrags extop1, PatFrags extop2, string instruction_name> {
   foreach vti = AllWidenableIntVectors in {
     def : Pat<
@@ -632,6 +645,20 @@ defm : VPatBinarySDNode_VV_VX<sdiv, "PseudoVDIV">;
 defm : VPatBinarySDNode_VV_VX<urem, "PseudoVREMU">;
 defm : VPatBinarySDNode_VV_VX<srem, "PseudoVREM">;
 
+// 12.12. Vector Widening Integer Multiply Instructions
+defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, sext_oneuse,
+                                   "PseudoVWMUL">;
+defm : VPatWidenBinarySDNode_VV_VX<mul, zext_oneuse, zext_oneuse,
+                                   "PseudoVWMULU">;
+defm : VPatWidenBinarySDNode_VV_VX<mul, anyext_oneuse, anyext_oneuse,
+                                   "PseudoVWMULU">;
+defm : VPatWidenBinarySDNode_VV_VX<mul, zext_oneuse, anyext_oneuse,
+                                   "PseudoVWMULU">;
+defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, zext_oneuse,
+                                   "PseudoVWMULSU">;
+defm : VPatWidenBinarySDNode_VV_VX<mul, sext_oneuse, anyext_oneuse,
+                                   "PseudoVWMULSU">;
+
 // 12.13 Vector Single-Width Integer Multiply-Add Instructions.
 foreach vti = AllIntegerVectors in {
   // NOTE: We choose VMADD because it has the most commuting freedom. So it

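The old VPatWidenBinarySDNode_VV_VX_WV_WX multiclass is split above so that the VV/VX half can take a separate extension PatFrag per operand; that is what lets the sext_oneuse/zext_oneuse and sext_oneuse/anyext_oneuse combinations select vwmulsu, which multiplies a signed operand by an unsigned one. The WV/WX half stays a separate multiclass because the widening multiplies, unlike vwadd/vwsub, have no .wv/.wx forms. The mixed-sign case looks like this (again excerpted from the new test):

  %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>  ; signed operand
  %vd = zext <vscale x 1 x i32> %vb to <vscale x 1 x i64>  ; unsigned operand
  %ve = mul <vscale x 1 x i64> %vc, %vd

and is selected as:

  vwmulsu.vv v10, v8, v9
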
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
new file mode 100644
index 0000000000000..513cbe3d0e4de
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmul-sdnode.ll
@@ -0,0 +1,339 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i64> @vwmul_vv_nxv1i64(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
+; CHECK-LABEL: vwmul_vv_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vwmul.vv v10, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+  %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
+  %vd = sext <vscale x 1 x i32> %vb to <vscale x 1 x i64>
+  %ve = mul <vscale x 1 x i64> %vc, %vd
+  ret <vscale x 1 x i64> %ve
+}
+
+define <vscale x 1 x i64> @vwmulu_vv_nxv1i64(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
+; CHECK-LABEL: vwmulu_vv_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vwmulu.vv v10, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+  %vc = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
+  %vd = zext <vscale x 1 x i32> %vb to <vscale x 1 x i64>
+  %ve = mul <vscale x 1 x i64> %vc, %vd
+  ret <vscale x 1 x i64> %ve
+}
+
+define <vscale x 1 x i64> @vwmulsu_vv_nxv1i64(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
+; CHECK-LABEL: vwmulsu_vv_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vwmulsu.vv v10, v8, v9
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+  %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
+  %vd = zext <vscale x 1 x i32> %vb to <vscale x 1 x i64>
+  %ve = mul <vscale x 1 x i64> %vc, %vd
+  ret <vscale x 1 x i64> %ve
+}
+
+define <vscale x 1 x i64> @vwmul_vx_nxv1i64(<vscale x 1 x i32> %va, i32 %b) {
+; CHECK-LABEL: vwmul_vx_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vwmul.vx v9, v8, a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
+  %vd = sext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
+  %ve = mul <vscale x 1 x i64> %vc, %vd
+  ret <vscale x 1 x i64> %ve
+}
+
+define <vscale x 1 x i64> @vwmulu_vx_nxv1i64(<vscale x 1 x i32> %va, i32 %b) {
+; CHECK-LABEL: vwmulu_vx_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vwmulu.vx v9, v8, a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = zext <vscale x 1 x i32> %va to <vscale x 1 x i64>
+  %vd = zext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
+  %ve = mul <vscale x 1 x i64> %vc, %vd
+  ret <vscale x 1 x i64> %ve
+}
+
+define <vscale x 1 x i64> @vwmulsu_vx_nxv1i64(<vscale x 1 x i32> %va, i32 %b) {
+; CHECK-LABEL: vwmulsu_vx_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vwmulsu.vx v9, v8, a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
+  %vc = sext <vscale x 1 x i32> %va to <vscale x 1 x i64>
+  %vd = zext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
+  %ve = mul <vscale x 1 x i64> %vc, %vd
+  ret <vscale x 1 x i64> %ve
+}
+
+define <vscale x 2 x i64> @vwmul_vv_nxv2i64(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
+; CHECK-LABEL: vwmul_vv_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vwmul.vv v10, v8, v9
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
+  %vd = sext <vscale x 2 x i32> %vb to <vscale x 2 x i64>
+  %ve = mul <vscale x 2 x i64> %vc, %vd
+  ret <vscale x 2 x i64> %ve
+}
+
+define <vscale x 2 x i64> @vwmulu_vv_nxv2i64(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
+; CHECK-LABEL: vwmulu_vv_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vwmulu.vv v10, v8, v9
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %vc = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
+  %vd = zext <vscale x 2 x i32> %vb to <vscale x 2 x i64>
+  %ve = mul <vscale x 2 x i64> %vc, %vd
+  ret <vscale x 2 x i64> %ve
+}
+
+define <vscale x 2 x i64> @vwmulsu_vv_nxv2i64(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
+; CHECK-LABEL: vwmulsu_vv_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vwmulsu.vv v10, v8, v9
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
+  %vd = zext <vscale x 2 x i32> %vb to <vscale x 2 x i64>
+  %ve = mul <vscale x 2 x i64> %vc, %vd
+  ret <vscale x 2 x i64> %ve
+}
+
+define <vscale x 2 x i64> @vwmul_vx_nxv2i64(<vscale x 2 x i32> %va, i32 %b) {
+; CHECK-LABEL: vwmul_vx_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vwmul.vx v10, v8, a0
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
+  %vd = sext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %ve = mul <vscale x 2 x i64> %vc, %vd
+  ret <vscale x 2 x i64> %ve
+}
+
+define <vscale x 2 x i64> @vwmulu_vx_nxv2i64(<vscale x 2 x i32> %va, i32 %b) {
+; CHECK-LABEL: vwmulu_vx_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vwmulu.vx v10, v8, a0
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = zext <vscale x 2 x i32> %va to <vscale x 2 x i64>
+  %vd = zext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %ve = mul <vscale x 2 x i64> %vc, %vd
+  ret <vscale x 2 x i64> %ve
+}
+
+define <vscale x 2 x i64> @vwmulsu_vx_nxv2i64(<vscale x 2 x i32> %va, i32 %b) {
+; CHECK-LABEL: vwmulsu_vx_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vwmulsu.vx v10, v8, a0
+; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
+  %vc = sext <vscale x 2 x i32> %va to <vscale x 2 x i64>
+  %vd = zext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %ve = mul <vscale x 2 x i64> %vc, %vd
+  ret <vscale x 2 x i64> %ve
+}
+
+define <vscale x 4 x i64> @vwmul_vv_nxv4i64(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
+; CHECK-LABEL: vwmul_vv_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vwmul.vv v12, v8, v10
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+  %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
+  %vd = sext <vscale x 4 x i32> %vb to <vscale x 4 x i64>
+  %ve = mul <vscale x 4 x i64> %vc, %vd
+  ret <vscale x 4 x i64> %ve
+}
+
+define <vscale x 4 x i64> @vwmulu_vv_nxv4i64(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
+; CHECK-LABEL: vwmulu_vv_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vwmulu.vv v12, v8, v10
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+  %vc = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
+  %vd = zext <vscale x 4 x i32> %vb to <vscale x 4 x i64>
+  %ve = mul <vscale x 4 x i64> %vc, %vd
+  ret <vscale x 4 x i64> %ve
+}
+
+define <vscale x 4 x i64> @vwmulsu_vv_nxv4i64(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
+; CHECK-LABEL: vwmulsu_vv_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vwmulsu.vv v12, v8, v10
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+  %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
+  %vd = zext <vscale x 4 x i32> %vb to <vscale x 4 x i64>
+  %ve = mul <vscale x 4 x i64> %vc, %vd
+  ret <vscale x 4 x i64> %ve
+}
+
+define <vscale x 4 x i64> @vwmul_vx_nxv4i64(<vscale x 4 x i32> %va, i32 %b) {
+; CHECK-LABEL: vwmul_vx_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vwmul.vx v12, v8, a0
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
+  %vd = sext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
+  %ve = mul <vscale x 4 x i64> %vc, %vd
+  ret <vscale x 4 x i64> %ve
+}
+
+define <vscale x 4 x i64> @vwmulu_vx_nxv4i64(<vscale x 4 x i32> %va, i32 %b) {
+; CHECK-LABEL: vwmulu_vx_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vwmulu.vx v12, v8, a0
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = zext <vscale x 4 x i32> %va to <vscale x 4 x i64>
+  %vd = zext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
+  %ve = mul <vscale x 4 x i64> %vc, %vd
+  ret <vscale x 4 x i64> %ve
+}
+
+define <vscale x 4 x i64> @vwmulsu_vx_nxv4i64(<vscale x 4 x i32> %va, i32 %b) {
+; CHECK-LABEL: vwmulsu_vx_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vwmulsu.vx v12, v8, a0
+; CHECK-NEXT:    vmv4r.v v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %vc = sext <vscale x 4 x i32> %va to <vscale x 4 x i64>
+  %vd = zext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
+  %ve = mul <vscale x 4 x i64> %vc, %vd
+  ret <vscale x 4 x i64> %ve
+}
+
+define <vscale x 8 x i64> @vwmul_vv_nxv8i64(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: vwmul_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vwmul.vv v16, v8, v12
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+  %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
+  %vd = sext <vscale x 8 x i32> %vb to <vscale x 8 x i64>
+  %ve = mul <vscale x 8 x i64> %vc, %vd
+  ret <vscale x 8 x i64> %ve
+}
+
+define <vscale x 8 x i64> @vwmulu_vv_nxv8i64(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: vwmulu_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vwmulu.vv v16, v8, v12
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+  %vc = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
+  %vd = zext <vscale x 8 x i32> %vb to <vscale x 8 x i64>
+  %ve = mul <vscale x 8 x i64> %vc, %vd
+  ret <vscale x 8 x i64> %ve
+}
+
+define <vscale x 8 x i64> @vwmulsu_vv_nxv8i64(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: vwmulsu_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vwmulsu.vv v16, v8, v12
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+  %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
+  %vd = zext <vscale x 8 x i32> %vb to <vscale x 8 x i64>
+  %ve = mul <vscale x 8 x i64> %vc, %vd
+  ret <vscale x 8 x i64> %ve
+}
+
+define <vscale x 8 x i64> @vwmul_vx_nxv8i64(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: vwmul_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vwmul.vx v16, v8, a0
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
+  %vd = sext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
+  %ve = mul <vscale x 8 x i64> %vc, %vd
+  ret <vscale x 8 x i64> %ve
+}
+
+define <vscale x 8 x i64> @vwmulu_vx_nxv8i64(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: vwmulu_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vwmulu.vx v16, v8, a0
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = zext <vscale x 8 x i32> %va to <vscale x 8 x i64>
+  %vd = zext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
+  %ve = mul <vscale x 8 x i64> %vc, %vd
+  ret <vscale x 8 x i64> %ve
+}
+
+define <vscale x 8 x i64> @vwmulsu_vx_nxv8i64(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: vwmulsu_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vwmulsu.vx v16, v8, a0
+; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = sext <vscale x 8 x i32> %va to <vscale x 8 x i64>
+  %vd = zext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
+  %ve = mul <vscale x 8 x i64> %vc, %vd
+  ret <vscale x 8 x i64> %ve
+}