[llvm] 33a93a4 - [RISCV] Add SDNode patterns for vwsll.[vv,vx,vi]

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 26 12:26:53 PDT 2023


Author: Luke Lau
Date: 2023-07-26T20:26:35+01:00
New Revision: 33a93a41df260f6853ee6103b5b159b87544559c

URL: https://github.com/llvm/llvm-project/commit/33a93a41df260f6853ee6103b5b159b87544559c
DIFF: https://github.com/llvm/llvm-project/commit/33a93a41df260f6853ee6103b5b159b87544559c.diff

LOG: [RISCV] Add SDNode patterns for vwsll.[vv,vx,vi]

This reuses the patterns introduced to help lower vnsr[a,l].vx in D155698.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D155936

Added: 
    llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 69d753423c0ee6..ecd02ccfe62e82 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2166,6 +2166,11 @@ multiclass VPseudoBinaryW_VX<LMULInfo m> {
                              "@earlyclobber $rd">;
 }
 
+multiclass VPseudoBinaryW_VI<Operand ImmType, LMULInfo m> {
+  defm "_VI" : VPseudoBinary<m.wvrclass, m.vrclass, ImmType, m,
+                             "@earlyclobber $rd">;
+}
+
 multiclass VPseudoBinaryW_VF<LMULInfo m, FPR_Info f> {
   defm "_V" # f.FX : VPseudoBinary<m.wvrclass, m.vrclass,
                                    f.fprclass, m,
@@ -2745,6 +2750,12 @@ multiclass VPseudoVWALU_VV_VX {
   }
 }
 
+multiclass VPseudoVWALU_VV_VX_VI<Operand ImmType> : VPseudoVWALU_VV_VX {
+  foreach m = MxListW in {
+    defm "" : VPseudoBinaryW_VI<ImmType, m>;
+  }
+}
+
 multiclass VPseudoVWMUL_VV_VX {
   foreach m = MxListW in {
     defvar mx = m.MX;

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index 13c98ce92d1481..f923aaf73a2593 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -214,6 +214,8 @@ defm PseudoVCPOP : VPseudoUnaryV_V;
 defm PseudoVROL : VPseudoVALU_VV_VX;
 defm PseudoVROR : VPseudoVALU_VV_VX_VI<uimm6>;
 
+defm PseudoVWSLL : VPseudoVWALU_VV_VX_VI<uimm5>;
+
 //===----------------------------------------------------------------------===//
 // SDNode patterns
 //===----------------------------------------------------------------------===//
@@ -288,6 +290,35 @@ foreach vti = AllIntegerVectors in {
 }
 defm : VPatBinarySDNode_VV_VX_VI<rotr, "PseudoVROR", uimm6>;
 
+foreach vtiToWti = AllWidenableIntVectors in {
+  defvar vti = vtiToWti.Vti;
+  defvar wti = vtiToWti.Wti;
+  let Predicates = !listconcat([HasStdExtZvbb],
+                               GetVTypePredicates<vti>.Predicates,
+                               GetVTypePredicates<wti>.Predicates) in {
+    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
+                   (wti.Vector (ext_oneuse (vti.Vector vti.RegClass:$rs1)))),
+              (!cast<Instruction>("PseudoVWSLL_VV_"#vti.LMul.MX)
+                 (wti.Vector (IMPLICIT_DEF)),
+                 vti.RegClass:$rs2, vti.RegClass:$rs1,
+                 vti.AVL, vti.Log2SEW, TA_MA)>;
+
+    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
+                   (wti.Vector (Low8BitsSplatPat (XLenVT GPR:$rs1)))),
+              (!cast<Instruction>("PseudoVWSLL_VX_"#vti.LMul.MX)
+                 (wti.Vector (IMPLICIT_DEF)),
+                 vti.RegClass:$rs2, GPR:$rs1,
+                 vti.AVL, vti.Log2SEW, TA_MA)>;
+
+    def : Pat<(shl (wti.Vector (zext_oneuse (vti.Vector vti.RegClass:$rs2))),
+                   (wti.Vector (SplatPat_uimm5 uimm5:$rs1))),
+              (!cast<Instruction>("PseudoVWSLL_VI_"#vti.LMul.MX)
+                 (wti.Vector (IMPLICIT_DEF)),
+                 vti.RegClass:$rs2, uimm5:$rs1,
+                 vti.AVL, vti.Log2SEW, TA_MA)>;
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // VL patterns
 //===----------------------------------------------------------------------===//

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
new file mode 100644
index 00000000000000..db559d0f84f4dc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vwsll-sdnode.ll
@@ -0,0 +1,707 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+experimental-zvbb -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB,CHECK-ZVBB64
+
+; ==============================================================================
+; i32 -> i64
+; ==============================================================================
+
+define <vscale x 2 x i64> @vwsll_vv_nxv2i64_sext(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: vwsll_vv_nxv2i64_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf2 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_nxv2i64_sext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vv v10, v8, v9
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+  %y = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
+  %z = shl <vscale x 2 x i64> %x, %y
+  ret <vscale x 2 x i64> %z
+}
+
+define <vscale x 2 x i64> @vwsll_vv_nxv2i64_zext(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b) {
+; CHECK-LABEL: vwsll_vv_nxv2i64_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vzext.vf2 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_nxv2i64_zext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vv v10, v8, v9
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+  %y = zext <vscale x 2 x i32> %b to <vscale x 2 x i64>
+  %z = shl <vscale x 2 x i64> %x, %y
+  ret <vscale x 2 x i64> %z
+}
+
+define <vscale x 2 x i64> @vwsll_vx_i64_nxv2i64(<vscale x 2 x i32> %a, i64 %b) {
+; CHECK-LABEL: vwsll_vx_i64_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsll.vx v8, v10, a0
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i64_nxv2i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %x = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+  %z = shl <vscale x 2 x i64> %x, %splat
+  ret <vscale x 2 x i64> %z
+}
+
+define <vscale x 2 x i64> @vwsll_vx_i32_nxv2i64_sext(<vscale x 2 x i32> %a, i32 %b) {
+; CHECK-LABEL: vwsll_vx_i32_nxv2i64_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf2 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i32_nxv2i64_sext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  %x = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+  %y = sext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %z = shl <vscale x 2 x i64> %x, %y
+  ret <vscale x 2 x i64> %z
+}
+
+define <vscale x 2 x i64> @vwsll_vx_i32_nxv2i64_zext(<vscale x 2 x i32> %a, i32 %b) {
+; CHECK-LABEL: vwsll_vx_i32_nxv2i64_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vzext.vf2 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i32_nxv2i64_zext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  %x = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+  %y = zext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %z = shl <vscale x 2 x i64> %x, %y
+  ret <vscale x 2 x i64> %z
+}
+
+define <vscale x 2 x i64> @vwsll_vx_i16_nxv2i64_sext(<vscale x 2 x i32> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_nxv2i64_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf4 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_nxv2i64_sext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+  %x = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+  %y = sext <vscale x 2 x i16> %splat to <vscale x 2 x i64>
+  %z = shl <vscale x 2 x i64> %x, %y
+  ret <vscale x 2 x i64> %z
+}
+
+define <vscale x 2 x i64> @vwsll_vx_i16_nxv2i64_zext(<vscale x 2 x i32> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_nxv2i64_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vzext.vf4 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_nxv2i64_zext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+  %x = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+  %y = zext <vscale x 2 x i16> %splat to <vscale x 2 x i64>
+  %z = shl <vscale x 2 x i64> %x, %y
+  ret <vscale x 2 x i64> %z
+}
+
+define <vscale x 2 x i64> @vwsll_vx_i8_nxv2i64_sext(<vscale x 2 x i32> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_nxv2i64_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf8 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_nxv2i64_sext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+  %x = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+  %y = sext <vscale x 2 x i8> %splat to <vscale x 2 x i64>
+  %z = shl <vscale x 2 x i64> %x, %y
+  ret <vscale x 2 x i64> %z
+}
+
+define <vscale x 2 x i64> @vwsll_vx_i8_nxv2i64_zext(<vscale x 2 x i32> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_nxv2i64_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vzext.vf8 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_nxv2i64_zext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+  %x = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+  %y = zext <vscale x 2 x i8> %splat to <vscale x 2 x i64>
+  %z = shl <vscale x 2 x i64> %x, %y
+  ret <vscale x 2 x i64> %z
+}
+
+define <vscale x 2 x i64> @vwsll_vi_nxv2i64(<vscale x 2 x i32> %a) {
+; CHECK-LABEL: vwsll_vi_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsll.vi v8, v10, 2
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vi_nxv2i64:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vi v10, v8, 2
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
+  %z = shl <vscale x 2 x i64> %x, shufflevector(<vscale x 2 x i64> insertelement(<vscale x 2 x i64> poison, i64 2, i32 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
+  ret <vscale x 2 x i64> %z
+}
+
+; ==============================================================================
+; i16 -> i32
+; ==============================================================================
+
+define <vscale x 4 x i32> @vwsll_vv_nxv4i32_sext(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: vwsll_vv_nxv4i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf2 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_nxv4i32_sext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vv v10, v8, v9
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+  %y = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+  %z = shl <vscale x 4 x i32> %x, %y
+  ret <vscale x 4 x i32> %z
+}
+
+define <vscale x 4 x i32> @vwsll_vv_nxv4i32_zext(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b) {
+; CHECK-LABEL: vwsll_vv_nxv4i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vzext.vf2 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_nxv4i32_zext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vv v10, v8, v9
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+  %y = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
+  %z = shl <vscale x 4 x i32> %x, %y
+  ret <vscale x 4 x i32> %z
+}
+
+define <vscale x 4 x i32> @vwsll_vx_i64_nxv4i32(<vscale x 4 x i16> %a, i64 %b) {
+; CHECK-RV32-LABEL: vwsll_vx_i64_nxv4i32:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v12, (a0), zero
+; CHECK-RV32-NEXT:    vzext.vf2 v10, v8
+; CHECK-RV32-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-RV32-NEXT:    vsll.vv v8, v10, v8
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vwsll_vx_i64_nxv4i32:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-RV64-NEXT:    vmv.v.x v12, a0
+; CHECK-RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-RV64-NEXT:    vzext.vf2 v10, v8
+; CHECK-RV64-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-RV64-NEXT:    vsll.vv v8, v10, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vwsll_vx_i64_nxv4i32:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v12, (a0), zero
+; CHECK-ZVBB32-NEXT:    vzext.vf2 v10, v8
+; CHECK-ZVBB32-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-ZVBB32-NEXT:    vsll.vv v8, v10, v8
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vwsll_vx_i64_nxv4i32:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-ZVBB64-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB64-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB64-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+  %x = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+  %y = trunc <vscale x 4 x i64> %splat to <vscale x 4 x i32>
+  %z = shl <vscale x 4 x i32> %x, %y
+  ret <vscale x 4 x i32> %z
+}
+
+define <vscale x 4 x i32> @vwsll_vx_i32_nxv4i32(<vscale x 4 x i16> %a, i32 %b) {
+; CHECK-LABEL: vwsll_vx_i32_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsll.vx v8, v10, a0
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i32_nxv4i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %x = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+  %z = shl <vscale x 4 x i32> %x, %splat
+  ret <vscale x 4 x i32> %z
+}
+
+define <vscale x 4 x i32> @vwsll_vx_i16_nxv4i32_sext(<vscale x 4 x i16> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_nxv4i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf2 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_nxv4i32_sext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+  %x = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+  %y = sext <vscale x 4 x i16> %splat to <vscale x 4 x i32>
+  %z = shl <vscale x 4 x i32> %x, %y
+  ret <vscale x 4 x i32> %z
+}
+
+define <vscale x 4 x i32> @vwsll_vx_i16_nxv4i32_zext(<vscale x 4 x i16> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_nxv4i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vzext.vf2 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_nxv4i32_zext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+  %x = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+  %y = zext <vscale x 4 x i16> %splat to <vscale x 4 x i32>
+  %z = shl <vscale x 4 x i32> %x, %y
+  ret <vscale x 4 x i32> %z
+}
+
+define <vscale x 4 x i32> @vwsll_vx_i8_nxv4i32_sext(<vscale x 4 x i16> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_nxv4i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf4 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_nxv4i32_sext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+  %x = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+  %y = sext <vscale x 4 x i8> %splat to <vscale x 4 x i32>
+  %z = shl <vscale x 4 x i32> %x, %y
+  ret <vscale x 4 x i32> %z
+}
+
+define <vscale x 4 x i32> @vwsll_vx_i8_nxv4i32_zext(<vscale x 4 x i16> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_nxv4i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vzext.vf4 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_nxv4i32_zext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+  %x = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+  %y = zext <vscale x 4 x i8> %splat to <vscale x 4 x i32>
+  %z = shl <vscale x 4 x i32> %x, %y
+  ret <vscale x 4 x i32> %z
+}
+
+define <vscale x 4 x i32> @vwsll_vi_nxv4i32(<vscale x 4 x i16> %a) {
+; CHECK-LABEL: vwsll_vi_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsll.vi v8, v10, 2
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vi_nxv4i32:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vi v10, v8, 2
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = zext <vscale x 4 x i16> %a to <vscale x 4 x i32>
+  %z = shl <vscale x 4 x i32> %x, shufflevector(<vscale x 4 x i32> insertelement(<vscale x 4 x i32> poison, i32 2, i32 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+  ret <vscale x 4 x i32> %z
+}
+
+; ==============================================================================
+; i8 -> i16
+; ==============================================================================
+
+define <vscale x 8 x i16> @vwsll_vv_nxv8i16_sext(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: vwsll_vv_nxv8i16_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf2 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_nxv8i16_sext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vv v10, v8, v9
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  %y = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+  %z = shl <vscale x 8 x i16> %x, %y
+  ret <vscale x 8 x i16> %z
+}
+
+define <vscale x 8 x i16> @vwsll_vv_nxv8i16_zext(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b) {
+; CHECK-LABEL: vwsll_vv_nxv8i16_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vzext.vf2 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vv_nxv8i16_zext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vv v10, v8, v9
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  %y = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
+  %z = shl <vscale x 8 x i16> %x, %y
+  ret <vscale x 8 x i16> %z
+}
+
+define <vscale x 8 x i16> @vwsll_vx_i64_nxv8i16(<vscale x 8 x i8> %a, i64 %b) {
+; CHECK-RV32-LABEL: vwsll_vx_i64_nxv8i16:
+; CHECK-RV32:       # %bb.0:
+; CHECK-RV32-NEXT:    addi sp, sp, -16
+; CHECK-RV32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-RV32-NEXT:    sw a1, 12(sp)
+; CHECK-RV32-NEXT:    sw a0, 8(sp)
+; CHECK-RV32-NEXT:    addi a0, sp, 8
+; CHECK-RV32-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-RV32-NEXT:    vlse64.v v16, (a0), zero
+; CHECK-RV32-NEXT:    vzext.vf2 v10, v8
+; CHECK-RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-RV32-NEXT:    vnsrl.wi v12, v16, 0
+; CHECK-RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-RV32-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-RV32-NEXT:    vsll.vv v8, v10, v8
+; CHECK-RV32-NEXT:    addi sp, sp, 16
+; CHECK-RV32-NEXT:    ret
+;
+; CHECK-RV64-LABEL: vwsll_vx_i64_nxv8i16:
+; CHECK-RV64:       # %bb.0:
+; CHECK-RV64-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-RV64-NEXT:    vmv.v.x v16, a0
+; CHECK-RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-RV64-NEXT:    vzext.vf2 v10, v8
+; CHECK-RV64-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-RV64-NEXT:    vnsrl.wi v12, v16, 0
+; CHECK-RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-RV64-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-RV64-NEXT:    vsll.vv v8, v10, v8
+; CHECK-RV64-NEXT:    ret
+;
+; CHECK-ZVBB32-LABEL: vwsll_vx_i64_nxv8i16:
+; CHECK-ZVBB32:       # %bb.0:
+; CHECK-ZVBB32-NEXT:    addi sp, sp, -16
+; CHECK-ZVBB32-NEXT:    .cfi_def_cfa_offset 16
+; CHECK-ZVBB32-NEXT:    sw a1, 12(sp)
+; CHECK-ZVBB32-NEXT:    sw a0, 8(sp)
+; CHECK-ZVBB32-NEXT:    addi a0, sp, 8
+; CHECK-ZVBB32-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-ZVBB32-NEXT:    vlse64.v v16, (a0), zero
+; CHECK-ZVBB32-NEXT:    vzext.vf2 v10, v8
+; CHECK-ZVBB32-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-ZVBB32-NEXT:    vnsrl.wi v12, v16, 0
+; CHECK-ZVBB32-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-ZVBB32-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-ZVBB32-NEXT:    vsll.vv v8, v10, v8
+; CHECK-ZVBB32-NEXT:    addi sp, sp, 16
+; CHECK-ZVBB32-NEXT:    ret
+;
+; CHECK-ZVBB64-LABEL: vwsll_vx_i64_nxv8i16:
+; CHECK-ZVBB64:       # %bb.0:
+; CHECK-ZVBB64-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-ZVBB64-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB64-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB64-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %x = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  %y = trunc <vscale x 8 x i64> %splat to <vscale x 8 x i16>
+  %z = shl <vscale x 8 x i16> %x, %y
+  ret <vscale x 8 x i16> %z
+}
+
+define <vscale x 8 x i16> @vwsll_vx_i32_nxv8i16(<vscale x 8 x i8> %a, i32 %b) {
+; CHECK-LABEL: vwsll_vx_i32_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v12, a0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    vsll.vv v8, v10, v8
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i32_nxv8i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %x = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  %y = trunc <vscale x 8 x i32> %splat to <vscale x 8 x i16>
+  %z = shl <vscale x 8 x i16> %x, %y
+  ret <vscale x 8 x i16> %z
+}
+
+define <vscale x 8 x i16> @vwsll_vx_i16_nxv8i16(<vscale x 8 x i8> %a, i16 %b) {
+; CHECK-LABEL: vwsll_vx_i16_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsll.vx v8, v10, a0
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i16_nxv8i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  %x = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  %z = shl <vscale x 8 x i16> %x, %splat
+  ret <vscale x 8 x i16> %z
+}
+
+define <vscale x 8 x i16> @vwsll_vx_i8_nxv8i16_sext(<vscale x 8 x i8> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_nxv8i16_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsext.vf2 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_nxv8i16_sext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+  %x = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  %y = sext <vscale x 8 x i8> %splat to <vscale x 8 x i16>
+  %z = shl <vscale x 8 x i16> %x, %y
+  ret <vscale x 8 x i16> %z
+}
+
+define <vscale x 8 x i16> @vwsll_vx_i8_nxv8i16_zext(<vscale x 8 x i8> %a, i8 %b) {
+; CHECK-LABEL: vwsll_vx_i8_nxv8i16_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vzext.vf2 v12, v9
+; CHECK-NEXT:    vsll.vv v8, v10, v12
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vx_i8_nxv8i16_zext:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vx v10, v8, a0
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+  %x = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  %y = zext <vscale x 8 x i8> %splat to <vscale x 8 x i16>
+  %z = shl <vscale x 8 x i16> %x, %y
+  ret <vscale x 8 x i16> %z
+}
+
+define <vscale x 8 x i16> @vwsll_vi_nxv8i16(<vscale x 8 x i8> %a) {
+; CHECK-LABEL: vwsll_vi_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
+; CHECK-NEXT:    vsll.vi v8, v10, 2
+; CHECK-NEXT:    ret
+;
+; CHECK-ZVBB-LABEL: vwsll_vi_nxv8i16:
+; CHECK-ZVBB:       # %bb.0:
+; CHECK-ZVBB-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-ZVBB-NEXT:    vwsll.vi v10, v8, 2
+; CHECK-ZVBB-NEXT:    vmv2r.v v8, v10
+; CHECK-ZVBB-NEXT:    ret
+  %x = zext <vscale x 8 x i8> %a to <vscale x 8 x i16>
+  %z = shl <vscale x 8 x i16> %x, shufflevector(<vscale x 8 x i16> insertelement(<vscale x 8 x i16> poison, i16 2, i32 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
+  ret <vscale x 8 x i16> %z
+}


        


More information about the llvm-commits mailing list