[llvm] 418e678 - [RISCV] Add tests for vnsr[l,a].wx patterns that could be matched

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 21 02:13:36 PDT 2023


Author: Luke Lau
Date: 2023-07-21T10:13:26+01:00
New Revision: 418e678ba3b4bbe3433f5fcd94959ffa82a917a8

URL: https://github.com/llvm/llvm-project/commit/418e678ba3b4bbe3433f5fcd94959ffa82a917a8
DIFF: https://github.com/llvm/llvm-project/commit/418e678ba3b4bbe3433f5fcd94959ffa82a917a8.diff

LOG: [RISCV] Add tests for vnsr[l,a].wx patterns that could be matched

The existing patterns for ([l,a]shr v, ([s,z]ext splat)) only match cases where
the scalar has the same type as the vector element. However, since only the low
log2(SEW) bits of the scalar are read, any scalar type that has been extended
could be used.
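
As an illustration, here is a minimal sketch of the shape the existing patterns
already match (the function name is hypothetical and the snippet is not part of
this commit): the splatted scalar is i32, the same width as the narrow element,
which corresponds to the existing vnsra_wx_i32 tests in the file. The tests
added below use the same shape with i64, i16 and i8 scalars, which currently
lower to a vector-vector shift followed by a separate narrowing instead of a
single vnsra.wx/vnsrl.wx.

  ; i32 scalar splat, extended to the i64 element width, then shifted and
  ; truncated back to i32; this shape is already selected as vnsra.wx.
  define <vscale x 1 x i32> @example_matched_i32(<vscale x 1 x i64> %va, i32 %b) {
    %head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
    %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
    %vb = sext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
    %x = ashr <vscale x 1 x i64> %va, %vb
    %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
    ret <vscale x 1 x i32> %y
  }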

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D155697

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
index 10dc4de292abd8..aafcf19dcadfae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
@@ -4,6 +4,66 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
+define <vscale x 1 x i32> @vnsra_wx_i64_nxv1i32(<vscale x 1 x i64> %va, i64 %b) {
+; CHECK-LABEL: vnsra_wx_i64_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vsra.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %x = ashr <vscale x 1 x i64> %va, %splat
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
+define <vscale x 2 x i32> @vnsra_wx_i64_nxv2i32(<vscale x 2 x i64> %va, i64 %b) {
+; CHECK-LABEL: vnsra_wx_i64_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vsra.vx v10, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %x = ashr <vscale x 2 x i64> %va, %splat
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
+define <vscale x 4 x i32> @vnsra_wx_i64_nxv4i32(<vscale x 4 x i64> %va, i64 %b) {
+; CHECK-LABEL: vnsra_wx_i64_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vsra.vx v12, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+  %x = ashr <vscale x 4 x i64> %va, %splat
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
+define <vscale x 8 x i32> @vnsra_wx_i64_nxv8i32(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: vnsra_wx_i64_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vsra.vx v16, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %x = ashr <vscale x 8 x i64> %va, %splat
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}
+
 define <vscale x 1 x i32> @vnsra_wv_nxv1i32_sext(<vscale x 1 x i64> %va, <vscale x 1 x i32> %vb) {
 ; CHECK-LABEL: vnsra_wv_nxv1i32_sext:
 ; CHECK:       # %bb.0:
@@ -30,6 +90,44 @@ define <vscale x 1 x i32> @vnsra_wx_i32_nxv1i32_sext(<vscale x 1 x i64> %va, i32
   ret <vscale x 1 x i32> %y
 }
 
+define <vscale x 1 x i32> @vnsra_wx_i16_nxv1i32_sext(<vscale x 1 x i64> %va, i16 %b) {
+; CHECK-LABEL: vnsra_wx_i16_nxv1i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vsext.vf4 v10, v9
+; CHECK-NEXT:    vsra.vv v8, v8, v10
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = sext <vscale x 1 x i16> %splat to <vscale x 1 x i64>
+  %x = ashr <vscale x 1 x i64> %va, %vb
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
+define <vscale x 1 x i32> @vnsra_wx_i8_nxv1i32_sext(<vscale x 1 x i64> %va, i8 %b) {
+; CHECK-LABEL: vnsra_wx_i8_nxv1i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vsext.vf8 v10, v9
+; CHECK-NEXT:    vsra.vv v8, v8, v10
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = sext <vscale x 1 x i8> %splat to <vscale x 1 x i64>
+  %x = ashr <vscale x 1 x i64> %va, %vb
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
 define <vscale x 1 x i32> @vnsra_wi_i32_nxv1i32_sext(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vnsra_wi_i32_nxv1i32_sext:
 ; CHECK:       # %bb.0:
@@ -72,6 +170,44 @@ define <vscale x 2 x i32> @vnsra_wx_i32_nxv2i32_sext(<vscale x 2 x i64> %va, i32
   ret <vscale x 2 x i32> %y
 }
 
+define <vscale x 2 x i32> @vnsra_wx_i16_nxv2i32_sext(<vscale x 2 x i64> %va, i16 %b) {
+; CHECK-LABEL: vnsra_wx_i16_nxv2i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v10, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vsext.vf4 v12, v10
+; CHECK-NEXT:    vsra.vv v10, v8, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+  %vb = sext <vscale x 2 x i16> %splat to <vscale x 2 x i64>
+  %x = ashr <vscale x 2 x i64> %va, %vb
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
+define <vscale x 2 x i32> @vnsra_wx_i8_nxv2i32_sext(<vscale x 2 x i64> %va, i8 %b) {
+; CHECK-LABEL: vnsra_wx_i8_nxv2i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v10, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vsext.vf8 v12, v10
+; CHECK-NEXT:    vsra.vv v10, v8, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+  %vb = sext <vscale x 2 x i8> %splat to <vscale x 2 x i64>
+  %x = ashr <vscale x 2 x i64> %va, %vb
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
 define <vscale x 2 x i32> @vnsra_wi_i32_nxv2i32_sext(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vnsra_wi_i32_nxv2i32_sext:
 ; CHECK:       # %bb.0:
@@ -115,6 +251,44 @@ define <vscale x 4 x i32> @vnsra_wx_i32_nxv4i32_sext(<vscale x 4 x i64> %va, i32
   ret <vscale x 4 x i32> %y
 }
 
+define <vscale x 4 x i32> @vnsra_wx_i16_nxv4i32_sext(<vscale x 4 x i64> %va, i16 %b) {
+; CHECK-LABEL: vnsra_wx_i16_nxv4i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v12, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vsext.vf4 v16, v12
+; CHECK-NEXT:    vsra.vv v12, v8, v16
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+  %vb = sext <vscale x 4 x i16> %splat to <vscale x 4 x i64>
+  %x = ashr <vscale x 4 x i64> %va, %vb
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
+define <vscale x 4 x i32> @vnsra_wx_i8_nxv4i32_sext(<vscale x 4 x i64> %va, i8 %b) {
+; CHECK-LABEL: vnsra_wx_i8_nxv4i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v12, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vsext.vf8 v16, v12
+; CHECK-NEXT:    vsra.vv v12, v8, v16
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+  %vb = sext <vscale x 4 x i8> %splat to <vscale x 4 x i64>
+  %x = ashr <vscale x 4 x i64> %va, %vb
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
 define <vscale x 4 x i32> @vnsra_wi_i32_nxv4i32_sext(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vnsra_wi_i32_nxv4i32_sext:
 ; CHECK:       # %bb.0:
@@ -158,6 +332,44 @@ define <vscale x 8 x i32> @vnsra_wx_i32_nxv8i32_sext(<vscale x 8 x i64> %va, i32
   ret <vscale x 8 x i32> %y
 }
 
+define <vscale x 8 x i32> @vnsra_wx_i16_nxv8i32_sext(<vscale x 8 x i64> %va, i16 %b) {
+; CHECK-LABEL: vnsra_wx_i16_nxv8i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v16, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vsext.vf4 v24, v16
+; CHECK-NEXT:    vsra.vv v16, v8, v24
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  %vb = sext <vscale x 8 x i16> %splat to <vscale x 8 x i64>
+  %x = ashr <vscale x 8 x i64> %va, %vb
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}
+
+define <vscale x 8 x i32> @vnsra_wx_i8_nxv8i32_sext(<vscale x 8 x i64> %va, i8 %b) {
+; CHECK-LABEL: vnsra_wx_i8_nxv8i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v16, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vsext.vf8 v24, v16
+; CHECK-NEXT:    vsra.vv v16, v8, v24
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+  %vb = sext <vscale x 8 x i8> %splat to <vscale x 8 x i64>
+  %x = ashr <vscale x 8 x i64> %va, %vb
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}
+
 define <vscale x 8 x i32> @vnsra_wi_i32_nxv8i32_sext(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vnsra_wi_i32_nxv8i32_sext:
 ; CHECK:       # %bb.0:
@@ -199,6 +411,44 @@ define <vscale x 1 x i32> @vnsra_wx_i32_nxv1i32_zext(<vscale x 1 x i64> %va, i32
   ret <vscale x 1 x i32> %y
 }
 
+define <vscale x 1 x i32> @vnsra_wx_i16_nxv1i32_zext(<vscale x 1 x i64> %va, i16 %b) {
+; CHECK-LABEL: vnsra_wx_i16_nxv1i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vzext.vf4 v10, v9
+; CHECK-NEXT:    vsra.vv v8, v8, v10
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = zext <vscale x 1 x i16> %splat to <vscale x 1 x i64>
+  %x = ashr <vscale x 1 x i64> %va, %vb
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
+define <vscale x 1 x i32> @vnsra_wx_i8_nxv1i32_zext(<vscale x 1 x i64> %va, i8 %b) {
+; CHECK-LABEL: vnsra_wx_i8_nxv1i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vzext.vf8 v10, v9
+; CHECK-NEXT:    vsra.vv v8, v8, v10
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = zext <vscale x 1 x i8> %splat to <vscale x 1 x i64>
+  %x = ashr <vscale x 1 x i64> %va, %vb
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
 define <vscale x 1 x i32> @vnsra_wi_i32_nxv1i32_zext(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vnsra_wi_i32_nxv1i32_zext:
 ; CHECK:       # %bb.0:
@@ -241,6 +491,44 @@ define <vscale x 2 x i32> @vnsra_wx_i32_nxv2i32_zext(<vscale x 2 x i64> %va, i32
   ret <vscale x 2 x i32> %y
 }
 
+define <vscale x 2 x i32> @vnsra_wx_i16_nxv2i32_zext(<vscale x 2 x i64> %va, i16 %b) {
+; CHECK-LABEL: vnsra_wx_i16_nxv2i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v10, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vzext.vf4 v12, v10
+; CHECK-NEXT:    vsra.vv v10, v8, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+  %vb = zext <vscale x 2 x i16> %splat to <vscale x 2 x i64>
+  %x = ashr <vscale x 2 x i64> %va, %vb
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
+define <vscale x 2 x i32> @vnsra_wx_i8_nxv2i32_zext(<vscale x 2 x i64> %va, i8 %b) {
+; CHECK-LABEL: vnsra_wx_i8_nxv2i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v10, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vzext.vf8 v12, v10
+; CHECK-NEXT:    vsra.vv v10, v8, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+  %vb = zext <vscale x 2 x i8> %splat to <vscale x 2 x i64>
+  %x = ashr <vscale x 2 x i64> %va, %vb
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
 define <vscale x 2 x i32> @vnsra_wi_i32_nxv2i32_zext(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vnsra_wi_i32_nxv2i32_zext:
 ; CHECK:       # %bb.0:
@@ -284,6 +572,44 @@ define <vscale x 4 x i32> @vnsra_wx_i32_nxv4i32_zext(<vscale x 4 x i64> %va, i32
   ret <vscale x 4 x i32> %y
 }
 
+define <vscale x 4 x i32> @vnsra_wx_i16_nxv4i32_zext(<vscale x 4 x i64> %va, i16 %b) {
+; CHECK-LABEL: vnsra_wx_i16_nxv4i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v12, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vzext.vf4 v16, v12
+; CHECK-NEXT:    vsra.vv v12, v8, v16
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+  %vb = zext <vscale x 4 x i16> %splat to <vscale x 4 x i64>
+  %x = ashr <vscale x 4 x i64> %va, %vb
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
+define <vscale x 4 x i32> @vnsra_wx_i8_nxv4i32_zext(<vscale x 4 x i64> %va, i8 %b) {
+; CHECK-LABEL: vnsra_wx_i8_nxv4i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v12, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vzext.vf8 v16, v12
+; CHECK-NEXT:    vsra.vv v12, v8, v16
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+  %vb = zext <vscale x 4 x i8> %splat to <vscale x 4 x i64>
+  %x = ashr <vscale x 4 x i64> %va, %vb
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
 define <vscale x 4 x i32> @vnsra_wi_i32_nxv4i32_zext(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vnsra_wi_i32_nxv4i32_zext:
 ; CHECK:       # %bb.0:
@@ -327,6 +653,44 @@ define <vscale x 8 x i32> @vnsra_wx_i32_nxv8i32_zext(<vscale x 8 x i64> %va, i32
   ret <vscale x 8 x i32> %y
 }
 
+define <vscale x 8 x i32> @vnsra_wx_i16_nxv8i32_zext(<vscale x 8 x i64> %va, i16 %b) {
+; CHECK-LABEL: vnsra_wx_i16_nxv8i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v16, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vzext.vf4 v24, v16
+; CHECK-NEXT:    vsra.vv v16, v8, v24
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  %vb = zext <vscale x 8 x i16> %splat to <vscale x 8 x i64>
+  %x = ashr <vscale x 8 x i64> %va, %vb
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}
+
+define <vscale x 8 x i32> @vnsra_wx_i8_nxv8i32_zext(<vscale x 8 x i64> %va, i8 %b) {
+; CHECK-LABEL: vnsra_wx_i8_nxv8i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v16, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vzext.vf8 v24, v16
+; CHECK-NEXT:    vsra.vv v16, v8, v24
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+  %vb = zext <vscale x 8 x i8> %splat to <vscale x 8 x i64>
+  %x = ashr <vscale x 8 x i64> %va, %vb
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}
+
 define <vscale x 8 x i32> @vnsra_wi_i32_nxv8i32_zext(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vnsra_wi_i32_nxv8i32_zext:
 ; CHECK:       # %bb.0:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
index baa9b3d7414e7c..1c81292caf7f5b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
@@ -4,6 +4,66 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64 \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
+define <vscale x 1 x i32> @vnsrl_wx_i64_nxv1i32(<vscale x 1 x i64> %va, i64 %b) {
+; CHECK-LABEL: vnsrl_wx_i64_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vsrl.vx v8, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
+  %x = lshr <vscale x 1 x i64> %va, %splat
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
+define <vscale x 2 x i32> @vnsrl_wx_i64_nxv2i32(<vscale x 2 x i64> %va, i64 %b) {
+; CHECK-LABEL: vnsrl_wx_i64_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vsrl.vx v10, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+  %x = lshr <vscale x 2 x i64> %va, %splat
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
+define <vscale x 4 x i32> @vnsrl_wx_i64_nxv4i32(<vscale x 4 x i64> %va, i64 %b) {
+; CHECK-LABEL: vnsrl_wx_i64_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vsrl.vx v12, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
+  %x = lshr <vscale x 4 x i64> %va, %splat
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
+define <vscale x 8 x i32> @vnsrl_wx_i64_nxv8i32(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: vnsrl_wx_i64_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vsrl.vx v16, v8, a0
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
+  %x = lshr <vscale x 8 x i64> %va, %splat
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}
+
 define <vscale x 1 x i32> @vnsrl_wv_nxv1i32_sext(<vscale x 1 x i64> %va, <vscale x 1 x i32> %vb) {
 ; CHECK-LABEL: vnsrl_wv_nxv1i32_sext:
 ; CHECK:       # %bb.0:
@@ -30,6 +90,44 @@ define <vscale x 1 x i32> @vnsrl_wx_i32_nxv1i32_sext(<vscale x 1 x i64> %va, i32
   ret <vscale x 1 x i32> %y
 }
 
+define <vscale x 1 x i32> @vnsrl_wx_i16_nxv1i32_sext(<vscale x 1 x i64> %va, i16 %b) {
+; CHECK-LABEL: vnsrl_wx_i16_nxv1i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vsext.vf4 v10, v9
+; CHECK-NEXT:    vsrl.vv v8, v8, v10
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = sext <vscale x 1 x i16> %splat to <vscale x 1 x i64>
+  %x = lshr <vscale x 1 x i64> %va, %vb
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
+define <vscale x 1 x i32> @vnsrl_wx_i8_nxv1i32_sext(<vscale x 1 x i64> %va, i8 %b) {
+; CHECK-LABEL: vnsrl_wx_i8_nxv1i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vsext.vf8 v10, v9
+; CHECK-NEXT:    vsrl.vv v8, v8, v10
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = sext <vscale x 1 x i8> %splat to <vscale x 1 x i64>
+  %x = lshr <vscale x 1 x i64> %va, %vb
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
 define <vscale x 1 x i32> @vnsrl_wi_i32_nxv1i32_sext(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vnsrl_wi_i32_nxv1i32_sext:
 ; CHECK:       # %bb.0:
@@ -72,6 +170,44 @@ define <vscale x 2 x i32> @vnsrl_wx_i32_nxv2i32_sext(<vscale x 2 x i64> %va, i32
   ret <vscale x 2 x i32> %y
 }
 
+define <vscale x 2 x i32> @vnsrl_wx_i16_nxv2i32_sext(<vscale x 2 x i64> %va, i16 %b) {
+; CHECK-LABEL: vnsrl_wx_i16_nxv2i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v10, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vsext.vf4 v12, v10
+; CHECK-NEXT:    vsrl.vv v10, v8, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+  %vb = sext <vscale x 2 x i16> %splat to <vscale x 2 x i64>
+  %x = lshr <vscale x 2 x i64> %va, %vb
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
+define <vscale x 2 x i32> @vnsrl_wx_i8_nxv2i32_sext(<vscale x 2 x i64> %va, i8 %b) {
+; CHECK-LABEL: vnsrl_wx_i8_nxv2i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v10, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vsext.vf8 v12, v10
+; CHECK-NEXT:    vsrl.vv v10, v8, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+  %vb = sext <vscale x 2 x i8> %splat to <vscale x 2 x i64>
+  %x = lshr <vscale x 2 x i64> %va, %vb
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
 define <vscale x 2 x i32> @vnsrl_wi_i32_nxv2i32_sext(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vnsrl_wi_i32_nxv2i32_sext:
 ; CHECK:       # %bb.0:
@@ -115,6 +251,44 @@ define <vscale x 4 x i32> @vnsrl_wx_i32_nxv4i32_sext(<vscale x 4 x i64> %va, i32
   ret <vscale x 4 x i32> %y
 }
 
+define <vscale x 4 x i32> @vnsrl_wx_i16_nxv4i32_sext(<vscale x 4 x i64> %va, i16 %b) {
+; CHECK-LABEL: vnsrl_wx_i16_nxv4i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v12, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vsext.vf4 v16, v12
+; CHECK-NEXT:    vsrl.vv v12, v8, v16
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+  %vb = sext <vscale x 4 x i16> %splat to <vscale x 4 x i64>
+  %x = lshr <vscale x 4 x i64> %va, %vb
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
+define <vscale x 4 x i32> @vnsrl_wx_i8_nxv4i32_sext(<vscale x 4 x i64> %va, i8 %b) {
+; CHECK-LABEL: vnsrl_wx_i8_nxv4i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v12, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vsext.vf8 v16, v12
+; CHECK-NEXT:    vsrl.vv v12, v8, v16
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+  %vb = sext <vscale x 4 x i8> %splat to <vscale x 4 x i64>
+  %x = lshr <vscale x 4 x i64> %va, %vb
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
 define <vscale x 4 x i32> @vnsrl_wi_i32_nxv4i32_sext(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vnsrl_wi_i32_nxv4i32_sext:
 ; CHECK:       # %bb.0:
@@ -158,6 +332,44 @@ define <vscale x 8 x i32> @vnsrl_wx_i32_nxv8i32_sext(<vscale x 8 x i64> %va, i32
   ret <vscale x 8 x i32> %y
 }
 
+define <vscale x 8 x i32> @vnsrl_wx_i16_nxv8i32_sext(<vscale x 8 x i64> %va, i16 %b) {
+; CHECK-LABEL: vnsrl_wx_i16_nxv8i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v16, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vsext.vf4 v24, v16
+; CHECK-NEXT:    vsrl.vv v16, v8, v24
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  %vb = sext <vscale x 8 x i16> %splat to <vscale x 8 x i64>
+  %x = lshr <vscale x 8 x i64> %va, %vb
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}
+
+define <vscale x 8 x i32> @vnsrl_wx_i8_nxv8i32_sext(<vscale x 8 x i64> %va, i8 %b) {
+; CHECK-LABEL: vnsrl_wx_i8_nxv8i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v16, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vsext.vf8 v24, v16
+; CHECK-NEXT:    vsrl.vv v16, v8, v24
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+  %vb = sext <vscale x 8 x i8> %splat to <vscale x 8 x i64>
+  %x = lshr <vscale x 8 x i64> %va, %vb
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}
+
 define <vscale x 8 x i32> @vnsrl_wi_i32_nxv8i32_sext(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vnsrl_wi_i32_nxv8i32_sext:
 ; CHECK:       # %bb.0:
@@ -199,6 +411,44 @@ define <vscale x 1 x i32> @vnsrl_wx_i32_nxv1i32_zext(<vscale x 1 x i64> %va, i32
   ret <vscale x 1 x i32> %y
 }
 
+define <vscale x 1 x i32> @vnsrl_wx_i16_nxv1i32_zext(<vscale x 1 x i64> %va, i16 %b) {
+; CHECK-LABEL: vnsrl_wx_i16_nxv1i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vzext.vf4 v10, v9
+; CHECK-NEXT:    vsrl.vv v8, v8, v10
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = zext <vscale x 1 x i16> %splat to <vscale x 1 x i64>
+  %x = lshr <vscale x 1 x i64> %va, %vb
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
+define <vscale x 1 x i32> @vnsrl_wx_i8_nxv1i32_zext(<vscale x 1 x i64> %va, i8 %b) {
+; CHECK-LABEL: vnsrl_wx_i8_nxv1i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vsext.vf8 v10, v9
+; CHECK-NEXT:    vsrl.vv v8, v8, v10
+; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v8, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = sext <vscale x 1 x i8> %splat to <vscale x 1 x i64>
+  %x = lshr <vscale x 1 x i64> %va, %vb
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
 define <vscale x 1 x i32> @vnsrl_wi_i32_nxv1i32_zext(<vscale x 1 x i64> %va) {
 ; CHECK-LABEL: vnsrl_wi_i32_nxv1i32_zext:
 ; CHECK:       # %bb.0:
@@ -241,6 +491,44 @@ define <vscale x 2 x i32> @vnsrl_wx_i32_nxv2i32_zext(<vscale x 2 x i64> %va, i32
   ret <vscale x 2 x i32> %y
 }
 
+define <vscale x 2 x i32> @vnsrl_wx_i16_nxv2i32_zext(<vscale x 2 x i64> %va, i16 %b) {
+; CHECK-LABEL: vnsrl_wx_i16_nxv2i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v10, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vzext.vf4 v12, v10
+; CHECK-NEXT:    vsrl.vv v10, v8, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> poison, <vscale x 2 x i32> zeroinitializer
+  %vb = zext <vscale x 2 x i16> %splat to <vscale x 2 x i64>
+  %x = lshr <vscale x 2 x i64> %va, %vb
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
+define <vscale x 2 x i32> @vnsrl_wx_i8_nxv2i32_zext(<vscale x 2 x i64> %va, i8 %b) {
+; CHECK-LABEL: vnsrl_wx_i8_nxv2i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.v.x v10, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vsext.vf8 v12, v10
+; CHECK-NEXT:    vsrl.vv v10, v8, v12
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v10, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> poison, <vscale x 2 x i32> zeroinitializer
+  %vb = sext <vscale x 2 x i8> %splat to <vscale x 2 x i64>
+  %x = lshr <vscale x 2 x i64> %va, %vb
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
 define <vscale x 2 x i32> @vnsrl_wi_i32_nxv2i32_zext(<vscale x 2 x i64> %va) {
 ; CHECK-LABEL: vnsrl_wi_i32_nxv2i32_zext:
 ; CHECK:       # %bb.0:
@@ -284,6 +572,44 @@ define <vscale x 4 x i32> @vnsrl_wx_i32_nxv4i32_zext(<vscale x 4 x i64> %va, i32
   ret <vscale x 4 x i32> %y
 }
 
+define <vscale x 4 x i32> @vnsrl_wx_i16_nxv4i32_zext(<vscale x 4 x i64> %va, i16 %b) {
+; CHECK-LABEL: vnsrl_wx_i16_nxv4i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v12, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vzext.vf4 v16, v12
+; CHECK-NEXT:    vsrl.vv v12, v8, v16
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> poison, <vscale x 4 x i32> zeroinitializer
+  %vb = zext <vscale x 4 x i16> %splat to <vscale x 4 x i64>
+  %x = lshr <vscale x 4 x i64> %va, %vb
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
+define <vscale x 4 x i32> @vnsrl_wx_i8_nxv4i32_zext(<vscale x 4 x i64> %va, i8 %b) {
+; CHECK-LABEL: vnsrl_wx_i8_nxv4i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v12, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
+; CHECK-NEXT:    vsext.vf8 v16, v12
+; CHECK-NEXT:    vsrl.vv v12, v8, v16
+; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v12, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> poison, <vscale x 4 x i32> zeroinitializer
+  %vb = sext <vscale x 4 x i8> %splat to <vscale x 4 x i64>
+  %x = lshr <vscale x 4 x i64> %va, %vb
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
 define <vscale x 4 x i32> @vnsrl_wi_i32_nxv4i32_zext(<vscale x 4 x i64> %va) {
 ; CHECK-LABEL: vnsrl_wi_i32_nxv4i32_zext:
 ; CHECK:       # %bb.0:
@@ -327,6 +653,44 @@ define <vscale x 8 x i32> @vnsrl_wx_i32_nxv8i32_zext(<vscale x 8 x i64> %va, i32
   ret <vscale x 8 x i32> %y
 }
 
+define <vscale x 8 x i32> @vnsrl_wx_i16_nxv8i32_zext(<vscale x 8 x i64> %va, i16 %b) {
+; CHECK-LABEL: vnsrl_wx_i16_nxv8i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmv.v.x v16, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vzext.vf4 v24, v16
+; CHECK-NEXT:    vsrl.vv v16, v8, v24
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+  %vb = zext <vscale x 8 x i16> %splat to <vscale x 8 x i64>
+  %x = lshr <vscale x 8 x i64> %va, %vb
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}
+
+define <vscale x 8 x i32> @vnsrl_wx_i8_nxv8i32_zext(<vscale x 8 x i64> %va, i8 %b) {
+; CHECK-LABEL: vnsrl_wx_i8_nxv8i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v16, a0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT:    vsext.vf8 v24, v16
+; CHECK-NEXT:    vsrl.vv v16, v8, v24
+; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; CHECK-NEXT:    vnsrl.wi v8, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
+  %vb = sext <vscale x 8 x i8> %splat to <vscale x 8 x i64>
+  %x = lshr <vscale x 8 x i64> %va, %vb
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}
+
 define <vscale x 8 x i32> @vnsrl_wi_i32_nxv8i32_zext(<vscale x 8 x i64> %va) {
 ; CHECK-LABEL: vnsrl_wi_i32_nxv8i32_zext:
 ; CHECK:       # %bb.0:


        

