[llvm] 214afc7 - [RISCV] Add patterns for vnsrl.wi and vnsra.wi instructions

Lian Wang via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 17 00:26:58 PDT 2022


Author: Lian Wang
Date: 2022-03-17T07:22:32Z
New Revision: 214afc71161463e2701daf0187b7c3fe8a9f0b70

URL: https://github.com/llvm/llvm-project/commit/214afc71161463e2701daf0187b7c3fe8a9f0b70
DIFF: https://github.com/llvm/llvm-project/commit/214afc71161463e2701daf0187b7c3fe8a9f0b70.diff

LOG: [RISCV] Add patterns for vnsrl.wi and vnsra.wi instructions

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D121675
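
For context, this change lets instruction selection fold a splatted 5-bit immediate shift
amount directly into the narrowing right shift, instead of emitting a vsrl.vi in the wide
type followed by a vnsrl.wx with a zero shift amount (as the updated constant-folding and
ctlz/cttz tests below show). A minimal IR sketch, mirroring the new vnsrl-sdnode.ll tests;
the function name is illustrative and the RUN setup is assumed to match that file:

define <vscale x 1 x i32> @narrow_lshr_by_imm(<vscale x 1 x i64> %va) {
  ; Splat the immediate shift amount in the narrow element type, widen it,
  ; shift the wide source right, then truncate back to the narrow type.
  %head = insertelement <vscale x 1 x i32> poison, i32 15, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
  %vb = zext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
  %x = lshr <vscale x 1 x i64> %va, %vb
  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
  ; With the new SplatPat_uimm5 WI pattern this selects to "vnsrl.wi v8, v8, 15"
  ; under a single e32/mf2 vsetvli; the ashr form selects to vnsra.wi the same way.
  ret <vscale x 1 x i32> %y
}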

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
    llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
    llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 07f83e98471fa..a66fde5b56986 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -648,45 +648,46 @@ multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> {
   }
 }
 
-multiclass VPatBinarySDNodeExt_V_WV<SDNode op, PatFrags extop, string instruction_name> {
+multiclass VPatBinarySDNodeExt_V_WV_WX<SDNode op, PatFrags extop, string instruction_name> {
   foreach vti = AllWidenableIntVectors in {
     def : Pat<
       (vti.Vti.Vector
         (riscv_trunc_vector_vl
           (op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
               (vti.Wti.Vector (extop (vti.Vti.Vector vti.Vti.RegClass:$rs1)))),
-          (riscv_vmset_vl X0),
-          X0)),
+          (riscv_vmset_vl VLOpFrag),
+          VLOpFrag)),
       (!cast<Instruction>(instruction_name#"_WV_"#vti.Vti.LMul.MX)
         vti.Wti.RegClass:$rs2, vti.Vti.RegClass:$rs1,
         vti.Vti.AVL, vti.Vti.Log2SEW)>;
-  }
-}
-
-multiclass VPatBinarySDNodeExt_V_WX<SDNode op, PatFrags extop, string instruction_name> {
-  foreach vti = AllWidenableIntVectors in {
     def : Pat<
       (vti.Vti.Vector
         (riscv_trunc_vector_vl
           (op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
               (vti.Wti.Vector (extop (vti.Vti.Vector (SplatPat GPR:$rs1))))),
-          (riscv_vmset_vl X0),
-          X0)),
+          (riscv_vmset_vl VLOpFrag),
+          VLOpFrag)),
       (!cast<Instruction>(instruction_name#"_WX_"#vti.Vti.LMul.MX)
         vti.Wti.RegClass:$rs2, GPR:$rs1,
         vti.Vti.AVL, vti.Vti.Log2SEW)>;
   }
 }
 
-
-multiclass VPatBinarySDNode_V_WV<SDNode op, string instruction_name> {
-  defm : VPatBinarySDNodeExt_V_WV<op, sext_oneuse, instruction_name>;
-  defm : VPatBinarySDNodeExt_V_WV<op, zext_oneuse, instruction_name>;
-}
-
-multiclass VPatBinarySDNode_V_WX<SDNode op, string instruction_name> {
-  defm : VPatBinarySDNodeExt_V_WX<op, sext_oneuse, instruction_name>;
-  defm : VPatBinarySDNodeExt_V_WX<op, zext_oneuse, instruction_name>;
+multiclass VPatBinarySDNode_V_WV_WX_WI<SDNode op, string instruction_name> {
+  defm : VPatBinarySDNodeExt_V_WV_WX<op, sext_oneuse, instruction_name>;
+  defm : VPatBinarySDNodeExt_V_WV_WX<op, zext_oneuse, instruction_name>;
+  foreach vti = AllWidenableIntVectors in {
+    def : Pat<
+      (vti.Vti.Vector
+        (riscv_trunc_vector_vl
+          (op (vti.Wti.Vector vti.Wti.RegClass:$rs2),
+              (vti.Wti.Vector (SplatPat_uimm5 uimm5:$rs1))),
+          (riscv_vmset_vl VLOpFrag),
+          VLOpFrag)),
+      (!cast<Instruction>(instruction_name#"_WI_"#vti.Vti.LMul.MX)
+        vti.Wti.RegClass:$rs2, uimm5:$rs1,
+        vti.Vti.AVL, vti.Vti.Log2SEW)>;
+  }
 }
 
 multiclass VPatWidenReductionVL<SDNode vop, PatFrags extop, string instruction_name, bit is_float> {
@@ -796,10 +797,8 @@ foreach vti = AllIntegerVectors in {
 }
 
 // 12.7. Vector Narrowing Integer Right Shift Instructions
-defm : VPatBinarySDNode_V_WV<srl, "PseudoVNSRL">;
-defm : VPatBinarySDNode_V_WX<srl, "PseudoVNSRL">;
-defm : VPatBinarySDNode_V_WV<sra, "PseudoVNSRA">;
-defm : VPatBinarySDNode_V_WX<sra, "PseudoVNSRA">;
+defm : VPatBinarySDNode_V_WV_WX_WI<srl, "PseudoVNSRL">;
+defm : VPatBinarySDNode_V_WV_WX_WI<sra, "PseudoVNSRA">;
 
 foreach vtiTowti = AllWidenableIntVectors in {
   defvar vti = vtiTowti.Vti;

diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
index 2c950916f9ce5..a44e59e2accdc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding.ll
@@ -48,10 +48,8 @@ define <2 x i16> @fixedlen(<2 x i32> %x) {
 define <vscale x 2 x i16> @scalable(<vscale x 2 x i32> %x) {
 ; CHECK-LABEL: scalable:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT:    vsrl.vi v8, v8, 16
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT:    vnsrl.wx v8, v8, zero
+; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
+; CHECK-NEXT:    vnsrl.wi v8, v8, 16
 ; CHECK-NEXT:    lui a0, 1048568
 ; CHECK-NEXT:    vand.vx v8, v8, a0
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
index e307e2630f3cd..b08d67c5f5ebb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
@@ -58,9 +58,8 @@ define <vscale x 1 x i8> @ctlz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v9, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v9, v9
-; RV32D-NEXT:    vsrl.vi v9, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v9, zero
+; RV32D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV32D-NEXT:    li a0, 134
@@ -74,9 +73,8 @@ define <vscale x 1 x i8> @ctlz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v9, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v9, v9
-; RV64D-NEXT:    vsrl.vi v9, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v9, zero
+; RV64D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV64D-NEXT:    li a0, 134
@@ -143,9 +141,8 @@ define <vscale x 2 x i8> @ctlz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v9, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v9, v9
-; RV32D-NEXT:    vsrl.vi v9, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v9, zero
+; RV32D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV32D-NEXT:    li a0, 134
@@ -159,9 +156,8 @@ define <vscale x 2 x i8> @ctlz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v9, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v9, v9
-; RV64D-NEXT:    vsrl.vi v9, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v9, zero
+; RV64D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV64D-NEXT:    li a0, 134
@@ -228,9 +224,8 @@ define <vscale x 4 x i8> @ctlz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v10, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v10, v10
-; RV32D-NEXT:    vsrl.vi v10, v10, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v10, zero
+; RV32D-NEXT:    vnsrl.wi v9, v10, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV32D-NEXT:    li a0, 134
@@ -244,9 +239,8 @@ define <vscale x 4 x i8> @ctlz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v10, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v10, v10
-; RV64D-NEXT:    vsrl.vi v10, v10, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v10, zero
+; RV64D-NEXT:    vnsrl.wi v9, v10, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v9, v9, zero
 ; RV64D-NEXT:    li a0, 134
@@ -313,9 +307,8 @@ define <vscale x 8 x i8> @ctlz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v12, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v12, v12
-; RV32D-NEXT:    vsrl.vi v12, v12, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v12, zero
+; RV32D-NEXT:    vnsrl.wi v10, v12, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v9, v10, zero
 ; RV32D-NEXT:    li a0, 134
@@ -329,9 +322,8 @@ define <vscale x 8 x i8> @ctlz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v12, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v12, v12
-; RV64D-NEXT:    vsrl.vi v12, v12, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v12, zero
+; RV64D-NEXT:    vnsrl.wi v10, v12, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v9, v10, zero
 ; RV64D-NEXT:    li a0, 134
@@ -398,9 +390,8 @@ define <vscale x 16 x i8> @ctlz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v16, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v16, v16
-; RV32D-NEXT:    vsrl.vi v16, v16, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v16, zero
+; RV32D-NEXT:    vnsrl.wi v12, v16, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v10, v12, zero
 ; RV32D-NEXT:    li a0, 134
@@ -414,9 +405,8 @@ define <vscale x 16 x i8> @ctlz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v16, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v16, v16
-; RV64D-NEXT:    vsrl.vi v16, v16, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v16, zero
+; RV64D-NEXT:    vnsrl.wi v12, v16, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v10, v12, zero
 ; RV64D-NEXT:    li a0, 134
@@ -558,10 +548,7 @@ define <vscale x 1 x i16> @ctlz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT:    vsrl.vi v9, v9, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v9, zero
+; RV32D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -573,10 +560,7 @@ define <vscale x 1 x i16> @ctlz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT:    vsrl.vi v9, v9, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v9, zero
+; RV64D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -659,10 +643,7 @@ define <vscale x 2 x i16> @ctlz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT:    vsrl.vi v9, v9, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v9, zero
+; RV32D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -674,10 +655,7 @@ define <vscale x 2 x i16> @ctlz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT:    vsrl.vi v9, v9, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v9, zero
+; RV64D-NEXT:    vnsrl.wi v9, v9, 23
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -760,10 +738,7 @@ define <vscale x 4 x i16> @ctlz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT:    vsrl.vi v10, v10, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v9, v10, zero
+; RV32D-NEXT:    vnsrl.wi v9, v10, 23
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v9, v9, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -775,10 +750,7 @@ define <vscale x 4 x i16> @ctlz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT:    vsrl.vi v10, v10, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v9, v10, zero
+; RV64D-NEXT:    vnsrl.wi v9, v10, 23
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v9, v9, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -861,10 +833,7 @@ define <vscale x 8 x i16> @ctlz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT:    vsrl.vi v12, v12, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v12, zero
+; RV32D-NEXT:    vnsrl.wi v10, v12, 23
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v10, v10, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -876,10 +845,7 @@ define <vscale x 8 x i16> @ctlz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT:    vsrl.vi v12, v12, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v12, zero
+; RV64D-NEXT:    vnsrl.wi v10, v12, 23
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v10, v10, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -962,10 +928,7 @@ define <vscale x 16 x i16> @ctlz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; RV32D-NEXT:    vsrl.vi v16, v16, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v16, zero
+; RV32D-NEXT:    vnsrl.wi v12, v16, 23
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v12, v12, a0
 ; RV32D-NEXT:    vmseq.vi v0, v8, 0
@@ -977,10 +940,7 @@ define <vscale x 16 x i16> @ctlz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; RV64D-NEXT:    vsrl.vi v16, v16, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v16, zero
+; RV64D-NEXT:    vnsrl.wi v12, v16, 23
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v12, v12, a0
 ; RV64D-NEXT:    vmseq.vi v0, v8, 0
@@ -2046,9 +2006,8 @@ define <vscale x 1 x i8> @ctlz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v9, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 134
@@ -2060,9 +2019,8 @@ define <vscale x 1 x i8> @ctlz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v9, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 134
@@ -2126,9 +2084,8 @@ define <vscale x 2 x i8> @ctlz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v9, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 134
@@ -2140,9 +2097,8 @@ define <vscale x 2 x i8> @ctlz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v9, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 134
@@ -2206,9 +2162,8 @@ define <vscale x 4 x i8> @ctlz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v10, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v10
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v8, zero
+; RV32D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV32D-NEXT:    li a0, 134
@@ -2220,9 +2175,8 @@ define <vscale x 4 x i8> @ctlz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v10, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v10
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v8, zero
+; RV64D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV64D-NEXT:    li a0, 134
@@ -2286,9 +2240,8 @@ define <vscale x 8 x i8> @ctlz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v12, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v12
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v8, zero
+; RV32D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV32D-NEXT:    li a0, 134
@@ -2300,9 +2253,8 @@ define <vscale x 8 x i8> @ctlz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v12, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v12
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v8, zero
+; RV64D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV64D-NEXT:    li a0, 134
@@ -2366,9 +2318,8 @@ define <vscale x 16 x i8> @ctlz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v16, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v16
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v16, v8, zero
+; RV32D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV32D-NEXT:    li a0, 134
@@ -2380,9 +2331,8 @@ define <vscale x 16 x i8> @ctlz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v16, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v16
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v16, v8, zero
+; RV64D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV64D-NEXT:    li a0, 134
@@ -2519,10 +2469,7 @@ define <vscale x 1 x i16> @ctlz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v9, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2531,10 +2478,7 @@ define <vscale x 1 x i16> @ctlz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v9, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2613,10 +2557,7 @@ define <vscale x 2 x i16> @ctlz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v9, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV32D-NEXT:    li a0, 142
 ; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2625,10 +2566,7 @@ define <vscale x 2 x i16> @ctlz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v9, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV64D-NEXT:    li a0, 142
 ; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2707,24 +2645,18 @@ define <vscale x 4 x i16> @ctlz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v10, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v10, 23
 ; RV32D-NEXT:    li a0, 142
-; RV32D-NEXT:    vrsub.vx v8, v10, a0
+; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
 ; RV64D-LABEL: ctlz_zero_undef_nxv4i16:
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v10, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v10, 23
 ; RV64D-NEXT:    li a0, 142
-; RV64D-NEXT:    vrsub.vx v8, v10, a0
+; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 4 x i16> @llvm.ctlz.nxv4i16(<vscale x 4 x i16> %va, i1 true)
   ret <vscale x 4 x i16> %a
@@ -2801,24 +2733,18 @@ define <vscale x 8 x i16> @ctlz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v12, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v12, 23
 ; RV32D-NEXT:    li a0, 142
-; RV32D-NEXT:    vrsub.vx v8, v12, a0
+; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
 ; RV64D-LABEL: ctlz_zero_undef_nxv8i16:
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v12, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v12, 23
 ; RV64D-NEXT:    li a0, 142
-; RV64D-NEXT:    vrsub.vx v8, v12, a0
+; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 8 x i16> @llvm.ctlz.nxv8i16(<vscale x 8 x i16> %va, i1 true)
   ret <vscale x 8 x i16> %a
@@ -2895,24 +2821,18 @@ define <vscale x 16 x i16> @ctlz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32D:       # %bb.0:
 ; RV32D-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; RV32D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v16, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v16, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v16, 23
 ; RV32D-NEXT:    li a0, 142
-; RV32D-NEXT:    vrsub.vx v8, v16, a0
+; RV32D-NEXT:    vrsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
 ; RV64D-LABEL: ctlz_zero_undef_nxv16i16:
 ; RV64D:       # %bb.0:
 ; RV64D-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
 ; RV64D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v16, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v16, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v16, 23
 ; RV64D-NEXT:    li a0, 142
-; RV64D-NEXT:    vrsub.vx v8, v16, a0
+; RV64D-NEXT:    vrsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 16 x i16> @llvm.ctlz.nxv16i16(<vscale x 16 x i16> %va, i1 true)
   ret <vscale x 16 x i16> %a

diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
index 73e8585fe3431..d33ad60e09618 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll
@@ -57,9 +57,8 @@ define <vscale x 1 x i8> @cttz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v9, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 127
@@ -77,9 +76,8 @@ define <vscale x 1 x i8> @cttz_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v9, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 127
@@ -144,9 +142,8 @@ define <vscale x 2 x i8> @cttz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v9, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 127
@@ -164,9 +161,8 @@ define <vscale x 2 x i8> @cttz_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v9, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 127
@@ -231,9 +227,8 @@ define <vscale x 4 x i8> @cttz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v10, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v10
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v8, zero
+; RV32D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV32D-NEXT:    li a0, 127
@@ -251,9 +246,8 @@ define <vscale x 4 x i8> @cttz_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v10, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v10
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v8, zero
+; RV64D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV64D-NEXT:    li a0, 127
@@ -318,9 +312,8 @@ define <vscale x 8 x i8> @cttz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v12, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v12
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v8, zero
+; RV32D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV32D-NEXT:    li a0, 127
@@ -338,9 +331,8 @@ define <vscale x 8 x i8> @cttz_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v12, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v12
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v8, zero
+; RV64D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV64D-NEXT:    li a0, 127
@@ -405,9 +397,8 @@ define <vscale x 16 x i8> @cttz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v16, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v16
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v16, v8, zero
+; RV32D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV32D-NEXT:    li a0, 127
@@ -425,9 +416,8 @@ define <vscale x 16 x i8> @cttz_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v16, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v16
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v16, v8, zero
+; RV64D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV64D-NEXT:    li a0, 127
@@ -556,10 +546,7 @@ define <vscale x 1 x i16> @cttz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v9
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v9, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    li a0, 16
@@ -574,10 +561,7 @@ define <vscale x 1 x i16> @cttz_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v9
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v9, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    li a0, 16
@@ -653,10 +637,7 @@ define <vscale x 2 x i16> @cttz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v9
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v9, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    li a0, 16
@@ -671,10 +652,7 @@ define <vscale x 2 x i16> @cttz_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v9
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v9, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    li a0, 16
@@ -750,12 +728,9 @@ define <vscale x 4 x i16> @cttz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v9
 ; RV32D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v10, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v10, 23
 ; RV32D-NEXT:    li a0, 127
-; RV32D-NEXT:    vsub.vx v8, v10, a0
+; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    li a0, 16
 ; RV32D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV32D-NEXT:    ret
@@ -768,12 +743,9 @@ define <vscale x 4 x i16> @cttz_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v9
 ; RV64D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v10, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v10, 23
 ; RV64D-NEXT:    li a0, 127
-; RV64D-NEXT:    vsub.vx v8, v10, a0
+; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    li a0, 16
 ; RV64D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64D-NEXT:    ret
@@ -847,12 +819,9 @@ define <vscale x 8 x i16> @cttz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v10, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v10
 ; RV32D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v12, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v12, 23
 ; RV32D-NEXT:    li a0, 127
-; RV32D-NEXT:    vsub.vx v8, v12, a0
+; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    li a0, 16
 ; RV32D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV32D-NEXT:    ret
@@ -865,12 +834,9 @@ define <vscale x 8 x i16> @cttz_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v10, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v10
 ; RV64D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v12, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v12, 23
 ; RV64D-NEXT:    li a0, 127
-; RV64D-NEXT:    vsub.vx v8, v12, a0
+; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    li a0, 16
 ; RV64D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64D-NEXT:    ret
@@ -944,12 +910,9 @@ define <vscale x 16 x i16> @cttz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v12, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v12
 ; RV32D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v16, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v16, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v16, 23
 ; RV32D-NEXT:    li a0, 127
-; RV32D-NEXT:    vsub.vx v8, v16, a0
+; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    li a0, 16
 ; RV32D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV32D-NEXT:    ret
@@ -962,12 +925,9 @@ define <vscale x 16 x i16> @cttz_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v12, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v12
 ; RV64D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v16, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v16, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v16, 23
 ; RV64D-NEXT:    li a0, 127
-; RV64D-NEXT:    vsub.vx v8, v16, a0
+; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    li a0, 16
 ; RV64D-NEXT:    vmerge.vxm v8, v8, a0, v0
 ; RV64D-NEXT:    ret
@@ -1887,9 +1847,8 @@ define <vscale x 1 x i8> @cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v9, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 127
@@ -1904,9 +1863,8 @@ define <vscale x 1 x i8> @cttz_zero_undef_nxv1i8(<vscale x 1 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v9, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 127
@@ -1967,9 +1925,8 @@ define <vscale x 2 x i8> @cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v9, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV32D-NEXT:    li a0, 127
@@ -1984,9 +1941,8 @@ define <vscale x 2 x i8> @cttz_zero_undef_nxv2i8(<vscale x 2 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v9, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v9
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v8, zero
 ; RV64D-NEXT:    li a0, 127
@@ -2047,9 +2003,8 @@ define <vscale x 4 x i8> @cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v10, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v10
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v8, zero
+; RV32D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV32D-NEXT:    li a0, 127
@@ -2064,9 +2019,8 @@ define <vscale x 4 x i8> @cttz_zero_undef_nxv4i8(<vscale x 4 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v10, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v10
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v8, zero
+; RV64D-NEXT:    vnsrl.wi v10, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v10, zero
 ; RV64D-NEXT:    li a0, 127
@@ -2127,9 +2081,8 @@ define <vscale x 8 x i8> @cttz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v12, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v12
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v8, zero
+; RV32D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV32D-NEXT:    li a0, 127
@@ -2144,9 +2097,8 @@ define <vscale x 8 x i8> @cttz_zero_undef_nxv8i8(<vscale x 8 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v12, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v12
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v8, zero
+; RV64D-NEXT:    vnsrl.wi v12, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v12, zero
 ; RV64D-NEXT:    li a0, 127
@@ -2207,9 +2159,8 @@ define <vscale x 16 x i8> @cttz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV32D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; RV32D-NEXT:    vzext.vf4 v16, v8
 ; RV32D-NEXT:    vfcvt.f.xu.v v8, v16
-; RV32D-NEXT:    vsrl.vi v8, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v16, v8, zero
+; RV32D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV32D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV32D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV32D-NEXT:    li a0, 127
@@ -2224,9 +2175,8 @@ define <vscale x 16 x i8> @cttz_zero_undef_nxv16i8(<vscale x 16 x i8> %va) {
 ; RV64D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
 ; RV64D-NEXT:    vzext.vf4 v16, v8
 ; RV64D-NEXT:    vfcvt.f.xu.v v8, v16
-; RV64D-NEXT:    vsrl.vi v8, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v16, v8, zero
+; RV64D-NEXT:    vnsrl.wi v16, v8, 23
 ; RV64D-NEXT:    vsetvli zero, zero, e8, m2, ta, mu
 ; RV64D-NEXT:    vnsrl.wx v8, v16, zero
 ; RV64D-NEXT:    li a0, 127
@@ -2349,10 +2299,7 @@ define <vscale x 1 x i16> @cttz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v9
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v9, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2363,10 +2310,7 @@ define <vscale x 1 x i16> @cttz_zero_undef_nxv1i16(<vscale x 1 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v9
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, mf2, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v9, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2437,10 +2381,7 @@ define <vscale x 2 x i16> @cttz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v9
 ; RV32D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v9, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v8, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV32D-NEXT:    li a0, 127
 ; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
@@ -2451,10 +2392,7 @@ define <vscale x 2 x i16> @cttz_zero_undef_nxv2i16(<vscale x 2 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v9
 ; RV64D-NEXT:    vfwcvt.f.xu.v v9, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v9, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v8, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v9, 23
 ; RV64D-NEXT:    li a0, 127
 ; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
@@ -2525,12 +2463,9 @@ define <vscale x 4 x i16> @cttz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v9, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v9
 ; RV32D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v10, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV32D-NEXT:    vnsrl.wx v10, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v10, 23
 ; RV32D-NEXT:    li a0, 127
-; RV32D-NEXT:    vsub.vx v8, v10, a0
+; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
 ; RV64D-LABEL: cttz_zero_undef_nxv4i16:
@@ -2539,12 +2474,9 @@ define <vscale x 4 x i16> @cttz_zero_undef_nxv4i16(<vscale x 4 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v9, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v9
 ; RV64D-NEXT:    vfwcvt.f.xu.v v10, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m2, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v10, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
-; RV64D-NEXT:    vnsrl.wx v10, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v10, 23
 ; RV64D-NEXT:    li a0, 127
-; RV64D-NEXT:    vsub.vx v8, v10, a0
+; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 4 x i16> @llvm.cttz.nxv4i16(<vscale x 4 x i16> %va, i1 true)
   ret <vscale x 4 x i16> %a
@@ -2613,12 +2545,9 @@ define <vscale x 8 x i16> @cttz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v10, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v10
 ; RV32D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v12, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV32D-NEXT:    vnsrl.wx v12, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v12, 23
 ; RV32D-NEXT:    li a0, 127
-; RV32D-NEXT:    vsub.vx v8, v12, a0
+; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
 ; RV64D-LABEL: cttz_zero_undef_nxv8i16:
@@ -2627,12 +2556,9 @@ define <vscale x 8 x i16> @cttz_zero_undef_nxv8i16(<vscale x 8 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v10, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v10
 ; RV64D-NEXT:    vfwcvt.f.xu.v v12, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v12, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
-; RV64D-NEXT:    vnsrl.wx v12, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v12, 23
 ; RV64D-NEXT:    li a0, 127
-; RV64D-NEXT:    vsub.vx v8, v12, a0
+; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 8 x i16> @llvm.cttz.nxv8i16(<vscale x 8 x i16> %va, i1 true)
   ret <vscale x 8 x i16> %a
@@ -2701,12 +2627,9 @@ define <vscale x 16 x i16> @cttz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV32D-NEXT:    vrsub.vi v12, v8, 0
 ; RV32D-NEXT:    vand.vv v8, v8, v12
 ; RV32D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV32D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; RV32D-NEXT:    vsrl.vi v8, v16, 23
-; RV32D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV32D-NEXT:    vnsrl.wx v16, v8, zero
+; RV32D-NEXT:    vnsrl.wi v8, v16, 23
 ; RV32D-NEXT:    li a0, 127
-; RV32D-NEXT:    vsub.vx v8, v16, a0
+; RV32D-NEXT:    vsub.vx v8, v8, a0
 ; RV32D-NEXT:    ret
 ;
 ; RV64D-LABEL: cttz_zero_undef_nxv16i16:
@@ -2715,12 +2638,9 @@ define <vscale x 16 x i16> @cttz_zero_undef_nxv16i16(<vscale x 16 x i16> %va) {
 ; RV64D-NEXT:    vrsub.vi v12, v8, 0
 ; RV64D-NEXT:    vand.vv v8, v8, v12
 ; RV64D-NEXT:    vfwcvt.f.xu.v v16, v8
-; RV64D-NEXT:    vsetvli zero, zero, e32, m8, ta, mu
-; RV64D-NEXT:    vsrl.vi v8, v16, 23
-; RV64D-NEXT:    vsetvli zero, zero, e16, m4, ta, mu
-; RV64D-NEXT:    vnsrl.wx v16, v8, zero
+; RV64D-NEXT:    vnsrl.wi v8, v16, 23
 ; RV64D-NEXT:    li a0, 127
-; RV64D-NEXT:    vsub.vx v8, v16, a0
+; RV64D-NEXT:    vsub.vx v8, v8, a0
 ; RV64D-NEXT:    ret
   %a = call <vscale x 16 x i16> @llvm.cttz.nxv16i16(<vscale x 16 x i16> %va, i1 true)
   ret <vscale x 16 x i16> %a

diff --git a/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
index 7aea161155239..47d04a80573b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsra-sdnode.ll
@@ -30,6 +30,20 @@ define <vscale x 1 x i32> @vnsra_wx_i32_nxv1i32_sext(<vscale x 1 x i64> %va, i32
   ret <vscale x 1 x i32> %y
 }
 
+define <vscale x 1 x i32> @vnsra_wi_i32_nxv1i32_sext(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vnsra_wi_i32_nxv1i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vnsra.wi v8, v8, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = sext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
+  %x = ashr <vscale x 1 x i64> %va, %vb
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
 define <vscale x 2 x i32> @vnsra_wv_nxv2i32_sext(<vscale x 2 x i64> %va, <vscale x 2 x i32> %vb) {
 ; CHECK-LABEL: vnsra_wv_nxv2i32_sext:
 ; CHECK:       # %bb.0:
@@ -58,6 +72,21 @@ define <vscale x 2 x i32> @vnsra_wx_i32_nxv2i32_sext(<vscale x 2 x i64> %va, i32
   ret <vscale x 2 x i32> %y
 }
 
+define <vscale x 2 x i32> @vnsra_wi_i32_nxv2i32_sext(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vnsra_wi_i32_nxv2i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vnsra.wi v10, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  %vb = sext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %x = ashr <vscale x 2 x i64> %va, %vb
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
 define <vscale x 4 x i32> @vnsra_wv_nxv4i32_sext(<vscale x 4 x i64> %va, <vscale x 4 x i32> %vb) {
 ; CHECK-LABEL: vnsra_wv_nxv4i32_sext:
 ; CHECK:       # %bb.0:
@@ -86,6 +115,21 @@ define <vscale x 4 x i32> @vnsra_wx_i32_nxv4i32_sext(<vscale x 4 x i64> %va, i32
   ret <vscale x 4 x i32> %y
 }
 
+define <vscale x 4 x i32> @vnsra_wi_i32_nxv4i32_sext(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vnsra_wi_i32_nxv4i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vnsra.wi v12, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %vb = sext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
+  %x = ashr <vscale x 4 x i64> %va, %vb
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
 define <vscale x 8 x i32> @vnsra_wv_nxv8i32_sext(<vscale x 8 x i64> %va, <vscale x 8 x i32> %vb) {
 ; CHECK-LABEL: vnsra_wv_nxv8i32_sext:
 ; CHECK:       # %bb.0:
@@ -114,6 +158,21 @@ define <vscale x 8 x i32> @vnsra_wx_i32_nxv8i32_sext(<vscale x 8 x i64> %va, i32
   ret <vscale x 8 x i32> %y
 }
 
+define <vscale x 8 x i32> @vnsra_wi_i32_nxv8i32_sext(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vnsra_wi_i32_nxv8i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vnsra.wi v16, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %vb = sext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
+  %x = ashr <vscale x 8 x i64> %va, %vb
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}
+
 define <vscale x 1 x i32> @vnsra_wv_nxv1i32_zext(<vscale x 1 x i64> %va, <vscale x 1 x i32> %vb) {
 ; CHECK-LABEL: vnsra_wv_nxv1i32_zext:
 ; CHECK:       # %bb.0:
@@ -140,6 +199,20 @@ define <vscale x 1 x i32> @vnsra_wx_i32_nxv1i32_zext(<vscale x 1 x i64> %va, i32
   ret <vscale x 1 x i32> %y
 }
 
+define <vscale x 1 x i32> @vnsra_wi_i32_nxv1i32_zext(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vnsra_wi_i32_nxv1i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vnsra.wi v8, v8, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = zext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
+  %x = ashr <vscale x 1 x i64> %va, %vb
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
 define <vscale x 2 x i32> @vnsra_wv_nxv2i32_zext(<vscale x 2 x i64> %va, <vscale x 2 x i32> %vb) {
 ; CHECK-LABEL: vnsra_wv_nxv2i32_zext:
 ; CHECK:       # %bb.0:
@@ -168,6 +241,21 @@ define <vscale x 2 x i32> @vnsra_wx_i32_nxv2i32_zext(<vscale x 2 x i64> %va, i32
   ret <vscale x 2 x i32> %y
 }
 
+define <vscale x 2 x i32> @vnsra_wi_i32_nxv2i32_zext(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vnsra_wi_i32_nxv2i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vnsra.wi v10, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  %vb = zext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %x = ashr <vscale x 2 x i64> %va, %vb
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
 define <vscale x 4 x i32> @vnsra_wv_nxv4i32_zext(<vscale x 4 x i64> %va, <vscale x 4 x i32> %vb) {
 ; CHECK-LABEL: vnsra_wv_nxv4i32_zext:
 ; CHECK:       # %bb.0:
@@ -196,6 +284,21 @@ define <vscale x 4 x i32> @vnsra_wx_i32_nxv4i32_zext(<vscale x 4 x i64> %va, i32
   ret <vscale x 4 x i32> %y
 }
 
+define <vscale x 4 x i32> @vnsra_wi_i32_nxv4i32_zext(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vnsra_wi_i32_nxv4i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vnsra.wi v12, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %vb = zext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
+  %x = ashr <vscale x 4 x i64> %va, %vb
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
 define <vscale x 8 x i32> @vnsra_wv_nxv8i32_zext(<vscale x 8 x i64> %va, <vscale x 8 x i32> %vb) {
 ; CHECK-LABEL: vnsra_wv_nxv8i32_zext:
 ; CHECK:       # %bb.0:
@@ -223,3 +326,18 @@ define <vscale x 8 x i32> @vnsra_wx_i32_nxv8i32_zext(<vscale x 8 x i64> %va, i32
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %y
 }
+
+define <vscale x 8 x i32> @vnsra_wi_i32_nxv8i32_zext(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vnsra_wi_i32_nxv8i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vnsra.wi v16, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %vb = zext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
+  %x = ashr <vscale x 8 x i64> %va, %vb
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
index 4016d223c4407..70141889e10ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnsrl-sdnode.ll
@@ -30,6 +30,20 @@ define <vscale x 1 x i32> @vnsrl_wx_i32_nxv1i32_sext(<vscale x 1 x i64> %va, i32
   ret <vscale x 1 x i32> %y
 }
 
+define <vscale x 1 x i32> @vnsrl_wi_i32_nxv1i32_sext(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vnsrl_wi_i32_nxv1i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vnsrl.wi v8, v8, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = sext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
+  %x = lshr <vscale x 1 x i64> %va, %vb
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
 define <vscale x 2 x i32> @vnsrl_wv_nxv2i32_sext(<vscale x 2 x i64> %va, <vscale x 2 x i32> %vb) {
 ; CHECK-LABEL: vnsrl_wv_nxv2i32_sext:
 ; CHECK:       # %bb.0:
@@ -58,6 +72,21 @@ define <vscale x 2 x i32> @vnsrl_wx_i32_nxv2i32_sext(<vscale x 2 x i64> %va, i32
   ret <vscale x 2 x i32> %y
 }
 
+define <vscale x 2 x i32> @vnsrl_wi_i32_nxv2i32_sext(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vnsrl_wi_i32_nxv2i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vnsrl.wi v10, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  %vb = sext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %x = lshr <vscale x 2 x i64> %va, %vb
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
 define <vscale x 4 x i32> @vnsrl_wv_nxv4i32_sext(<vscale x 4 x i64> %va, <vscale x 4 x i32> %vb) {
 ; CHECK-LABEL: vnsrl_wv_nxv4i32_sext:
 ; CHECK:       # %bb.0:
@@ -86,6 +115,21 @@ define <vscale x 4 x i32> @vnsrl_wx_i32_nxv4i32_sext(<vscale x 4 x i64> %va, i32
   ret <vscale x 4 x i32> %y
 }
 
+define <vscale x 4 x i32> @vnsrl_wi_i32_nxv4i32_sext(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vnsrl_wi_i32_nxv4i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vnsrl.wi v12, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %vb = sext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
+  %x = lshr <vscale x 4 x i64> %va, %vb
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
 define <vscale x 8 x i32> @vnsrl_wv_nxv8i32_sext(<vscale x 8 x i64> %va, <vscale x 8 x i32> %vb) {
 ; CHECK-LABEL: vnsrl_wv_nxv8i32_sext:
 ; CHECK:       # %bb.0:
@@ -114,6 +158,21 @@ define <vscale x 8 x i32> @vnsrl_wx_i32_nxv8i32_sext(<vscale x 8 x i64> %va, i32
   ret <vscale x 8 x i32> %y
 }
 
+define <vscale x 8 x i32> @vnsrl_wi_i32_nxv8i32_sext(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vnsrl_wi_i32_nxv8i32_sext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vnsrl.wi v16, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %vb = sext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
+  %x = lshr <vscale x 8 x i64> %va, %vb
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}
+
 define <vscale x 1 x i32> @vnsrl_wv_nxv1i32_zext(<vscale x 1 x i64> %va, <vscale x 1 x i32> %vb) {
 ; CHECK-LABEL: vnsrl_wv_nxv1i32_zext:
 ; CHECK:       # %bb.0:
@@ -140,6 +199,20 @@ define <vscale x 1 x i32> @vnsrl_wx_i32_nxv1i32_zext(<vscale x 1 x i64> %va, i32
   ret <vscale x 1 x i32> %y
 }
 
+define <vscale x 1 x i32> @vnsrl_wi_i32_nxv1i32_zext(<vscale x 1 x i64> %va) {
+; CHECK-LABEL: vnsrl_wi_i32_nxv1i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
+; CHECK-NEXT:    vnsrl.wi v8, v8, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 1 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer
+  %vb = zext <vscale x 1 x i32> %splat to <vscale x 1 x i64>
+  %x = lshr <vscale x 1 x i64> %va, %vb
+  %y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %y
+}
+
 define <vscale x 2 x i32> @vnsrl_wv_nxv2i32_zext(<vscale x 2 x i64> %va, <vscale x 2 x i32> %vb) {
 ; CHECK-LABEL: vnsrl_wv_nxv2i32_zext:
 ; CHECK:       # %bb.0:
@@ -168,6 +241,21 @@ define <vscale x 2 x i32> @vnsrl_wx_i32_nxv2i32_zext(<vscale x 2 x i64> %va, i32
   ret <vscale x 2 x i32> %y
 }
 
+define <vscale x 2 x i32> @vnsrl_wi_i32_nxv2i32_zext(<vscale x 2 x i64> %va) {
+; CHECK-LABEL: vnsrl_wi_i32_nxv2i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vnsrl.wi v10, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 2 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
+  %vb = zext <vscale x 2 x i32> %splat to <vscale x 2 x i64>
+  %x = lshr <vscale x 2 x i64> %va, %vb
+  %y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %y
+}
+
 define <vscale x 4 x i32> @vnsrl_wv_nxv4i32_zext(<vscale x 4 x i64> %va, <vscale x 4 x i32> %vb) {
 ; CHECK-LABEL: vnsrl_wv_nxv4i32_zext:
 ; CHECK:       # %bb.0:
@@ -196,6 +284,21 @@ define <vscale x 4 x i32> @vnsrl_wx_i32_nxv4i32_zext(<vscale x 4 x i64> %va, i32
   ret <vscale x 4 x i32> %y
 }
 
+define <vscale x 4 x i32> @vnsrl_wi_i32_nxv4i32_zext(<vscale x 4 x i64> %va) {
+; CHECK-LABEL: vnsrl_wi_i32_nxv4i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vnsrl.wi v12, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 4 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %vb = zext <vscale x 4 x i32> %splat to <vscale x 4 x i64>
+  %x = lshr <vscale x 4 x i64> %va, %vb
+  %y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %y
+}
+
 define <vscale x 8 x i32> @vnsrl_wv_nxv8i32_zext(<vscale x 8 x i64> %va, <vscale x 8 x i32> %vb) {
 ; CHECK-LABEL: vnsrl_wv_nxv8i32_zext:
 ; CHECK:       # %bb.0:
@@ -223,3 +326,18 @@ define <vscale x 8 x i32> @vnsrl_wx_i32_nxv8i32_zext(<vscale x 8 x i64> %va, i32
   %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
   ret <vscale x 8 x i32> %y
 }
+
+define <vscale x 8 x i32> @vnsrl_wi_i32_nxv8i32_zext(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: vnsrl_wi_i32_nxv8i32_zext:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
+; CHECK-NEXT:    vnsrl.wi v16, v8, 15
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> poison, i32 15, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> poison, <vscale x 8 x i32> zeroinitializer
+  %vb = zext <vscale x 8 x i32> %splat to <vscale x 8 x i64>
+  %x = lshr <vscale x 8 x i64> %va, %vb
+  %y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %y
+}

More information about the llvm-commits mailing list